path: root/test/Transforms/SLPVectorizer/X86
author     Dimitry Andric <dim@FreeBSD.org>  2017-04-16 16:01:22 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2017-04-16 16:01:22 +0000
commit     71d5a2540a98c81f5bcaeb48805e0e2881f530ef (patch)
tree       5343938942df402b49ec7300a1c25a2d4ccd5821 /test/Transforms/SLPVectorizer/X86
parent     31bbf64f3a4974a2d6c8b3b27ad2f519caf74057 (diff)
Diffstat (limited to 'test/Transforms/SLPVectorizer/X86')
-rw-r--r--  test/Transforms/SLPVectorizer/X86/bitreverse.ll                420
-rw-r--r--  test/Transforms/SLPVectorizer/X86/blending-shuffle.ll          167
-rw-r--r--  test/Transforms/SLPVectorizer/X86/extractelement.ll             61
-rw-r--r--  test/Transforms/SLPVectorizer/X86/horizontal-list.ll          1615
-rw-r--r--  test/Transforms/SLPVectorizer/X86/horizontal.ll                123
-rw-r--r--  test/Transforms/SLPVectorizer/X86/reduction_loads.ll            27
-rw-r--r--  test/Transforms/SLPVectorizer/X86/scheduling.ll                  6
-rw-r--r--  test/Transforms/SLPVectorizer/X86/store-jumbled.ll              68
-rw-r--r--  test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll   984
-rw-r--r--  test/Transforms/SLPVectorizer/X86/vector.ll                     41
10 files changed, 3023 insertions, 489 deletions
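
Note on the check prefixes used throughout these tests: each RUN line pipes the file through opt with a different target configuration and names a FileCheck prefix (SSE, AVX, AVX1, AVX2, XOP), while a shared CHECK prefix matches when every configuration produces identical output. Much of this diff consists of per-target blocks collapsing into a single CHECK block because the SLP vectorizer now emits the same vector IR for all of them. As a minimal sketch of the RUN-line convention — the exact -mattr sets here are an assumption, not copied from the file — a test of this shape starts with:

  ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
  ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX
  ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -mattr=+xop  | FileCheck %s --check-prefixes=CHECK,XOP

The assertion bodies themselves are regenerated mechanically by utils/update_test_checks.py, as the NOTE lines in the new files below state.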
diff --git a/test/Transforms/SLPVectorizer/X86/bitreverse.ll b/test/Transforms/SLPVectorizer/X86/bitreverse.ll
index c6d65bbe6840..749e93b04134 100644
--- a/test/Transforms/SLPVectorizer/X86/bitreverse.ll
+++ b/test/Transforms/SLPVectorizer/X86/bitreverse.ll
@@ -22,29 +22,11 @@ declare i16 @llvm.bitreverse.i16(i16)
declare i8 @llvm.bitreverse.i8(i8)
define void @bitreverse_2i64() #0 {
-; SSE-LABEL: @bitreverse_2i64(
-; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
-; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; SSE-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
-; SSE-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_2i64(
-; AVX-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
-; AVX-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
-; AVX-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; AVX-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; AVX-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
-; AVX-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_2i64(
-; XOP-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 8
-; XOP-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP1]])
-; XOP-NEXT: store <2 x i64> [[TMP2]], <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 8
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_2i64(
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP1]])
+; CHECK-NEXT: store <2 x i64> [[TMP2]], <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 8
+; CHECK-NEXT: ret void
;
%ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
%ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
@@ -57,40 +39,19 @@ define void @bitreverse_2i64() #0 {
define void @bitreverse_4i64() #0 {
; SSE-LABEL: @bitreverse_4i64(
-; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
-; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
-; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
-; SSE-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD3]])
-; SSE-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 0), align 4
-; SSE-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 1), align 4
-; SSE-NEXT: store i64 [[BITREVERSE2]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2), align 4
-; SSE-NEXT: store i64 [[BITREVERSE3]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 3), align 4
+; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2) to <2 x i64>*), align 4
+; SSE-NEXT: [[TMP3:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP1]])
+; SSE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP2]])
+; SSE-NEXT: store <2 x i64> [[TMP3]], <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 4
+; SSE-NEXT: store <2 x i64> [[TMP4]], <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2) to <2 x i64>*), align 4
; SSE-NEXT: ret void
;
-; AVX1-LABEL: @bitreverse_4i64(
-; AVX1-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
-; AVX1-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
-; AVX1-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
-; AVX1-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 3), align 4
-; AVX1-NEXT: [[BITREVERSE0:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD0]])
-; AVX1-NEXT: [[BITREVERSE1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD1]])
-; AVX1-NEXT: [[BITREVERSE2:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD2]])
-; AVX1-NEXT: [[BITREVERSE3:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[LD3]])
-; AVX1-NEXT: store i64 [[BITREVERSE0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 0), align 4
-; AVX1-NEXT: store i64 [[BITREVERSE1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 1), align 4
-; AVX1-NEXT: store i64 [[BITREVERSE2]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 2), align 4
-; AVX1-NEXT: store i64 [[BITREVERSE3]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i64 0, i64 3), align 4
-; AVX1-NEXT: ret void
-;
-; AVX2-LABEL: @bitreverse_4i64(
-; AVX2-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*), align 4
-; AVX2-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> [[TMP1]])
-; AVX2-NEXT: store <4 x i64> [[TMP2]], <4 x i64>* bitcast ([4 x i64]* @dst64 to <4 x i64>*), align 4
-; AVX2-NEXT: ret void
+; AVX-LABEL: @bitreverse_4i64(
+; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*), align 4
+; AVX-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> [[TMP1]])
+; AVX-NEXT: store <4 x i64> [[TMP2]], <4 x i64>* bitcast ([4 x i64]* @dst64 to <4 x i64>*), align 4
+; AVX-NEXT: ret void
;
; XOP-LABEL: @bitreverse_4i64(
; XOP-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*), align 4
@@ -114,32 +75,11 @@ define void @bitreverse_4i64() #0 {
}
define void @bitreverse_4i32() #0 {
-; SSE-LABEL: @bitreverse_4i32(
-; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
-; SSE-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
-; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 4
-; SSE-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 4
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD3]])
-; SSE-NEXT: store i32 [[BITREVERSE0]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 0), align 4
-; SSE-NEXT: store i32 [[BITREVERSE1]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 1), align 4
-; SSE-NEXT: store i32 [[BITREVERSE2]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 2), align 4
-; SSE-NEXT: store i32 [[BITREVERSE3]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 3), align 4
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_4i32(
-; AVX-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 4
-; AVX-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
-; AVX-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 4
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_4i32(
-; XOP-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 4
-; XOP-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
-; XOP-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 4
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_4i32(
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 4
+; CHECK-NEXT: ret void
;
%ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
%ld1 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 4
@@ -158,30 +98,12 @@ define void @bitreverse_4i32() #0 {
define void @bitreverse_8i32() #0 {
; SSE-LABEL: @bitreverse_8i32(
-; SSE-NEXT: [[LD0:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
-; SSE-NEXT: [[LD1:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 1), align 2
-; SSE-NEXT: [[LD2:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 2), align 2
-; SSE-NEXT: [[LD3:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 3), align 2
-; SSE-NEXT: [[LD4:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4), align 2
-; SSE-NEXT: [[LD5:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 5), align 2
-; SSE-NEXT: [[LD6:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 6), align 2
-; SSE-NEXT: [[LD7:%.*]] = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[LD7]])
-; SSE-NEXT: store i32 [[BITREVERSE0]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 0), align 2
-; SSE-NEXT: store i32 [[BITREVERSE1]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 1), align 2
-; SSE-NEXT: store i32 [[BITREVERSE2]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 2), align 2
-; SSE-NEXT: store i32 [[BITREVERSE3]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 3), align 2
-; SSE-NEXT: store i32 [[BITREVERSE4]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 4), align 2
-; SSE-NEXT: store i32 [[BITREVERSE5]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 5), align 2
-; SSE-NEXT: store i32 [[BITREVERSE6]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 6), align 2
-; SSE-NEXT: store i32 [[BITREVERSE7]], i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 7), align 2
+; SSE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([8 x i32]* @src32 to <4 x i32>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 4) to <4 x i32>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP1]])
+; SSE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> [[TMP2]])
+; SSE-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* bitcast ([8 x i32]* @dst32 to <4 x i32>*), align 2
+; SSE-NEXT: store <4 x i32> [[TMP4]], <4 x i32>* bitcast (i32* getelementptr inbounds ([8 x i32], [8 x i32]* @dst32, i32 0, i64 4) to <4 x i32>*), align 2
; SSE-NEXT: ret void
;
; AVX-LABEL: @bitreverse_8i32(
@@ -224,44 +146,11 @@ define void @bitreverse_8i32() #0 {
}
define void @bitreverse_8i16() #0 {
-; SSE-LABEL: @bitreverse_8i16(
-; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align 2
-; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align 2
-; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align 2
-; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align 2
-; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align 2
-; SSE-NEXT: [[LD5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align 2
-; SSE-NEXT: [[LD6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align 2
-; SSE-NEXT: [[LD7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align 2
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD7]])
-; SSE-NEXT: store i16 [[BITREVERSE0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), align 2
-; SSE-NEXT: store i16 [[BITREVERSE1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), align 2
-; SSE-NEXT: store i16 [[BITREVERSE2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), align 2
-; SSE-NEXT: store i16 [[BITREVERSE3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), align 2
-; SSE-NEXT: store i16 [[BITREVERSE4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), align 2
-; SSE-NEXT: store i16 [[BITREVERSE5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), align 2
-; SSE-NEXT: store i16 [[BITREVERSE6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), align 2
-; SSE-NEXT: store i16 [[BITREVERSE7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), align 2
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_8i16(
-; AVX-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
-; AVX-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
-; AVX-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_8i16(
-; XOP-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
-; XOP-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
-; XOP-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_8i16(
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
+; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
+; CHECK-NEXT: store <8 x i16> [[TMP2]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
+; CHECK-NEXT: ret void
;
%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align 2
%ld1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align 2
@@ -292,54 +181,12 @@ define void @bitreverse_8i16() #0 {
define void @bitreverse_16i16() #0 {
; SSE-LABEL: @bitreverse_16i16(
-; SSE-NEXT: [[LD0:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align 2
-; SSE-NEXT: [[LD1:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 1), align 2
-; SSE-NEXT: [[LD2:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 2), align 2
-; SSE-NEXT: [[LD3:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 3), align 2
-; SSE-NEXT: [[LD4:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 4), align 2
-; SSE-NEXT: [[LD5:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 5), align 2
-; SSE-NEXT: [[LD6:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 6), align 2
-; SSE-NEXT: [[LD7:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align 2
-; SSE-NEXT: [[LD8:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 8), align 2
-; SSE-NEXT: [[LD9:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 9), align 2
-; SSE-NEXT: [[LD10:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 10), align 2
-; SSE-NEXT: [[LD11:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 11), align 2
-; SSE-NEXT: [[LD12:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 12), align 2
-; SSE-NEXT: [[LD13:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 13), align 2
-; SSE-NEXT: [[LD14:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 14), align 2
-; SSE-NEXT: [[LD15:%.*]] = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 15), align 2
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD7]])
-; SSE-NEXT: [[BITREVERSE8:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD8]])
-; SSE-NEXT: [[BITREVERSE9:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD9]])
-; SSE-NEXT: [[BITREVERSE10:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD10]])
-; SSE-NEXT: [[BITREVERSE11:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD11]])
-; SSE-NEXT: [[BITREVERSE12:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD12]])
-; SSE-NEXT: [[BITREVERSE13:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD13]])
-; SSE-NEXT: [[BITREVERSE14:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD14]])
-; SSE-NEXT: [[BITREVERSE15:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[LD15]])
-; SSE-NEXT: store i16 [[BITREVERSE0]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 0), align 2
-; SSE-NEXT: store i16 [[BITREVERSE1]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 1), align 2
-; SSE-NEXT: store i16 [[BITREVERSE2]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 2), align 2
-; SSE-NEXT: store i16 [[BITREVERSE3]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 3), align 2
-; SSE-NEXT: store i16 [[BITREVERSE4]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 4), align 2
-; SSE-NEXT: store i16 [[BITREVERSE5]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 5), align 2
-; SSE-NEXT: store i16 [[BITREVERSE6]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 6), align 2
-; SSE-NEXT: store i16 [[BITREVERSE7]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 7), align 2
-; SSE-NEXT: store i16 [[BITREVERSE8]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 8), align 2
-; SSE-NEXT: store i16 [[BITREVERSE9]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 9), align 2
-; SSE-NEXT: store i16 [[BITREVERSE10]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 10), align 2
-; SSE-NEXT: store i16 [[BITREVERSE11]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 11), align 2
-; SSE-NEXT: store i16 [[BITREVERSE12]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 12), align 2
-; SSE-NEXT: store i16 [[BITREVERSE13]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 13), align 2
-; SSE-NEXT: store i16 [[BITREVERSE14]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 14), align 2
-; SSE-NEXT: store i16 [[BITREVERSE15]], i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 15), align 2
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([16 x i16]* @src16 to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast (i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 8) to <8 x i16>*), align 2
+; SSE-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP1]])
+; SSE-NEXT: [[TMP4:%.*]] = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> [[TMP2]])
+; SSE-NEXT: store <8 x i16> [[TMP3]], <8 x i16>* bitcast ([16 x i16]* @dst16 to <8 x i16>*), align 2
+; SSE-NEXT: store <8 x i16> [[TMP4]], <8 x i16>* bitcast (i16* getelementptr inbounds ([16 x i16], [16 x i16]* @dst16, i16 0, i64 8) to <8 x i16>*), align 2
; SSE-NEXT: ret void
;
; AVX-LABEL: @bitreverse_16i16(
@@ -406,68 +253,11 @@ define void @bitreverse_16i16() #0 {
}
define void @bitreverse_16i8() #0 {
-; SSE-LABEL: @bitreverse_16i8(
-; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
-; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
-; SSE-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
-; SSE-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
-; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
-; SSE-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
-; SSE-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
-; SSE-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
-; SSE-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 8), align 1
-; SSE-NEXT: [[LD9:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 9), align 1
-; SSE-NEXT: [[LD10:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 10), align 1
-; SSE-NEXT: [[LD11:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 11), align 1
-; SSE-NEXT: [[LD12:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 12), align 1
-; SSE-NEXT: [[LD13:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 13), align 1
-; SSE-NEXT: [[LD14:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 14), align 1
-; SSE-NEXT: [[LD15:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 15), align 1
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD7]])
-; SSE-NEXT: [[BITREVERSE8:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD8]])
-; SSE-NEXT: [[BITREVERSE9:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD9]])
-; SSE-NEXT: [[BITREVERSE10:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD10]])
-; SSE-NEXT: [[BITREVERSE11:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD11]])
-; SSE-NEXT: [[BITREVERSE12:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD12]])
-; SSE-NEXT: [[BITREVERSE13:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD13]])
-; SSE-NEXT: [[BITREVERSE14:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD14]])
-; SSE-NEXT: [[BITREVERSE15:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD15]])
-; SSE-NEXT: store i8 [[BITREVERSE0]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 0), align 1
-; SSE-NEXT: store i8 [[BITREVERSE1]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 1), align 1
-; SSE-NEXT: store i8 [[BITREVERSE2]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 2), align 1
-; SSE-NEXT: store i8 [[BITREVERSE3]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 3), align 1
-; SSE-NEXT: store i8 [[BITREVERSE4]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 4), align 1
-; SSE-NEXT: store i8 [[BITREVERSE5]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 5), align 1
-; SSE-NEXT: store i8 [[BITREVERSE6]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 6), align 1
-; SSE-NEXT: store i8 [[BITREVERSE7]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 7), align 1
-; SSE-NEXT: store i8 [[BITREVERSE8]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 8), align 1
-; SSE-NEXT: store i8 [[BITREVERSE9]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 9), align 1
-; SSE-NEXT: store i8 [[BITREVERSE10]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 10), align 1
-; SSE-NEXT: store i8 [[BITREVERSE11]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 11), align 1
-; SSE-NEXT: store i8 [[BITREVERSE12]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 12), align 1
-; SSE-NEXT: store i8 [[BITREVERSE13]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 13), align 1
-; SSE-NEXT: store i8 [[BITREVERSE14]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 14), align 1
-; SSE-NEXT: store i8 [[BITREVERSE15]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 15), align 1
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_16i8(
-; AVX-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; AVX-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; AVX-NEXT: store <16 x i8> [[TMP2]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_16i8(
-; XOP-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; XOP-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; XOP-NEXT: store <16 x i8> [[TMP2]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_16i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
+; CHECK-NEXT: store <16 x i8> [[TMP2]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
;
%ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
%ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
@@ -521,122 +311,14 @@ define void @bitreverse_16i8() #0 {
}
define void @bitreverse_32i8() #0 {
-; SSE-LABEL: @bitreverse_32i8(
-; SSE-NEXT: [[LD0:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
-; SSE-NEXT: [[LD1:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
-; SSE-NEXT: [[LD2:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 2), align 1
-; SSE-NEXT: [[LD3:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 3), align 1
-; SSE-NEXT: [[LD4:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 4), align 1
-; SSE-NEXT: [[LD5:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 5), align 1
-; SSE-NEXT: [[LD6:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 6), align 1
-; SSE-NEXT: [[LD7:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
-; SSE-NEXT: [[LD8:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 8), align 1
-; SSE-NEXT: [[LD9:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 9), align 1
-; SSE-NEXT: [[LD10:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 10), align 1
-; SSE-NEXT: [[LD11:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 11), align 1
-; SSE-NEXT: [[LD12:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 12), align 1
-; SSE-NEXT: [[LD13:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 13), align 1
-; SSE-NEXT: [[LD14:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 14), align 1
-; SSE-NEXT: [[LD15:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 15), align 1
-; SSE-NEXT: [[LD16:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16), align 1
-; SSE-NEXT: [[LD17:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 17), align 1
-; SSE-NEXT: [[LD18:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 18), align 1
-; SSE-NEXT: [[LD19:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 19), align 1
-; SSE-NEXT: [[LD20:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 20), align 1
-; SSE-NEXT: [[LD21:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 21), align 1
-; SSE-NEXT: [[LD22:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 22), align 1
-; SSE-NEXT: [[LD23:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 23), align 1
-; SSE-NEXT: [[LD24:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 24), align 1
-; SSE-NEXT: [[LD25:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 25), align 1
-; SSE-NEXT: [[LD26:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 26), align 1
-; SSE-NEXT: [[LD27:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 27), align 1
-; SSE-NEXT: [[LD28:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 28), align 1
-; SSE-NEXT: [[LD29:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 29), align 1
-; SSE-NEXT: [[LD30:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 30), align 1
-; SSE-NEXT: [[LD31:%.*]] = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 31), align 1
-; SSE-NEXT: [[BITREVERSE0:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD0]])
-; SSE-NEXT: [[BITREVERSE1:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD1]])
-; SSE-NEXT: [[BITREVERSE2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD2]])
-; SSE-NEXT: [[BITREVERSE3:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD3]])
-; SSE-NEXT: [[BITREVERSE4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD4]])
-; SSE-NEXT: [[BITREVERSE5:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD5]])
-; SSE-NEXT: [[BITREVERSE6:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD6]])
-; SSE-NEXT: [[BITREVERSE7:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD7]])
-; SSE-NEXT: [[BITREVERSE8:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD8]])
-; SSE-NEXT: [[BITREVERSE9:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD9]])
-; SSE-NEXT: [[BITREVERSE10:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD10]])
-; SSE-NEXT: [[BITREVERSE11:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD11]])
-; SSE-NEXT: [[BITREVERSE12:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD12]])
-; SSE-NEXT: [[BITREVERSE13:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD13]])
-; SSE-NEXT: [[BITREVERSE14:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD14]])
-; SSE-NEXT: [[BITREVERSE15:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD15]])
-; SSE-NEXT: [[BITREVERSE16:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD16]])
-; SSE-NEXT: [[BITREVERSE17:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD17]])
-; SSE-NEXT: [[BITREVERSE18:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD18]])
-; SSE-NEXT: [[BITREVERSE19:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD19]])
-; SSE-NEXT: [[BITREVERSE20:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD20]])
-; SSE-NEXT: [[BITREVERSE21:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD21]])
-; SSE-NEXT: [[BITREVERSE22:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD22]])
-; SSE-NEXT: [[BITREVERSE23:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD23]])
-; SSE-NEXT: [[BITREVERSE24:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD24]])
-; SSE-NEXT: [[BITREVERSE25:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD25]])
-; SSE-NEXT: [[BITREVERSE26:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD26]])
-; SSE-NEXT: [[BITREVERSE27:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD27]])
-; SSE-NEXT: [[BITREVERSE28:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD28]])
-; SSE-NEXT: [[BITREVERSE29:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD29]])
-; SSE-NEXT: [[BITREVERSE30:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD30]])
-; SSE-NEXT: [[BITREVERSE31:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[LD31]])
-; SSE-NEXT: store i8 [[BITREVERSE0]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 0), align 1
-; SSE-NEXT: store i8 [[BITREVERSE1]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 1), align 1
-; SSE-NEXT: store i8 [[BITREVERSE2]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 2), align 1
-; SSE-NEXT: store i8 [[BITREVERSE3]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 3), align 1
-; SSE-NEXT: store i8 [[BITREVERSE4]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 4), align 1
-; SSE-NEXT: store i8 [[BITREVERSE5]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 5), align 1
-; SSE-NEXT: store i8 [[BITREVERSE6]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 6), align 1
-; SSE-NEXT: store i8 [[BITREVERSE7]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 7), align 1
-; SSE-NEXT: store i8 [[BITREVERSE8]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 8), align 1
-; SSE-NEXT: store i8 [[BITREVERSE9]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 9), align 1
-; SSE-NEXT: store i8 [[BITREVERSE10]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 10), align 1
-; SSE-NEXT: store i8 [[BITREVERSE11]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 11), align 1
-; SSE-NEXT: store i8 [[BITREVERSE12]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 12), align 1
-; SSE-NEXT: store i8 [[BITREVERSE13]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 13), align 1
-; SSE-NEXT: store i8 [[BITREVERSE14]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 14), align 1
-; SSE-NEXT: store i8 [[BITREVERSE15]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 15), align 1
-; SSE-NEXT: store i8 [[BITREVERSE16]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16), align 1
-; SSE-NEXT: store i8 [[BITREVERSE17]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 17), align 1
-; SSE-NEXT: store i8 [[BITREVERSE18]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 18), align 1
-; SSE-NEXT: store i8 [[BITREVERSE19]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 19), align 1
-; SSE-NEXT: store i8 [[BITREVERSE20]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 20), align 1
-; SSE-NEXT: store i8 [[BITREVERSE21]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 21), align 1
-; SSE-NEXT: store i8 [[BITREVERSE22]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 22), align 1
-; SSE-NEXT: store i8 [[BITREVERSE23]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 23), align 1
-; SSE-NEXT: store i8 [[BITREVERSE24]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 24), align 1
-; SSE-NEXT: store i8 [[BITREVERSE25]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 25), align 1
-; SSE-NEXT: store i8 [[BITREVERSE26]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 26), align 1
-; SSE-NEXT: store i8 [[BITREVERSE27]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 27), align 1
-; SSE-NEXT: store i8 [[BITREVERSE28]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 28), align 1
-; SSE-NEXT: store i8 [[BITREVERSE29]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 29), align 1
-; SSE-NEXT: store i8 [[BITREVERSE30]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 30), align 1
-; SSE-NEXT: store i8 [[BITREVERSE31]], i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 31), align 1
-; SSE-NEXT: ret void
-;
-; AVX-LABEL: @bitreverse_32i8(
-; AVX-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; AVX-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16) to <16 x i8>*), align 1
-; AVX-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; AVX-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP2]])
-; AVX-NEXT: store <16 x i8> [[TMP3]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; AVX-NEXT: store <16 x i8> [[TMP4]], <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16) to <16 x i8>*), align 1
-; AVX-NEXT: ret void
-;
-; XOP-LABEL: @bitreverse_32i8(
-; XOP-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
-; XOP-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16) to <16 x i8>*), align 1
-; XOP-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
-; XOP-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP2]])
-; XOP-NEXT: store <16 x i8> [[TMP3]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
-; XOP-NEXT: store <16 x i8> [[TMP4]], <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16) to <16 x i8>*), align 1
-; XOP-NEXT: ret void
+; CHECK-LABEL: @bitreverse_32i8(
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([32 x i8]* @src8 to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP1]])
+; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[TMP2]])
+; CHECK-NEXT: store <16 x i8> [[TMP3]], <16 x i8>* bitcast ([32 x i8]* @dst8 to <16 x i8>*), align 1
+; CHECK-NEXT: store <16 x i8> [[TMP4]], <16 x i8>* bitcast (i8* getelementptr inbounds ([32 x i8], [32 x i8]* @dst8, i8 0, i64 16) to <16 x i8>*), align 1
+; CHECK-NEXT: ret void
;
%ld0 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 0), align 1
%ld1 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 1), align 1
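
The collapsed CHECK lines in bitreverse.ll pin down the same scalar-to-vector rewrite at every width. Reduced to its core for the 2 x i64 case — a sketch condensed from the hunks above, with hypothetical value names — the SLP vectorizer turns

  %ld0  = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
  %ld1  = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
  %rev0 = call i64 @llvm.bitreverse.i64(i64 %ld0)
  %rev1 = call i64 @llvm.bitreverse.i64(i64 %ld1)
  store i64 %rev0, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
  store i64 %rev1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8

into one wide load, one vector intrinsic call, and one wide store:

  %1 = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*), align 8
  %2 = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %1)
  store <2 x i64> %2, <2 x i64>* bitcast ([4 x i64]* @dst64 to <2 x i64>*), align 8

Where a type is wider than the target's vector registers (e.g. 4 x i64 on SSE), the same rewrite is applied twice over adjacent <2 x i64> halves, as the SSE blocks above check.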
diff --git a/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll b/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
new file mode 100644
index 000000000000..ba0059ed4e51
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -S -o - -mtriple=x86-64-unknown-linux -mcpu=bdver2 -instcombine | FileCheck %s
+
+define <2 x i8> @g(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @g(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <2 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <2 x i8> undef, i8 [[X0X0]], i32 0
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <2 x i8> [[INS1]], i8 [[Y1Y1]], i32 1
+; CHECK-NEXT: ret <2 x i8> [[INS2]]
+;
+ %x0 = extractelement <2 x i8> %x, i32 0
+ %y1 = extractelement <2 x i8> %y, i32 1
+ %x0x0 = mul i8 %x0, %x0
+ %y1y1 = mul i8 %y1, %y1
+ %ins1 = insertelement <2 x i8> undef, i8 %x0x0, i32 0
+ %ins2 = insertelement <2 x i8> %ins1, i8 %y1y1, i32 1
+ ret <2 x i8> %ins2
+}
+
+define <4 x i8> @h(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @h(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <4 x i8> undef, i8 [[X0X0]], i32 0
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x i8> [[INS1]], i8 [[X3X3]], i32 1
+; CHECK-NEXT: [[INS3:%.*]] = insertelement <4 x i8> [[INS2]], i8 [[Y1Y1]], i32 2
+; CHECK-NEXT: [[INS4:%.*]] = insertelement <4 x i8> [[INS3]], i8 [[Y2Y2]], i32 3
+; CHECK-NEXT: ret <4 x i8> [[INS4]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
+ %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
+ %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
+ %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
+ ret <4 x i8> %ins4
+}
+
+define <4 x i8> @h_undef(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @h_undef(
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <4 x i8> undef, i8 [[X3X3]], i32 1
+; CHECK-NEXT: [[INS3:%.*]] = insertelement <4 x i8> [[INS2]], i8 [[Y1Y1]], i32 2
+; CHECK-NEXT: [[INS4:%.*]] = insertelement <4 x i8> [[INS3]], i8 [[Y2Y2]], i32 3
+; CHECK-NEXT: ret <4 x i8> [[INS4]]
+;
+ %x0 = extractelement <4 x i8> undef, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
+ %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
+ %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
+ %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
+ ret <4 x i8> %ins4
+}
+
+define i8 @i(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @i(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X0X0]], [[X3X3]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[Y1Y1]], [[Y2Y2]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i8 [[TMP3]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %1 = add i8 %x0x0, %x3x3
+ %2 = add i8 %y1y1, %y2y2
+ %3 = add i8 %1, %2
+ ret i8 %3
+}
+
+define i8 @j(<4 x i8> %x, <4 x i8> %y) {
+; CHECK-LABEL: @j(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
+; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
+; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X0X0]], [[X3X3]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[Y1Y1]], [[Y2Y2]]
+; CHECK-NEXT: [[TMP3:%.*]] = sdiv i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i8 [[TMP3]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %y1 = extractelement <4 x i8> %y, i32 1
+ %y2 = extractelement <4 x i8> %y, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %y1y1 = mul i8 %y1, %y1
+ %y2y2 = mul i8 %y2, %y2
+ %1 = add i8 %x0x0, %x3x3
+ %2 = add i8 %y1y1, %y2y2
+ %3 = sdiv i8 %1, %2
+ ret i8 %3
+}
+
+define i8 @k(<4 x i8> %x) {
+; CHECK-LABEL: @k(
+; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
+; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
+; CHECK-NEXT: [[X1:%.*]] = extractelement <4 x i8> [[X]], i32 1
+; CHECK-NEXT: [[X2:%.*]] = extractelement <4 x i8> [[X]], i32 2
+; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
+; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
+; CHECK-NEXT: [[X1X1:%.*]] = mul i8 [[X1]], [[X1]]
+; CHECK-NEXT: [[X2X2:%.*]] = mul i8 [[X2]], [[X2]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X0X0]], [[X3X3]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X1X1]], [[X2X2]]
+; CHECK-NEXT: [[TMP3:%.*]] = sdiv i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i8 [[TMP3]]
+;
+ %x0 = extractelement <4 x i8> %x, i32 0
+ %x3 = extractelement <4 x i8> %x, i32 3
+ %x1 = extractelement <4 x i8> %x, i32 1
+ %x2 = extractelement <4 x i8> %x, i32 2
+ %x0x0 = mul i8 %x0, %x0
+ %x3x3 = mul i8 %x3, %x3
+ %x1x1 = mul i8 %x1, %x1
+ %x2x2 = mul i8 %x2, %x2
+ %1 = add i8 %x0x0, %x3x3
+ %2 = add i8 %x1x1, %x2x2
+ %3 = sdiv i8 %1, %2
+ ret i8 %3
+}
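+
+; Commentary: every CHECK block in this file matches the input IR essentially
+; unchanged, asserting that on bdver2 the SLP vectorizer (followed by
+; -instcombine) leaves these cross-operand extract/mul/insert chains scalar.
+; For contrast, a hand-written sketch of a hypothetical vector form for @g —
+; not something the pass currently emits — would blend the operands once and
+; square in vector form:
+;
+;   %blend = shufflevector <2 x i8> %x, <2 x i8> %y, <2 x i32> <i32 0, i32 3>
+;   %sq = mul <2 x i8> %blend, %blend
+;   ret <2 x i8> %sq
+;
+; The checks exist precisely to catch the pass starting (or stopping) to make
+; that kind of transformation on this CPU.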
diff --git a/test/Transforms/SLPVectorizer/X86/extractelement.ll b/test/Transforms/SLPVectorizer/X86/extractelement.ll
new file mode 100644
index 000000000000..10675f3be8a6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/extractelement.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -march=core-avx2 | FileCheck %s
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -march=core-avx2 -slp-threshold=-1 -slp-vectorize-hor-store | FileCheck %s --check-prefix=THRESH1
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -march=core-avx2 -slp-threshold=-2 -slp-vectorize-hor-store | FileCheck %s --check-prefix=THRESH2
+
+@a = global float 0.000000e+00, align 4
+
+define float @f(<2 x float> %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X:%.*]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x float> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP1]], i32 1
+; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP2]], [[TMP3]]
+; CHECK-NEXT: ret float [[ADD]]
+;
+ %x0 = extractelement <2 x float> %x, i32 0
+ %x1 = extractelement <2 x float> %x, i32 1
+ %x0x0 = fmul float %x0, %x0
+ %x1x1 = fmul float %x1, %x1
+ %add = fadd float %x0x0, %x1x1
+ ret float %add
+}
+
+define float @f_used_out_of_tree(<2 x float> %x) {
+; THRESH2-LABEL: @f_used_out_of_tree(
+; THRESH2-NEXT: [[TMP1:%.*]] = extractelement <2 x float> [[X:%.*]], i32 0
+; THRESH2-NEXT: [[TMP2:%.*]] = fmul <2 x float> [[X]], [[X]]
+; THRESH2-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; THRESH2-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP2]], i32 1
+; THRESH2-NEXT: [[ADD:%.*]] = fadd float [[TMP3]], [[TMP4]]
+; THRESH2-NEXT: store float [[ADD]], float* @a
+; THRESH2-NEXT: ret float [[TMP1]]
+;
+ %x0 = extractelement <2 x float> %x, i32 0
+ %x1 = extractelement <2 x float> %x, i32 1
+ %x0x0 = fmul float %x0, %x0
+ %x1x1 = fmul float %x1, %x1
+ %add = fadd float %x0x0, %x1x1
+ store float %add, float* @a
+ ret float %x0
+}
+
+define float @f_used_twice_in_tree(<2 x float> %x) {
+; THRESH1-LABEL: @f_used_twice_in_tree(
+; THRESH1-NEXT: [[TMP1:%.*]] = extractelement <2 x float> [[X:%.*]], i32 1
+; THRESH1-NEXT: [[TMP2:%.*]] = insertelement <2 x float> undef, float [[TMP1]], i32 0
+; THRESH1-NEXT: [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[TMP1]], i32 1
+; THRESH1-NEXT: [[TMP4:%.*]] = fmul <2 x float> [[X]], [[TMP3]]
+; THRESH1-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP4]], i32 0
+; THRESH1-NEXT: [[TMP6:%.*]] = extractelement <2 x float> [[TMP4]], i32 1
+; THRESH1-NEXT: [[ADD:%.*]] = fadd float [[TMP5]], [[TMP6]]
+; THRESH1-NEXT: ret float [[ADD]]
+;
+ %x0 = extractelement <2 x float> %x, i32 0
+ %x1 = extractelement <2 x float> %x, i32 1
+ %x0x0 = fmul float %x0, %x1
+ %x1x1 = fmul float %x1, %x1
+ %add = fadd float %x0x0, %x1x1
+ ret float %add
+}
+
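extractelement.ll exercises the -slp-threshold flag: roughly speaking, it biases the cost model's profitability cutoff, and negative values make the vectorizer accept trees it would otherwise reject as unprofitable. That is what the THRESH1 run relies on in @f_used_twice_in_tree, where vectorizing requires extra shuffle traffic: the scalar multiply pair

  %x0x0 = fmul float %x0, %x1
  %x1x1 = fmul float %x1, %x1

becomes, per the THRESH1 checks above, a splat of lane 1 followed by one vector fmul:

  %1 = extractelement <2 x float> %x, i32 1
  %2 = insertelement <2 x float> undef, float %1, i32 0
  %3 = insertelement <2 x float> %2, float %1, i32 1
  %4 = fmul <2 x float> %x, %3

Under the default threshold (the plain CHECK run), @f still vectorizes because it needs no such repair shuffles.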
diff --git a/test/Transforms/SLPVectorizer/X86/horizontal-list.ll b/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
index 3f9fffb9b624..73844037f12e 100644
--- a/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
+++ b/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 | FileCheck %s
+; RUN: opt -slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -slp-threshold=-10 | FileCheck %s --check-prefix=THRESHOLD
@n = external local_unnamed_addr global i32, align 4
@arr = common local_unnamed_addr global [20 x float] zeroinitializer, align 16
@@ -12,29 +13,55 @@ define float @baz() {
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL4:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL4]], [[CONV]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL4_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD]]
-; CHECK-NEXT: [[TMP5:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <2 x float> [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP7]], i32 0
-; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP8]], [[ADD_1]]
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP7]], i32 1
-; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP9]], [[ADD_2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr to <2 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr1 to <2 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <2 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP3]], i32 0
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP3]], i32 1
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP5]], [[ADD]]
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2) to <2 x float>*), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2) to <2 x float>*), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP9]], [[ADD_1]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP10]], [[ADD_2]]
; CHECK-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV]]
-; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[MUL4]], [[ADD7]]
-; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD19]]
-; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP8]], [[ADD19_1]]
-; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP9]], [[ADD19_2]]
+; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[TMP4]], [[ADD7]]
+; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[TMP5]], [[ADD19]]
+; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP9]], [[ADD19_1]]
+; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP10]], [[ADD19_2]]
; CHECK-NEXT: store float [[ADD19_3]], float* @res, align 4
; CHECK-NEXT: ret float [[ADD19_3]]
;
+; THRESHOLD-LABEL: @baz(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr1 to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <2 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP3]], i32 0
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP3]], i32 1
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP5]], [[ADD]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2) to <2 x float>*), align 8
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2) to <2 x float>*), align 8
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP9]], [[ADD_1]]
+; THRESHOLD-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP10]], [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD19:%.*]] = fadd fast float [[TMP4]], [[ADD7]]
+; THRESHOLD-NEXT: [[ADD19_1:%.*]] = fadd fast float [[TMP5]], [[ADD19]]
+; THRESHOLD-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP9]], [[ADD19_1]]
+; THRESHOLD-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP10]], [[ADD19_2]]
+; THRESHOLD-NEXT: store float [[ADD19_3]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[ADD19_3]]
+;
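+; Both accumulation chains in @baz reuse the lanes extracted from the two
+; <2 x float> fmul results (TMP4/TMP5 and TMP9/TMP10), which is what makes
+; vectorizing the multiplies profitable even though the fadd chains
+; themselves stay scalar.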
entry:
%0 = load i32, i32* @n, align 4
%mul = mul nsw i32 %0, 3
@@ -70,42 +97,62 @@ define float @bazz() {
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL4:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL4]], [[CONV]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL4_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD]]
-; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
-; CHECK-NEXT: [[MUL4_2:%.*]] = fmul fast float [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[MUL4_2]], [[ADD_1]]
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
-; CHECK-NEXT: [[TMP8:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
-; CHECK-NEXT: [[MUL4_3:%.*]] = fmul fast float [[TMP8]], [[TMP7]]
-; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[MUL4_3]], [[ADD_2]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr to <8 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr1 to <8 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
; CHECK-NEXT: [[MUL5:%.*]] = shl nsw i32 [[TMP0]], 2
; CHECK-NEXT: [[CONV6:%.*]] = sitofp i32 [[MUL5]] to float
; CHECK-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV6]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 4), align 16
-; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 4), align 16
-; CHECK-NEXT: [[MUL18:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[MUL18]], [[ADD7]]
-; CHECK-NEXT: [[TMP11:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 5), align 4
-; CHECK-NEXT: [[TMP12:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 5), align 4
-; CHECK-NEXT: [[MUL18_1:%.*]] = fmul fast float [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[MUL18_1]], [[ADD19]]
-; CHECK-NEXT: [[TMP13:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 6) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP14:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 6) to <2 x float>*), align 8
-; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <2 x float> [[TMP14]], [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x float> [[TMP15]], i32 0
-; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[TMP16]], [[ADD19_1]]
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x float> [[TMP15]], i32 1
-; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float [[TMP17]], [[ADD19_2]]
-; CHECK-NEXT: store float [[ADD19_3]], float* @res, align 4
-; CHECK-NEXT: ret float [[ADD19_3]]
+; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float undef, [[ADD7]]
+; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float undef, [[ADD19]]
+; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float undef, [[ADD19_1]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP3]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV6]]
+; CHECK-NEXT: [[ADD19_3:%.*]] = fadd fast float undef, [[ADD19_2]]
+; CHECK-NEXT: store float [[BIN_EXTRA5]], float* @res, align 4
+; CHECK-NEXT: ret float [[BIN_EXTRA5]]
+;
+; THRESHOLD-LABEL: @bazz(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr to <8 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <8 x float>, <8 x float>* bitcast ([20 x float]* @arr1 to <8 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[MUL5:%.*]] = shl nsw i32 [[TMP0]], 2
+; THRESHOLD-NEXT: [[CONV6:%.*]] = sitofp i32 [[MUL5]] to float
+; THRESHOLD-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV6]]
+; THRESHOLD-NEXT: [[ADD19:%.*]] = fadd fast float undef, [[ADD7]]
+; THRESHOLD-NEXT: [[ADD19_1:%.*]] = fadd fast float undef, [[ADD19]]
+; THRESHOLD-NEXT: [[ADD19_2:%.*]] = fadd fast float undef, [[ADD19_1]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP3]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP4]], [[CONV]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV6]]
+; THRESHOLD-NEXT: [[ADD19_3:%.*]] = fadd fast float undef, [[ADD19_2]]
+; THRESHOLD-NEXT: store float [[BIN_EXTRA5]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA5]]
;
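+; In @bazz the eight products feed a single <8 x float> fmul followed by a
+; shufflevector/fadd reduction tree; the BIN_EXTRA/BIN_EXTRA5 fadds splice
+; the scalar operands CONV and CONV6 back into the reduced value. The
+; remaining "fadd fast float undef, ..." instructions are the now-dead
+; scalar chain, left for later cleanup passes to delete.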
entry:
%0 = load i32, i32* @n, align 4
@@ -155,24 +202,39 @@ define float @bazzz() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
-; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = fmul fast float [[CONV]], [[TMP11]]
-; CHECK-NEXT: store float [[TMP12]], float* @res, align 4
-; CHECK-NEXT: ret float [[TMP12]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; CHECK-NEXT: store float [[TMP8]], float* @res, align 4
+; CHECK-NEXT: ret float [[TMP8]]
+;
+; THRESHOLD-LABEL: @bazzz(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; THRESHOLD-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; THRESHOLD-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; THRESHOLD-NEXT: store float [[TMP8]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[TMP8]]
;
entry:
%0 = load i32, i32* @n, align 4
@@ -202,26 +264,42 @@ define i32 @foo() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), align 4
-; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
-; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
-; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
-; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
-; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = fmul fast float [[CONV]], [[TMP11]]
-; CHECK-NEXT: [[CONV4:%.*]] = fptosi float [[TMP12]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; CHECK-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; CHECK-NEXT: [[CONV4:%.*]] = fptosi float [[TMP8]] to i32
; CHECK-NEXT: store i32 [[CONV4]], i32* @n, align 4
; CHECK-NEXT: ret i32 [[CONV4]]
;
+; THRESHOLD-LABEL: @foo(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, i32* @n, align 4
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([20 x float]* @arr1 to <4 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast float undef, undef
+; THRESHOLD-NEXT: [[TMP5:%.*]] = fadd fast float undef, [[TMP4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[BIN_RDX2]], i32 0
+; THRESHOLD-NEXT: [[TMP7:%.*]] = fadd fast float undef, [[TMP5]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], [[TMP6]]
+; THRESHOLD-NEXT: [[CONV4:%.*]] = fptosi float [[TMP8]] to i32
+; THRESHOLD-NEXT: store i32 [[CONV4]], i32* @n, align 4
+; THRESHOLD-NEXT: ret i32 [[CONV4]]
+;
entry:
%0 = load i32, i32* @n, align 4
%conv = sitofp i32 %0 to float
@@ -269,6 +347,28 @@ define float @bar() {
; CHECK-NEXT: store float [[MAX_0_MUL3_2]], float* @res, align 4
; CHECK-NEXT: ret float [[MAX_0_MUL3_2]]
;
+; THRESHOLD-LABEL: @bar(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <2 x float>, <2 x float>* bitcast ([20 x float]* @arr1 to <2 x float>*), align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP0]]
+; THRESHOLD-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP2]], i32 1
+; THRESHOLD-NEXT: [[CMP4:%.*]] = fcmp fast ogt float [[TMP3]], [[TMP4]]
+; THRESHOLD-NEXT: [[MAX_0_MUL3:%.*]] = select i1 [[CMP4]], float [[TMP3]], float [[TMP4]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[MUL3_1:%.*]] = fmul fast float [[TMP6]], [[TMP5]]
+; THRESHOLD-NEXT: [[CMP4_1:%.*]] = fcmp fast ogt float [[MAX_0_MUL3]], [[MUL3_1]]
+; THRESHOLD-NEXT: [[MAX_0_MUL3_1:%.*]] = select i1 [[CMP4_1]], float [[MAX_0_MUL3]], float [[MUL3_1]]
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[TMP8:%.*]] = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[MUL3_2:%.*]] = fmul fast float [[TMP8]], [[TMP7]]
+; THRESHOLD-NEXT: [[CMP4_2:%.*]] = fcmp fast ogt float [[MAX_0_MUL3_1]], [[MUL3_2]]
+; THRESHOLD-NEXT: [[MAX_0_MUL3_2:%.*]] = select i1 [[CMP4_2]], float [[MAX_0_MUL3_1]], float [[MUL3_2]]
+; THRESHOLD-NEXT: store float [[MAX_0_MUL3_2]], float* @res, align 4
+; THRESHOLD-NEXT: ret float [[MAX_0_MUL3_2]]
+;
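+; @bar is a max reduction (fcmp ogt + select); the SLP vectorizer does not
+; recognize it as a horizontal reduction here, so even at -slp-threshold=-10
+; only the first two multiplies are packed into a <2 x float> fmul and the
+; compare/select chain remains scalar.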
entry:
%0 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), align 16
%1 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), align 16
@@ -292,3 +392,1344 @@ entry:
ret float %max.0.mul3.2
}
+define float @f(float* nocapture readonly %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <16 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float undef, undef
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; CHECK-NEXT: [[ARRAYIDX_32:%.*]] = getelementptr inbounds float, float* [[X]], i64 32
+; CHECK-NEXT: [[ARRAYIDX_33:%.*]] = getelementptr inbounds float, float* [[X]], i64 33
+; CHECK-NEXT: [[ARRAYIDX_34:%.*]] = getelementptr inbounds float, float* [[X]], i64 34
+; CHECK-NEXT: [[ARRAYIDX_35:%.*]] = getelementptr inbounds float, float* [[X]], i64 35
+; CHECK-NEXT: [[ARRAYIDX_36:%.*]] = getelementptr inbounds float, float* [[X]], i64 36
+; CHECK-NEXT: [[ARRAYIDX_37:%.*]] = getelementptr inbounds float, float* [[X]], i64 37
+; CHECK-NEXT: [[ARRAYIDX_38:%.*]] = getelementptr inbounds float, float* [[X]], i64 38
+; CHECK-NEXT: [[ARRAYIDX_39:%.*]] = getelementptr inbounds float, float* [[X]], i64 39
+; CHECK-NEXT: [[ARRAYIDX_40:%.*]] = getelementptr inbounds float, float* [[X]], i64 40
+; CHECK-NEXT: [[ARRAYIDX_41:%.*]] = getelementptr inbounds float, float* [[X]], i64 41
+; CHECK-NEXT: [[ARRAYIDX_42:%.*]] = getelementptr inbounds float, float* [[X]], i64 42
+; CHECK-NEXT: [[ARRAYIDX_43:%.*]] = getelementptr inbounds float, float* [[X]], i64 43
+; CHECK-NEXT: [[ARRAYIDX_44:%.*]] = getelementptr inbounds float, float* [[X]], i64 44
+; CHECK-NEXT: [[ARRAYIDX_45:%.*]] = getelementptr inbounds float, float* [[X]], i64 45
+; CHECK-NEXT: [[ARRAYIDX_46:%.*]] = getelementptr inbounds float, float* [[X]], i64 46
+; CHECK-NEXT: [[ARRAYIDX_47:%.*]] = getelementptr inbounds float, float* [[X]], i64 47
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_16]] to <32 x float>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x float>, <32 x float>* [[TMP2]], align 4
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; CHECK-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; CHECK-NEXT: [[ADD_32:%.*]] = fadd fast float undef, [[ADD_31]]
+; CHECK-NEXT: [[ADD_33:%.*]] = fadd fast float undef, [[ADD_32]]
+; CHECK-NEXT: [[ADD_34:%.*]] = fadd fast float undef, [[ADD_33]]
+; CHECK-NEXT: [[ADD_35:%.*]] = fadd fast float undef, [[ADD_34]]
+; CHECK-NEXT: [[ADD_36:%.*]] = fadd fast float undef, [[ADD_35]]
+; CHECK-NEXT: [[ADD_37:%.*]] = fadd fast float undef, [[ADD_36]]
+; CHECK-NEXT: [[ADD_38:%.*]] = fadd fast float undef, [[ADD_37]]
+; CHECK-NEXT: [[ADD_39:%.*]] = fadd fast float undef, [[ADD_38]]
+; CHECK-NEXT: [[ADD_40:%.*]] = fadd fast float undef, [[ADD_39]]
+; CHECK-NEXT: [[ADD_41:%.*]] = fadd fast float undef, [[ADD_40]]
+; CHECK-NEXT: [[ADD_42:%.*]] = fadd fast float undef, [[ADD_41]]
+; CHECK-NEXT: [[ADD_43:%.*]] = fadd fast float undef, [[ADD_42]]
+; CHECK-NEXT: [[ADD_44:%.*]] = fadd fast float undef, [[ADD_43]]
+; CHECK-NEXT: [[ADD_45:%.*]] = fadd fast float undef, [[ADD_44]]
+; CHECK-NEXT: [[ADD_46:%.*]] = fadd fast float undef, [[ADD_45]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP3]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP3]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; CHECK-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <16 x float> [[TMP1]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX10:%.*]] = fadd fast <16 x float> [[TMP1]], [[RDX_SHUF9]]
+; CHECK-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <16 x float> [[BIN_RDX10]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX12:%.*]] = fadd fast <16 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; CHECK-NEXT: [[RDX_SHUF13:%.*]] = shufflevector <16 x float> [[BIN_RDX12]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX14:%.*]] = fadd fast <16 x float> [[BIN_RDX12]], [[RDX_SHUF13]]
+; CHECK-NEXT: [[RDX_SHUF15:%.*]] = shufflevector <16 x float> [[BIN_RDX14]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX16:%.*]] = fadd fast <16 x float> [[BIN_RDX14]], [[RDX_SHUF15]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <16 x float> [[BIN_RDX16]], i32 0
+; CHECK-NEXT: [[BIN_RDX17:%.*]] = fadd fast float [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[ADD_47:%.*]] = fadd fast float undef, [[ADD_46]]
+; CHECK-NEXT: ret float [[BIN_RDX17]]
+;
+; THRESHOLD-LABEL: @f(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; THRESHOLD-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; THRESHOLD-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; THRESHOLD-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; THRESHOLD-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; THRESHOLD-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; THRESHOLD-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; THRESHOLD-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <16 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float undef, undef
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; THRESHOLD-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; THRESHOLD-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; THRESHOLD-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; THRESHOLD-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; THRESHOLD-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; THRESHOLD-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; THRESHOLD-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; THRESHOLD-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; THRESHOLD-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; THRESHOLD-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; THRESHOLD-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; THRESHOLD-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; THRESHOLD-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; THRESHOLD-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; THRESHOLD-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; THRESHOLD-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; THRESHOLD-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; THRESHOLD-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; THRESHOLD-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; THRESHOLD-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; THRESHOLD-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; THRESHOLD-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; THRESHOLD-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; THRESHOLD-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; THRESHOLD-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; THRESHOLD-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; THRESHOLD-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; THRESHOLD-NEXT: [[ARRAYIDX_32:%.*]] = getelementptr inbounds float, float* [[X]], i64 32
+; THRESHOLD-NEXT: [[ARRAYIDX_33:%.*]] = getelementptr inbounds float, float* [[X]], i64 33
+; THRESHOLD-NEXT: [[ARRAYIDX_34:%.*]] = getelementptr inbounds float, float* [[X]], i64 34
+; THRESHOLD-NEXT: [[ARRAYIDX_35:%.*]] = getelementptr inbounds float, float* [[X]], i64 35
+; THRESHOLD-NEXT: [[ARRAYIDX_36:%.*]] = getelementptr inbounds float, float* [[X]], i64 36
+; THRESHOLD-NEXT: [[ARRAYIDX_37:%.*]] = getelementptr inbounds float, float* [[X]], i64 37
+; THRESHOLD-NEXT: [[ARRAYIDX_38:%.*]] = getelementptr inbounds float, float* [[X]], i64 38
+; THRESHOLD-NEXT: [[ARRAYIDX_39:%.*]] = getelementptr inbounds float, float* [[X]], i64 39
+; THRESHOLD-NEXT: [[ARRAYIDX_40:%.*]] = getelementptr inbounds float, float* [[X]], i64 40
+; THRESHOLD-NEXT: [[ARRAYIDX_41:%.*]] = getelementptr inbounds float, float* [[X]], i64 41
+; THRESHOLD-NEXT: [[ARRAYIDX_42:%.*]] = getelementptr inbounds float, float* [[X]], i64 42
+; THRESHOLD-NEXT: [[ARRAYIDX_43:%.*]] = getelementptr inbounds float, float* [[X]], i64 43
+; THRESHOLD-NEXT: [[ARRAYIDX_44:%.*]] = getelementptr inbounds float, float* [[X]], i64 44
+; THRESHOLD-NEXT: [[ARRAYIDX_45:%.*]] = getelementptr inbounds float, float* [[X]], i64 45
+; THRESHOLD-NEXT: [[ARRAYIDX_46:%.*]] = getelementptr inbounds float, float* [[X]], i64 46
+; THRESHOLD-NEXT: [[ARRAYIDX_47:%.*]] = getelementptr inbounds float, float* [[X]], i64 47
+; THRESHOLD-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_16]] to <32 x float>*
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load <32 x float>, <32 x float>* [[TMP2]], align 4
+; THRESHOLD-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; THRESHOLD-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; THRESHOLD-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; THRESHOLD-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; THRESHOLD-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; THRESHOLD-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; THRESHOLD-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; THRESHOLD-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; THRESHOLD-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; THRESHOLD-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; THRESHOLD-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; THRESHOLD-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; THRESHOLD-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; THRESHOLD-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; THRESHOLD-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; THRESHOLD-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; THRESHOLD-NEXT: [[ADD_32:%.*]] = fadd fast float undef, [[ADD_31]]
+; THRESHOLD-NEXT: [[ADD_33:%.*]] = fadd fast float undef, [[ADD_32]]
+; THRESHOLD-NEXT: [[ADD_34:%.*]] = fadd fast float undef, [[ADD_33]]
+; THRESHOLD-NEXT: [[ADD_35:%.*]] = fadd fast float undef, [[ADD_34]]
+; THRESHOLD-NEXT: [[ADD_36:%.*]] = fadd fast float undef, [[ADD_35]]
+; THRESHOLD-NEXT: [[ADD_37:%.*]] = fadd fast float undef, [[ADD_36]]
+; THRESHOLD-NEXT: [[ADD_38:%.*]] = fadd fast float undef, [[ADD_37]]
+; THRESHOLD-NEXT: [[ADD_39:%.*]] = fadd fast float undef, [[ADD_38]]
+; THRESHOLD-NEXT: [[ADD_40:%.*]] = fadd fast float undef, [[ADD_39]]
+; THRESHOLD-NEXT: [[ADD_41:%.*]] = fadd fast float undef, [[ADD_40]]
+; THRESHOLD-NEXT: [[ADD_42:%.*]] = fadd fast float undef, [[ADD_41]]
+; THRESHOLD-NEXT: [[ADD_43:%.*]] = fadd fast float undef, [[ADD_42]]
+; THRESHOLD-NEXT: [[ADD_44:%.*]] = fadd fast float undef, [[ADD_43]]
+; THRESHOLD-NEXT: [[ADD_45:%.*]] = fadd fast float undef, [[ADD_44]]
+; THRESHOLD-NEXT: [[ADD_46:%.*]] = fadd fast float undef, [[ADD_45]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP3]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP3]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; THRESHOLD-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; THRESHOLD-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <16 x float> [[TMP1]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX10:%.*]] = fadd fast <16 x float> [[TMP1]], [[RDX_SHUF9]]
+; THRESHOLD-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <16 x float> [[BIN_RDX10]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX12:%.*]] = fadd fast <16 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; THRESHOLD-NEXT: [[RDX_SHUF13:%.*]] = shufflevector <16 x float> [[BIN_RDX12]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX14:%.*]] = fadd fast <16 x float> [[BIN_RDX12]], [[RDX_SHUF13]]
+; THRESHOLD-NEXT: [[RDX_SHUF15:%.*]] = shufflevector <16 x float> [[BIN_RDX14]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX16:%.*]] = fadd fast <16 x float> [[BIN_RDX14]], [[RDX_SHUF15]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = extractelement <16 x float> [[BIN_RDX16]], i32 0
+; THRESHOLD-NEXT: [[BIN_RDX17:%.*]] = fadd fast float [[TMP4]], [[TMP5]]
+; THRESHOLD-NEXT: [[ADD_47:%.*]] = fadd fast float undef, [[ADD_46]]
+; THRESHOLD-NEXT: ret float [[BIN_RDX17]]
+;
+ entry:
+ %0 = load float, float* %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %0
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ %arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
+ %30 = load float, float* %arrayidx.30, align 4
+ %add.30 = fadd fast float %30, %add.29
+ %arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
+ %31 = load float, float* %arrayidx.31, align 4
+ %add.31 = fadd fast float %31, %add.30
+ %arrayidx.32 = getelementptr inbounds float, float* %x, i64 32
+ %32 = load float, float* %arrayidx.32, align 4
+ %add.32 = fadd fast float %32, %add.31
+ %arrayidx.33 = getelementptr inbounds float, float* %x, i64 33
+ %33 = load float, float* %arrayidx.33, align 4
+ %add.33 = fadd fast float %33, %add.32
+ %arrayidx.34 = getelementptr inbounds float, float* %x, i64 34
+ %34 = load float, float* %arrayidx.34, align 4
+ %add.34 = fadd fast float %34, %add.33
+ %arrayidx.35 = getelementptr inbounds float, float* %x, i64 35
+ %35 = load float, float* %arrayidx.35, align 4
+ %add.35 = fadd fast float %35, %add.34
+ %arrayidx.36 = getelementptr inbounds float, float* %x, i64 36
+ %36 = load float, float* %arrayidx.36, align 4
+ %add.36 = fadd fast float %36, %add.35
+ %arrayidx.37 = getelementptr inbounds float, float* %x, i64 37
+ %37 = load float, float* %arrayidx.37, align 4
+ %add.37 = fadd fast float %37, %add.36
+ %arrayidx.38 = getelementptr inbounds float, float* %x, i64 38
+ %38 = load float, float* %arrayidx.38, align 4
+ %add.38 = fadd fast float %38, %add.37
+ %arrayidx.39 = getelementptr inbounds float, float* %x, i64 39
+ %39 = load float, float* %arrayidx.39, align 4
+ %add.39 = fadd fast float %39, %add.38
+ %arrayidx.40 = getelementptr inbounds float, float* %x, i64 40
+ %40 = load float, float* %arrayidx.40, align 4
+ %add.40 = fadd fast float %40, %add.39
+ %arrayidx.41 = getelementptr inbounds float, float* %x, i64 41
+ %41 = load float, float* %arrayidx.41, align 4
+ %add.41 = fadd fast float %41, %add.40
+ %arrayidx.42 = getelementptr inbounds float, float* %x, i64 42
+ %42 = load float, float* %arrayidx.42, align 4
+ %add.42 = fadd fast float %42, %add.41
+ %arrayidx.43 = getelementptr inbounds float, float* %x, i64 43
+ %43 = load float, float* %arrayidx.43, align 4
+ %add.43 = fadd fast float %43, %add.42
+ %arrayidx.44 = getelementptr inbounds float, float* %x, i64 44
+ %44 = load float, float* %arrayidx.44, align 4
+ %add.44 = fadd fast float %44, %add.43
+ %arrayidx.45 = getelementptr inbounds float, float* %x, i64 45
+ %45 = load float, float* %arrayidx.45, align 4
+ %add.45 = fadd fast float %45, %add.44
+ %arrayidx.46 = getelementptr inbounds float, float* %x, i64 46
+ %46 = load float, float* %arrayidx.46, align 4
+ %add.46 = fadd fast float %46, %add.45
+ %arrayidx.47 = getelementptr inbounds float, float* %x, i64 47
+ %47 = load float, float* %arrayidx.47, align 4
+ %add.47 = fadd fast float %47, %add.46
+ ret float %add.47
+}
+
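+; The RDX_SHUF/BIN_RDX lines checked in @f implement a log2 shuffle
+; reduction. As a minimal sketch, for a hypothetical <4 x float> %v the same
+; pattern is:
+;   %s1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+;   %r1 = fadd fast <4 x float> %v, %s1    ; lane0 = v0+v2, lane1 = v1+v3
+;   %s2 = shufflevector <4 x float> %r1, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+;   %r2 = fadd fast <4 x float> %r1, %s2   ; lane0 = v0+v1+v2+v3
+;   %sum = extractelement <4 x float> %r2, i32 0
+; In @f this runs at <32 x float> and <16 x float> width, and the two
+; partial sums are combined by the scalar BIN_RDX17 fadd.
+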
+define float @f1(float* nocapture readonly %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @f1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[REM:%.*]] = srem i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[REM]] to float
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <32 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <32 x float>, <32 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP1]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[CONV]]
+; CHECK-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; CHECK-NEXT: ret float [[BIN_EXTRA]]
+;
+; THRESHOLD-LABEL: @f1(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[REM:%.*]] = srem i32 [[A:%.*]], [[B:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[REM]] to float
+; THRESHOLD-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; THRESHOLD-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; THRESHOLD-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; THRESHOLD-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; THRESHOLD-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; THRESHOLD-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; THRESHOLD-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; THRESHOLD-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; THRESHOLD-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; THRESHOLD-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; THRESHOLD-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; THRESHOLD-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; THRESHOLD-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; THRESHOLD-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; THRESHOLD-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; THRESHOLD-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; THRESHOLD-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; THRESHOLD-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; THRESHOLD-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; THRESHOLD-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; THRESHOLD-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; THRESHOLD-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; THRESHOLD-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; THRESHOLD-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <32 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <32 x float>, <32 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float undef, [[CONV]]
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; THRESHOLD-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; THRESHOLD-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; THRESHOLD-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; THRESHOLD-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; THRESHOLD-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; THRESHOLD-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; THRESHOLD-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; THRESHOLD-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; THRESHOLD-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; THRESHOLD-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; THRESHOLD-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; THRESHOLD-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; THRESHOLD-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; THRESHOLD-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; THRESHOLD-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; THRESHOLD-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; THRESHOLD-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; THRESHOLD-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; THRESHOLD-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; THRESHOLD-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; THRESHOLD-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; THRESHOLD-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; THRESHOLD-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; THRESHOLD-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; THRESHOLD-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; THRESHOLD-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP1]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; THRESHOLD-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA]]
+;
+ entry:
+ %rem = srem i32 %a, %b
+ %conv = sitofp i32 %rem to float
+ %0 = load float, float* %x, align 4
+ %add = fadd fast float %0, %conv
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %add
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ %arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
+ %30 = load float, float* %arrayidx.30, align 4
+ %add.30 = fadd fast float %30, %add.29
+ %arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
+ %31 = load float, float* %arrayidx.31, align 4
+ %add.31 = fadd fast float %31, %add.30
+ ret float %add.31
+}
+
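+; @loadadd31 reduces the floats x[1] through x[30], so no single power-of-two
+; vector covers the range. The CHECK/THRESHOLD lines below expect the
+; reduction to be split into a <16 x float>, an <8 x float> and a <4 x float>
+; load plus the two leading scalar loads; each vector piece is reduced with
+; its own shuffle/fadd ladder and the partial sums are then added together
+; ([[BIN_RDX13]], [[BIN_RDX18]]) before the two scalars are folded in last.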
+define float @loadadd31(float* nocapture readonly %x) {
+; CHECK-LABEL: @loadadd31(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_2]] to <4 x float>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float* [[ARRAYIDX_6]] to <8 x float>*
+; CHECK-NEXT: [[TMP5:%.*]] = load <8 x float>, <8 x float>* [[TMP4]], align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[ARRAYIDX_14]] to <16 x float>*
+; CHECK-NEXT: [[TMP7:%.*]] = load <16 x float>, <16 x float>* [[TMP6]], align 4
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <16 x float> [[TMP7]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x float> [[TMP7]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <16 x float> [[BIN_RDX]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <16 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <16 x float> [[BIN_RDX2]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <16 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <16 x float> [[BIN_RDX4]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <16 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <16 x float> [[BIN_RDX6]], i32 0
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <8 x float> [[TMP5]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <8 x float> [[TMP5]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <8 x float> [[BIN_RDX8]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX10:%.*]] = fadd fast <8 x float> [[BIN_RDX8]], [[RDX_SHUF9]]
+; CHECK-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <8 x float> [[BIN_RDX10]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX12:%.*]] = fadd fast <8 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <8 x float> [[BIN_RDX12]], i32 0
+; CHECK-NEXT: [[BIN_RDX13:%.*]] = fadd fast float [[TMP8]], [[TMP9]]
+; CHECK-NEXT: [[RDX_SHUF14:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX15:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF14]]
+; CHECK-NEXT: [[RDX_SHUF16:%.*]] = shufflevector <4 x float> [[BIN_RDX15]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX17:%.*]] = fadd fast <4 x float> [[BIN_RDX15]], [[RDX_SHUF16]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[BIN_RDX17]], i32 0
+; CHECK-NEXT: [[BIN_RDX18:%.*]] = fadd fast float [[BIN_RDX13]], [[TMP10]]
+; CHECK-NEXT: [[TMP11:%.*]] = fadd fast float [[BIN_RDX18]], [[TMP1]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd fast float [[TMP11]], [[TMP0]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: ret float [[TMP12]]
+;
+; THRESHOLD-LABEL: @loadadd31(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[TMP0:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[TMP0]]
+; THRESHOLD-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX_2]] to <4 x float>*
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float undef, [[ADD_1]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float undef, [[ADD_2]]
+; THRESHOLD-NEXT: [[ADD_4:%.*]] = fadd fast float undef, [[ADD_3]]
+; THRESHOLD-NEXT: [[ADD_5:%.*]] = fadd fast float undef, [[ADD_4]]
+; THRESHOLD-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; THRESHOLD-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; THRESHOLD-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; THRESHOLD-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; THRESHOLD-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; THRESHOLD-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; THRESHOLD-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; THRESHOLD-NEXT: [[TMP4:%.*]] = bitcast float* [[ARRAYIDX_6]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP5:%.*]] = load <8 x float>, <8 x float>* [[TMP4]], align 4
+; THRESHOLD-NEXT: [[ADD_6:%.*]] = fadd fast float undef, [[ADD_5]]
+; THRESHOLD-NEXT: [[ADD_7:%.*]] = fadd fast float undef, [[ADD_6]]
+; THRESHOLD-NEXT: [[ADD_8:%.*]] = fadd fast float undef, [[ADD_7]]
+; THRESHOLD-NEXT: [[ADD_9:%.*]] = fadd fast float undef, [[ADD_8]]
+; THRESHOLD-NEXT: [[ADD_10:%.*]] = fadd fast float undef, [[ADD_9]]
+; THRESHOLD-NEXT: [[ADD_11:%.*]] = fadd fast float undef, [[ADD_10]]
+; THRESHOLD-NEXT: [[ADD_12:%.*]] = fadd fast float undef, [[ADD_11]]
+; THRESHOLD-NEXT: [[ADD_13:%.*]] = fadd fast float undef, [[ADD_12]]
+; THRESHOLD-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; THRESHOLD-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; THRESHOLD-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; THRESHOLD-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; THRESHOLD-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; THRESHOLD-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; THRESHOLD-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; THRESHOLD-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; THRESHOLD-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; THRESHOLD-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; THRESHOLD-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; THRESHOLD-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; THRESHOLD-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; THRESHOLD-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; THRESHOLD-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; THRESHOLD-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; THRESHOLD-NEXT: [[TMP6:%.*]] = bitcast float* [[ARRAYIDX_14]] to <16 x float>*
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load <16 x float>, <16 x float>* [[TMP6]], align 4
+; THRESHOLD-NEXT: [[ADD_14:%.*]] = fadd fast float undef, [[ADD_13]]
+; THRESHOLD-NEXT: [[ADD_15:%.*]] = fadd fast float undef, [[ADD_14]]
+; THRESHOLD-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; THRESHOLD-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; THRESHOLD-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; THRESHOLD-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; THRESHOLD-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; THRESHOLD-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; THRESHOLD-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; THRESHOLD-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; THRESHOLD-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; THRESHOLD-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; THRESHOLD-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; THRESHOLD-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; THRESHOLD-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <16 x float> [[TMP7]], <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x float> [[TMP7]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <16 x float> [[BIN_RDX]], <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <16 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <16 x float> [[BIN_RDX2]], <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <16 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <16 x float> [[BIN_RDX4]], <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX6:%.*]] = fadd fast <16 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <16 x float> [[BIN_RDX6]], i32 0
+; THRESHOLD-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <8 x float> [[TMP5]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX8:%.*]] = fadd fast <8 x float> [[TMP5]], [[RDX_SHUF7]]
+; THRESHOLD-NEXT: [[RDX_SHUF9:%.*]] = shufflevector <8 x float> [[BIN_RDX8]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX10:%.*]] = fadd fast <8 x float> [[BIN_RDX8]], [[RDX_SHUF9]]
+; THRESHOLD-NEXT: [[RDX_SHUF11:%.*]] = shufflevector <8 x float> [[BIN_RDX10]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX12:%.*]] = fadd fast <8 x float> [[BIN_RDX10]], [[RDX_SHUF11]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <8 x float> [[BIN_RDX12]], i32 0
+; THRESHOLD-NEXT: [[BIN_RDX13:%.*]] = fadd fast float [[TMP8]], [[TMP9]]
+; THRESHOLD-NEXT: [[RDX_SHUF14:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX15:%.*]] = fadd fast <4 x float> [[TMP3]], [[RDX_SHUF14]]
+; THRESHOLD-NEXT: [[RDX_SHUF16:%.*]] = shufflevector <4 x float> [[BIN_RDX15]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX17:%.*]] = fadd fast <4 x float> [[BIN_RDX15]], [[RDX_SHUF16]]
+; THRESHOLD-NEXT: [[TMP10:%.*]] = extractelement <4 x float> [[BIN_RDX17]], i32 0
+; THRESHOLD-NEXT: [[BIN_RDX18:%.*]] = fadd fast float [[BIN_RDX13]], [[TMP10]]
+; THRESHOLD-NEXT: [[TMP11:%.*]] = fadd fast float [[BIN_RDX18]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP12:%.*]] = fadd fast float [[TMP11]], [[TMP0]]
+; THRESHOLD-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; THRESHOLD-NEXT: ret float [[TMP12]]
+;
+ entry:
+ %arrayidx = getelementptr inbounds float, float* %x, i64 1
+ %0 = load float, float* %arrayidx, align 4
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 2
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %0
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 3
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 4
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 5
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 6
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 7
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 8
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 9
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 10
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 11
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 12
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 13
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 14
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 15
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 16
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 17
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 18
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 19
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 20
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 21
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 22
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 23
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 24
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 25
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 26
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 27
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 28
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 29
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 30
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ ret float %add.29
+}
+
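+; @extra_args mixes the x[0..7] load chain with two extra scalar operands:
+; %add (which is %conv + 3.0) at the head of the chain and a second use of
+; %conv at %add5. The expected output reduces the <8 x float> load and then
+; re-applies both extras to the extracted scalar ([[BIN_EXTRA]] and
+; [[BIN_EXTRA5]]). A minimal sketch of the 8-wide ladder the CHECK lines
+; describe (lane indices abbreviated, u = undef):
+;   %r1 = fadd fast <8 x float> %v,  shufflevector(%v,  undef, <4,5,6,7,u,u,u,u>)
+;   %r2 = fadd fast <8 x float> %r1, shufflevector(%r1, undef, <2,3,u,...>)
+;   %r3 = fadd fast <8 x float> %r2, shufflevector(%r2, undef, <1,u,...>)
+;   %s  = extractelement <8 x float> %r3, i32 0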
+define float @extra_args(float* nocapture readonly %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @extra_args(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4]], [[CONV]]
+; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; CHECK-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; CHECK-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; CHECK-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; CHECK-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; CHECK-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; CHECK-NEXT: ret float [[BIN_EXTRA5]]
+;
+; THRESHOLD-LABEL: @extra_args(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; THRESHOLD-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; THRESHOLD-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; THRESHOLD-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; THRESHOLD-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; THRESHOLD-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA5]]
+;
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %conv = sitofp i32 %mul to float
+ %0 = load float, float* %x, align 4
+ %add = fadd fast float %conv, 3.000000e+00
+ %add1 = fadd fast float %0, %add
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %1, %add1
+ %add5 = fadd fast float %add4, %conv
+ %arrayidx3.1 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx3.1, align 4
+ %add4.1 = fadd fast float %2, %add5
+ %arrayidx3.2 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx3.2, align 4
+ %add4.2 = fadd fast float %3, %add4.1
+ %arrayidx3.3 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx3.3, align 4
+ %add4.3 = fadd fast float %4, %add4.2
+ %arrayidx3.4 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx3.4, align 4
+ %add4.4 = fadd fast float %5, %add4.3
+ %arrayidx3.5 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx3.5, align 4
+ %add4.5 = fadd fast float %6, %add4.4
+ %arrayidx3.6 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx3.6, align 4
+ %add4.6 = fadd fast float %7, %add4.5
+ ret float %add4.6
+}
+
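+; Same pattern as @extra_args, except the constant extra argument 5.0 is
+; added at two different points of the scalar chain. The CHECK lines expect
+; each occurrence to survive as its own scalar fadd after the vector
+; reduction ([[BIN_EXTRA5]] and [[BIN_EXTRA6]]), followed by [[CONV]] as
+; [[BIN_EXTRA7]]; repeated extra args are kept as separate adds rather than
+; being merged.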
+define float @extra_args_same_several_times(float* nocapture readonly %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @extra_args_same_several_times(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; CHECK-NEXT: [[ADD41:%.*]] = fadd fast float [[ADD4]], 5.000000e+00
+; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD41]], [[CONV]]
+; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; CHECK-NEXT: [[ADD4_11:%.*]] = fadd fast float [[ADD4_1]], 5.000000e+00
+; CHECK-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_11]]
+; CHECK-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; CHECK-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; CHECK-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], 5.000000e+00
+; CHECK-NEXT: [[BIN_EXTRA6:%.*]] = fadd fast float [[BIN_EXTRA5]], 5.000000e+00
+; CHECK-NEXT: [[BIN_EXTRA7:%.*]] = fadd fast float [[BIN_EXTRA6]], [[CONV]]
+; CHECK-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; CHECK-NEXT: ret float [[BIN_EXTRA7]]
+;
+; THRESHOLD-LABEL: @extra_args_same_several_times(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], 3.000000e+00
+; THRESHOLD-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; THRESHOLD-NEXT: [[ADD41:%.*]] = fadd fast float [[ADD4]], 5.000000e+00
+; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD41]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD5]]
+; THRESHOLD-NEXT: [[ADD4_11:%.*]] = fadd fast float [[ADD4_1]], 5.000000e+00
+; THRESHOLD-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_11]]
+; THRESHOLD-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; THRESHOLD-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD4_3]]
+; THRESHOLD-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], 5.000000e+00
+; THRESHOLD-NEXT: [[BIN_EXTRA6:%.*]] = fadd fast float [[BIN_EXTRA5]], 5.000000e+00
+; THRESHOLD-NEXT: [[BIN_EXTRA7:%.*]] = fadd fast float [[BIN_EXTRA6]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA7]]
+;
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %conv = sitofp i32 %mul to float
+ %0 = load float, float* %x, align 4
+ %add = fadd fast float %conv, 3.000000e+00
+ %add1 = fadd fast float %0, %add
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %1, %add1
+ %add41 = fadd fast float %add4, 5.000000e+00
+ %add5 = fadd fast float %add41, %conv
+ %arrayidx3.1 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx3.1, align 4
+ %add4.1 = fadd fast float %2, %add5
+ %add4.11 = fadd fast float %add4.1, 5.000000e+00
+ %arrayidx3.2 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx3.2, align 4
+ %add4.2 = fadd fast float %3, %add4.11
+ %arrayidx3.3 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx3.3, align 4
+ %add4.3 = fadd fast float %4, %add4.2
+ %arrayidx3.4 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx3.4, align 4
+ %add4.4 = fadd fast float %5, %add4.3
+ %arrayidx3.5 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx3.5, align 4
+ %add4.5 = fadd fast float %6, %add4.4
+ %arrayidx3.6 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx3.6, align 4
+ %add4.6 = fadd fast float %7, %add4.5
+ ret float %add4.6
+}
+
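+; @extra_args_no_replace injects %conv into the chain in the middle of the
+; reduction (%add5, after the fifth load) rather than at the head. The
+; expected output still vectorizes all eight loads and appends %conv once as
+; [[BIN_EXTRA5]]; as the name suggests, the point appears to be that the
+; in-chain use of the extra argument must not be replaced prematurely.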
+define float @extra_args_no_replace(float* nocapture readonly %x, i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @extra_args_no_replace(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[CONVC:%.*]] = sitofp i32 [[C:%.*]] to float
+; CHECK-NEXT: [[ADDC:%.*]] = fadd fast float [[CONVC]], 3.000000e+00
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], [[ADDC]]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD4]]
+; CHECK-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; CHECK-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4_3]], [[CONV]]
+; CHECK-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD5]]
+; CHECK-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; CHECK-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; CHECK-NEXT: ret float [[BIN_EXTRA5]]
+;
+; THRESHOLD-LABEL: @extra_args_no_replace(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[B:%.*]], [[A:%.*]]
+; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[CONVC:%.*]] = sitofp i32 [[C:%.*]] to float
+; THRESHOLD-NEXT: [[ADDC:%.*]] = fadd fast float [[CONVC]], 3.000000e+00
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[CONV]], [[ADDC]]
+; THRESHOLD-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[X:%.*]], i64 1
+; THRESHOLD-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; THRESHOLD-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; THRESHOLD-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; THRESHOLD-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; THRESHOLD-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; THRESHOLD-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X]] to <8 x float>*
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4
+; THRESHOLD-NEXT: [[ADD1:%.*]] = fadd fast float undef, [[ADD]]
+; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
+; THRESHOLD-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD4]]
+; THRESHOLD-NEXT: [[ADD4_2:%.*]] = fadd fast float undef, [[ADD4_1]]
+; THRESHOLD-NEXT: [[ADD4_3:%.*]] = fadd fast float undef, [[ADD4_2]]
+; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4_3]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_4:%.*]] = fadd fast float undef, [[ADD5]]
+; THRESHOLD-NEXT: [[ADD4_5:%.*]] = fadd fast float undef, [[ADD4_4]]
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = fadd fast <8 x float> [[TMP1]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x float> [[BIN_RDX]], <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = fadd fast <8 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x float> [[BIN_RDX2]], <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX4:%.*]] = fadd fast <8 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; THRESHOLD-NEXT: [[TMP2:%.*]] = extractelement <8 x float> [[BIN_RDX4]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = fadd fast float [[TMP2]], [[ADD]]
+; THRESHOLD-NEXT: [[BIN_EXTRA5:%.*]] = fadd fast float [[BIN_EXTRA]], [[CONV]]
+; THRESHOLD-NEXT: [[ADD4_6:%.*]] = fadd fast float undef, [[ADD4_5]]
+; THRESHOLD-NEXT: ret float [[BIN_EXTRA5]]
+;
+ entry:
+ %mul = mul nsw i32 %b, %a
+ %conv = sitofp i32 %mul to float
+ %0 = load float, float* %x, align 4
+ %convc = sitofp i32 %c to float
+ %addc = fadd fast float %convc, 3.000000e+00
+ %add = fadd fast float %conv, %addc
+ %add1 = fadd fast float %0, %add
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx3, align 4
+ %add4 = fadd fast float %1, %add1
+ %arrayidx3.1 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx3.1, align 4
+ %add4.1 = fadd fast float %2, %add4
+ %arrayidx3.2 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx3.2, align 4
+ %add4.2 = fadd fast float %3, %add4.1
+ %arrayidx3.3 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx3.3, align 4
+ %add4.3 = fadd fast float %4, %add4.2
+ %add5 = fadd fast float %add4.3, %conv
+ %arrayidx3.4 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx3.4, align 4
+ %add4.4 = fadd fast float %5, %add5
+ %arrayidx3.5 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx3.5, align 4
+ %add4.5 = fadd fast float %6, %add4.4
+ %arrayidx3.6 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx3.6, align 4
+ %add4.6 = fadd fast float %7, %add4.5
+ ret float %add4.6
+}
+
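+; %x4 is reduced through %s4 and also used directly by %r5, so the checks
+; extract lane 3 of the xor (TMP9) and add it back after the reduction (BIN_EXTRA3).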
+define i32 @wobble(i32 %arg, i32 %bar) {
+; CHECK-LABEL: @wobble(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[ARG:%.*]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[ARG]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[ARG]], i32 2
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[ARG]], i32 3
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[BAR:%.*]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[BAR]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[BAR]], i32 2
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[BAR]], i32 3
+; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i32> [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[TMP8]], i32 3
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq <4 x i32> [[TMP8]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = sext <4 x i1> [[TMP10]] to <4 x i32>
+; CHECK-NEXT: [[R1:%.*]] = add nuw i32 [[ARG]], undef
+; CHECK-NEXT: [[R2:%.*]] = add nsw i32 [[R1]], undef
+; CHECK-NEXT: [[R3:%.*]] = add nsw i32 [[R2]], undef
+; CHECK-NEXT: [[R4:%.*]] = add nsw i32 [[R3]], undef
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
+; CHECK-NEXT: [[BIN_EXTRA:%.*]] = add nuw i32 [[TMP12]], [[ARG]]
+; CHECK-NEXT: [[BIN_EXTRA3:%.*]] = add nsw i32 [[BIN_EXTRA]], [[TMP9]]
+; CHECK-NEXT: [[R5:%.*]] = add nsw i32 [[R4]], undef
+; CHECK-NEXT: ret i32 [[BIN_EXTRA3]]
+;
+; THRESHOLD-LABEL: @wobble(
+; THRESHOLD-NEXT: bb:
+; THRESHOLD-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[ARG:%.*]], i32 0
+; THRESHOLD-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[ARG]], i32 1
+; THRESHOLD-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[ARG]], i32 2
+; THRESHOLD-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[ARG]], i32 3
+; THRESHOLD-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[BAR:%.*]], i32 0
+; THRESHOLD-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[BAR]], i32 1
+; THRESHOLD-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[BAR]], i32 2
+; THRESHOLD-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[BAR]], i32 3
+; THRESHOLD-NEXT: [[TMP8:%.*]] = xor <4 x i32> [[TMP3]], [[TMP7]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[TMP8]], i32 3
+; THRESHOLD-NEXT: [[TMP10:%.*]] = icmp eq <4 x i32> [[TMP8]], zeroinitializer
+; THRESHOLD-NEXT: [[TMP11:%.*]] = sext <4 x i1> [[TMP10]] to <4 x i32>
+; THRESHOLD-NEXT: [[R1:%.*]] = add nuw i32 [[ARG]], undef
+; THRESHOLD-NEXT: [[R2:%.*]] = add nsw i32 [[R1]], undef
+; THRESHOLD-NEXT: [[R3:%.*]] = add nsw i32 [[R2]], undef
+; THRESHOLD-NEXT: [[R4:%.*]] = add nsw i32 [[R3]], undef
+; THRESHOLD-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[RDX_SHUF]]
+; THRESHOLD-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; THRESHOLD-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; THRESHOLD-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
+; THRESHOLD-NEXT: [[BIN_EXTRA:%.*]] = add nuw i32 [[TMP12]], [[ARG]]
+; THRESHOLD-NEXT: [[BIN_EXTRA3:%.*]] = add nsw i32 [[BIN_EXTRA]], [[TMP9]]
+; THRESHOLD-NEXT: [[R5:%.*]] = add nsw i32 [[R4]], undef
+; THRESHOLD-NEXT: ret i32 [[BIN_EXTRA3]]
+;
+ bb:
+ %x1 = xor i32 %arg, %bar
+ %i1 = icmp eq i32 %x1, 0
+ %s1 = sext i1 %i1 to i32
+ %x2 = xor i32 %arg, %bar
+ %i2 = icmp eq i32 %x2, 0
+ %s2 = sext i1 %i2 to i32
+ %x3 = xor i32 %arg, %bar
+ %i3 = icmp eq i32 %x3, 0
+ %s3 = sext i1 %i3 to i32
+ %x4 = xor i32 %arg, %bar
+ %i4 = icmp eq i32 %x4, 0
+ %s4 = sext i1 %i4 to i32
+ %r1 = add nuw i32 %arg, %s1
+ %r2 = add nsw i32 %r1, %s2
+ %r3 = add nsw i32 %r2, %s3
+ %r4 = add nsw i32 %r3, %s4
+ %r5 = add nsw i32 %r4, %x4
+ ret i32 %r5
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/horizontal.ll b/test/Transforms/SLPVectorizer/X86/horizontal.ll
index f6efd26a4c20..080f850f91cf 100644
--- a/test/Transforms/SLPVectorizer/X86/horizontal.ll
+++ b/test/Transforms/SLPVectorizer/X86/horizontal.ll
@@ -624,9 +624,9 @@ define void @i32_red_example4(i32* %res) {
; STORE-LABEL: @i32_red_example4(
; STORE: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([32 x i32]* @arr_i32 to <4 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <4 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -647,11 +647,11 @@ define void @i32_red_example8(i32* %res) {
; STORE-LABEL: @i32_red_example8(
; STORE: [[TMP0:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr_i32 to <8 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP0]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <8 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; STORE-NEXT: [[BIN_RDX4:%.*]] = add nsw <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -680,13 +680,13 @@ define void @i32_red_example16(i32* %res) {
; STORE-LABEL: @i32_red_example16(
; STORE: [[TMP0:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([32 x i32]* @arr_i32 to <16 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <16 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <16 x i32> [[BIN_RDX]], <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <16 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <16 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <16 x i32> [[BIN_RDX2]], <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX4:%.*]] = add <16 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; STORE-NEXT: [[BIN_RDX4:%.*]] = add nsw <16 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; STORE-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <16 x i32> [[BIN_RDX4]], <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX6:%.*]] = add <16 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
+; STORE-NEXT: [[BIN_RDX6:%.*]] = add nsw <16 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <16 x i32> [[BIN_RDX6]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -731,15 +731,15 @@ define void @i32_red_example32(i32* %res) {
; STORE-LABEL: @i32_red_example32(
; STORE: [[TMP0:%.*]] = load <32 x i32>, <32 x i32>* bitcast ([32 x i32]* @arr_i32 to <32 x i32>*), align 16
; STORE: [[RDX_SHUF:%.*]] = shufflevector <32 x i32> [[TMP0]], <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX:%.*]] = add <32 x i32> [[TMP0]], [[RDX_SHUF]]
+; STORE-NEXT: [[BIN_RDX:%.*]] = add nsw <32 x i32> [[TMP0]], [[RDX_SHUF]]
; STORE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x i32> [[BIN_RDX]], <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX2:%.*]] = add <32 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; STORE-NEXT: [[BIN_RDX2:%.*]] = add nsw <32 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; STORE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x i32> [[BIN_RDX2]], <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX4:%.*]] = add <32 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; STORE-NEXT: [[BIN_RDX4:%.*]] = add nsw <32 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; STORE-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x i32> [[BIN_RDX4]], <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX6:%.*]] = add <32 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
+; STORE-NEXT: [[BIN_RDX6:%.*]] = add nsw <32 x i32> [[BIN_RDX4]], [[RDX_SHUF5]]
; STORE-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x i32> [[BIN_RDX6]], <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; STORE-NEXT: [[BIN_RDX8:%.*]] = add <32 x i32> [[BIN_RDX6]], [[RDX_SHUF7]]
+; STORE-NEXT: [[BIN_RDX8:%.*]] = add nsw <32 x i32> [[BIN_RDX6]], [[RDX_SHUF7]]
; STORE-NEXT: [[TMP1:%.*]] = extractelement <32 x i32> [[BIN_RDX8]], i32 0
; STORE: store i32 [[TMP1]], i32* %res, align 16
; STORE-NEXT: ret void
@@ -812,3 +812,98 @@ entry:
ret void
}
+declare i32 @foobar(i32)
+
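+; The add reduction over @arr_i32 feeds a call; the checks show the scalar add chain is kept.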
+define void @i32_red_call(i32 %val) {
+; CHECK-LABEL: @i32_red_call(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP3]], [[ADD_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP4]], [[ADD_2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP5]], [[ADD_3]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP6]], [[ADD_4]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP7]], [[ADD_5]]
+; CHECK-NEXT: [[RES:%.*]] = call i32 @foobar(i32 [[ADD_6]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+ %1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+ %add = add nsw i32 %1, %0
+ %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+ %add.1 = add nsw i32 %2, %add
+ %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+ %add.2 = add nsw i32 %3, %add.1
+ %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+ %add.3 = add nsw i32 %4, %add.2
+ %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+ %add.4 = add nsw i32 %5, %add.3
+ %6 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+ %add.5 = add nsw i32 %6, %add.4
+ %7 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+ %add.6 = add nsw i32 %7, %add.5
+ %res = call i32 @foobar(i32 %add.6)
+ ret void
+}
+
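+; The same reduction feeding an invoke; the checks likewise show the scalar chain is kept.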
+define void @i32_red_invoke(i32 %val) personality i32 (...)* @__gxx_personality_v0 {
+; CHECK-LABEL: @i32_red_invoke(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP2]], [[ADD]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP3]], [[ADD_1]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP4]], [[ADD_2]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP5]], [[ADD_3]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP6]], [[ADD_4]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP7]], [[ADD_5]]
+; CHECK-NEXT: [[RES:%.*]] = invoke i32 @foobar(i32 [[ADD_6]])
+; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
+; CHECK: exception:
+; CHECK-NEXT: [[CLEANUP:%.*]] = landingpad i8
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[NORMAL]]
+; CHECK: normal:
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
+ %1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
+ %add = add nsw i32 %1, %0
+ %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
+ %add.1 = add nsw i32 %2, %add
+ %3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4
+ %add.2 = add nsw i32 %3, %add.1
+ %4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16
+ %add.3 = add nsw i32 %4, %add.2
+ %5 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4
+ %add.4 = add nsw i32 %5, %add.3
+ %6 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8
+ %add.5 = add nsw i32 %6, %add.4
+ %7 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4
+ %add.6 = add nsw i32 %7, %add.5
+ %res = invoke i32 @foobar(i32 %add.6) to label %normal unwind label %exception
+exception:
+ %cleanup = landingpad i8 cleanup
+ br label %normal
+normal:
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/Transforms/SLPVectorizer/X86/reduction_loads.ll b/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
index 76b3b9174a51..47a6a44611d8 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
@@ -5,17 +5,17 @@
define i32 @test(i32* nocapture readonly %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* %p, i64 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* %p, i64 2
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* %p, i64 3
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* %p, i64 4
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* %p, i64 5
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* %p, i64 6
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* %p, i64 7
-; CHECK-NEXT: br label %for.body
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* %p to <8 x i32>*
+; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[BIN_EXTRA:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, [[TMP1]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 undef, [[SUM]]
@@ -32,10 +32,11 @@ define i32 @test(i32* nocapture readonly %p) {
; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
-; CHECK-NEXT: [[ADD_7:%.*]] = add i32 [[TMP3]], [[SUM]]
-; CHECK-NEXT: br i1 true, label %for.end, label %for.body
+; CHECK-NEXT: [[BIN_EXTRA]] = add i32 [[TMP3]], [[SUM]]
+; CHECK-NEXT: [[ADD_7:%.*]] = add i32 undef, [[ADD_6]]
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
-; CHECK-NEXT: ret i32 [[ADD_7]]
+; CHECK-NEXT: ret i32 [[BIN_EXTRA]]
;
entry:
%arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
diff --git a/test/Transforms/SLPVectorizer/X86/scheduling.ll b/test/Transforms/SLPVectorizer/X86/scheduling.ll
index 5377ee82cf97..c4f521c8963e 100644
--- a/test/Transforms/SLPVectorizer/X86/scheduling.ll
+++ b/test/Transforms/SLPVectorizer/X86/scheduling.ll
@@ -8,11 +8,11 @@ define i32 @foo(i32* nocapture readonly %diff) #0 {
; CHECK: [[S1:%.+]] = add nsw <4 x i32>
; CHECK: store <4 x i32> [[S1]],
; CHECK: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[S1]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[S1]], [[RDX_SHUF]]
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add nsw <4 x i32> [[S1]], [[RDX_SHUF]]
; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = add nsw <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
-; CHECK-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP15]],
+; CHECK: [[ADD52:%.*]] = add nsw i32 [[TMP15]],
; CHECK: ret i32 [[ADD52]]
;
entry:
diff --git a/test/Transforms/SLPVectorizer/X86/store-jumbled.ll b/test/Transforms/SLPVectorizer/X86/store-jumbled.ll
new file mode 100644
index 000000000000..1b2c76384e0b
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/store-jumbled.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+
+
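+; The four multiplies are stored to %out in a jumbled order (indices 2, 0, 3, 1),
+; and the checks show the whole load/mul/store sequence is left scalar.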
+define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
+; CHECK-LABEL: @jumbled-load(
+; CHECK-NEXT: [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
+; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
+; CHECK-NEXT: [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
+; CHECK-NEXT: [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
+; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
+; CHECK-NEXT: [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
+; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
+; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
+; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
+; CHECK-NEXT: [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
+; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
+; CHECK-NEXT: [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
+; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
+; CHECK-NEXT: [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i32 [[LOAD_1]], [[LOAD_5]]
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_6]]
+; CHECK-NEXT: [[MUL_3:%.*]] = mul i32 [[LOAD_3]], [[LOAD_7]]
+; CHECK-NEXT: [[MUL_4:%.*]] = mul i32 [[LOAD_4]], [[LOAD_8]]
+; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
+; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
+; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
+; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
+; CHECK-NEXT: store i32 [[MUL_1]], i32* [[GEP_9]], align 4
+; CHECK-NEXT: store i32 [[MUL_2]], i32* [[GEP_7]], align 4
+; CHECK-NEXT: store i32 [[MUL_3]], i32* [[GEP_10]], align 4
+; CHECK-NEXT: store i32 [[MUL_4]], i32* [[GEP_8]], align 4
+; CHECK-NEXT: ret i32 undef
+;
+ %in.addr = getelementptr inbounds i32, i32* %in, i64 0
+ %load.1 = load i32, i32* %in.addr, align 4
+ %gep.1 = getelementptr inbounds i32, i32* %in.addr, i64 1
+ %load.2 = load i32, i32* %gep.1, align 4
+ %gep.2 = getelementptr inbounds i32, i32* %in.addr, i64 2
+ %load.3 = load i32, i32* %gep.2, align 4
+ %gep.3 = getelementptr inbounds i32, i32* %in.addr, i64 3
+ %load.4 = load i32, i32* %gep.3, align 4
+ %inn.addr = getelementptr inbounds i32, i32* %inn, i64 0
+ %load.5 = load i32, i32* %inn.addr, align 4
+ %gep.4 = getelementptr inbounds i32, i32* %inn.addr, i64 1
+ %load.6 = load i32, i32* %gep.4, align 4
+ %gep.5 = getelementptr inbounds i32, i32* %inn.addr, i64 2
+ %load.7 = load i32, i32* %gep.5, align 4
+ %gep.6 = getelementptr inbounds i32, i32* %inn.addr, i64 3
+ %load.8 = load i32, i32* %gep.6, align 4
+ %mul.1 = mul i32 %load.1, %load.5
+ %mul.2 = mul i32 %load.2, %load.6
+ %mul.3 = mul i32 %load.3, %load.7
+ %mul.4 = mul i32 %load.4, %load.8
+ %gep.7 = getelementptr inbounds i32, i32* %out, i64 0
+ %gep.8 = getelementptr inbounds i32, i32* %out, i64 1
+ %gep.9 = getelementptr inbounds i32, i32* %out, i64 2
+ %gep.10 = getelementptr inbounds i32, i32* %out, i64 3
+ store i32 %mul.1, i32* %gep.9, align 4
+ store i32 %mul.2, i32* %gep.7, align 4
+ store i32 %mul.3, i32* %gep.10, align 4
+ store i32 %mul.4, i32* %gep.8, align 4
+
+ ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
new file mode 100644
index 000000000000..2b593b78652f
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
@@ -0,0 +1,984 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck %s
+
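+; Every lane adds a constant (1, 1, 2, 3), so the checks show a single <4 x i32> add nsw.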
+define void @add0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @add0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> <i32 1, i32 1, i32 2, i32 3>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %add = add nsw i32 %0, 1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %add, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %add3 = add nsw i32 %1, 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %add3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %add6 = add nsw i32 %2, 2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %add6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %add9 = add nsw i32 %3, 3
+ store i32 %add9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
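+; Lane 0 is a plain copy rather than an add, and the checks show the chain stays scalar.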
+define void @add1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @add1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[TMP0]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP1]], 1
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[ADD3]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP2]], 2
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[ADD6]], i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP3]], 3
+; CHECK-NEXT: store i32 [[ADD9]], i32* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %0, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %add3 = add nsw i32 %1, 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %add3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %add6 = add nsw i32 %2, 2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %add6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %add9 = add nsw i32 %3, 3
+ store i32 %add9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
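+; Lane 1 is a plain copy, and the checks show the chain stays scalar.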
+define void @sub0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @sub0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[SUB]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[TMP1]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SUB5:%.*]] = add nsw i32 [[TMP2]], -2
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[SUB5]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = add nsw i32 [[TMP3]], -3
+; CHECK-NEXT: store i32 [[SUB8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %1, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub5 = add nsw i32 %2, -2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = add nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
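+; All lanes are adds (4, -1, -2, -3; the subtractions are written as adds of
+; negative constants), so the checks show a single <4 x i32> add nsw.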
+define void @sub1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @sub1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> <i32 4, i32 -1, i32 -2, i32 -3>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %add = add nsw i32 %0, 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %add, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %sub = add nsw i32 %1, -1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %sub, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub5 = add nsw i32 %2, -2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = add nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
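+; Every lane adds a negative constant, so the checks show a single <4 x i32> add nsw.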
+define void @sub2(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @sub2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> <i32 -1, i32 -1, i32 -2, i32 -3>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %sub3 = add nsw i32 %1, -1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %sub3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub6 = add nsw i32 %2, -2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %sub9 = add nsw i32 %3, -3
+ store i32 %sub9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
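+; The lanes mix add, a plain copy, and sub nsw, and the checks show the chain stays scalar.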
+define void @addsub0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @addsub0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[SUB]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[TMP1]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SUB5:%.*]] = add nsw i32 [[TMP2]], -2
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[SUB5]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP3]], -3
+; CHECK-NEXT: store i32 [[SUB8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %1, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %sub5 = add nsw i32 %2, -2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %sub5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = sub nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
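+; The lanes mix add, sub nsw, and a plain copy, and the checks show the chain stays scalar.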
+define void @addsub1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @addsub1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP0]], -1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[SUB]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB1:%.*]] = sub nsw i32 [[TMP1]], -1
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[SUB1]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[TMP2]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP3]], -3
+; CHECK-NEXT: store i32 [[SUB8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %sub = add nsw i32 %0, -1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %sub, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %sub1 = sub nsw i32 %1, -1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %sub1, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %2, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %sub8 = sub nsw i32 %3, -3
+ store i32 %sub8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
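+; Lane 2 is a plain copy, and the checks show the multiplies stay scalar.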
+define void @mul(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @mul(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 257
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[MUL]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP1]], -3
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[MUL3]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[TMP2]], i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[MUL9:%.*]] = mul nsw i32 [[TMP3]], -9
+; CHECK-NEXT: store i32 [[MUL9]], i32* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %mul = mul nsw i32 %0, 257
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %mul, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %mul3 = mul nsw i32 %1, -3
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %mul3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %2, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %mul9 = mul nsw i32 %3, -9
+ store i32 %mul9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
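+; Lane 0 is a plain copy, and the checks show the shifts stay scalar.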
+define void @shl0(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @shl0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: store i32 [[TMP0]], i32* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[TMP1]], 1
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[SHL]], i32* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SHL5:%.*]] = shl i32 [[TMP2]], 2
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[SHL5]], i32* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SHL8:%.*]] = shl i32 [[TMP3]], 3
+; CHECK-NEXT: store i32 [[SHL8]], i32* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %0, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %shl = shl i32 %1, 1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %shl, i32* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %shl5 = shl i32 %2, 2
+ %incdec.ptr6 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %shl5, i32* %incdec.ptr3, align 4
+ %3 = load i32, i32* %incdec.ptr4, align 4
+ %shl8 = shl i32 %3, 3
+ store i32 %shl8, i32* %incdec.ptr6, align 4
+ ret void
+}
+
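+; Every lane is a shift, so the checks show a single <4 x i32> shl.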
+define void @shl1(i32* noalias %dst, i32* noalias %src) {
+; CHECK-LABEL: @shl1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <4 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = shl <4 x i32> [[TMP1]], <i32 7, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds i32, i32* %src, i64 1
+ %0 = load i32, i32* %src, align 4
+ %shl = shl i32 %0, 7
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %shl, i32* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %src, i64 2
+ %1 = load i32, i32* %incdec.ptr, align 4
+ %shl3 = shl i32 %1, 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %shl3, i32* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %src, i64 3
+ %2 = load i32, i32* %incdec.ptr2, align 4
+ %shl6 = shl i32 %2, 2
+ %incdec.ptr7 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %shl6, i32* %incdec.ptr4, align 4
+ %3 = load i32, i32* %incdec.ptr5, align 4
+ %shl9 = shl i32 %3, 3
+ store i32 %shl9, i32* %incdec.ptr7, align 4
+ ret void
+}
+
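+; Floating-point analogue of @add0: every lane is an fadd, so the checks show a single <4 x float> fadd.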
+define void @add0f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add0f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, 1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd fast float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd fast float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd fast float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
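+; Floating-point analogue of @add1: lane 0 is a plain copy, and the checks show the chain stays scalar.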
+define void @add1f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add1f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[TMP0]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[TMP1]], 1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[ADD3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd fast float [[TMP2]], 2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd fast float [[TMP3]], 3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %0, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd fast float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd fast float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd fast float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
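+; Floating-point analogue of @sub0: lane 1 is a plain copy, and the checks show the chain stays scalar.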
+define void @sub0f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub0f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[ADD]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[TMP1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd fast float [[TMP2]], -2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd fast float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %1, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd fast float %3, -3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
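+; Every lane has a fast fadd (+4.0 in lane 0, negative constants elsewhere), so the chain folds into a single <4 x float> fadd.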
+define void @sub1f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub1f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, 4.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub = fadd fast float %1, -1.000000e+00
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub5 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub5, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fadd fast float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
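+; A fast fadd with a negative constant in every lane and no copies; vectorized into one <4 x float> fadd.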
+define void @sub2f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub2f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fadd fast float %1, -1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub6 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fadd fast float %3, -3.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
+
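+; Mixed fadd/fsub with the copy in lane 1; the checks confirm the chain stays scalar.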
+define void @addsub0f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @addsub0f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[TMP1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[SUB5:%.*]] = fadd fast float [[TMP2]], -2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[SUB5]], float* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = fsub fast float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[SUB8]], float* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %1, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub5 = fadd fast float %2, -2.000000e+00
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub5, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fsub fast float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
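+; fadd and fsub alternate and the copy moves to lane 2; likewise left scalar.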
+define void @addsub1f(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @addsub1f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB1:%.*]] = fsub fast float [[TMP1]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[SUB1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[TMP2]], float* [[INCDEC_PTR3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[SUB8:%.*]] = fsub fast float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[SUB8]], float* [[INCDEC_PTR6]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub1 = fsub fast float %1, -1.000000e+00
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub1, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %2, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fsub fast float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
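+; fmul variant with the copy in lane 2; no vector fmul is formed.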
+define void @mulf(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @mulf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fmul fast float [[TMP0]], 2.570000e+02
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB3:%.*]] = fmul fast float [[TMP1]], -3.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[SUB3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[TMP2]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[SUB9:%.*]] = fmul fast float [[TMP3]], -9.000000e+00
+; CHECK-NEXT: store float [[SUB9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fmul fast float %0, 2.570000e+02
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fmul fast float %1, -3.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %2, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fmul fast float %3, -9.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
+
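+; Variant of the above without fast-math flags; with a plain fadd in every lane the chain still becomes a single <4 x float> fadd.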
+define void @add0fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add0fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd float %0, 1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
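+; Without fast-math flags and with the copy back in lane 0, vectorization is again blocked.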
+define void @add1fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @add1fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[TMP0]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD3:%.*]] = fadd float [[TMP1]], 1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[ADD3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd float [[TMP2]], 2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd float [[TMP3]], 3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %0, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %add3 = fadd float %1, 1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %add3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd float %2, 2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd float %3, 3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
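+; Copy in lane 1 and inconsistent flags (only the first fadd is fast); the checks show scalar code.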
+define void @sub0fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub0fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP0]], -1.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[ADD]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[TMP1]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[ADD6:%.*]] = fadd float [[TMP2]], -2.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[ADD6]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[ADD9:%.*]] = fadd float [[TMP3]], -3.000000e+00
+; CHECK-NEXT: store float [[ADD9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd fast float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %1, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %add6 = fadd float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %add6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %add9 = fadd float %3, -3.000000e+00
+ store float %add9, float* %incdec.ptr7, align 4
+ ret void
+}
+
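+; Four plain fadds with no copy lane; folded into a single non-fast <4 x float> fadd.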
+define void @sub1fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub1fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %add = fadd float %0, 4.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %add, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub = fadd float %1, -1.000000e+00
+ %incdec.ptr3 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub, float* %incdec.ptr1, align 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub5 = fadd float %2, -2.000000e+00
+ %incdec.ptr6 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub5, float* %incdec.ptr3, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %sub8 = fadd float %3, -3.000000e+00
+ store float %sub8, float* %incdec.ptr6, align 4
+ ret void
+}
+
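+; Non-fast counterpart of sub2f above; vectorizes the same way.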
+define void @sub2fn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @sub2fn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[SRC]] to <4 x float>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[DST]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP2]], <4 x float>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fadd float %0, -1.000000e+00
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fadd float %1, -1.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %sub6 = fadd float %2, -2.000000e+00
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %sub6, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fadd float %3, -3.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
+
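+; fmul with the copy in lane 2 and mixed flags (only the last multiply is fast); remains scalar.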
+define void @mulfn(float* noalias %dst, float* noalias %src) {
+; CHECK-LABEL: @mulfn(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fmul float [[TMP0]], 2.570000e+02
+; CHECK-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 1
+; CHECK-NEXT: store float [[SUB]], float* [[DST]], align 4
+; CHECK-NEXT: [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[SUB3:%.*]] = fmul float [[TMP1]], -3.000000e+00
+; CHECK-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
+; CHECK-NEXT: store float [[SUB3]], float* [[INCDEC_PTR1]], align 4
+; CHECK-NEXT: [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[INCDEC_PTR2]], align 4
+; CHECK-NEXT: [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
+; CHECK-NEXT: store float [[TMP2]], float* [[INCDEC_PTR4]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[INCDEC_PTR5]], align 4
+; CHECK-NEXT: [[SUB9:%.*]] = fmul fast float [[TMP3]], -9.000000e+00
+; CHECK-NEXT: store float [[SUB9]], float* [[INCDEC_PTR7]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %incdec.ptr = getelementptr inbounds float, float* %src, i64 1
+ %0 = load float, float* %src, align 4
+ %sub = fmul float %0, 2.570000e+02
+ %incdec.ptr1 = getelementptr inbounds float, float* %dst, i64 1
+ store float %sub, float* %dst, align 4
+ %incdec.ptr2 = getelementptr inbounds float, float* %src, i64 2
+ %1 = load float, float* %incdec.ptr, align 4
+ %sub3 = fmul float %1, -3.000000e+00
+ %incdec.ptr4 = getelementptr inbounds float, float* %dst, i64 2
+ store float %sub3, float* %incdec.ptr1, align 4
+ %incdec.ptr5 = getelementptr inbounds float, float* %src, i64 3
+ %2 = load float, float* %incdec.ptr2, align 4
+ %incdec.ptr7 = getelementptr inbounds float, float* %dst, i64 3
+ store float %2, float* %incdec.ptr4, align 4
+ %3 = load float, float* %incdec.ptr5, align 4
+ %sub9 = fmul fast float %3, -9.000000e+00
+ store float %sub9, float* %incdec.ptr7, align 4
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/vector.ll b/test/Transforms/SLPVectorizer/X86/vector.ll
index 02a18979c659..e1f3fa50ccdb 100644
--- a/test/Transforms/SLPVectorizer/X86/vector.ll
+++ b/test/Transforms/SLPVectorizer/X86/vector.ll
@@ -1,14 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
; Make sure that we are not crashing or changing the code.
-;CHECK: test
-;CHECK: icmp
-;CHECK: ret
define void @test(<4 x i32> %in, <4 x i32> %in2) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: [[K:%.*]] = icmp eq <4 x i32> [[IN:%.*]], [[IN2:%.*]]
+; CHECK-NEXT: ret void
+;
%k = icmp eq <4 x i32> %in, %in2
ret void
}
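+; Despite the name, this test operates on <2 x i32>. The extracts and compares span two basic blocks, so there is nothing for the SLP vectorizer to combine; the checks verify the IR is left unchanged.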
+define i1 @cmpv2f32(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @cmpv2f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X0:%.*]] = extractelement <2 x i32> [[X:%.*]], i32 0
+; CHECK-NEXT: [[Y0:%.*]] = extractelement <2 x i32> [[Y:%.*]], i32 0
+; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i32 [[X0]], [[Y0]]
+; CHECK-NEXT: br i1 [[CMP0]], label [[IF:%.*]], label [[ENDIF:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[X1:%.*]] = extractelement <2 x i32> [[X]], i32 1
+; CHECK-NEXT: [[Y1:%.*]] = extractelement <2 x i32> [[Y]], i32 1
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[X1]], [[Y1]]
+; CHECK-NEXT: br label [[ENDIF]]
+; CHECK: endif:
+; CHECK-NEXT: [[AND_OF_CMPS:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[CMP1]], [[IF]] ]
+; CHECK-NEXT: ret i1 [[AND_OF_CMPS]]
+;
+ entry:
+ %x0 = extractelement <2 x i32> %x, i32 0
+ %y0 = extractelement <2 x i32> %y, i32 0
+ %cmp0 = icmp eq i32 %x0, %y0
+ br i1 %cmp0, label %if, label %endif
+
+ if:
+ %x1 = extractelement <2 x i32> %x, i32 1
+ %y1 = extractelement <2 x i32> %y, i32 1
+ %cmp1 = icmp eq i32 %x1, %y1
+ br label %endif
+
+ endif:
+ %and_of_cmps = phi i1 [ false, %entry ], [ %cmp1, %if ]
+ ret i1 %and_of_cmps
+}
+