Diffstat (limited to 'test/CodeGen/X86/insertelement-zero.ll')
 test/CodeGen/X86/insertelement-zero.ll | 264
 1 file changed, 169 insertions(+), 95 deletions(-)
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index 4e582de22a1f..65c1c0957adf 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -10,37 +10,72 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
-; SSE-LABEL: insert_v2f64_z1:
-; SSE: # BB#0:
-; SSE-NEXT: xorpd %xmm1, %xmm1
-; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v2f64_z1:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v2f64_z1:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorpd %xmm1, %xmm1
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v2f64_z1:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorpd %xmm1, %xmm1
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v2f64_z1:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorpd %xmm1, %xmm1
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v2f64_z1:
; AVX: # BB#0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: retq
%1 = insertelement <2 x double> %a, double 0.0, i32 0
ret <2 x double> %1
}
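The change is legal because inserting a constant zero into a lane is the same as selecting that lane from an all-zero vector, i.e. the insertelement is really a two-source shuffle. A minimal IR sketch of the equivalence (the shufflevector form is illustrative, not part of the test):

  ; lane 0 comes from the zero vector (operand 2, indices 2-3), lane 1 from %a
  %z = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>

That is exactly the pattern SSE4.1's blendpd and AVX's vblendpd encode in an immediate lane mask; pre-SSE4.1 subtargets keep the movsd lowering.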
define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
-; SSE-LABEL: insert_v4f64_0zz3:
-; SSE: # BB#0:
-; SSE-NEXT: xorpd %xmm2, %xmm2
-; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v4f64_0zz3:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorpd %xmm2, %xmm2
+; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v4f64_0zz3:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorpd %xmm2, %xmm2
+; SSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v4f64_0zz3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorpd %xmm2, %xmm2
+; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v4f64_0zz3:
+; SSE41: # BB#0:
+; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE41-NEXT: xorpd %xmm2, %xmm2
+; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v4f64_0zz3:
; AVX: # BB#0:
-; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm0[0],xmm1[0]
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = xmm1[0],xmm2[1]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; AVX-NEXT: retq
%1 = insertelement <4 x double> %a, double 0.0, i32 1
%2 = insertelement <4 x double> %1, double 0.0, i32 2
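For 256-bit vectors the payoff is larger: both zero insertions (lanes 1 and 2) now fold into a single ymm-wide vblendpd, replacing the old unpck/blend/extract/insert chain. Combined into one illustrative shuffle:

  %z = shufflevector <4 x double> %a, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 3>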
@@ -68,15 +103,21 @@ define <2 x i64> @insert_v2i64_z1(<2 x i64> %a) {
;
; SSE41-LABEL: insert_v2i64_z1:
; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrq $0, %rax, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX-LABEL: insert_v2i64_z1:
-; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrq $0, %rax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: insert_v2i64_z1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: insert_v2i64_z1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT: retq
%1 = insertelement <2 x i64> %a, i64 0, i32 0
ret <2 x i64> %1
}
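The integer variants make the same switch for an extra reason: the old lowering zeroed a GPR (xorl %eax, %eax) and moved it into the vector with pinsrq, paying a GPR-to-XMM transfer. Creating the zero directly in the vector domain with pxor avoids that. The blend granularity differs by subtarget: pblendw's immediate selects 16-bit words (words 0-3 cover the low i64), while AVX2's vpblendd selects 32-bit dwords and, on recent Intel cores, typically runs on more execution ports than vpblendw (a scheduling assumption, not something the test checks).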
@@ -102,24 +143,20 @@ define <4 x i64> @insert_v4i64_01z3(<4 x i64> %a) {
;
; SSE41-LABEL: insert_v4i64_01z3:
; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrq $0, %rax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v4i64_01z3:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrq $0, %rax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v4i64_01z3:
; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: vpinsrq $0, %rax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
%1 = insertelement <4 x i64> %a, i64 0, i32 2
ret <4 x i64> %1
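AVX1 has no 256-bit integer blend, so the zero is produced and blended in the floating-point domain instead (vxorpd + vblendpd on the ymm register). That can cost a domain-crossing bypass delay on some microarchitectures, but it is still much cheaper than the old extract/pinsrq/insert round trip; AVX2 adds vpblendd and stays in the integer domain.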
@@ -150,13 +187,13 @@ define <4 x float> @insert_v4f32_01z3(<4 x float> %a) {
; SSE41-LABEL: insert_v4f32_01z3:
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v4f32_01z3:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT: retq
%1 = insertelement <4 x float> %a, float 0.0, i32 2
ret <4 x float> %1
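insertps can also zero a lane via its zmask, but once an explicit zero register exists the blend form is preferred: blendps with an immediate mask is more general and, on many cores, has better port coverage than insertps (again a scheduling assumption). In shuffle terms the test reduces to:

  %z = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 6, i32 3>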
@@ -191,16 +228,13 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm2, %xmm2
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8f32_z12345z7:
; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT: retq
%1 = insertelement <8 x float> %a, float 0.0, i32 0
%2 = insertelement <8 x float> %1, float 0.0, i32 6
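Here both zero lanes (0 and 6) land in one ymm vblendps on AVX, where the old code blended lane 0, extracted the high half, ran insertps on it, and re-inserted the result. SSE4.1 still needs two 128-bit blends, but they share a single zeroed register. The combined shuffle, for reference (illustrative):

  %z = shufflevector <8 x float> %a, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 7>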
@@ -234,15 +268,21 @@ define <4 x i32> @insert_v4i32_01z3(<4 x i32> %a) {
;
; SSE41-LABEL: insert_v4i32_01z3:
; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
-; AVX-LABEL: insert_v4i32_01z3:
-; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: insert_v4i32_01z3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: insert_v4i32_01z3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX2-NEXT: retq
%1 = insertelement <4 x i32> %a, i32 0, i32 2
ret <4 x i32> %1
}
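vpblendd exists for exactly this dword-granularity case: each immediate bit selects one 32-bit element. AVX1 lacks it, so the same mask is expressed through vpblendw with each dword mapped to a pair of word-select bits (words 4-5 for element 2 here).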
@@ -280,29 +320,21 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
;
; SSE41-LABEL: insert_v8i32_z12345z7:
; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrd $0, %eax, %xmm0
-; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v8i32_z12345z7:
; AVX1: # BB#0:
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrd $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v8i32_z12345z7:
; AVX2: # BB#0:
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX2-NEXT: retq
%1 = insertelement <8 x i32> %a, i32 0, i32 0
%2 = insertelement <8 x i32> %1, i32 0, i32 6
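Two details in this hunk: AVX2 previously materialized the zero with vmovd from a zeroed GPR, which the new code replaces with a plain vpxor; and unlike vpblendw, whose immediate is replicated across the two 128-bit lanes, vpblendd gives each of the 8 dwords its own mask bit, so a pattern that differs between the halves (zeros at elements 0 and 6) still fits in one instruction.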
@@ -310,18 +342,37 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
}
define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
-; SSE-LABEL: insert_v8i16_z12345z7:
-; SSE: # BB#0:
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: pinsrw $0, %eax, %xmm0
-; SSE-NEXT: pinsrw $6, %eax, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v8i16_z12345z7:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm0
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v8i16_z12345z7:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorl %eax, %eax
+; SSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v8i16_z12345z7:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorl %eax, %eax
+; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v8i16_z12345z7:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
+; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8i16_z12345z7:
; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> %a, i16 0, i32 0
%2 = insertelement <8 x i16> %1, i16 0, i32 6
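pblendw is an SSE4.1 instruction, so the SSE2/SSE3/SSSE3 runs necessarily keep the old xorl/pinsrw sequence; only SSE4.1 and AVX pick up the single-blend form, folding both zero words (0 and 6) into one immediate.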
@@ -329,35 +380,58 @@ define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
}
define <16 x i16> @insert_v16i16_z12345z789ABZDEz(<16 x i16> %a) {
-; SSE-LABEL: insert_v16i16_z12345z789ABZDEz:
-; SSE: # BB#0:
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: pinsrw $0, %eax, %xmm0
-; SSE-NEXT: pinsrw $6, %eax, %xmm0
-; SSE-NEXT: pinsrw $7, %eax, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm0
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: pinsrw $7, %eax, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorl %eax, %eax
+; SSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSE3-NEXT: pinsrw $7, %eax, %xmm1
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorl %eax, %eax
+; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT: pinsrw $7, %eax, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v16i16_z12345z789ABZDEz:
; AVX1: # BB#0:
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v16i16_z12345z789ABZDEz:
; AVX2: # BB#0:
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%1 = insertelement <16 x i16> %a, i16 0, i32 0
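The v16i16 case shows the limit of word-granularity blends: vpblendw's immediate applies to each 128-bit lane separately, so a 256-bit pattern whose halves differ cannot be a single vpblendw. Both AVX paths therefore blend the low half with xmm-level vpblendw, merge with a lane-wide blend, and extract/re-insert the high half for the last word, but they still avoid the GPR zero and pinsrw chain of the old code.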