Diffstat (limited to 'test/CodeGen')
94 files changed, 6441 insertions, 939 deletions
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll index 02848021dbc09..ac3d4b17f739f 100644 --- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -1541,3 +1541,12 @@ define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2) %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> ret <16 x i8> %res } + +; CHECK-LABEL: test_constant_vector +; CHECK: [[UNDEF:%[0-9]+]](s16) = IMPLICIT_DEF +; CHECK: [[F:%[0-9]+]](s16) = G_FCONSTANT half 0xH3C00 +; CHECK: [[M:%[0-9]+]](<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16) +; CHECK: %d0 = COPY [[M]](<4 x s16>) +define <4 x half> @test_constant_vector() { + ret <4 x half> <half undef, half undef, half undef, half 0xH3C00> +} diff --git a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll index 5a76661180f22..e01bd2a9f7c85 100644 --- a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll +++ b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll @@ -12,33 +12,33 @@ entry: store i32 %in, i32* %in.addr, align 4 call void @llvm.dbg.declare(metadata i32* %in.addr, metadata !11, metadata !12), !dbg !13 call void @llvm.dbg.declare(metadata i32 %in, metadata !11, metadata !12), !dbg !13 - ret void, !dbg !14 + ret void, !dbg !13 } ; CHECK-LABEL: name: debug_declare_vla -; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !11, !12, debug-location !13 -define void @debug_declare_vla(i32 %in) #0 !dbg !7 { +; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !15, !12, debug-location !16 +define void @debug_declare_vla(i32 %in) #0 !dbg !14 { entry: %vla.addr = alloca i32, i32 %in - call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !11, metadata !12), !dbg !13 - ret void, !dbg !14 + call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !15, metadata !12), !dbg !16 + ret void, !dbg !16 } ; CHECK-LABEL: name: debug_value ; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0 -define void @debug_value(i32 %in) #0 !dbg !7 { +define void @debug_value(i32 %in) #0 !dbg !17 { %addr = alloca i32 -; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !11, !12, debug-location !13 - call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !11, metadata !12), !dbg !13 +; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !18, !12, debug-location !19 + call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !18, metadata !12), !dbg !19 store i32 %in, i32* %addr -; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !11, !15, debug-location !13 - call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !11, metadata !15), !dbg !13 -; CHECK: DBG_VALUE 123, 0, !11, !12, debug-location !13 - call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !11, metadata !12), !dbg !13 -; CHECK: DBG_VALUE float 1.000000e+00, 0, !11, !12, debug-location !13 - call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !11, metadata !12), !dbg !13 -; CHECK: DBG_VALUE _, 0, !11, !12, debug-location !13 - call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !11, metadata !12), !dbg !13 +; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !18, !20, debug-location !19 + call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !18, metadata !20), !dbg !19 +; CHECK: DBG_VALUE 123, 0, !18, 
!12, debug-location !19 + call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !18, metadata !12), !dbg !19 +; CHECK: DBG_VALUE float 1.000000e+00, 0, !18, !12, debug-location !19 + call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !18, metadata !12), !dbg !19 +; CHECK: DBG_VALUE _, 0, !18, !12, debug-location !19 + call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !18, metadata !12), !dbg !19 ret void } @@ -64,5 +64,10 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) !11 = !DILocalVariable(name: "in", arg: 1, scope: !7, file: !1, line: 1, type: !10) !12 = !DIExpression() !13 = !DILocation(line: 1, column: 14, scope: !7) -!14 = !DILocation(line: 2, column: 1, scope: !7) -!15 = !DIExpression(DW_OP_deref) +!14 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!15 = !DILocalVariable(name: "in", arg: 1, scope: !14, file: !1, line: 1, type: !10) +!16 = !DILocation(line: 1, column: 14, scope: !14) +!17 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!18 = !DILocalVariable(name: "in", arg: 1, scope: !17, file: !1, line: 1, type: !10) +!19 = !DILocation(line: 1, column: 14, scope: !17) +!20 = !DIExpression(DW_OP_deref) diff --git a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir index 2f36ec8d2aaa9..790cd6517dd3a 100644 --- a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir +++ b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir @@ -9,8 +9,8 @@ ret void } - define void @test_dbg_value_dead(i32 %a) !dbg !5 { - call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !9), !dbg !10 + define void @test_dbg_value_dead(i32 %a) !dbg !11 { + call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !12, metadata !9), !dbg !13 ret void } @@ -30,6 +30,9 @@ !8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) !9 = !DIExpression() !10 = !DILocation(line: 1, column: 1, scope: !5) + !11 = distinct !DISubprogram(name: "test_dbg_value", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) + !12 = !DILocalVariable(name: "in", arg: 1, scope: !11, file: !1, line: 1, type: !8) + !13 = !DILocation(line: 1, column: 1, scope: !11) ... 
--- diff --git a/test/CodeGen/AArch64/fadd-combines.ll b/test/CodeGen/AArch64/fadd-combines.ll new file mode 100644 index 0000000000000..c106f293ccffb --- /dev/null +++ b/test/CodeGen/AArch64/fadd-combines.ll @@ -0,0 +1,78 @@ +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -verify-machineinstrs | FileCheck %s + +; CHECK-LABEL: test1: +; CHECK: fadd d1, d1, d1 +; CHECK: fsub d0, d0, d1 +define double @test1(double %a, double %b) local_unnamed_addr #0 { +entry: + %mul = fmul double %b, -2.000000e+00 + %add1 = fadd double %a, %mul + ret double %add1 +} + +; DAGCombine will canonicalize 'a - 2.0*b' to 'a + -2.0*b' +; CHECK-LABEL: test2: +; CHECK: fadd d1, d1, d1 +; CHECK: fsub d0, d0, d1 +define double @test2(double %a, double %b) local_unnamed_addr #0 { +entry: + %mul = fmul double %b, 2.000000e+00 + %add1 = fsub double %a, %mul + ret double %add1 +} + +; CHECK-LABEL: test3: +; CHECK: fmul d0, d0, d1 +; CHECK: fadd d1, d2, d2 +; CHECK: fsub d0, d0, d1 +define double @test3(double %a, double %b, double %c) local_unnamed_addr #0 { +entry: + %mul = fmul double %a, %b + %mul1 = fmul double %c, 2.000000e+00 + %sub = fsub double %mul, %mul1 + ret double %sub +} + +; CHECK-LABEL: test4: +; CHECK: fmul d0, d0, d1 +; CHECK: fadd d1, d2, d2 +; CHECK: fsub d0, d0, d1 +define double @test4(double %a, double %b, double %c) local_unnamed_addr #0 { +entry: + %mul = fmul double %a, %b + %mul1 = fmul double %c, -2.000000e+00 + %add2 = fadd double %mul, %mul1 + ret double %add2 +} + +; CHECK-LABEL: test5: +; CHECK: fadd v1.4s, v1.4s, v1.4s +; CHECK: fsub v0.4s, v0.4s, v1.4s +define <4 x float> @test5(<4 x float> %a, <4 x float> %b) { + %mul = fmul <4 x float> %b, <float -2.0, float -2.0, float -2.0, float -2.0> + %add = fadd <4 x float> %a, %mul + ret <4 x float> %add +} + +; CHECK-LABEL: test6: +; CHECK: fadd v1.4s, v1.4s, v1.4s +; CHECK: fsub v0.4s, v0.4s, v1.4s +define <4 x float> @test6(<4 x float> %a, <4 x float> %b) { + %mul = fmul <4 x float> %b, <float 2.0, float 2.0, float 2.0, float 2.0> + %add = fsub <4 x float> %a, %mul + ret <4 x float> %add +} + +; Don't fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B)) if the fmul has +; multiple uses. 
+; CHECK-LABEL: test7: +; CHECK: fmul +define double @test7(double %a, double %b) local_unnamed_addr #0 { +entry: + %mul = fmul double %b, -2.000000e+00 + %add1 = fadd double %a, %mul + call void @use(double %mul) + ret double %add1 +} + +declare void @use(double) diff --git a/test/CodeGen/AArch64/loh.mir b/test/CodeGen/AArch64/loh.mir index 1d08ebdc5790a..6e4bb5cfaee6d 100644 --- a/test/CodeGen/AArch64/loh.mir +++ b/test/CodeGen/AArch64/loh.mir @@ -180,7 +180,6 @@ body: | %x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5 bb.13: - successors: %bb.14 ; Cannot produce a LOH for multiple users ; CHECK-NOT: MCLOH_AdrpAdd %x10 = ADRP target-flags(aarch64-page) @g0 diff --git a/test/CodeGen/AArch64/machine-copy-remove.mir b/test/CodeGen/AArch64/machine-copy-remove.mir index 6f2d3a3009b02..50c03ddb40374 100644 --- a/test/CodeGen/AArch64/machine-copy-remove.mir +++ b/test/CodeGen/AArch64/machine-copy-remove.mir @@ -7,20 +7,16 @@ name: test1 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1 %x0 = COPY %x1 CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -38,20 +34,16 @@ name: test2 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1 %x1 = COPY %x0 CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -69,7 +61,6 @@ name: test3 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1, %x2 %x0 = COPY %x1 @@ -77,13 +68,10 @@ body: | CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -101,7 +89,6 @@ name: test4 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1, %x2 %x1 = COPY %x0 @@ -109,13 +96,10 @@ body: | CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -133,7 +117,6 @@ name: test5 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1, %x2 %x1 = COPY %x0 @@ -141,13 +124,10 @@ body: | CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -165,7 +145,6 @@ name: test6 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1, %x2 %x2 = COPY %x0 @@ -173,13 +152,10 @@ body: | CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -197,7 +173,6 @@ name: test7 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1, %x2 %x2 = COPY %x0 @@ -206,13 +181,10 @@ body: | CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 - %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -232,14 +204,12 @@ name: test8 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1 %x1 = COPY %x0 CBNZX %x1, %bb.2 bb.1: - successors: %bb.3 liveins: %x0, %x2 %x0, %x1 = LDPXi %x2, 0 @@ -248,7 +218,6 @@ body: | B %bb.3 bb.2: - successors: %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -267,20 +236,17 @@ name: test9 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1 CBNZX %x0, %bb.2 bb.1: - successors: %bb.3 liveins: %x0, %x2 %x0 = COPY %xzr B %bb.3 bb.2: - successors: %bb.1, %bb.3 liveins: %x1 %x0 = LDRXui %x1, 0 @@ -304,7 +270,6 @@ name: test10 tracksRegLiveness: true body: | bb.0.entry: - 
successors: %bb.1, %bb.2 liveins: %w0, %x1 dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv @@ -312,7 +277,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 7 @@ -332,7 +296,6 @@ name: test11 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %x0, %x1 dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv @@ -340,7 +303,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 7, implicit-def %x0 @@ -360,7 +322,6 @@ name: test12 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %x0, %x1 dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv @@ -368,7 +329,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 7 @@ -388,7 +348,6 @@ name: test13 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1 dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv @@ -396,7 +355,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 7, implicit-def %x0 @@ -413,7 +371,6 @@ name: test14 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1, %x2 dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv @@ -423,7 +380,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 7 @@ -440,7 +396,6 @@ name: test15 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1, %x2 dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv @@ -448,7 +403,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1, %x2 %w0 = LDRWui %x1, 0 @@ -467,7 +421,6 @@ name: test16 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1 dead %wzr = SUBSWri %w0, 7, 0, implicit-def %nzcv @@ -476,7 +429,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w2 = MOVi32imm 7 @@ -493,7 +445,6 @@ name: test17 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1 dead %w0 = SUBSWri killed %w0, 7, 0, implicit-def %nzcv @@ -501,7 +452,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 7 @@ -520,14 +470,12 @@ name: test18 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %x0, %x1 CBNZX killed %x0, %bb.2 B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %x0 = MOVi64imm 4252017623040 @@ -547,7 +495,6 @@ name: test19 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1 dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv @@ -555,7 +502,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm -1 @@ -575,7 +521,6 @@ name: test20 tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 liveins: %x0, %x1 dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv @@ -583,7 +528,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %x0 = MOVi64imm -1 @@ -603,7 +547,6 @@ name: test21 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %x0, %x1 dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv @@ -611,7 +554,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm -1 @@ -629,7 +571,6 @@ name: test22 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, %bb.2 liveins: %w0, %x1 dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv @@ -637,7 +578,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %x0 = MOVi64imm -1 @@ -654,7 +594,6 @@ name: test23 tracksRegLiveness: true body: | bb.0.entry: - successors: %bb.1, 
%bb.2 liveins: %w0, %x1 dead %wzr = SUBSWri killed %w0, 1, 12, implicit-def %nzcv @@ -662,7 +601,6 @@ body: | B %bb.1 bb.1: - successors: %bb.2 liveins: %x1 %w0 = MOVi32imm 4096 diff --git a/test/CodeGen/AArch64/machine-sink-zr.mir b/test/CodeGen/AArch64/machine-sink-zr.mir index 535fba0dc63bc..2cf2bc488237f 100644 --- a/test/CodeGen/AArch64/machine-sink-zr.mir +++ b/test/CodeGen/AArch64/machine-sink-zr.mir @@ -17,7 +17,6 @@ body: | ; CHECK-LABEL: bb.0: ; CHECK-NOT: COPY %wzr bb.0: - successors: %bb.3, %bb.1 liveins: %w0 %0 = COPY %w0 @@ -28,13 +27,9 @@ body: | ; CHECK: COPY %wzr bb.1: - successors: %bb.2 - B %bb.2 bb.2: - successors: %bb.3, %bb.2 - %2 = PHI %0, %bb.1, %4, %bb.2 %w0 = COPY %1 %3 = SUBSWri %2, 1, 0, implicit-def dead %nzcv diff --git a/test/CodeGen/AArch64/regcoal-physreg.mir b/test/CodeGen/AArch64/regcoal-physreg.mir index 813106366968d..f88b7482acacf 100644 --- a/test/CodeGen/AArch64/regcoal-physreg.mir +++ b/test/CodeGen/AArch64/regcoal-physreg.mir @@ -93,7 +93,6 @@ body: | name: func1 body: | bb.0: - successors: %bb.1, %bb.2 ; Cannot coalesce physreg because we have reads on other CFG paths (we ; currently abort for any control flow) ; CHECK-NOT: %fp = SUBXri @@ -117,7 +116,6 @@ body: | name: func2 body: | bb.0: - successors: %bb.1, %bb.2 ; We can coalesce copies from physreg to vreg across multiple blocks. ; CHECK-NOT: COPY ; CHECK: CBZX undef %x0, %bb.1 diff --git a/test/CodeGen/AArch64/xray-attribute-instrumentation.ll b/test/CodeGen/AArch64/xray-attribute-instrumentation.ll index d0f5f40e156c9..38b62a72a20f5 100644 --- a/test/CodeGen/AArch64/xray-attribute-instrumentation.ll +++ b/test/CodeGen/AArch64/xray-attribute-instrumentation.ll @@ -26,6 +26,7 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" } ; CHECK: .p2align 4 ; CHECK-NEXT: .xword .Lxray_synthetic_0 +; CHECK-NEXT: .xword .Lxray_fn_idx_synth_0 ; CHECK-NEXT: .section xray_instr_map,{{.*}} ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .xword .Lxray_sled_0 diff --git a/test/CodeGen/AArch64/xray-tail-call-sled.ll b/test/CodeGen/AArch64/xray-tail-call-sled.ll index 6ada3ce8d551b..fb89950b99c84 100644 --- a/test/CodeGen/AArch64/xray-tail-call-sled.ll +++ b/test/CodeGen/AArch64/xray-tail-call-sled.ll @@ -29,10 +29,16 @@ define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-alway }
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_0
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .xword .Lxray_sled_0
; CHECK: .xword .Lxray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section xray_fn_idx,{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_synthetic_end0
define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
; CHECK: .p2align 2
@@ -63,7 +69,13 @@ define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always"
 }
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_1
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_1:
; CHECK: .xword .Lxray_sled_2
; CHECK: .xword .Lxray_sled_3
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section xray_fn_idx,{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .xword .Lxray_synthetic_end1
diff --git a/test/CodeGen/AMDGPU/detect-dead-lanes.mir b/test/CodeGen/AMDGPU/detect-dead-lanes.mir index 32e6f7cc0cdc7..3148b9b8ff9db 100644 --- a/test/CodeGen/AMDGPU/detect-dead-lanes.mir +++ b/test/CodeGen/AMDGPU/detect-dead-lanes.mir @@ -294,7 +294,6 @@ registers: - { id: 5, class: sreg_128 } body: | bb.0: - successors: %bb.1 S_NOP 0, implicit-def %0 S_NOP 0, implicit-def %1 S_NOP 0, implicit-def %2 @@ -302,7 +301,6 @@ body: | S_BRANCH %bb.1 bb.1: - successors: %bb.1, %bb.2 %4 = PHI %3, %bb.0, %5, %bb.1 ; let's swiffle some lanes around for fun... @@ -348,7 +346,6 @@ registers: - { id: 6, class: sreg_128 } body: | bb.0: - successors: %bb.1 S_NOP 0, implicit-def %0 S_NOP 0, implicit-def %1 S_NOP 0, implicit-def dead %2 @@ -357,7 +354,6 @@ body: | S_BRANCH %bb.1 bb.1: - successors: %bb.1, %bb.2 %5 = PHI %4, %bb.0, %6, %bb.1 ; rotate lanes, but skip sub2 lane... @@ -396,13 +392,11 @@ registers: - { id: 3, class: sreg_128 } body: | bb.0: - successors: %bb.1 S_NOP 0, implicit-def %0 %1 = REG_SEQUENCE %0, %subreg.sub0 S_BRANCH %bb.1 bb.1: - successors: %bb.1, %bb.2 %2 = PHI %1, %bb.0, %3, %bb.1 ; rotate subreg lanes, skipping sub1 diff --git a/test/CodeGen/AMDGPU/fmuladd.f32.ll b/test/CodeGen/AMDGPU/fmuladd.f32.ll index fb605dd2e4bd4..e422550266924 100644 --- a/test/CodeGen/AMDGPU/fmuladd.f32.ll +++ b/test/CodeGen/AMDGPU/fmuladd.f32.ll @@ -191,8 +191,8 @@ define amdgpu_kernel void @fadd_b_a_a_f32(float addrspace(1)* %out, ; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]] -; GCN-DENORM-SLOWFMA: v_mul_f32_e32 [[TMP:v[0-9]+]], -2.0, [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] +; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] +; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] ; SI-DENORM: buffer_store_dword [[RESULT]] ; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] @@ -251,8 +251,8 @@ define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, ; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], -[[R1]], 2.0, [[R2]] -; GCN-DENORM-SLOWFMA: v_mul_f32_e32 [[TMP:v[0-9]+]], -2.0, [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] +; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] +; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] ; SI-DENORM: buffer_store_dword [[RESULT]] ; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] diff --git a/test/CodeGen/AMDGPU/inserted-wait-states.mir b/test/CodeGen/AMDGPU/inserted-wait-states.mir index 1479303712d0f..c6fe6debd225a 100644 --- a/test/CodeGen/AMDGPU/inserted-wait-states.mir +++ b/test/CodeGen/AMDGPU/inserted-wait-states.mir @@ -77,19 +77,16 @@ name: div_fmas body: | bb.0: - successors: %bb.1 %vcc = S_MOV_B64 0 %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec S_BRANCH %bb.1 bb.1: - successors: %bb.2 implicit %vcc = V_CMP_EQ_I32_e32 %vgpr1, %vgpr2, implicit %exec %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec S_BRANCH %bb.2 bb.2: - successors: %bb.3 %vcc = V_CMP_EQ_I32_e64 %vgpr1, %vgpr2, implicit %exec %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec S_BRANCH %bb.3 @@ -130,19 +127,16 @@ name: s_getreg body: | bb.0: - successors: %bb.1 S_SETREG_B32 %sgpr0, 1 %sgpr1 = S_GETREG_B32 1 S_BRANCH %bb.1 bb.1: - successors: %bb.2 S_SETREG_IMM32_B32 0, 1 %sgpr1 = S_GETREG_B32 1 S_BRANCH %bb.2 bb.2: - successors: 
%bb.3 S_SETREG_B32 %sgpr0, 1 %sgpr1 = S_MOV_B32 0 %sgpr2 = S_GETREG_B32 1 @@ -178,13 +172,11 @@ name: s_setreg body: | bb.0: - successors: %bb.1 S_SETREG_B32 %sgpr0, 1 S_SETREG_B32 %sgpr1, 1 S_BRANCH %bb.1 bb.1: - successors: %bb.2 S_SETREG_B32 %sgpr0, 64 S_SETREG_B32 %sgpr1, 128 S_BRANCH %bb.2 @@ -237,7 +229,6 @@ name: vmem_gt_8dw_store body: | bb.0: - successors: %bb.1 BUFFER_STORE_DWORD_OFFSET %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec %vgpr3 = V_MOV_B32_e32 0, implicit %exec BUFFER_STORE_DWORDX3_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec @@ -310,19 +301,16 @@ name: readwrite_lane body: | bb.0: - successors: %bb.1 %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0 S_BRANCH %bb.1 bb.1: - successors: %bb.2 %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0 S_BRANCH %bb.2 bb.2: - successors: %bb.3 %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo S_BRANCH %bb.3 @@ -352,7 +340,6 @@ name: rfe body: | bb.0: - successors: %bb.1 S_SETREG_B32 %sgpr0, 3 S_RFE_B64 %sgpr2_sgpr3 S_BRANCH %bb.1 @@ -382,7 +369,6 @@ name: s_mov_fed_b32 body: | bb.0: - successors: %bb.1 %sgpr0 = S_MOV_FED_B32 %sgpr0 %sgpr0 = S_MOV_B32 %sgpr0 S_BRANCH %bb.1 @@ -423,19 +409,16 @@ name: s_movrel body: | bb.0: - successors: %bb.1 %m0 = S_MOV_B32 0 %sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0 S_BRANCH %bb.1 bb.1: - successors: %bb.2 %m0 = S_MOV_B32 0 %sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0 S_BRANCH %bb.2 bb.2: - successors: %bb.3 %m0 = S_MOV_B32 0 %sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0 S_BRANCH %bb.3 @@ -475,19 +458,16 @@ name: v_interp body: | bb.0: - successors: %bb.1 %m0 = S_MOV_B32 0 %vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec S_BRANCH %bb.1 bb.1: - successors: %bb.2 %m0 = S_MOV_B32 0 %vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec S_BRANCH %bb.2 bb.2: - successors: %bb.3 %m0 = S_MOV_B32 0 %vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec S_BRANCH %bb.3 diff --git a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir index bc1dafe0ea1e2..67642282f75b0 100644 --- a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir +++ b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir @@ -53,7 +53,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.2.if, %bb.1.else liveins: %sgpr0_sgpr1 %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) @@ -62,7 +61,6 @@ body: | S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc bb.1.else: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 100, implicit %exec @@ -71,7 +69,6 @@ body: | S_BRANCH %bb.3.done bb.2.if: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 9, implicit %exec diff --git a/test/CodeGen/AMDGPU/lds-size.ll b/test/CodeGen/AMDGPU/lds-size.ll index c65817abd489d..ff78c3bcb18cf 100644 --- a/test/CodeGen/AMDGPU/lds-size.ll +++ b/test/CodeGen/AMDGPU/lds-size.ll @@ -1,4 +1,5 @@ ; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=ALL -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefix=ALL -check-prefix=HSA %s ; RUN: llc 
-march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=ALL -check-prefix=EG %s ; This test makes sure we do not double count global values when they are @@ -11,6 +12,9 @@ ; EG-NEXT: .long 1 ; ALL: {{^}}test: +; HSA: granulated_lds_size = 0 +; HSA: workgroup_group_segment_byte_size = 4 + ; GCN: ; LDSByteSize: 4 bytes/workgroup (compile time only) @lds = internal unnamed_addr addrspace(3) global i32 undef, align 4 diff --git a/test/CodeGen/AMDGPU/liveness.mir b/test/CodeGen/AMDGPU/liveness.mir index 48762e3f2ab42..6fd8466492d08 100644 --- a/test/CodeGen/AMDGPU/liveness.mir +++ b/test/CodeGen/AMDGPU/liveness.mir @@ -16,13 +16,11 @@ registers: - { id: 0, class: sreg_64 } body: | bb.0: - successors: %bb.1, %bb.2 S_NOP 0, implicit-def undef %0.sub0 S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc S_BRANCH %bb.2 bb.1: - successors: %bb.2 S_NOP 0, implicit-def %0.sub1 S_NOP 0, implicit %0.sub1 S_BRANCH %bb.2 diff --git a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll b/test/CodeGen/AMDGPU/local-stack-slot-bug.ll deleted file mode 100644 index d3e0f0be4b5f3..0000000000000 --- a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll +++ /dev/null @@ -1,26 +0,0 @@ -; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s - -; This used to fail due to a v_add_i32 instruction with an illegal immediate -; operand that was created during Local Stack Slot Allocation. Test case derived -; from https://bugs.freedesktop.org/show_bug.cgi?id=96602 -; -; CHECK-LABEL: {{^}}main: - -; CHECK-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200 -; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}} -; CHECK-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0 -; CHECK-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]] - -; CHECK-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]] -; CHECK-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]] - -; CHECK: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen -; CHECK: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen -define amdgpu_ps float @main(i32 %idx) { -main_body: - %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, 
float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx - %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx - %r = fadd float %v1, %v2 - ret float %r -} diff --git a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir index 2de6b59e59e96..b5dc9d9dac841 100644 --- a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir +++ b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir @@ -176,7 +176,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -189,7 +188,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -236,7 +234,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -248,7 +245,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -295,7 +291,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -307,7 +302,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -356,7 +350,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -370,7 +363,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -418,7 +410,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr6 = S_MOV_B32 -1 @@ -433,7 +424,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7 %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 
0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) @@ -480,7 +470,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -494,7 +483,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -544,7 +532,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -557,7 +544,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1, %sgpr2_sgpr3 S_SLEEP 0, implicit %sgpr2_sgpr3 %sgpr7 = S_MOV_B32 61440 @@ -606,7 +592,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -618,7 +603,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -665,7 +649,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -677,7 +660,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -724,7 +706,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -736,7 +717,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs.mir b/test/CodeGen/AMDGPU/rename-independent-subregs.mir index fc2e4426ba48f..31ad26e769796 100644 --- a/test/CodeGen/AMDGPU/rename-independent-subregs.mir +++ b/test/CodeGen/AMDGPU/rename-independent-subregs.mir @@ -49,7 +49,6 @@ registers: - { id: 1, class: sreg_128 } body: | bb.0: - successors: %bb.1, %bb.2 S_NOP 0, implicit-def undef %0.sub2 S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc S_BRANCH %bb.2 diff --git a/test/CodeGen/AMDGPU/scratch-simple.ll b/test/CodeGen/AMDGPU/scratch-simple.ll new file mode 100644 index 0000000000000..60b9b56a48d1f --- /dev/null +++ b/test/CodeGen/AMDGPU/scratch-simple.ll @@ -0,0 +1,103 @@ +; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s +; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s + +; This used to fail due to a v_add_i32 instruction with an illegal immediate +; operand that was created during Local Stack Slot Allocation. 
Test case derived +; from https://bugs.freedesktop.org/show_bug.cgi?id=96602 +; +; GCN-LABEL: {{^}}ps_main: + +; GCN-DAG: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200 +; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}} +; GCN-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0 +; GCN-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]] + +; GCN-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]] +; GCN-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]] + +; GCN: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_ps float @ps_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, 
float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}vs_main: +; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_vs float @vs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, 
float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}cs_main: +; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_cs float @cs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}hs_main: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], 
s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_hs float @hs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}gs_main: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, 
{{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_gs float @gs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}hs_ir_uses_scratch_offset: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s6 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: s_mov_b32 s2, s5 +define amdgpu_hs <{i32, i32, i32, float}> 
@hs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %f = fadd float %v1, %v2 + %r1 = insertvalue <{i32, i32, i32, float}> undef, i32 %swo, 2 + %r2 = insertvalue <{i32, i32, i32, float}> %r1, float %f, 3 + ret <{i32, i32, i32, float}> %r2 +} + +; GCN-LABEL: {{^}}gs_ir_uses_scratch_offset: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s6 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: 
buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: s_mov_b32 s2, s5 +define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %f = fadd float %v1, %v2 + %r1 = insertvalue <{i32, i32, i32, float}> undef, i32 %swo, 2 + %r2 = insertvalue <{i32, i32, i32, float}> %r1, float %f, 3 + ret <{i32, i32, i32, float}> %r2 +} diff --git a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir 
b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir index 20052e865a54e..18176de53793b 100644 --- a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir +++ b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir @@ -20,12 +20,10 @@ body: | ; GCN: V_ADD_I32 bb.0: liveins: %vgpr0 - successors: %bb.1 %7 = COPY %vgpr0 %8 = S_MOV_B32 0 bb.1: - successors: %bb.1, %bb.2 %0 = PHI %8, %bb.0, %0, %bb.1, %2, %bb.2 %9 = V_MOV_B32_e32 9, implicit %exec %10 = V_CMP_EQ_U32_e64 %7, %9, implicit %exec @@ -33,7 +31,6 @@ body: | S_BRANCH %bb.1 bb.2: - successors: %bb.1 SI_END_CF %1, implicit-def %exec, implicit-def %scc, implicit %exec %11 = S_MOV_B32 1 %2 = S_ADD_I32 %0, %11, implicit-def %scc diff --git a/test/CodeGen/AMDGPU/subreg-intervals.mir b/test/CodeGen/AMDGPU/subreg-intervals.mir index c477fe9bc6d34..62816da25b2c4 100644 --- a/test/CodeGen/AMDGPU/subreg-intervals.mir +++ b/test/CodeGen/AMDGPU/subreg-intervals.mir @@ -31,17 +31,14 @@ registers: - { id: 0, class: sreg_64 } body: | bb.0: - successors: %bb.1, %bb.2 S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc S_BRANCH %bb.2 bb.1: - successors: %bb.3 S_NOP 0, implicit-def undef %0.sub0 S_BRANCH %bb.3 bb.2: - successors: %bb.3 S_NOP 0, implicit-def %0 S_BRANCH %bb.3 diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir index 5e5465800c3a3..6eb937e71b1b6 100644 --- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir +++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir @@ -75,7 +75,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.2.if, %bb.1.else liveins: %sgpr0_sgpr1 %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`) @@ -86,7 +85,6 @@ body: | S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc bb.2.if: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 9, implicit %exec @@ -95,7 +93,6 @@ body: | S_BRANCH %bb.3.done bb.1.else: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 100, implicit %exec @@ -141,7 +138,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.2.if, %bb.1.else liveins: %sgpr0_sgpr1 %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) @@ -150,7 +146,6 @@ body: | S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc bb.2.if: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 9, implicit %exec @@ -159,7 +154,6 @@ body: | S_BRANCH %bb.3.done bb.1.else: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 100, implicit %exec diff --git a/test/CodeGen/AMDGPU/waitcnt-looptest.ll b/test/CodeGen/AMDGPU/waitcnt-looptest.ll new file mode 100644 index 0000000000000..2a3ce4dfd191b --- /dev/null +++ b/test/CodeGen/AMDGPU/waitcnt-looptest.ll @@ -0,0 +1,146 @@ +; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s + +; Check that the waitcnt insertion algorithm correctly propagates wait counts +; from before a loop to the loop header. 
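To make the propagation this test exercises concrete, the sketch below is a rough Python model only — an assumption for illustration, not the actual AMDGPU waitcnt-insertion pass or its data structures. The idea it captures: when a block such as a loop header has several predecessors, the counts of still-outstanding memory operations are merged by taking the worst case from each predecessor, so a wait required by code issued before the loop is still enforced on the path through the back edge.

# Simplified model only; the real pass tracks more event types and per-register
# state. "vmcnt"/"lgkmcnt" here stand for outstanding vector-memory and
# LDS/constant operations whose results have not yet been waited on.
def merge_pending(*pred_states):
    counters = ("vmcnt", "lgkmcnt")
    return {c: max(s.get(c, 0) for s in pred_states) for c in counters}

preheader = {"vmcnt": 1, "lgkmcnt": 1}  # e.g. memory ops issued before the loop
backedge  = {"vmcnt": 2, "lgkmcnt": 0}  # memory ops issued inside the loop body
print(merge_pending(preheader, backedge))  # {'vmcnt': 2, 'lgkmcnt': 1}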
+ +; GCN-LABEL: {{^}}testKernel +; GCN: BB0_1: +; GCN: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_f32_e64 +; GCN: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_f32_e32 +; GCN: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_f32_e32 + +@data_generic = addrspace(1) global [100 x float] [float 0.000000e+00, float 0x3FB99999A0000000, float 0x3FC99999A0000000, float 0x3FD3333340000000, float 0x3FD99999A0000000, float 5.000000e-01, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000, float 0x3FECCCCCC0000000, float 1.000000e+00, float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000, float 1.500000e+00, float 0x3FF99999A0000000, float 0x3FFB333340000000, float 0x3FFCCCCCC0000000, float 0x3FFE666660000000, float 2.000000e+00, float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000, float 2.500000e+00, float 0x4004CCCCC0000000, float 0x40059999A0000000, float 0x4006666660000000, float 0x4007333340000000, float 3.000000e+00, float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000, float 3.500000e+00, float 0x400CCCCCC0000000, float 0x400D9999A0000000, float 0x400E666660000000, float 0x400F333340000000, float 4.000000e+00, float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000, float 4.500000e+00, float 0x4012666660000000, float 0x4012CCCCC0000000, float 0x4013333340000000, float 0x40139999A0000000, float 5.000000e+00, float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000, float 5.500000e+00, float 0x4016666660000000, float 0x4016CCCCC0000000, float 0x4017333340000000, float 0x40179999A0000000, float 6.000000e+00, float 0x4018666660000000, float 0x4018CCCCC0000000, float 0x4019333340000000, float 0x40199999A0000000, float 6.500000e+00, float 0x401A666660000000, float 0x401ACCCCC0000000, float 0x401B333340000000, float 0x401B9999A0000000, float 7.000000e+00, float 0x401C666660000000, float 0x401CCCCCC0000000, float 0x401D333340000000, float 0x401D9999A0000000, float 7.500000e+00, float 0x401E666660000000, float 0x401ECCCCC0000000, float 0x401F333340000000, float 0x401F9999A0000000, float 8.000000e+00, float 0x4020333340000000, float 0x4020666660000000, float 0x40209999A0000000, float 0x4020CCCCC0000000, float 8.500000e+00, float 0x4021333340000000, float 0x4021666660000000, float 0x40219999A0000000, float 0x4021CCCCC0000000, float 9.000000e+00, float 0x4022333340000000, float 0x4022666660000000, float 0x40229999A0000000, float 0x4022CCCCC0000000, float 9.500000e+00, float 0x4023333340000000, float 0x4023666660000000, float 0x40239999A0000000, float 0x4023CCCCC0000000], align 4 +@data_reference = addrspace(1) global [100 x float] [float 0.000000e+00, float 0x3FB99999A0000000, float 0x3FC99999A0000000, float 0x3FD3333340000000, float 0x3FD99999A0000000, float 5.000000e-01, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000, float 0x3FECCCCCC0000000, float 1.000000e+00, float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000, float 1.500000e+00, float 0x3FF99999A0000000, float 0x3FFB333340000000, float 0x3FFCCCCCC0000000, float 0x3FFE666660000000, float 2.000000e+00, float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000, float 2.500000e+00, float 0x4004CCCCC0000000, float 0x40059999A0000000, float 
0x4006666660000000, float 0x4007333340000000, float 3.000000e+00, float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000, float 3.500000e+00, float 0x400CCCCCC0000000, float 0x400D9999A0000000, float 0x400E666660000000, float 0x400F333340000000, float 4.000000e+00, float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000, float 4.500000e+00, float 0x4012666660000000, float 0x4012CCCCC0000000, float 0x4013333340000000, float 0x40139999A0000000, float 5.000000e+00, float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000, float 5.500000e+00, float 0x4016666660000000, float 0x4016CCCCC0000000, float 0x4017333340000000, float 0x40179999A0000000, float 6.000000e+00, float 0x4018666660000000, float 0x4018CCCCC0000000, float 0x4019333340000000, float 0x40199999A0000000, float 6.500000e+00, float 0x401A666660000000, float 0x401ACCCCC0000000, float 0x401B333340000000, float 0x401B9999A0000000, float 7.000000e+00, float 0x401C666660000000, float 0x401CCCCCC0000000, float 0x401D333340000000, float 0x401D9999A0000000, float 7.500000e+00, float 0x401E666660000000, float 0x401ECCCCC0000000, float 0x401F333340000000, float 0x401F9999A0000000, float 8.000000e+00, float 0x4020333340000000, float 0x4020666660000000, float 0x40209999A0000000, float 0x4020CCCCC0000000, float 8.500000e+00, float 0x4021333340000000, float 0x4021666660000000, float 0x40219999A0000000, float 0x4021CCCCC0000000, float 9.000000e+00, float 0x4022333340000000, float 0x4022666660000000, float 0x40229999A0000000, float 0x4022CCCCC0000000, float 9.500000e+00, float 0x4023333340000000, float 0x4023666660000000, float 0x40239999A0000000, float 0x4023CCCCC0000000], align 4 + +define amdgpu_kernel void @testKernel(i32 addrspace(1)* nocapture %arg) local_unnamed_addr #0 { +bb: + store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> addrspace(4)* bitcast (float addrspace(4)* getelementptr ([100 x float], [100 x float] addrspace(4)* addrspacecast ([100 x float] addrspace(1)* @data_generic to [100 x float] addrspace(4)*), i64 0, i64 4) to <2 x float> addrspace(4)*), align 4 + store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> addrspace(4)* bitcast (float addrspace(4)* getelementptr ([100 x float], [100 x float] addrspace(4)* addrspacecast ([100 x float] addrspace(1)* @data_reference to [100 x float] addrspace(4)*), i64 0, i64 4) to <2 x float> addrspace(4)*), align 4 + br label %bb18 + +bb1: ; preds = %bb18 + %tmp = tail call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() + %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = tail call i32 @llvm.amdgcn.workgroup.id.x() + %tmp4 = getelementptr inbounds i8, i8 addrspace(2)* %tmp, i64 4 + %tmp5 = bitcast i8 addrspace(2)* %tmp4 to i16 addrspace(2)* + %tmp6 = load i16, i16 addrspace(2)* %tmp5, align 4 + %tmp7 = zext i16 %tmp6 to i32 + %tmp8 = mul i32 %tmp3, %tmp7 + %tmp9 = add i32 %tmp8, %tmp2 + %tmp10 = tail call i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() + %tmp11 = zext i32 %tmp9 to i64 + %tmp12 = bitcast i8 addrspace(2)* %tmp10 to i64 addrspace(2)* + %tmp13 = load i64, i64 addrspace(2)* %tmp12, align 8 + %tmp14 = add i64 %tmp13, %tmp11 + %tmp15 = zext i1 %tmp99 to i32 + %tmp16 = and i64 %tmp14, 4294967295 + %tmp17 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp16 + store i32 %tmp15, i32 addrspace(1)* %tmp17, align 4 + ret void + +bb18: ; preds = %bb18, %bb + %tmp19 = phi i64 [ 0, %bb ], [ 
%tmp102, %bb18 ] + %tmp20 = phi i32 [ 0, %bb ], [ %tmp100, %bb18 ] + %tmp21 = phi i1 [ true, %bb ], [ %tmp99, %bb18 ] + %tmp22 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp19 + %tmp23 = load float, float addrspace(1)* %tmp22, align 4 + %tmp24 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp19 + %tmp25 = load float, float addrspace(1)* %tmp24, align 4 + %tmp26 = fcmp oeq float %tmp23, %tmp25 + %tmp27 = and i1 %tmp21, %tmp26 + %tmp28 = or i32 %tmp20, 1 + %tmp29 = sext i32 %tmp28 to i64 + %tmp30 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp29 + %tmp31 = load float, float addrspace(1)* %tmp30, align 4 + %tmp32 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp29 + %tmp33 = load float, float addrspace(1)* %tmp32, align 4 + %tmp34 = fcmp oeq float %tmp31, %tmp33 + %tmp35 = and i1 %tmp27, %tmp34 + %tmp36 = add nuw nsw i32 %tmp20, 2 + %tmp37 = sext i32 %tmp36 to i64 + %tmp38 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp37 + %tmp39 = load float, float addrspace(1)* %tmp38, align 4 + %tmp40 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp37 + %tmp41 = load float, float addrspace(1)* %tmp40, align 4 + %tmp42 = fcmp oeq float %tmp39, %tmp41 + %tmp43 = and i1 %tmp35, %tmp42 + %tmp44 = add nuw nsw i32 %tmp20, 3 + %tmp45 = sext i32 %tmp44 to i64 + %tmp46 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp45 + %tmp47 = load float, float addrspace(1)* %tmp46, align 4 + %tmp48 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp45 + %tmp49 = load float, float addrspace(1)* %tmp48, align 4 + %tmp50 = fcmp oeq float %tmp47, %tmp49 + %tmp51 = and i1 %tmp43, %tmp50 + %tmp52 = add nuw nsw i32 %tmp20, 4 + %tmp53 = sext i32 %tmp52 to i64 + %tmp54 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp53 + %tmp55 = load float, float addrspace(1)* %tmp54, align 4 + %tmp56 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp53 + %tmp57 = load float, float addrspace(1)* %tmp56, align 4 + %tmp58 = fcmp oeq float %tmp55, %tmp57 + %tmp59 = and i1 %tmp51, %tmp58 + %tmp60 = add nuw nsw i32 %tmp20, 5 + %tmp61 = sext i32 %tmp60 to i64 + %tmp62 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp61 + %tmp63 = load float, float addrspace(1)* %tmp62, align 4 + %tmp64 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp61 + %tmp65 = load float, float addrspace(1)* %tmp64, align 4 + %tmp66 = fcmp oeq float %tmp63, %tmp65 + %tmp67 = and i1 %tmp59, %tmp66 + %tmp68 = add nuw nsw i32 %tmp20, 6 + %tmp69 = sext i32 %tmp68 to i64 + %tmp70 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp69 + %tmp71 = load float, float addrspace(1)* %tmp70, align 4 + %tmp72 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp69 + %tmp73 = load float, float addrspace(1)* %tmp72, align 4 + %tmp74 = fcmp oeq float %tmp71, %tmp73 + %tmp75 = and i1 %tmp67, %tmp74 + %tmp76 = add nuw nsw i32 %tmp20, 7 + %tmp77 = sext i32 %tmp76 to i64 + %tmp78 = getelementptr inbounds [100 x float], 
[100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp77 + %tmp79 = load float, float addrspace(1)* %tmp78, align 4 + %tmp80 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp77 + %tmp81 = load float, float addrspace(1)* %tmp80, align 4 + %tmp82 = fcmp oeq float %tmp79, %tmp81 + %tmp83 = and i1 %tmp75, %tmp82 + %tmp84 = add nuw nsw i32 %tmp20, 8 + %tmp85 = sext i32 %tmp84 to i64 + %tmp86 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp85 + %tmp87 = load float, float addrspace(1)* %tmp86, align 4 + %tmp88 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp85 + %tmp89 = load float, float addrspace(1)* %tmp88, align 4 + %tmp90 = fcmp oeq float %tmp87, %tmp89 + %tmp91 = and i1 %tmp83, %tmp90 + %tmp92 = add nuw nsw i32 %tmp20, 9 + %tmp93 = sext i32 %tmp92 to i64 + %tmp94 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp93 + %tmp95 = load float, float addrspace(1)* %tmp94, align 4 + %tmp96 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp93 + %tmp97 = load float, float addrspace(1)* %tmp96, align 4 + %tmp98 = fcmp oeq float %tmp95, %tmp97 + %tmp99 = and i1 %tmp91, %tmp98 + %tmp100 = add nuw nsw i32 %tmp20, 10 + %tmp101 = icmp eq i32 %tmp100, 100 + %tmp102 = sext i32 %tmp100 to i64 + br i1 %tmp101, label %bb1, label %bb18 +} + +; Function Attrs: nounwind readnone speculatable +declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1 + +; Function Attrs: nounwind readnone speculatable +declare i32 @llvm.amdgcn.workitem.id.x() #1 + +; Function Attrs: nounwind readnone speculatable +declare i32 @llvm.amdgcn.workgroup.id.x() #1 + +; Function Attrs: nounwind readnone speculatable +declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #1 + +attributes #0 = { "target-cpu"="fiji" "target-features"="-flat-for-global" } +attributes #1 = { nounwind readnone speculatable } diff --git a/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/test/CodeGen/ARM/ARMLoadStoreDBG.mir index 0e6f80bfb48bc..cf5388ac1ccb9 100644 --- a/test/CodeGen/ARM/ARMLoadStoreDBG.mir +++ b/test/CodeGen/ARM/ARMLoadStoreDBG.mir @@ -118,7 +118,6 @@ stack: - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' } body: | bb.0.entry: - successors: %bb.1, %bb.2.if.end liveins: %r0, %r1, %r2, %r3, %lr, %r7 DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28 diff --git a/test/CodeGen/ARM/acle-intrinsics-v5.ll b/test/CodeGen/ARM/acle-intrinsics-v5.ll new file mode 100644 index 0000000000000..407bea1488630 --- /dev/null +++ b/test/CodeGen/ARM/acle-intrinsics-v5.ll @@ -0,0 +1,110 @@ +; RUN: llc -O1 -mtriple=armv5te-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=armv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv6t2-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7em-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv8m.main-none-none-eabi -mattr=+dsp %s -o - | FileCheck %s +define i32 @smulbb(i32 %a, i32 %b) { +; CHECK-LABEL: smulbb +; CHECK: smulbb r0, r0, r1 + %tmp = call i32 @llvm.arm.smulbb(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smulbt(i32 %a, i32 %b) { +; CHECK-LABEL: smulbt +; CHECK: smulbt r0, r0, r1 + %tmp = call i32 
@llvm.arm.smulbt(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smultb(i32 %a, i32 %b) { +; CHECK-LABEL: smultb +; CHECK: smultb r0, r0, r1 + %tmp = call i32 @llvm.arm.smultb(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smultt(i32 %a, i32 %b) { +; CHECK-LABEL: smultt +; CHECK: smultt r0, r0, r1 + %tmp = call i32 @llvm.arm.smultt(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smulwb(i32 %a, i32 %b) { +; CHECK-LABEL: smulwb +; CHECK: smulwb r0, r0, r1 + %tmp = call i32 @llvm.arm.smulwb(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smulwt(i32 %a, i32 %b) { +; CHECK-LABEL: smulwt +; CHECK: smulwt r0, r0, r1 + %tmp = call i32 @llvm.arm.smulwt(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @acc_mults(i32 %a, i32 %b, i32 %acc) { +; CHECK-LABEL: acc_mults +; CHECK: smlabb r2, r0, r1, r2 +; CHECK: smlabt r2, r0, r1, r2 +; CHECK: smlatb r2, r0, r1, r2 +; CHECK: smlatt r2, r0, r1, r2 +; CHECK: smlawb r2, r0, r1, r2 +; CHECK: smlawt r0, r0, r1, r2 + %acc1 = call i32 @llvm.arm.smlabb(i32 %a, i32 %b, i32 %acc) + %acc2 = call i32 @llvm.arm.smlabt(i32 %a, i32 %b, i32 %acc1) + %acc3 = call i32 @llvm.arm.smlatb(i32 %a, i32 %b, i32 %acc2) + %acc4 = call i32 @llvm.arm.smlatt(i32 %a, i32 %b, i32 %acc3) + %acc5 = call i32 @llvm.arm.smlawb(i32 %a, i32 %b, i32 %acc4) + %acc6 = call i32 @llvm.arm.smlawt(i32 %a, i32 %b, i32 %acc5) + ret i32 %acc6 +} + +define i32 @qadd(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qadd +; CHECK: qadd r0, r0, r1 + %tmp = call i32 @llvm.arm.qadd(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsub(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsub +; CHECK: qsub r0, r0, r1 + %tmp = call i32 @llvm.arm.qsub(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qdadd(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qdadd +; CHECK: qdadd r0, r0, r1 + %dbl = call i32 @llvm.arm.qadd(i32 %a, i32 %a) + %add = call i32 @llvm.arm.qadd(i32 %dbl, i32 %b) + ret i32 %add +} + +define i32 @qdsub(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qdsub +; CHECK: qdsub r0, r0, r1 + %dbl = call i32 @llvm.arm.qadd(i32 %b, i32 %b) + %add = call i32 @llvm.arm.qsub(i32 %a, i32 %dbl) + ret i32 %add +} + +declare i32 @llvm.arm.smulbb(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smulbt(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smultb(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smultt(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smulwb(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smulwt(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smlabb(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlabt(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlatb(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlatt(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlawb(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlawt(i32, i32, i32) nounwind +declare i32 @llvm.arm.qadd(i32, i32) nounwind +declare i32 @llvm.arm.qsub(i32, i32) nounwind diff --git a/test/CodeGen/ARM/acle-intrinsics.ll b/test/CodeGen/ARM/acle-intrinsics.ll new file mode 100644 index 0000000000000..0c20744e126bd --- /dev/null +++ b/test/CodeGen/ARM/acle-intrinsics.ll @@ -0,0 +1,481 @@ +; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=armv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv6t2-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7em-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv8m.main-none-none-eabi -mattr=+dsp %s 
-o - | FileCheck %s + + +; upper-bound of the immediate argument +define i32 @ssat1(i32 %a) nounwind { +; CHECK-LABEL: ssat1 +; CHECK: ssat r0, #32, r0 + %tmp = call i32 @llvm.arm.ssat(i32 %a, i32 32) + ret i32 %tmp +} + +; lower-bound of the immediate argument +define i32 @ssat2(i32 %a) nounwind { +; CHECK-LABEL: ssat2 +; CHECK: ssat r0, #1, r0 + %tmp = call i32 @llvm.arm.ssat(i32 %a, i32 1) + ret i32 %tmp +} + +; upper-bound of the immediate argument +define i32 @usat1(i32 %a) nounwind { +; CHECK-LABEL: usat1 +; CHECK: usat r0, #31, r0 + %tmp = call i32 @llvm.arm.usat(i32 %a, i32 31) + ret i32 %tmp +} + +; lower-bound of the immediate argument +define i32 @usat2(i32 %a) nounwind { +; CHECK-LABEL: usat2 +; CHECK: usat r0, #0, r0 + %tmp = call i32 @llvm.arm.usat(i32 %a, i32 0) + ret i32 %tmp +} + +define i32 @ssat16 (i32 %a) nounwind { +; CHECK-LABEL: ssat16 +; CHECK: ssat16 r0, #1, r0 +; CHECK: ssat16 r0, #16, r0 + %tmp = call i32 @llvm.arm.ssat16(i32 %a, i32 1) + %tmp2 = call i32 @llvm.arm.ssat16(i32 %tmp, i32 16) + ret i32 %tmp2 +} + +define i32 @usat16(i32 %a) nounwind { +; CHECK-LABEL: usat16 +; CHECK: usat16 r0, #0, r0 +; CHECK: usat16 r0, #15, r0 + %tmp = call i32 @llvm.arm.usat16(i32 %a, i32 0) + %tmp2 = call i32 @llvm.arm.usat16(i32 %tmp, i32 15) + ret i32 %tmp2 +} + +define i32 @pack_unpack(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: pack_unpack +; CHECK: sxtab16 r0, r0, r1 +; CHECK: sxtb16 r0, r0 +; CHECK: uxtab16 r0, r1, r0 +; CHECK: uxtb16 r0, r0 + %tmp = call i32 @llvm.arm.sxtab16(i32 %a, i32 %b) + %tmp1 = call i32 @llvm.arm.sxtb16(i32 %tmp) + %tmp2 = call i32 @llvm.arm.uxtab16(i32 %b, i32 %tmp1) + %tmp3 = call i32 @llvm.arm.uxtb16(i32 %tmp2) + ret i32 %tmp3 +} + +define i32 @sel(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: sel +; CHECK sel r0, r0, r1 + %tmp = call i32 @llvm.arm.sel(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qadd8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qadd8 +; CHECK: qadd8 r0, r0, r1 + %tmp = call i32 @llvm.arm.qadd8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsub8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsub8 +; CHECK: qsub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.qsub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @sadd8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: sadd8 +; CHECK: sadd8 r0, r0, r1 + %tmp = call i32 @llvm.arm.sadd8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shadd8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shadd8 +; CHECK: shadd8 r0, r0, r1 + %tmp = call i32 @llvm.arm.shadd8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shsub8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shsub8 +; CHECK: shsub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.shsub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @ssub8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: ssub8 +; CHECK: ssub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.ssub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uadd8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uadd8 +; CHECK: uadd8 r0, r0, r1 + %tmp = call i32 @llvm.arm.uadd8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhadd8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhadd8 +; CHECK: uhadd8 r0, r0, r1 + %tmp = call i32 @llvm.arm.uhadd8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhsub8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhsub8 +; CHECK: uhsub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.uhsub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqadd8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqadd8 +; CHECK: uqadd8 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqadd8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqsub8(i32 
%a, i32 %b) nounwind { +; CHECK-LABEL: uqsub8 +; CHECK: uqsub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqsub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usub8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: usub8 +; CHECK: usub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.usub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usad(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: usad +; CHECK: usad8 r0, r0, r1 +; CHECK: usada8 r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.usad8(i32 %a, i32 %b) + %tmp1 = call i32 @llvm.arm.usada8(i32 %tmp, i32 %b, i32 %c) + ret i32 %tmp1 +} + +define i32 @qadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qadd16 +; CHECK: qadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.qadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qasx +; CHECK: qasx r0, r0, r1 + %tmp = call i32 @llvm.arm.qasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsax +; CHECK: qsax r0, r0, r1 + %tmp = call i32 @llvm.arm.qsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsub16 +; CHECK: qsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.qsub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @sadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: sadd16 +; CHECK: sadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.sadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @sasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: sasx +; CHECK: sasx r0, r0, r1 + %tmp = call i32 @llvm.arm.sasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shadd16 +; CHECK: shadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.shadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shasx +; CHECK: shasx r0, r0, r1 + %tmp = call i32 @llvm.arm.shasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shsax +; CHECK: shsax r0, r0, r1 + %tmp = call i32 @llvm.arm.shsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shsub16 +; CHECK: shsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.shsub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @ssax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: ssax +; CHECK: ssax r0, r0, r1 + %tmp = call i32 @llvm.arm.ssax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @ssub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: ssub16 +; CHECK: ssub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.ssub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uadd16 +; CHECK: uadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uasx +; CHECK: uasx r0, r0, r1 + %tmp = call i32 @llvm.arm.uasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhadd16 +; CHECK: uhadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uhadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhasx +; CHECK: uhasx r0, r0, r1 + %tmp = call i32 @llvm.arm.uhasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhsax +; CHECK: uhsax r0, r0, r1 + %tmp = call i32 @llvm.arm.uhsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhsub16 +; CHECK: uhsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uhsub16(i32 %a, i32 
%b) + ret i32 %tmp +} + +define i32 @uqadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqadd16 +; CHECK: uqadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqasx +; CHECK: uqasx r0, r0, r1 + %tmp = call i32 @llvm.arm.uqasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqsax +; CHECK: uqsax r0, r0, r1 + %tmp = call i32 @llvm.arm.uqsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqsub16 +; CHECK: uqsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqsub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: usax +; CHECK: usax r0, r0, r1 + %tmp = call i32 @llvm.arm.usax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: usub16 +; CHECK: usub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.usub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smlad(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smlad +; CHECK: smlad r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smlad(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i32 @smladx(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smladx +; CHECK: smladx r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smladx(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i64 @smlald(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlald +; CHECK: smlald r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlald(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i64 @smlaldx(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlaldx +; CHECK: smlaldx r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlaldx(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i32 @smlsd(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smlsd +; CHECK: smlsd r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smlsd(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i32 @smlsdx(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smlsdx +; CHECK: smlsdx r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smlsdx(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i64 @smlsld(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlsld +; CHECK: smlsld r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlsld(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i64 @smlsldx(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlsldx +; CHECK: smlsldx r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlsldx(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i32 @smuad(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: smuad +; CHECK: smuad r0, r0, r1 + %tmp = call i32 @llvm.arm.smuad(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smuadx(i32 %a, i32 %b) nounwind { +;CHECK-LABEL: smuadx +; CHECK: smuadx r0, r0, r1 + %tmp = call i32 @llvm.arm.smuadx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smusd(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: smusd +; CHECK: smusd r0, r0, r1 + %tmp = call i32 @llvm.arm.smusd(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smusdx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: smusdx +; CHECK: smusdx r0, r0, r1 + %tmp = call i32 @llvm.arm.smusdx(i32 %a, i32 %b) + ret i32 %tmp +} +declare i32 @llvm.arm.ssat(i32, i32) nounwind readnone +declare i32 @llvm.arm.usat(i32, i32) nounwind readnone +declare i32 @llvm.arm.ssat16(i32, i32) nounwind +declare i32 @llvm.arm.usat16(i32, i32) nounwind +declare i32 @llvm.arm.sxtab16(i32, i32) +declare i32 @llvm.arm.sxtb16(i32) +declare i32 @llvm.arm.uxtab16(i32, i32) +declare i32 
@llvm.arm.uxtb16(i32) +declare i32 @llvm.arm.sel(i32, i32) nounwind +declare i32 @llvm.arm.qadd8(i32, i32) nounwind +declare i32 @llvm.arm.qsub8(i32, i32) nounwind +declare i32 @llvm.arm.sadd8(i32, i32) nounwind +declare i32 @llvm.arm.shadd8(i32, i32) nounwind +declare i32 @llvm.arm.shsub8(i32, i32) nounwind +declare i32 @llvm.arm.ssub8(i32, i32) nounwind +declare i32 @llvm.arm.uadd8(i32, i32) nounwind +declare i32 @llvm.arm.uhadd8(i32, i32) nounwind +declare i32 @llvm.arm.uhsub8(i32, i32) nounwind +declare i32 @llvm.arm.uqadd8(i32, i32) nounwind +declare i32 @llvm.arm.uqsub8(i32, i32) nounwind +declare i32 @llvm.arm.usub8(i32, i32) nounwind +declare i32 @llvm.arm.usad8(i32, i32) nounwind readnone +declare i32 @llvm.arm.usada8(i32, i32, i32) nounwind readnone +declare i32 @llvm.arm.qadd16(i32, i32) nounwind +declare i32 @llvm.arm.qasx(i32, i32) nounwind +declare i32 @llvm.arm.qsax(i32, i32) nounwind +declare i32 @llvm.arm.qsub16(i32, i32) nounwind +declare i32 @llvm.arm.sadd16(i32, i32) nounwind +declare i32 @llvm.arm.sasx(i32, i32) nounwind +declare i32 @llvm.arm.shadd16(i32, i32) nounwind +declare i32 @llvm.arm.shasx(i32, i32) nounwind +declare i32 @llvm.arm.shsax(i32, i32) nounwind +declare i32 @llvm.arm.shsub16(i32, i32) nounwind +declare i32 @llvm.arm.ssax(i32, i32) nounwind +declare i32 @llvm.arm.ssub16(i32, i32) nounwind +declare i32 @llvm.arm.uadd16(i32, i32) nounwind +declare i32 @llvm.arm.uasx(i32, i32) nounwind +declare i32 @llvm.arm.usax(i32, i32) nounwind +declare i32 @llvm.arm.uhadd16(i32, i32) nounwind +declare i32 @llvm.arm.uhasx(i32, i32) nounwind +declare i32 @llvm.arm.uhsax(i32, i32) nounwind +declare i32 @llvm.arm.uhsub16(i32, i32) nounwind +declare i32 @llvm.arm.uqadd16(i32, i32) nounwind +declare i32 @llvm.arm.uqasx(i32, i32) nounwind +declare i32 @llvm.arm.uqsax(i32, i32) nounwind +declare i32 @llvm.arm.uqsub16(i32, i32) nounwind +declare i32 @llvm.arm.usub16(i32, i32) nounwind +declare i32 @llvm.arm.smlad(i32, i32, i32) nounwind +declare i32 @llvm.arm.smladx(i32, i32, i32) nounwind +declare i64 @llvm.arm.smlald(i32, i32, i64) nounwind +declare i64 @llvm.arm.smlaldx(i32, i32, i64) nounwind +declare i32 @llvm.arm.smlsd(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlsdx(i32, i32, i32) nounwind +declare i64 @llvm.arm.smlsld(i32, i32, i64) nounwind +declare i64 @llvm.arm.smlsldx(i32, i32, i64) nounwind +declare i32 @llvm.arm.smuad(i32, i32) nounwind +declare i32 @llvm.arm.smuadx(i32, i32) nounwind +declare i32 @llvm.arm.smusd(i32, i32) nounwind +declare i32 @llvm.arm.smusdx(i32, i32) nounwind diff --git a/test/CodeGen/ARM/alloca-align.ll b/test/CodeGen/ARM/alloca-align.ll new file mode 100644 index 0000000000000..3bba156f0ee06 --- /dev/null +++ b/test/CodeGen/ARM/alloca-align.ll @@ -0,0 +1,24 @@ +; RUN: llc -o - %s | FileCheck %s +target triple="arm--" + +@glob = external global i32* + +declare void @bar(i32*, [20000 x i8]* byval) + +; CHECK-LABEL: foo: +; We should see the stack getting additional alignment +; CHECK: sub sp, sp, #16 +; CHECK: bic sp, sp, #31 +; And a base pointer getting used. 
+; CHECK: mov r6, sp +; Which is passed to the call +; CHECK: add [[REG:r[0-9]+]], r6, #19456 +; CHECK: add r0, [[REG]], #536 +; CHECK: bl bar +define void @foo([20000 x i8]* %addr) { + %tmp = alloca [4 x i32], align 32 + %tmp0 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0 + call void @bar(i32* %tmp0, [20000 x i8]* byval %addr) + ret void +} + diff --git a/test/CodeGen/ARM/cmp1-peephole-thumb.mir b/test/CodeGen/ARM/cmp1-peephole-thumb.mir index 5ace58fd06584..3e87ced0ee57d 100644 --- a/test/CodeGen/ARM/cmp1-peephole-thumb.mir +++ b/test/CodeGen/ARM/cmp1-peephole-thumb.mir @@ -55,7 +55,6 @@ frameInfo: # CHECK-NOT: tCMPi8 body: | bb.0.entry: - successors: %bb.1.entry(0x40000000), %bb.2.entry(0x40000000) liveins: %r0, %r1 %1 = COPY %r1 @@ -67,8 +66,6 @@ body: | tBcc %bb.2.entry, 0, %cpsr bb.1.entry: - successors: %bb.2.entry(0x80000000) - bb.2.entry: %5 = PHI %4, %bb.1.entry, %3, %bb.0.entry diff --git a/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/test/CodeGen/ARM/cmp2-peephole-thumb.mir index 6e9ca70f1741d..a31086d2113eb 100644 --- a/test/CodeGen/ARM/cmp2-peephole-thumb.mir +++ b/test/CodeGen/ARM/cmp2-peephole-thumb.mir @@ -76,7 +76,6 @@ stack: # CHECK-NEXT: tCMPi8 body: | bb.0.entry: - successors: %bb.1.if.then(0x40000000), %bb.2.if.end(0x40000000) liveins: %r0, %r1 %1 = COPY %r1 @@ -88,15 +87,11 @@ body: | tB %bb.1.if.then, 14, _ bb.1.if.then: - successors: %bb.3.return(0x80000000) - %4, %cpsr = tMOVi8 42, 14, _ tSTRspi killed %4, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval) tB %bb.3.return, 14, _ bb.2.if.end: - successors: %bb.3.return(0x80000000) - %3, %cpsr = tMOVi8 1, 14, _ tSTRspi killed %3, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval) diff --git a/test/CodeGen/ARM/dbg-range-extension.mir b/test/CodeGen/ARM/dbg-range-extension.mir index 466f693969489..a79607705c1c7 100644 --- a/test/CodeGen/ARM/dbg-range-extension.mir +++ b/test/CodeGen/ARM/dbg-range-extension.mir @@ -209,7 +209,6 @@ stack: - { id: 5, type: spill-slot, offset: -24, size: 4, alignment: 4, callee-saved-register: '%r4' } body: | bb.0.entry: - successors: %bb.5.if.end, %bb.1.if.then liveins: %r0, %r4, %r5, %r6, %r7, %r11, %lr %sp = frame-setup STMDB_UPD %sp, 14, _, killed %r4, killed %r5, killed %r6, killed %r7, killed %r11, killed %lr @@ -232,7 +231,6 @@ body: | Bcc %bb.5.if.end, 0, killed %cpsr bb.1.if.then: - successors: %bb.3.for.cond liveins: %r4, %r5 %r0 = MOVi 12, 14, _, _, debug-location !26 @@ -245,7 +243,6 @@ body: | B %bb.3.for.cond bb.2.for.body: - successors: %bb.3.for.cond liveins: %r4, %r5, %r6, %r7 %r1 = ADDrr %r5, %r7, 14, _, _, debug-location !36 @@ -255,7 +252,6 @@ body: | DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28 bb.3.for.cond: - successors: %bb.2.for.body, %bb.4.for.cond.cleanup liveins: %r4, %r5, %r6, %r7 DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28 @@ -263,7 +259,6 @@ body: | Bcc %bb.2.for.body, 11, killed %cpsr, debug-location !33 bb.4.for.cond.cleanup: - successors: %bb.5.if.end liveins: %r4, %r5, %r6 %r0 = MOVr %r5, 14, _, _, debug-location !34 diff --git a/test/CodeGen/ARM/sat-arith.ll b/test/CodeGen/ARM/sat-arith.ll deleted file mode 100644 index 4844ed1bd21e2..0000000000000 --- a/test/CodeGen/ARM/sat-arith.ll +++ /dev/null @@ -1,63 +0,0 @@ -; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s -check-prefix=ARM -check-prefix=CHECK -; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s -check-prefix=THUMB -check-prefix=CHECK - -; CHECK-LABEL: qadd -define i32 @qadd() nounwind { -; 
CHECK-DAG: mov{{s?}} [[R0:.*]], #8 -; CHECK-DAG: mov{{s?}} [[R1:.*]], #128 -; CHECK-ARM: qadd [[R0]], [[R1]], [[R0]] -; CHECK-THRUMB: qadd [[R0]], [[R0]], [[R1]] - %tmp = call i32 @llvm.arm.qadd(i32 128, i32 8) - ret i32 %tmp -} - -; CHECK-LABEL: qsub -define i32 @qsub() nounwind { -; CHECK-DAG: mov{{s?}} [[R0:.*]], #8 -; CHECK-DAG: mov{{s?}} [[R1:.*]], #128 -; CHECK-ARM: qsub [[R0]], [[R1]], [[R0]] -; CHECK-THRUMB: qadd [[R0]], [[R1]], [[R0]] - %tmp = call i32 @llvm.arm.qsub(i32 128, i32 8) - ret i32 %tmp -} - -; upper-bound of the immediate argument -; CHECK-LABEL: ssat1 -define i32 @ssat1() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: ssat [[R1:.*]], #32, [[R0]] - %tmp = call i32 @llvm.arm.ssat(i32 128, i32 32) - ret i32 %tmp -} - -; lower-bound of the immediate argument -; CHECK-LABEL: ssat2 -define i32 @ssat2() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: ssat [[R1:.*]], #1, [[R0]] - %tmp = call i32 @llvm.arm.ssat(i32 128, i32 1) - ret i32 %tmp -} - -; upper-bound of the immediate argument -; CHECK-LABEL: usat1 -define i32 @usat1() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: usat [[R1:.*]], #31, [[R0]] - %tmp = call i32 @llvm.arm.usat(i32 128, i32 31) - ret i32 %tmp -} - -; lower-bound of the immediate argument -; CHECK-LABEL: usat2 -define i32 @usat2() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: usat [[R1:.*]], #0, [[R0]] - %tmp = call i32 @llvm.arm.usat(i32 128, i32 0) - ret i32 %tmp -} - -declare i32 @llvm.arm.qadd(i32, i32) nounwind -declare i32 @llvm.arm.qsub(i32, i32) nounwind -declare i32 @llvm.arm.ssat(i32, i32) nounwind readnone -declare i32 @llvm.arm.usat(i32, i32) nounwind readnone diff --git a/test/CodeGen/ARM/vabs.ll b/test/CodeGen/ARM/vabs.ll index 38c6d6c28aedf..4295b32d25fc7 100644 --- a/test/CodeGen/ARM/vabs.ll +++ b/test/CodeGen/ARM/vabs.ll @@ -8,6 +8,22 @@ define <8 x i8> @vabss8(<8 x i8>* %A) nounwind { ret <8 x i8> %tmp2 } +define <8 x i8> @vabss8_fold(<8 x i8>* %A) nounwind { +; CHECK-LABEL: vabss8_fold: +; CHECK: vldr d16, .LCPI1_0 +; CHECK: .LCPI1_0: +; CHECK-NEXT: .byte 128 @ 0x80 +; CHECK-NEXT: .byte 127 @ 0x7f +; CHECK-NEXT: .byte 1 @ 0x1 +; CHECK-NEXT: .byte 0 @ 0x0 +; CHECK-NEXT: .byte 1 @ 0x1 +; CHECK-NEXT: .byte 127 @ 0x7f +; CHECK-NEXT: .byte 128 @ 0x80 +; CHECK-NEXT: .byte 1 @ 0x1 + %tmp1 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> <i8 -128, i8 -127, i8 -1, i8 0, i8 1, i8 127, i8 128, i8 255>) + ret <8 x i8> %tmp1 +} + define <4 x i16> @vabss16(<4 x i16>* %A) nounwind { ;CHECK-LABEL: vabss16: ;CHECK: vabs.s16 @@ -16,6 +32,18 @@ define <4 x i16> @vabss16(<4 x i16>* %A) nounwind { ret <4 x i16> %tmp2 } +define <4 x i16> @vabss16_fold() nounwind { +; CHECK-LABEL: vabss16_fold: +; CHECK: vldr d16, .LCPI3_0 +; CHECK: .LCPI3_0: +; CHECK-NEXT: .short 32768 @ 0x8000 +; CHECK-NEXT: .short 32767 @ 0x7fff +; CHECK-NEXT: .short 255 @ 0xff +; CHECK-NEXT: .short 32768 @ 0x8000 + %tmp1 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> <i16 -32768, i16 -32767, i16 255, i16 32768>) + ret <4 x i16> %tmp1 +} + define <2 x i32> @vabss32(<2 x i32>* %A) nounwind { ;CHECK-LABEL: vabss32: ;CHECK: vabs.s32 @@ -24,6 +52,16 @@ define <2 x i32> @vabss32(<2 x i32>* %A) nounwind { ret <2 x i32> %tmp2 } +define <2 x i32> @vabss32_fold() nounwind { +; CHECK-LABEL: vabss32_fold: +; CHECK: vldr d16, .LCPI5_0 +; CHECK: .LCPI5_0: +; CHECK-NEXT: .long 2147483647 @ 0x7fffffff +; CHECK-NEXT: .long 2147483648 @ 0x80000000 + %tmp1 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> <i32 -2147483647, i32 2147483648>) + ret <2 x i32> 
%tmp1 +} + define <2 x float> @vabsf32(<2 x float>* %A) nounwind { ;CHECK-LABEL: vabsf32: ;CHECK: vabs.f32 diff --git a/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll b/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll index 93c3cb14fb738..5e3c45c3454d8 100644 --- a/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll +++ b/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll @@ -25,7 +25,13 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" } ; CHECK: .p2align 4 ; CHECK-NEXT: .long {{.*}}Lxray_synthetic_0 +; CHECK-NEXT: .long {{.*}}Lxray_fn_idx_synth_0 ; CHECK-NEXT: .section {{.*}}xray_instr_map{{.*}} ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .long {{.*}}Lxray_sled_0 ; CHECK: .long {{.*}}Lxray_sled_1 +; CHECK-LABEL: Lxray_synthetic_end0: +; CHECK: .section {{.*}}xray_fn_idx{{.*}} +; CHECK-LABEL: Lxray_fn_idx_synth_0: +; CHECK: .long {{.*}}Lxray_synthetic_0 +; CHECK-NEXT: .long {{.*}}Lxray_synthetic_end0 diff --git a/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll b/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll index d14590b886794..739151fbdd5e5 100644 --- a/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll +++ b/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll @@ -25,7 +25,14 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" } ; CHECK: .p2align 4 ; CHECK-NEXT: .long {{.*}}Lxray_synthetic_0 +; CHECK-NEXT: .long {{.*}}Lxray_fn_idx_synth_0 ; CHECK-NEXT: .section {{.*}}xray_instr_map{{.*}} ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .long {{.*}}Lxray_sled_0 ; CHECK: .long {{.*}}Lxray_sled_1 +; CHECK-LABEL: Lxray_synthetic_end0: +; CHECK: .section {{.*}}xray_fn_idx{{.*}} +; CHECK-LABEL: Lxray_fn_idx_synth_0: +; CHECK: .long {{.*}}xray_synthetic_0 +; CHECK-NEXT: .long {{.*}}xray_synthetic_end0 + diff --git a/test/CodeGen/BPF/dwarfdump.ll b/test/CodeGen/BPF/dwarfdump.ll index 7ae64dfb56827..6a6913011e644 100644 --- a/test/CodeGen/BPF/dwarfdump.ll +++ b/test/CodeGen/BPF/dwarfdump.ll @@ -1,5 +1,7 @@ ; RUN: llc -O2 -march=bpfel %s -o %t -filetype=obj ; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s +; RUN: llc -O2 -march=bpfeb %s -o %t -filetype=obj +; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s source_filename = "testprog.c" target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128" diff --git a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir new file mode 100644 index 0000000000000..a746d826265b6 --- /dev/null +++ b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir @@ -0,0 +1,59 @@ +# RUN: llc -march=hexagon -run-pass branch-folder -run-pass if-converter -verify-machineinstrs %s -o - | FileCheck %s + +# The hoisting of common instructions from successors could cause registers +# to no longer be live-in in the successor blocks. The liveness was updated +# to include potential new live-in registres, but not to remove registers +# that were no longer live-in. +# This could cause if-converter to generate incorrect code. +# +# In this testcase, the "r1 = A2_sxth r0<kill>" was hoisted, and since r0 +# was killed, it was no longer live-in in either successor. 
The if-converter +# then created code, where the first predicated instruction has incorrect +# implicit use of r0: +# +# BB#0: +# Live Ins: %R0 +# %R1<def> = A2_sxth %R0<kill> ; hoisted, kills r0 +# A2_nop %P0<imp-def> +# %R0<def> = C2_cmoveit %P0, 2, %R0<imp-use> ; predicated A2_tfrsi +# %R0<def> = C2_cmoveif %P0, 1, %R0<imp-use> ; predicated A2_tfrsi +# %R0<def> = A2_add %R0<kill>, %R1<kill> +# J2_jumpr %R31, %PC<imp-def,dead> +# + +# CHECK: %r1 = A2_sxth killed %r0 +# CHECK: %r0 = C2_cmoveit %p0, 2 +# CHECK-NOT: implicit-def %r0 +# CHECK: %r0 = C2_cmoveif %p0, 1, implicit %r0 + +--- +name: fred +tracksRegLiveness: true + +body: | + bb.0: + liveins: %r0 + successors: %bb.1, %bb.2 + + A2_nop implicit-def %p0 + J2_jumpt killed %p0, %bb.2, implicit-def dead %pc + + bb.1: + successors: %bb.3 + liveins: %r0 + %r1 = A2_sxth killed %r0 + %r0 = A2_tfrsi 1 + J2_jump %bb.3, implicit-def %pc + + bb.2: + successors: %bb.3 + liveins: %r0 + %r1 = A2_sxth killed %r0 + %r0 = A2_tfrsi 2 + + bb.3: + liveins: %r0, %r1 + %r0 = A2_add killed %r0, killed %r1 + J2_jumpr %r31, implicit-def dead %pc +... + diff --git a/test/CodeGen/Hexagon/rdf-cover-use.ll b/test/CodeGen/Hexagon/rdf-cover-use.ll new file mode 100644 index 0000000000000..4f3de0868aa6e --- /dev/null +++ b/test/CodeGen/Hexagon/rdf-cover-use.ll @@ -0,0 +1,38 @@ +; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s + +; Check for sane output. +; CHECK: vmpyweh + +target triple = "hexagon" + +declare i32 @llvm.hexagon.S2.clb(i32) #0 +declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) #0 +declare i32 @llvm.hexagon.S2.vrndpackwh(i64) #0 +declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64) #0 + +define i64 @fred(i32 %a0, i32 %a1) local_unnamed_addr #1 { +b2: + br i1 undef, label %b15, label %b3 + +b3: ; preds = %b2 + %v4 = tail call i32 @llvm.hexagon.S2.clb(i32 %a1) #0 + %v5 = add nsw i32 %v4, -32 + %v6 = zext i32 %v5 to i64 + %v7 = shl nuw i64 %v6, 32 + %v8 = or i64 %v7, 0 + %v9 = tail call i32 @llvm.hexagon.S2.asl.r.r(i32 %a0, i32 0) + %v10 = tail call i32 @llvm.hexagon.S2.vrndpackwh(i64 %v8) + %v11 = sext i32 %v9 to i64 + %v12 = sext i32 %v10 to i64 + %v13 = tail call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %v11, i64 %v12) + %v14 = and i64 %v13, 4294967295 + br label %b15 + +b15: ; preds = %b3, %b2 + %v16 = phi i64 [ %v14, %b3 ], [ 0, %b2 ] + %v17 = or i64 0, %v16 + ret i64 %v17 +} + +attributes #0 = { nounwind readnone } +attributes #1 = { nounwind "target-cpu"="hexagonv55" } diff --git a/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/test/CodeGen/Hexagon/swp-matmul-bitext.ll index 9c425ae6a0988..3b26d141238ad 100644 --- a/test/CodeGen/Hexagon/swp-matmul-bitext.ll +++ b/test/CodeGen/Hexagon/swp-matmul-bitext.ll @@ -1,17 +1,16 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-bsb-sched=0 -enable-pipeliner < %s | FileCheck %s -; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-pipeliner < %s | FileCheck %s ; From coremark. Test that we pipeline the matrix multiplication bitextract ; function. The pipelined code should have two packets. 
; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: = extractu([[REG2:(r[0-9]+)]], -; CHECK: = extractu([[REG2]], -; CHECK: [[REG0:(r[0-9]+)]] = memh -; CHECK: [[REG1:(r[0-9]+)]] = memh +; CHECK: [[REG0:(r[0-9]+)]] = mpyi([[REG1:(r[0-9]+)]],[[REG2:(r[0-9]+)]]) ; CHECK: += mpyi -; CHECK: [[REG2]] = mpyi([[REG0]],[[REG1]]) +; CHECK: [[REG1:(r[0-9]+)]] = memh +; CHECK: = extractu([[REG0:(r[0-9]+)]], +; CHECK: = extractu([[REG0]], +; CHECK: [[REG2:(r[0-9]+)]] = memh ; CHECK: endloop0 %union_h2_sem_t = type { i32 } diff --git a/test/CodeGen/MIR/Generic/branch-probabilities.ll b/test/CodeGen/MIR/Generic/branch-probabilities.ll deleted file mode 100644 index 8d119316b1345..0000000000000 --- a/test/CodeGen/MIR/Generic/branch-probabilities.ll +++ /dev/null @@ -1,28 +0,0 @@ -; RUN: llc -stop-after machine-sink %s -o %t.mir -; RUN: FileCheck %s < %t.mir -; RUN: llc %t.mir -run-pass machine-sink -; Check that branch probabilities are printed in a format that can then be parsed. -; This test fails on powerpc because of an undefined physical register use in the MIR. See PR31062. -; XFAIL: powerpc - -declare void @foo() -declare void @bar() - -define void @test(i1 %c) { -; CHECK-LABEL: name: test -entry: - br i1 %c, label %then, label %else - -then: - call void @foo() - br label %end -; CHECK: successors: %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}), %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}) - -else: - call void @bar() - br label %end -; CHECK: successors: %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}) - -end: - ret void -} diff --git a/test/CodeGen/MIR/X86/auto-successor.mir b/test/CodeGen/MIR/X86/auto-successor.mir new file mode 100644 index 0000000000000..23b4f91b3b604 --- /dev/null +++ b/test/CodeGen/MIR/X86/auto-successor.mir @@ -0,0 +1,61 @@ +# RUN: llc -mtriple=x86_64-- -o - %s -run-pass=none -verify-machineinstrs -simplify-mir | FileCheck %s +--- +# We shouldn't need any explicit successor lists in these examples +# CHECK-LABEL: name: func0 +# CHECK: bb.0: +# CHECK-NOT: successors +# CHECK: JE_1 %bb.1, implicit undef %eflags +# CHECK: JMP_1 %bb.3 +# CHECK: bb.1: +# CHECK-NOT: successors +# CHECK: bb.2: +# CHECK-NOT: successors +# CHECK: JE_1 %bb.1, implicit undef %eflags +# CHECK: bb.3: +# CHECK: RETQ undef %eax +name: func0 +body: | + bb.0: + JE_1 %bb.1, implicit undef %eflags + JMP_1 %bb.3 + + bb.1: + + bb.2: + JE_1 %bb.1, implicit undef %eflags + + bb.3: + JE_1 %bb.4, implicit undef %eflags ; condjump+fallthrough to same block + + bb.4: + RETQ undef %eax +... +--- +# Some cases that need explicit successors: +# CHECK-LABEL: name: func1 +name: func1 +body: | + bb.0: + ; CHECK: bb.0: + ; CHECK: successors: %bb.3, %bb.1 + successors: %bb.3, %bb.1 ; different order than operands + JE_1 %bb.1, implicit undef %eflags + JMP_1 %bb.3 + + bb.1: + ; CHECK: bb.1: + ; CHECK: successors: %bb.2, %bb.1 + successors: %bb.2, %bb.1 ; different order (fallthrough variant) + JE_1 %bb.1, implicit undef %eflags + + bb.2: + ; CHECK: bb.2: + ; CHECK: successors: %bb.1(0x60000000), %bb.3(0x20000000) + successors: %bb.1(3), %bb.3(1) ; branch probabilities not normalized + JE_1 %bb.1, implicit undef %eflags + + bb.3: + ; CHECK: bb.3: + ; CHECK: RETQ undef %eax + RETQ undef %eax +... 
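A quick way to see how the integer successor weights in these MIR tests relate to the hexadecimal values the printer emits (0x80000000 corresponds to probability 1.0) is the small sketch below. It reproduces the encodings checked in the bb.2 case above and in the branch-probabilities.mir test that follows; it is a back-of-the-envelope model for illustration, not the in-tree BranchProbability implementation, which performs additional normalization of the successor list.

# Hypothetical helper, for illustration only: scale integer successor weights
# to the 32-bit fixed-point probabilities printed in MIR, rounding to nearest.
def encode(weights, scale=1 << 31):
    total = sum(weights)
    return [hex((w * scale + total // 2) // total) for w in weights]

print(encode([3, 1]))  # ['0x60000000', '0x20000000']  (bb.2 of func1 above)
print(encode([4, 1]))  # ['0x66666666', '0x1999999a']  (branch-probabilities.mir below)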
diff --git a/test/CodeGen/MIR/X86/branch-probabilities.mir b/test/CodeGen/MIR/X86/branch-probabilities.mir new file mode 100644 index 0000000000000..4aacd2d5cef1d --- /dev/null +++ b/test/CodeGen/MIR/X86/branch-probabilities.mir @@ -0,0 +1,18 @@ +# RUN: llc -o - %s -mtriple=x86_64-- -run-pass=none | FileCheck %s +--- +# Check that branch probabilities are printed correctly as hex numbers. +# CHECK-LABEL: name: test +# CHECK: bb.0: +# CHECK-NEXT: successors: %bb.1(0x66666666), %bb.2(0x1999999a) +name: test +body: | + bb.0: + successors: %bb.1(4), %bb.2(1) + JE_1 %bb.2, implicit undef %eflags + + bb.1: + NOOP + + bb.2: + RETQ undef %eax +... diff --git a/test/CodeGen/MIR/X86/successor-basic-blocks.mir b/test/CodeGen/MIR/X86/successor-basic-blocks.mir index 395272bb23c02..ffeb04af9e40d 100644 --- a/test/CodeGen/MIR/X86/successor-basic-blocks.mir +++ b/test/CodeGen/MIR/X86/successor-basic-blocks.mir @@ -32,7 +32,6 @@ name: foo body: | ; CHECK-LABEL: bb.0.entry: - ; CHECK: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000) ; CHECK-LABEL: bb.1.less: bb.0.entry: successors: %bb.1.less, %bb.2.exit diff --git a/test/CodeGen/PowerPC/restore-r30.ll b/test/CodeGen/PowerPC/restore-r30.ll new file mode 100644 index 0000000000000..216d5a709340d --- /dev/null +++ b/test/CodeGen/PowerPC/restore-r30.ll @@ -0,0 +1,30 @@ +; RUN: llc -march=ppc32 -relocation-model=pic < %s | FileCheck %s + +; The load restoring r30 at the end of the function was placed out of order +; relative to its uses as the PIC base pointer. +; This was because the r30 operand was not marked as "def" which allowed +; the post-RA scheduler to move it over other uses of r30. + +; CHECK-LABEL: fred +; CHECK: lwz 30, 24(1) +; R30 should not appear in an instruction after it's been restored. +; CHECK-NOT: 30, + +target datalayout = "E-m:e-p:32:32-i64:64-n32" +target triple = "powerpc" + +define double @fred(i64 %a) #0 { +entry: + %0 = lshr i64 %a, 32 + %conv = trunc i64 %0 to i32 + %conv1 = sitofp i32 %conv to double + %mul = fmul double %conv1, 0x41F0000000000000 + %and = and i64 %a, 4294967295 + %or = or i64 %and, 4841369599423283200 + %sub = fadd double %mul, 0xC330000000000000 + %1 = bitcast i64 %or to double + %add = fadd double %sub, %1 + ret double %add +} + +attributes #0 = { norecurse nounwind readnone "target-cpu"="ppc" "use-soft-float"="false" } diff --git a/test/CodeGen/SystemZ/copy-physreg-128.ll b/test/CodeGen/SystemZ/copy-physreg-128.ll new file mode 100644 index 0000000000000..408316140605e --- /dev/null +++ b/test/CodeGen/SystemZ/copy-physreg-128.ll @@ -0,0 +1,68 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -join-liveintervals=false -verify-machineinstrs | FileCheck %s +; +; Check that copyPhysReg() properly adds impl-use operands of the super +; register while lowering a COPY of a GR128 bit reg. 
+ +define void @autogen_SD5585(i32*, i64) { +; CHECK: .text +BB: + %L5 = load i1, i1* undef + %I8 = insertelement <8 x i64> undef, i64 %1, i32 3 + %I21 = insertelement <8 x i64> zeroinitializer, i64 475435, i32 5 + br label %CF290 + +CF290: ; preds = %CF290, %BB + %B29 = urem <8 x i64> %I8, %I21 + %Cmp31 = icmp sge i1 undef, undef + br i1 %Cmp31, label %CF290, label %CF296 + +CF296: ; preds = %CF290 + %FC36 = sitofp <8 x i64> %B29 to <8 x double> + br label %CF302 + +CF302: ; preds = %CF307, %CF296 + %Shuff49 = shufflevector <8 x i64> undef, <8 x i64> zeroinitializer, <8 x i32> <i32 undef, i32 9, i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5> + %L69 = load i16, i16* undef + br label %CF307 + +CF307: ; preds = %CF302 + %Cmp84 = icmp ne i16 undef, %L69 + br i1 %Cmp84, label %CF302, label %CF301 + +CF301: ; preds = %CF307 + %B126 = or i32 514315, undef + br label %CF280 + +CF280: ; preds = %CF280, %CF301 + %I139 = insertelement <8 x i64> %Shuff49, i64 undef, i32 2 + %B155 = udiv <8 x i64> %I8, %I139 + %Cmp157 = icmp ne i64 -1, undef + br i1 %Cmp157, label %CF280, label %CF281 + +CF281: ; preds = %CF280 + %Cmp164 = icmp slt i1 %L5, %Cmp84 + br label %CF282 + +CF282: ; preds = %CF304, %CF281 + br label %CF289 + +CF289: ; preds = %CF289, %CF282 + store i32 %B126, i32* %0 + %Cmp219 = icmp slt i64 undef, undef + br i1 %Cmp219, label %CF289, label %CF304 + +CF304: ; preds = %CF289 + %Cmp234 = icmp ult i64 0, undef + br i1 %Cmp234, label %CF282, label %CF283 + +CF283: ; preds = %CF308, %CF283, %CF304 + %E251 = extractelement <8 x i64> %B155, i32 0 + br i1 undef, label %CF283, label %CF308 + +CF308: ; preds = %CF283 + store i1 %Cmp164, i1* undef + br i1 undef, label %CF283, label %CF293 + +CF293: ; preds = %CF308 + ret void +} diff --git a/test/CodeGen/X86/2014-08-29-CompactUnwind.ll b/test/CodeGen/X86/2014-08-29-CompactUnwind.ll index e7e8bb724fc05..f6d6bd3ed6f74 100644 --- a/test/CodeGen/X86/2014-08-29-CompactUnwind.ll +++ b/test/CodeGen/X86/2014-08-29-CompactUnwind.ll @@ -24,7 +24,7 @@ target triple = "x86_64-apple-macosx10.9.0" ; CHECK-NOT: {{compact encoding:.*0x0309f800}} ; CHECK: {{compact encoding:.*0x030df800}} -define void @__asan_report_error() #0 { +define void @__asan_report_error(i64 %step) #0 { %str.i = alloca i64, align 8 %stack = alloca [256 x i64], align 8 br label %print_shadow_bytes.exit.i @@ -37,7 +37,7 @@ print_shadow_bytes.exit.i: ; preds = %print_shadow_bytes.exit.i, %0 %reg17 = shl i64 %iv.i, 1 %reg19 = inttoptr i64 %reg17 to i8* call void (i64*, i8*, ...) 
@append(i64* %str.i, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str2, i64 0, i64 0), i8* %reg16, i8* %reg19) - %iv.next.i = add nsw i64 %iv.i, 0 + %iv.next.i = add nsw i64 %iv.i, %step br label %print_shadow_bytes.exit.i } diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll new file mode 100644 index 0000000000000..bc5b0152b24ae --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/gep.ll @@ -0,0 +1,136 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL +; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 + +define i32* @test_gep_i8(i32 *%arr, i8 %ind) { +; X64_GISEL-LABEL: test_gep_i8: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: movsbq %sil, %rcx +; X64_GISEL-NEXT: imulq %rax, %rcx +; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i8: +; X64: # BB#0: +; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> +; X64-NEXT: movsbq %sil, %rax +; X64-NEXT: leaq (%rdi,%rax,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i8 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i8_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i8_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $80, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i8_const: +; X64: # BB#0: +; X64-NEXT: leaq 80(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i8 20 + ret i32* %arrayidx +} + +define i32* @test_gep_i16(i32 *%arr, i16 %ind) { +; X64_GISEL-LABEL: test_gep_i16: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: movswq %si, %rcx +; X64_GISEL-NEXT: imulq %rax, %rcx +; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i16: +; X64: # BB#0: +; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> +; X64-NEXT: movswq %si, %rax +; X64-NEXT: leaq (%rdi,%rax,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i16 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i16_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i16_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $80, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i16_const: +; X64: # BB#0: +; X64-NEXT: leaq 80(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i16 20 + ret i32* %arrayidx +} + +define i32* @test_gep_i32(i32 *%arr, i32 %ind) { +; X64_GISEL-LABEL: test_gep_i32: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: movslq %esi, %rcx +; X64_GISEL-NEXT: imulq %rax, %rcx +; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i32: +; X64: # BB#0: +; X64-NEXT: movslq %esi, %rax +; X64-NEXT: leaq (%rdi,%rax,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i32 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i32_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i32_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $20, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i32_const: +; X64: # BB#0: +; X64-NEXT: leaq 20(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i32 5 + ret i32* %arrayidx +} + +define i32* @test_gep_i64(i32 *%arr, i64 %ind) { +; X64_GISEL-LABEL: 
test_gep_i64: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: imulq %rsi, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i64: +; X64: # BB#0: +; X64-NEXT: leaq (%rdi,%rsi,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i64 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i64_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i64_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $20, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i64_const: +; X64: # BB#0: +; X64-NEXT: leaq 20(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i64 5 + ret i32* %arrayidx +} + diff --git a/test/CodeGen/X86/GlobalISel/legalize-gep.mir b/test/CodeGen/X86/GlobalISel/legalize-gep.mir new file mode 100644 index 0000000000000..4fdb9b910ad78 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-gep.mir @@ -0,0 +1,101 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s + +--- | + define void @test_gep_i8() { + %arrayidx = getelementptr i32, i32* undef, i8 5 + ret void + } + + define void @test_gep_i16() { + %arrayidx = getelementptr i32, i32* undef, i16 5 + ret void + } + + define void @test_gep_i32() { + %arrayidx = getelementptr i32, i32* undef, i32 5 + ret void + } + + define void @test_gep_i64() { + %arrayidx = getelementptr i32, i32* undef, i64 5 + ret void + } +... +--- +name: test_gep_i8 +# CHECK-LABEL: name: test_gep_i8 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s8) = G_CONSTANT i8 20 +# CHECK-NEXT: %3(s32) = G_SEXT %1(s8) +# CHECK-NEXT: %2(p0) = G_GEP %0, %3(s32) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s8) = G_CONSTANT i8 20 + %2(p0) = G_GEP %0, %1(s8) + RET 0 +... +--- +name: test_gep_i16 +# CHECK-LABEL: name: test_gep_i16 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s16) = G_CONSTANT i16 20 +# CHECK-NEXT: %3(s32) = G_SEXT %1(s16) +# CHECK-NEXT: %2(p0) = G_GEP %0, %3(s32) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s16) = G_CONSTANT i16 20 + %2(p0) = G_GEP %0, %1(s16) + RET 0 +... +--- +name: test_gep_i32 +# CHECK-LABEL: name: test_gep_i32 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s32) = G_CONSTANT i32 20 +# CHECK-NEXT: %2(p0) = G_GEP %0, %1(s32) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s32) = G_CONSTANT i32 20 + %2(p0) = G_GEP %0, %1(s32) + RET 0 +... +--- +name: test_gep_i64 +# CHECK-LABEL: name: test_gep_i64 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s64) = G_CONSTANT i64 20 +# CHECK-NEXT: %2(p0) = G_GEP %0, %1(s64) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s64) = G_CONSTANT i64 20 + %2(p0) = G_GEP %0, %1(s64) + RET 0 +... 
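As the gep.ll and legalize-gep.mir checks above show, a GEP whose index is narrower than 32 bits is legalized by sign-extending the index (G_SEXT to s32) before the G_GEP, and the selected GlobalISel code then computes base + 4 * sext(index) with an explicit multiply, while SelectionDAG folds the scale into the LEA. A small sketch of that address arithmetic, with illustrative names only (not LLVM API):

    def gep_i32(base, index_i8):
        # Sign-extend the 8-bit index, scale by the 4-byte i32 element size, add the base.
        index = index_i8 - 0x100 if index_i8 & 0x80 else index_i8
        return base + 4 * index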
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir new file mode 100644 index 0000000000000..0d66a63841071 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir @@ -0,0 +1,115 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s + +--- | + define i16 @test_mul_i16(i16 %arg1, i16 %arg2) { + %ret = mul i16 %arg1, %arg2 + ret i16 %ret + } + + define i32 @test_mul_i32(i32 %arg1, i32 %arg2) { + %ret = mul i32 %arg1, %arg2 + ret i32 %ret + } + + define i64 @test_mul_i64(i64 %arg1, i64 %arg2) { + %ret = mul i64 %arg1, %arg2 + ret i64 %ret + } + +... +--- +name: test_mul_i16 +# CHECK-LABEL: name: test_mul_i16 +alignment: 4 +legalized: false +regBankSelected: false +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: _ } +# CHECK-NEXT: - { id: 1, class: _ } +# CHECK-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: body: | +# CHECK-NEXT: bb.0 (%ir-block.0): +# CHECK-NEXT: %0(s16) = COPY %edi +# CHECK-NEXT: %1(s16) = COPY %esi +# CHECK-NEXT: %2(s16) = G_MUL %0, %1 +# CHECK-NEXT: %ax = COPY %2(s16) +# CHECK-NEXT: RET 0, implicit %ax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s16) = COPY %edi + %1(s16) = COPY %esi + %2(s16) = G_MUL %0, %1 + %ax = COPY %2(s16) + RET 0, implicit %ax + +... +--- +name: test_mul_i32 +# CHECK-LABEL: name: test_mul_i32 +alignment: 4 +legalized: false +regBankSelected: false +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: _ } +# CHECK-NEXT: - { id: 1, class: _ } +# CHECK-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: body: | +# CHECK-NEXT: bb.0 (%ir-block.0): +# CHECK-NEXT: %0(s32) = COPY %edi +# CHECK-NEXT: %1(s32) = COPY %esi +# CHECK-NEXT: %2(s32) = G_MUL %0, %1 +# CHECK-NEXT: %eax = COPY %2(s32) +# CHECK-NEXT: RET 0, implicit %eax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s32) = COPY %edi + %1(s32) = COPY %esi + %2(s32) = G_MUL %0, %1 + %eax = COPY %2(s32) + RET 0, implicit %eax + +... +--- +name: test_mul_i64 +# CHECK-LABEL: name: test_mul_i64 +alignment: 4 +legalized: false +regBankSelected: false +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: _ } +# CHECK-NEXT: - { id: 1, class: _ } +# CHECK-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: body: | +# CHECK-NEXT: bb.0 (%ir-block.0): +# CHECK-NEXT: %0(s64) = COPY %rdi +# CHECK-NEXT: %1(s64) = COPY %rsi +# CHECK-NEXT: %2(s64) = G_MUL %0, %1 +# CHECK-NEXT: %rax = COPY %2(s64) +# CHECK-NEXT: RET 0, implicit %rax +body: | + bb.1 (%ir-block.0): + liveins: %rdi, %rsi + + %0(s64) = COPY %rdi + %1(s64) = COPY %rsi + %2(s64) = G_MUL %0, %1 + %rax = COPY %2(s64) + RET 0, implicit %rax + +... 
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir new file mode 100644 index 0000000000000..be62832b008a0 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir @@ -0,0 +1,111 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL +--- | + define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #0 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #1 { + %ret = mul <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret + } + + attributes #0 = { "target-features"="+sse4.1" } + attributes #1 = { "target-features"="+sse4.1,+avx512vl,+avx512f,+avx512dq" } + +... +--- +name: test_mul_v8i16 +# ALL-LABEL: name: test_mul_v8i16 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s16>) = COPY %xmm0 +# ALL-NEXT: %1(<8 x s16>) = COPY %xmm1 +# ALL-NEXT: %2(<8 x s16>) = G_MUL %0, %1 +# ALL-NEXT: %xmm0 = COPY %2(<8 x s16>) +# ALL-NEXT: RET 0, implicit %xmm0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v4i32 +# ALL-LABEL: name: test_mul_v4i32 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<4 x s32>) = COPY %xmm0 +# ALL-NEXT: %1(<4 x s32>) = COPY %xmm1 +# ALL-NEXT: %2(<4 x s32>) = G_MUL %0, %1 +# ALL-NEXT: %xmm0 = COPY %2(<4 x s32>) +# ALL-NEXT: RET 0, implicit %xmm0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v2i64 +# ALL-LABEL: name: test_mul_v2i64 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<2 x s64>) = COPY %xmm0 +# ALL-NEXT: %1(<2 x s64>) = COPY %xmm1 +# ALL-NEXT: %2(<2 x s64>) = G_MUL %0, %1 +# ALL-NEXT: %xmm0 = COPY %2(<2 x s64>) +# ALL-NEXT: RET 0, implicit %xmm0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = COPY %xmm0 + %1(<2 x s64>) = COPY %xmm1 + %2(<2 x s64>) = G_MUL %0, %1 + %xmm0 = COPY %2(<2 x s64>) + RET 0, implicit %xmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir new file mode 100644 index 0000000000000..d99303c3ba3b4 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir @@ -0,0 +1,111 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL +--- | + define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #0 { + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #0 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #1 { + %ret = mul <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret + } + + attributes #0 = { "target-features"="+avx2" } + attributes #1 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" } + +... +--- +name: test_mul_v16i16 +# ALL-LABEL: name: test_mul_v16i16 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s16>) = COPY %ymm0 +# ALL-NEXT: %1(<16 x s16>) = COPY %ymm1 +# ALL-NEXT: %2(<16 x s16>) = G_MUL %0, %1 +# ALL-NEXT: %ymm0 = COPY %2(<16 x s16>) +# ALL-NEXT: RET 0, implicit %ymm0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_MUL %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v8i32 +# ALL-LABEL: name: test_mul_v8i32 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s32>) = COPY %ymm0 +# ALL-NEXT: %1(<8 x s32>) = COPY %ymm1 +# ALL-NEXT: %2(<8 x s32>) = G_MUL %0, %1 +# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>) +# ALL-NEXT: RET 0, implicit %ymm0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_MUL %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v4i64 +# ALL-LABEL: name: test_mul_v4i64 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<4 x s64>) = COPY %ymm0 +# ALL-NEXT: %1(<4 x s64>) = COPY %ymm1 +# ALL-NEXT: %2(<4 x s64>) = G_MUL %0, %1 +# ALL-NEXT: %ymm0 = COPY %2(<4 x s64>) +# ALL-NEXT: RET 0, implicit %ymm0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = COPY %ymm0 + %1(<4 x s64>) = COPY %ymm1 + %2(<4 x s64>) = G_MUL %0, %1 + %ymm0 = COPY %2(<4 x s64>) + RET 0, implicit %ymm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir new file mode 100644 index 0000000000000..24eefd30c2ac8 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir @@ -0,0 +1,113 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL + +--- | + define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 { + %ret = mul <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret + } + + define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 { + %ret = mul <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret + } + + define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #2 { + %ret = mul <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret + } + + attributes #0 = { "target-features"="+avx512f,+avx512bw" } + attributes #1 = { "target-features"="+avx512f" } + attributes #2 = { "target-features"="+avx512f,+avx512dq" } + +... +--- +name: test_mul_v32i16 +# ALL-LABEL: name: test_mul_v32i16 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<32 x s16>) = COPY %zmm0 +# ALL-NEXT: %1(<32 x s16>) = COPY %zmm1 +# ALL-NEXT: %2(<32 x s16>) = G_MUL %0, %1 +# ALL-NEXT: %zmm0 = COPY %2(<32 x s16>) +# ALL-NEXT: RET 0, implicit %zmm0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = COPY %zmm0 + %1(<32 x s16>) = COPY %zmm1 + %2(<32 x s16>) = G_MUL %0, %1 + %zmm0 = COPY %2(<32 x s16>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v16i32 +# ALL-LABEL: name: test_mul_v16i32 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s32>) = COPY %zmm0 +# ALL-NEXT: %1(<16 x s32>) = COPY %zmm1 +# ALL-NEXT: %2(<16 x s32>) = G_MUL %0, %1 +# ALL-NEXT: %zmm0 = COPY %2(<16 x s32>) +# ALL-NEXT: RET 0, implicit %zmm0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = COPY %zmm0 + %1(<16 x s32>) = COPY %zmm1 + %2(<16 x s32>) = G_MUL %0, %1 + %zmm0 = COPY %2(<16 x s32>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v8i64 +# ALL-LABEL: name: test_mul_v8i64 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s64>) = COPY %zmm0 +# ALL-NEXT: %1(<8 x s64>) = COPY %zmm1 +# ALL-NEXT: %2(<8 x s64>) = G_MUL %0, %1 +# ALL-NEXT: %zmm0 = COPY %2(<8 x s64>) +# ALL-NEXT: RET 0, implicit %zmm0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = COPY %zmm0 + %1(<8 x s64>) = COPY %zmm1 + %2(<8 x s64>) = G_MUL %0, %1 + %zmm0 = COPY %2(<8 x s64>) + RET 0, implicit %zmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/test/CodeGen/X86/GlobalISel/mul-scalar.ll new file mode 100644 index 0000000000000..529e81c43304b --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/mul-scalar.ll @@ -0,0 +1,39 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 + +;TODO: instruction selection not supported yet +;define i8 @test_mul_i8(i8 %arg1, i8 %arg2) { +; %ret = mul i8 %arg1, %arg2 +; ret i8 %ret +;} + +define i16 @test_mul_i16(i16 %arg1, i16 %arg2) { +; X64-LABEL: test_mul_i16: +; X64: # BB#0: +; X64-NEXT: imulw %di, %si +; X64-NEXT: movl %esi, %eax +; X64-NEXT: retq + %ret = mul i16 %arg1, %arg2 + ret i16 %ret +} + +define i32 @test_mul_i32(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_mul_i32: +; X64: # BB#0: +; X64-NEXT: imull %edi, %esi +; X64-NEXT: movl %esi, %eax +; X64-NEXT: retq + %ret = mul i32 %arg1, %arg2 + ret i32 %ret +} + +define i64 @test_mul_i64(i64 %arg1, i64 %arg2) { +; X64-LABEL: test_mul_i64: +; X64: # BB#0: +; X64-NEXT: imulq %rdi, %rsi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: retq + %ret = mul i64 %arg1, %arg2 + ret i64 %ret +} + diff --git a/test/CodeGen/X86/GlobalISel/mul-vec.ll b/test/CodeGen/X86/GlobalISel/mul-vec.ll new file mode 100644 index 0000000000000..83615a718528f --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/mul-vec.ll @@ -0,0 +1,84 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=SKX + +define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { +; SKX-LABEL: test_mul_v8i16: +; SKX: # BB#0: +; SKX-NEXT: vpmullw %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret +} + +define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { +; SKX-LABEL: test_mul_v4i32: +; SKX: # BB#0: +; SKX-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret +} + +define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) { +; SKX-LABEL: test_mul_v2i64: +; SKX: # BB#0: +; SKX-NEXT: vpmullq %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = mul <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret +} + +define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) { +; SKX-LABEL: test_mul_v16i16: +; SKX: # BB#0: +; SKX-NEXT: vpmullw %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret +} + +define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) { +; SKX-LABEL: test_mul_v8i32: +; SKX: # BB#0: +; SKX-NEXT: vpmulld %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret +} + +define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) { +; SKX-LABEL: test_mul_v4i64: +; SKX: # BB#0: +; SKX-NEXT: vpmullq %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = mul <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret +} + +define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) { +; SKX-LABEL: test_mul_v32i16: +; SKX: # BB#0: +; SKX-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = mul <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret +} + +define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) { +; SKX-LABEL: test_mul_v16i32: +; SKX: # BB#0: +; SKX-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = mul <16 x i32> %arg1, %arg2 + ret <16 x i32> 
%ret +} + +define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) { +; SKX-LABEL: test_mul_v8i64: +; SKX: # BB#0: +; SKX-NEXT: vpmullq %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = mul <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret +} + diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir new file mode 100644 index 0000000000000..446db56b992c5 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir @@ -0,0 +1,31 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY + +--- | + define void @test_mul_vec256() { + ret void + } +... +--- +name: test_mul_vec256 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_mul_vec256 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<8 x s32>) = IMPLICIT_DEF + %1(<8 x s32>) = G_MUL %0, %0 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir new file mode 100644 index 0000000000000..f824ee12dcfb8 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir @@ -0,0 +1,33 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY + +--- | + + define void @test_mul_vec512() { + ret void + } + +... +--- +name: test_mul_vec512 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_mul_vec512 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<16 x s32>) = IMPLICIT_DEF + %1(<16 x s32>) = G_MUL %0, %0 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir index 8e04239041a87..3a65a9003773c 100644 --- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir +++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir @@ -27,6 +27,10 @@ ret i64 %ret } + define void @test_mul_gpr() { + ret void + } + define float @test_add_float(float %arg1, float %arg2) { %ret = fadd float %arg1, %arg2 ret float %ret @@ -110,6 +114,12 @@ ret void } + define void @test_gep() { + %p1 = getelementptr i32, i32* undef, i32 5 + %p2 = getelementptr i32, i32* undef, i64 5 + ret void + } + ... --- name: test_add_i8 @@ -220,6 +230,45 @@ body: | ... 
--- +name: test_mul_gpr +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_mul_gpr +# CHECK: registers: +# CHECK: - { id: 0, class: gpr } +# CHECK: - { id: 1, class: gpr } +# CHECK: - { id: 2, class: gpr } +# CHECK: - { id: 3, class: gpr } +# CHECK: - { id: 4, class: gpr } +# CHECK: - { id: 5, class: gpr } +# CHECK: - { id: 6, class: gpr } +# CHECK: - { id: 7, class: gpr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } + - { id: 4, class: _ } + - { id: 5, class: _ } + - { id: 6, class: _ } + - { id: 7, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(s64) = IMPLICIT_DEF + %1(s32) = IMPLICIT_DEF + %2(s16) = IMPLICIT_DEF + %3(s8) = IMPLICIT_DEF + %4(s64) = G_MUL %0, %0 + %5(s32) = G_MUL %1, %1 + %6(s16) = G_MUL %2, %2 + %7(s8) = G_MUL %3, %3 + RET 0 +... +--- name: test_add_float alignment: 4 legalized: true @@ -660,3 +709,29 @@ body: | RET 0 ... +--- +name: test_gep +legalized: true +# CHECK-LABEL: name: test_gep +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gpr } +# CHECK-NEXT: - { id: 1, class: gpr } +# CHECK-NEXT: - { id: 2, class: gpr } +# CHECK-NEXT: - { id: 3, class: gpr } +# CHECK-NEXT: - { id: 4, class: gpr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } + - { id: 4, class: _ } +body: | + bb.0 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s32) = G_CONSTANT i32 20 + %2(p0) = G_GEP %0, %1(s32) + %3(s64) = G_CONSTANT i64 20 + %4(p0) = G_GEP %0, %3(s64) + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/select-gep.mir b/test/CodeGen/X86/GlobalISel/select-gep.mir new file mode 100644 index 0000000000000..2c89b7057c3d2 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-gep.mir @@ -0,0 +1,37 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK + +--- | + define i32* @test_gep_i32(i32* %arr) { + %arrayidx = getelementptr i32, i32* %arr, i32 5 + ret i32* %arrayidx + } +... +--- +name: test_gep_i32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: test_gep_i32 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr64_nosp } +# CHECK-NEXT: - { id: 2, class: gr64 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# CHECK: body: +# CHECK: %1 = MOV64ri32 20 +# CHECK-NEXT: %2 = LEA64r %0, 1, %1, 0, _ +body: | + bb.1 (%ir-block.0): + liveins: %rdi + + %0(p0) = COPY %rdi + %1(s64) = G_CONSTANT i64 20 + %2(p0) = G_GEP %0, %1(s64) + %rax = COPY %2(p0) + RET 0, implicit %rax + +... diff --git a/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir b/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir new file mode 100644 index 0000000000000..34a77acc2d1e9 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir @@ -0,0 +1,112 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL + +--- | + define i16 @test_mul_i16(i16 %arg1, i16 %arg2) { + %ret = mul i16 %arg1, %arg2 + ret i16 %ret + } + + define i32 @test_mul_i32(i32 %arg1, i32 %arg2) { + %ret = mul i32 %arg1, %arg2 + ret i32 %ret + } + + define i64 @test_mul_i64(i64 %arg1, i64 %arg2) { + %ret = mul i64 %arg1, %arg2 + ret i64 %ret + } + +... 
+--- +name: test_mul_i16 +# ALL-LABEL: name: test_mul_i16 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr16 } +# ALL-NEXT: - { id: 1, class: gr16 } +# ALL-NEXT: - { id: 2, class: gr16 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: body: | +# ALL: %0 = COPY %di +# ALL-NEXT: %1 = COPY %si +# ALL-NEXT: %2 = IMUL16rr %0, %1, implicit-def %eflags +# ALL-NEXT: %ax = COPY %2 +# ALL-NEXT: RET 0, implicit %ax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s16) = COPY %edi + %1(s16) = COPY %esi + %2(s16) = G_MUL %0, %1 + %ax = COPY %2(s16) + RET 0, implicit %ax + +... +--- +name: test_mul_i32 +# ALL-LABEL: name: test_mul_i32 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr32 } +# ALL-NEXT: - { id: 1, class: gr32 } +# ALL-NEXT: - { id: 2, class: gr32 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: body: | +# ALL: %0 = COPY %edi +# ALL-NEXT: %1 = COPY %esi +# ALL-NEXT: %2 = IMUL32rr %0, %1, implicit-def %eflags +# ALL-NEXT: %eax = COPY %2 +# ALL-NEXT: RET 0, implicit %eax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s32) = COPY %edi + %1(s32) = COPY %esi + %2(s32) = G_MUL %0, %1 + %eax = COPY %2(s32) + RET 0, implicit %eax + +... +--- +name: test_mul_i64 +# ALL-LABEL: name: test_mul_i64 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr64 } +# ALL-NEXT: - { id: 1, class: gr64 } +# ALL-NEXT: - { id: 2, class: gr64 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: body: | +# ALL: %0 = COPY %rdi +# ALL-NEXT: %1 = COPY %rsi +# ALL-NEXT: %2 = IMUL64rr %0, %1, implicit-def %eflags +# ALL-NEXT: %rax = COPY %2 +# ALL-NEXT: RET 0, implicit %rax +body: | + bb.1 (%ir-block.0): + liveins: %rdi, %rsi + + %0(s64) = COPY %rdi + %1(s64) = COPY %rsi + %2(s64) = G_MUL %0, %1 + %rax = COPY %2(s64) + RET 0, implicit %rax + +... 
diff --git a/test/CodeGen/X86/GlobalISel/select-mul-vec.mir b/test/CodeGen/X86/GlobalISel/select-mul-vec.mir new file mode 100644 index 0000000000000..5f8ab1e4f1896 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-mul-vec.mir @@ -0,0 +1,480 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + +--- | + define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <8 x i16> @test_mul_v8i16_avx(<8 x i16> %arg1, <8 x i16> %arg2) #1 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <8 x i16> @test_mul_v8i16_avx512bwvl(<8 x i16> %arg1, <8 x i16> %arg2) #2 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #3 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <4 x i32> @test_mul_v4i32_avx(<4 x i32> %arg1, <4 x i32> %arg2) #1 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <4 x i32> @test_mul_v4i32_avx512vl(<4 x i32> %arg1, <4 x i32> %arg2) #4 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #5 { + %ret = mul <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret + } + + define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #6 { + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <16 x i16> @test_mul_v16i16_avx512bwvl(<16 x i16> %arg1, <16 x i16> %arg2) #2 { + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #6 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <8 x i32> @test_mul_v8i32_avx512vl(<8 x i32> %arg1, <8 x i32> %arg2) #4 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #5 { + %ret = mul <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret + } + + define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #7 { + %ret = mul <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret + } + + define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #8 { + %ret = mul <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret + } + + define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #9 { + %ret = mul <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret + } + + attributes #0 = { "target-features"="+sse2" } + attributes #1 = { "target-features"="+avx" } + attributes #2 = { "target-features"="+avx512vl,+avx512f,+avx512bw" } + attributes #3 = { "target-features"="+sse4.1" } + attributes #4 = { "target-features"="+avx512vl,+avx512f" } + attributes #5 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" } + attributes #6 = { "target-features"="+avx2" } + attributes #7 = { "target-features"="+avx512f,+avx512bw" } + attributes #8 = { "target-features"="+avx512f" } + attributes #9 = { "target-features"="+avx512f,+avx512dq" } + +... 
+--- +name: test_mul_v8i16 +# CHECK-LABEL: name: test_mul_v8i16 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = PMULLWrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v8i16_avx +# CHECK-LABEL: name: test_mul_v8i16_avx +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v8i16_avx512bwvl +# CHECK-LABEL: name: test_mul_v8i16_avx512bwvl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128x } +# CHECK-NEXT: - { id: 1, class: vr128x } +# CHECK-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v4i32 +# CHECK-LABEL: name: test_mul_v4i32 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = PMULLDrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v4i32_avx +# CHECK-LABEL: name: test_mul_v4i32_avx +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... 
+--- +name: test_mul_v4i32_avx512vl +# CHECK-LABEL: name: test_mul_v4i32_avx512vl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128x } +# CHECK-NEXT: - { id: 1, class: vr128x } +# CHECK-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v2i64 +# CHECK-LABEL: name: test_mul_v2i64 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128x } +# CHECK-NEXT: - { id: 1, class: vr128x } +# CHECK-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLQZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = COPY %xmm0 + %1(<2 x s64>) = COPY %xmm1 + %2(<2 x s64>) = G_MUL %0, %1 + %xmm0 = COPY %2(<2 x s64>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v16i16 +# CHECK-LABEL: name: test_mul_v16i16 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256 } +# CHECK-NEXT: - { id: 1, class: vr256 } +# CHECK-NEXT: - { id: 2, class: vr256 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWYrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_MUL %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v16i16_avx512bwvl +# CHECK-LABEL: name: test_mul_v16i16_avx512bwvl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256x } +# CHECK-NEXT: - { id: 1, class: vr256x } +# CHECK-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_MUL %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v8i32 +# CHECK-LABEL: name: test_mul_v8i32 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256 } +# CHECK-NEXT: - { id: 1, class: vr256 } +# CHECK-NEXT: - { id: 2, class: vr256 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDYrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_MUL %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... 
+--- +name: test_mul_v8i32_avx512vl +# CHECK-LABEL: name: test_mul_v8i32_avx512vl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256x } +# CHECK-NEXT: - { id: 1, class: vr256x } +# CHECK-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_MUL %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v4i64 +# CHECK-LABEL: name: test_mul_v4i64 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256x } +# CHECK-NEXT: - { id: 1, class: vr256x } +# CHECK-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLQZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = COPY %ymm0 + %1(<4 x s64>) = COPY %ymm1 + %2(<4 x s64>) = G_MUL %0, %1 + %ymm0 = COPY %2(<4 x s64>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v32i16 +# CHECK-LABEL: name: test_mul_v32i16 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr512 } +# CHECK-NEXT: - { id: 1, class: vr512 } +# CHECK-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = COPY %zmm0 + %1(<32 x s16>) = COPY %zmm1 + %2(<32 x s16>) = G_MUL %0, %1 + %zmm0 = COPY %2(<32 x s16>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v16i32 +# CHECK-LABEL: name: test_mul_v16i32 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr512 } +# CHECK-NEXT: - { id: 1, class: vr512 } +# CHECK-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = COPY %zmm0 + %1(<16 x s32>) = COPY %zmm1 + %2(<16 x s32>) = G_MUL %0, %1 + %zmm0 = COPY %2(<16 x s32>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v8i64 +# CHECK-LABEL: name: test_mul_v8i64 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr512 } +# CHECK-NEXT: - { id: 1, class: vr512 } +# CHECK-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLQZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = COPY %zmm0 + %1(<8 x s64>) = COPY %zmm1 + %2(<8 x s64>) = G_MUL %0, %1 + %zmm0 = COPY %2(<8 x s64>) + RET 0, implicit %zmm0 + +... 
diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll index 5e95cd832789b..be550e3fe2d16 100644 --- a/test/CodeGen/X86/addcarry.ll +++ b/test/CodeGen/X86/addcarry.ll @@ -204,3 +204,70 @@ entry: %6 = add i64 %4, %5 ret i64 %6 } + +%S = type { [4 x i64] } + +define %S @readd(%S* nocapture readonly %this, %S %arg.b) { +; CHECK-LABEL: readd: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: addq (%rsi), %rdx +; CHECK-NEXT: movq 8(%rsi), %r10 +; CHECK-NEXT: adcq $0, %r10 +; CHECK-NEXT: sbbq %rax, %rax +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: addq %rcx, %r10 +; CHECK-NEXT: adcq 16(%rsi), %rax +; CHECK-NEXT: sbbq %rcx, %rcx +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: addq %r8, %rax +; CHECK-NEXT: adcq 24(%rsi), %rcx +; CHECK-NEXT: addq %r9, %rcx +; CHECK-NEXT: movq %rdx, (%rdi) +; CHECK-NEXT: movq %r10, 8(%rdi) +; CHECK-NEXT: movq %rax, 16(%rdi) +; CHECK-NEXT: movq %rcx, 24(%rdi) +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: retq +entry: + %0 = extractvalue %S %arg.b, 0 + %.elt6 = extractvalue [4 x i64] %0, 1 + %.elt8 = extractvalue [4 x i64] %0, 2 + %.elt10 = extractvalue [4 x i64] %0, 3 + %.elt = extractvalue [4 x i64] %0, 0 + %1 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 0 + %2 = load i64, i64* %1, align 8 + %3 = zext i64 %2 to i128 + %4 = zext i64 %.elt to i128 + %5 = add nuw nsw i128 %3, %4 + %6 = trunc i128 %5 to i64 + %7 = lshr i128 %5, 64 + %8 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1 + %9 = load i64, i64* %8, align 8 + %10 = zext i64 %9 to i128 + %11 = add nuw nsw i128 %7, %10 + %12 = zext i64 %.elt6 to i128 + %13 = add nuw nsw i128 %11, %12 + %14 = trunc i128 %13 to i64 + %15 = lshr i128 %13, 64 + %16 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2 + %17 = load i64, i64* %16, align 8 + %18 = zext i64 %17 to i128 + %19 = add nuw nsw i128 %15, %18 + %20 = zext i64 %.elt8 to i128 + %21 = add nuw nsw i128 %19, %20 + %22 = lshr i128 %21, 64 + %23 = trunc i128 %21 to i64 + %24 = getelementptr inbounds %S, %S* %this, i64 0,i32 0, i64 3 + %25 = load i64, i64* %24, align 8 + %26 = zext i64 %25 to i128 + %27 = add nuw nsw i128 %22, %26 + %28 = zext i64 %.elt10 to i128 + %29 = add nuw nsw i128 %27, %28 + %30 = trunc i128 %29 to i64 + %31 = insertvalue [4 x i64] undef, i64 %6, 0 + %32 = insertvalue [4 x i64] %31, i64 %14, 1 + %33 = insertvalue [4 x i64] %32, i64 %23, 2 + %34 = insertvalue [4 x i64] %33, i64 %30, 3 + %35 = insertvalue %S undef, [4 x i64] %34, 0 + ret %S %35 +} diff --git a/test/CodeGen/X86/avx-isa-check.ll b/test/CodeGen/X86/avx-isa-check.ll index dffc8078e44f9..5d66dfde0bc67 100644 --- a/test/CodeGen/X86/avx-isa-check.ll +++ b/test/CodeGen/X86/avx-isa-check.ll @@ -680,3 +680,8 @@ define <4 x double> @_inreg4xdouble(double %a) { %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer ret <4 x double> %c } + +define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #0 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret +} diff --git a/test/CodeGen/X86/avx1-logical-load-folding.ll b/test/CodeGen/X86/avx1-logical-load-folding.ll index 90e00c965391e..7073eb2247632 100644 --- a/test/CodeGen/X86/avx1-logical-load-folding.ll +++ b/test/CodeGen/X86/avx1-logical-load-folding.ll @@ -1,10 +1,26 @@ -; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s - -target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-apple-macosx10.9.0" +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -O3 
-disable-peephole -mtriple=i686-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -O3 -disable-peephole -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X64 ; Function Attrs: nounwind ssp uwtable define void @test1(float* %A, float* %C) #0 { +; X86-LABEL: test1: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vandps LCPI0_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test1: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -13,12 +29,27 @@ define void @test1(float* %A, float* %C) #0 { %tmp6 = extractelement <8 x float> %tmp5, i32 0 store float %tmp6, float* %C ret void - - ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0 } ; Function Attrs: nounwind ssp uwtable define void @test2(float* %A, float* %C) #0 { +; X86-LABEL: test2: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vorps LCPI1_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test2: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -27,12 +58,27 @@ define void @test2(float* %A, float* %C) #0 { %tmp6 = extractelement <8 x float> %tmp5, i32 0 store float %tmp6, float* %C ret void - - ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0 } ; Function Attrs: nounwind ssp uwtable define void @test3(float* %A, float* %C) #0 { +; X86-LABEL: test3: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vxorps LCPI2_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test3: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -41,11 +87,26 @@ define void @test3(float* %A, float* %C) #0 { %tmp6 = extractelement <8 x float> %tmp5, i32 0 store float %tmp6, float* %C ret void - - ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0 } define void @test4(float* %A, float* %C) #0 { +; X86-LABEL: test4: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vandnps LCPI3_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test4: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x 
float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -55,6 +116,4 @@ define void @test4(float* %A, float* %C) #0 { %tmp7 = extractelement <8 x float> %tmp6, i32 0 store float %tmp7, float * %C ret void - - ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0 } diff --git a/test/CodeGen/X86/avx2-schedule.ll b/test/CodeGen/X86/avx2-schedule.ll new file mode 100644 index 0000000000000..042bc217b97cf --- /dev/null +++ b/test/CodeGen/X86/avx2-schedule.ll @@ -0,0 +1,338 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1 + +define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) { +; HASWELL-LABEL: test_pabsb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pabsb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0) + %2 = load <32 x i8>, <32 x i8> *%a1, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2) + %4 = or <32 x i8> %1, %3 + ret <32 x i8> %4 +} +declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone + +define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) { +; HASWELL-LABEL: test_pabsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pabsd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0) + %2 = load <8 x i32>, <8 x i32> *%a1, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2) + %4 = or <8 x i32> %1, %3 + ret <8 x i32> %4 +} +declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone + +define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) { +; HASWELL-LABEL: test_pabsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pabsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0) + %2 = load <16 x i16>, <16 x i16> *%a1, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2) + %4 = or <16 x i16> %1, %3 + ret <16 x i16> %4 +} +declare <16 x i16> 
@llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone + +define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; HASWELL-LABEL: test_paddb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <32 x i8> %a0, %a1 + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = add <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; HASWELL-LABEL: test_paddd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <8 x i32> %a0, %a1 + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = add <8 x i32> %1, %2 + ret <8 x i32> %3 +} + +define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_paddq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = add <4 x i64> %1, %2 + ret <4 x i64> %3 +} + +define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; HASWELL-LABEL: test_paddw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <16 x i16> %a0, %a1 + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = add <16 x i16> %1, %2 + ret <16 x i16> %3 +} + +define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_pand: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pand: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = and <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = and <4 x i64> %1, %2 + %4 = add <4 x i64> %3, %a1 + ret <4 x i64> %4 +} + +define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_pandn: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpandn %ymm1, %ymm0, 
%ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pandn: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1> + %2 = and <4 x i64> %a1, %1 + %3 = load <4 x i64>, <4 x i64> *%a2, align 32 + %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1> + %5 = and <4 x i64> %3, %4 + %6 = add <4 x i64> %2, %5 + ret <4 x i64> %6 +} + +define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; HASWELL-LABEL: test_pmulld: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00] +; HASWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pmulld: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = mul <8 x i32> %a0, %a1 + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = mul <8 x i32> %1, %2 + ret <8 x i32> %3 +} + +define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; HASWELL-LABEL: test_pmullw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pmullw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = mul <16 x i16> %a0, %a1 + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = mul <16 x i16> %1, %2 + ret <16 x i16> %3 +} + +define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_por: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_por: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = or <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = or <4 x i64> %1, %2 + %4 = add <4 x i64> %3, %a1 + ret <4 x i64> %4 +} + +define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; HASWELL-LABEL: test_psubb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <32 x i8> %a0, %a1 + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = sub <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; HASWELL-LABEL: test_psubd: +; 
HASWELL: # BB#0: +; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <8 x i32> %a0, %a1 + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = sub <8 x i32> %1, %2 + ret <8 x i32> %3 +} + +define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_psubq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = sub <4 x i64> %1, %2 + ret <4 x i64> %3 +} + +define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; HASWELL-LABEL: test_psubw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <16 x i16> %a0, %a1 + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = sub <16 x i16> %1, %2 + ret <16 x i16> %3 +} + +define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_pxor: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pxor: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = xor <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = xor <4 x i64> %1, %2 + %4 = add <4 x i64> %3, %a1 + ret <4 x i64> %4 +} + +!0 = !{i32 1} diff --git a/test/CodeGen/X86/avx512vl-arith.ll b/test/CodeGen/X86/avx512vl-arith.ll index ef01d8656dac1..9c056cdee1966 100644 --- a/test/CodeGen/X86/avx512vl-arith.ll +++ b/test/CodeGen/X86/avx512vl-arith.ll @@ -1,36 +1,42 @@ -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl| FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding| FileCheck %s ; 256-bit -; CHECK-LABEL: vpaddq256_test -; CHECK: vpaddq %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpaddq256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i64> %i, %j ret <4 x i64> %x } -; CHECK-LABEL: vpaddq256_fold_test -; CHECK: vpaddq 
(%rdi), %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind { +; CHECK-LABEL: vpaddq256_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <4 x i64>, <4 x i64>* %j, align 4 %x = add <4 x i64> %i, %tmp ret <4 x i64> %x } -; CHECK-LABEL: vpaddq256_broadcast_test -; CHECK: vpaddq LCP{{.*}}(%rip){1to4}, %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind { +; CHECK-LABEL: vpaddq256_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI2_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i64> %i, <i64 1, i64 1, i64 1, i64 1> ret <4 x i64> %x } -; CHECK-LABEL: vpaddq256_broadcast2_test -; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind { +; CHECK-LABEL: vpaddq256_broadcast2_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %j = load i64, i64* %j.ptr %j.0 = insertelement <4 x i64> undef, i64 %j, i32 0 %j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer @@ -38,55 +44,68 @@ define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind ret <4 x i64> %x } -; CHECK-LABEL: vpaddd256_test -; CHECK: vpaddd %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpaddd256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <8 x i32> %i, %j ret <8 x i32> %x } -; CHECK-LABEL: vpaddd256_fold_test -; CHECK: vpaddd (%rdi), %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind { +; CHECK-LABEL: vpaddd256_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <8 x i32>, <8 x i32>* %j, align 4 %x = add <8 x i32> %i, %tmp ret <8 x i32> %x } -; CHECK-LABEL: vpaddd256_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind { +; CHECK-LABEL: vpaddd256_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI6_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ret <8 x i32> %x } -; CHECK-LABEL: vpaddd256_mask_test -; CHECK: vpaddd %ymm{{.*%k[1-7].*}} -; CHECK: ret define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_mask_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1] +; CHECK-NEXT: retq 
## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, %j %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_maskz_test -; CHECK: vpaddd %ymm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_maskz_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, %j %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_mask_fold_test -; CHECK: vpaddd (%rdi), %ymm{{.*%k[1-7]}} -; CHECK: ret define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_mask_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %j = load <8 x i32>, <8 x i32>* %j.ptr %x = add <8 x i32> %i, %j @@ -94,20 +113,27 @@ define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_mask_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]}}} -; CHECK: ret define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_mask_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI10_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_maskz_fold_test -; CHECK: vpaddd (%rdi), %ymm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_maskz_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %j = load <8 x i32>, <8 x i32>* %j.ptr %x = add <8 x i32> %i, %j @@ -115,96 +141,111 @@ define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 ret <8 x i32> %r } -; CHECK-LABEL: 
vpaddd256_maskz_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_maskz_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI12_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer ret <8 x i32> %r } -; CHECK-LABEL: vpsubq256_test -; CHECK: vpsubq %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpsubq256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfb,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <4 x i64> %i, %j ret <4 x i64> %x } -; CHECK-LABEL: vpsubd256_test -; CHECK: vpsubd %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpsubd256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <8 x i32> %i, %j ret <8 x i32> %x } -; CHECK-LABEL: vpmulld256_test -; CHECK: vpmulld %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) { +; CHECK-LABEL: vpmulld256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpmulld %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x40,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = mul <8 x i32> %i, %j ret <8 x i32> %x } -; CHECK-LABEL: test_vaddpd_256 -; CHECK: vaddpd{{.*}} -; CHECK: ret define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) { +; CHECK-LABEL: test_vaddpd_256: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0] +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <4 x double> %x, %y ret <4 x double> %add.i } -; CHECK-LABEL: test_fold_vaddpd_256 -; CHECK: vaddpd LCP{{.*}}(%rip){{.*}} -; CHECK: ret define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) { +; CHECK-LABEL: test_fold_vaddpd_256: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI17_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <4 x double> %y, <double 4.500000e+00, double 3.400000e+00, double 4.500000e+00, double 5.600000e+00> ret <4 x double> %add.i } -; CHECK-LABEL: test_broadcast_vaddpd_256 -; CHECK: LCP{{.*}}(%rip){1to8}, %ymm0, %ymm0 -; CHECK: ret define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind { +; CHECK-LABEL: test_broadcast_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI18_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: 
retq ## encoding: [0xc3] %b = fadd <8 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000> ret <8 x float> %b } -; CHECK-LABEL: test_mask_vaddps_256 -; CHECK: vaddps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fadd <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vmulps_256 -; CHECK: vmulps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fmul <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vminps_256 -; CHECK: vminps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1)nounwind readnone { +; CHECK-LABEL: test_mask_vminps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %cmp_res = fcmp olt <8 x float> %i, %j %min = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j @@ -212,12 +253,13 @@ define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, ret <8 x float> %r } -; CHECK-LABEL: test_mask_vmaxps_256 -; CHECK: vmaxps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> 
%j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %cmp_res = fcmp ogt <8 x float> %i, %j %max = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j @@ -225,48 +267,52 @@ define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, ret <8 x float> %r } -; CHECK-LABEL: test_mask_vsubps_256 -; CHECK: vsubps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fsub <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vdivps_256 -; CHECK: vdivps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fdiv <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vmulpd_256 -; CHECK: vmulpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fmul <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: 
test_mask_vminpd_256 -; CHECK: vminpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %cmp_res = fcmp olt <4 x double> %i, %j %min = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j @@ -274,12 +320,13 @@ define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, ret <4 x double> %r } -; CHECK-LABEL: test_mask_vmaxpd_256 -; CHECK: vmaxpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %cmp_res = fcmp ogt <4 x double> %i, %j %max = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j @@ -287,59 +334,65 @@ define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, ret <4 x double> %r } -; CHECK-LABEL: test_mask_vsubpd_256 -; CHECK: vsubpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fsub <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: test_mask_vdivpd_256 -; CHECK: vdivpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, 
%ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fdiv <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: test_mask_vaddpd_256 -; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fadd <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: test_maskz_vaddpd_256 -; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}}} -; CHECK: ret -define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, - <4 x i64> %mask1) nounwind readnone { +define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_maskz_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fadd <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer ret <4 x double> %r } -; CHECK-LABEL: test_mask_fold_vaddpd_256 -; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}.*}} -; CHECK: ret -define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double>* %j, <4 x i64> %mask1) - nounwind { +define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_fold_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load <4 x double>, <4 x double>* %j %x = fadd <4 x double> %i, %tmp @@ -347,11 +400,13 @@ define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> % ret <4 x double> %r } -; CHECK-LABEL: test_maskz_fold_vaddpd_256 -; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}.*}} -; CHECK: ret -define <4 x double> 
@test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, - <4 x i64> %mask1) nounwind { +define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_fold_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load <4 x double>, <4 x double>* %j %x = fadd <4 x double> %i, %tmp @@ -359,43 +414,46 @@ define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* % ret <4 x double> %r } -; CHECK-LABEL: test_broadcast2_vaddpd_256 -; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}} -; CHECK: ret define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind { +; CHECK-LABEL: test_broadcast2_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load double, double* %j %b = insertelement <4 x double> undef, double %tmp, i32 0 - %c = shufflevector <4 x double> %b, <4 x double> undef, - <4 x i32> zeroinitializer + %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer %x = fadd <4 x double> %c, %i ret <4 x double> %x } -; CHECK-LABEL: test_mask_broadcast_vaddpd_256 -; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]}.*}} -; CHECK: ret -define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, - double* %j, <4 x i64> %mask1) nounwind { +define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_broadcast_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm0, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc0] +; CHECK-NEXT: vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %b = insertelement <4 x double> undef, double %tmp, i32 0 - %c = shufflevector <4 x double> %b, <4 x double> undef, - <4 x i32> zeroinitializer + %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer %x = fadd <4 x double> %c, %i %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %i ret <4 x double> %r } -; CHECK-LABEL: test_maskz_broadcast_vaddpd_256 -; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret -define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, - <4 x i64> %mask1) nounwind { +define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_broadcast_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07] +; 
CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %b = insertelement <4 x double> undef, double %tmp, i32 0 - %c = shufflevector <4 x double> %b, <4 x double> undef, - <4 x i32> zeroinitializer + %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer %x = fadd <4 x double> %c, %i %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer ret <4 x double> %r @@ -403,27 +461,30 @@ define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j ; 128-bit -; CHECK-LABEL: vpaddq128_test -; CHECK: vpaddq %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpaddq128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <2 x i64> %i, %j ret <2 x i64> %x } -; CHECK-LABEL: vpaddq128_fold_test -; CHECK: vpaddq (%rdi), %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind { +; CHECK-LABEL: vpaddq128_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <2 x i64>, <2 x i64>* %j, align 4 %x = add <2 x i64> %i, %tmp ret <2 x i64> %x } -; CHECK-LABEL: vpaddq128_broadcast2_test -; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind { +; CHECK-LABEL: vpaddq128_broadcast2_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load i64, i64* %j %j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0 %j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1 @@ -431,55 +492,68 @@ define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind { ret <2 x i64> %x } -; CHECK-LABEL: vpaddd128_test -; CHECK: vpaddd %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpaddd128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i32> %i, %j ret <4 x i32> %x } -; CHECK-LABEL: vpaddd128_fold_test -; CHECK: vpaddd (%rdi), %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind { +; CHECK-LABEL: vpaddd128_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <4 x i32>, <4 x i32>* %j, align 4 %x = add <4 x i32> %i, %tmp ret <4 x i32> %x } -; CHECK-LABEL: vpaddd128_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind { +; CHECK-LABEL: vpaddd128_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI42_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1> ret <4 x i32> %x } -; CHECK-LABEL: vpaddd128_mask_test -; CHECK: vpaddd %xmm{{.*%k[1-7].*}} -; CHECK: ret define <4 x i32> 
@vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_mask_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, %j %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_maskz_test -; CHECK: vpaddd %xmm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_maskz_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, %j %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_mask_fold_test -; CHECK: vpaddd (%rdi), %xmm{{.*%k[1-7]}} -; CHECK: ret define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_mask_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %j = load <4 x i32>, <4 x i32>* %j.ptr %x = add <4 x i32> %i, %j @@ -487,20 +561,27 @@ define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_mask_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]}}} -; CHECK: ret define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_mask_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI46_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1> %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_maskz_fold_test -; CHECK: vpaddd (%rdi), %xmm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_maskz_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: 
[0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %j = load <4 x i32>, <4 x i32>* %j.ptr %x = add <4 x i32> %i, %j @@ -508,96 +589,111 @@ define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_maskz_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_maskz_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI48_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1> %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer ret <4 x i32> %r } -; CHECK-LABEL: vpsubq128_test -; CHECK: vpsubq %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpsubq128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <2 x i64> %i, %j ret <2 x i64> %x } -; CHECK-LABEL: vpsubd128_test -; CHECK: vpsubd %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpsubd128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <4 x i32> %i, %j ret <4 x i32> %x } -; CHECK-LABEL: vpmulld128_test -; CHECK: vpmulld %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) { +; CHECK-LABEL: vpmulld128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x40,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = mul <4 x i32> %i, %j ret <4 x i32> %x } -; CHECK-LABEL: test_vaddpd_128 -; CHECK: vaddpd{{.*}} -; CHECK: ret define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) { +; CHECK-LABEL: test_vaddpd_128: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0] +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <2 x double> %x, %y ret <2 x double> %add.i } -; CHECK-LABEL: test_fold_vaddpd_128 -; CHECK: vaddpd LCP{{.*}}(%rip){{.*}} -; CHECK: ret define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) { +; CHECK-LABEL: test_fold_vaddpd_128: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI53_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <2 x double> %y, <double 4.500000e+00, double 3.400000e+00> ret <2 x double> %add.i } -; CHECK-LABEL: test_broadcast_vaddpd_128 -; CHECK: LCP{{.*}}(%rip){1to4}, %xmm0, 
%xmm0 -; CHECK: ret define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind { +; CHECK-LABEL: test_broadcast_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI54_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %b = fadd <4 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000> ret <4 x float> %b } -; CHECK-LABEL: test_mask_vaddps_128 -; CHECK: vaddps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fadd <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst ret <4 x float> %r } -; CHECK-LABEL: test_mask_vmulps_128 -; CHECK: vmulps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fmul <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst ret <4 x float> %r } -; CHECK-LABEL: test_mask_vminps_128 -; CHECK: vminps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vminps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %cmp_res = fcmp olt <4 x float> %i, %j %min = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j @@ -605,12 +701,13 @@ define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, ret <4 x float> %r } -; CHECK-LABEL: test_mask_vmaxps_128 -; CHECK: vmaxps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, 
%xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %cmp_res = fcmp ogt <4 x float> %i, %j %max = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j @@ -618,12 +715,13 @@ define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, ret <4 x float> %r } -; CHECK-LABEL: test_mask_vsubps_128 -; CHECK: vsubps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fsub <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst @@ -631,36 +729,39 @@ define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, } -; CHECK-LABEL: test_mask_vdivps_128 -; CHECK: vdivps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fdiv <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst ret <4 x float> %r } -; CHECK-LABEL: test_mask_vmulpd_128 -; CHECK: vmulpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: 
[0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fmul <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_mask_vminpd_128 -; CHECK: vminpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vminpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %cmp_res = fcmp olt <2 x double> %i, %j %min = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j @@ -668,12 +769,13 @@ define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, ret <2 x double> %r } -; CHECK-LABEL: test_mask_vmaxpd_128 -; CHECK: vmaxpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %cmp_res = fcmp ogt <2 x double> %i, %j %max = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j @@ -681,46 +783,52 @@ define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, ret <2 x double> %r } -; CHECK-LABEL: test_mask_vsubpd_128 -; CHECK: vsubpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fsub <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_mask_vdivpd_128 -; CHECK: vdivpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x 
double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fdiv <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_mask_vaddpd_128 -; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fadd <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_maskz_vaddpd_128 -; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}}} -; CHECK: ret define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j, +; CHECK-LABEL: test_maskz_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] <2 x i64> %mask1) nounwind readnone { %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fadd <2 x double> %i, %j @@ -728,12 +836,13 @@ define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j, ret <2 x double> %r } -; CHECK-LABEL: test_mask_fold_vaddpd_128 -; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}.*}} -; CHECK: ret -define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double>* %j, <2 x i64> %mask1) - nounwind { +define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_fold_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load <2 x double>, <2 x double>* %j %x = fadd <2 x double> %i, %tmp @@ -741,11 +850,13 @@ define <2 x double> 
@test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> % ret <2 x double> %r } -; CHECK-LABEL: test_maskz_fold_vaddpd_128 -; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}.*}} -; CHECK: ret -define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, - <2 x i64> %mask1) nounwind { +define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_fold_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load <2 x double>, <2 x double>* %j %x = fadd <2 x double> %i, %tmp @@ -753,10 +864,11 @@ define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* % ret <2 x double> %r } -; CHECK-LABEL: test_broadcast2_vaddpd_128 -; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}} -; CHECK: ret define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind { +; CHECK-LABEL: test_broadcast2_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1 @@ -764,12 +876,14 @@ define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nou ret <2 x double> %x } -; CHECK-LABEL: test_mask_broadcast_vaddpd_128 -; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]}.*}} -; CHECK: ret -define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, - double* %j, <2 x i64> %mask1) - nounwind { +define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, double* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_broadcast_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0] +; CHECK-NEXT: vpcmpneqq %xmm0, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xc8,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm1, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x19,0x58,0x0f] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 @@ -779,11 +893,13 @@ define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x doub ret <2 x double> %r } -; CHECK-LABEL: test_maskz_broadcast_vaddpd_128 -; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret -define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, - <2 x i64> %mask1) nounwind { +define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_broadcast_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} 
## encoding: [0x62,0xf1,0xfd,0x99,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 diff --git a/test/CodeGen/X86/branchfolding-undef.mir b/test/CodeGen/X86/branchfolding-undef.mir index 0da167b332579..1a7dfb941875f 100644 --- a/test/CodeGen/X86/branchfolding-undef.mir +++ b/test/CodeGen/X86/branchfolding-undef.mir @@ -16,7 +16,6 @@ name: func tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 JE_1 %bb.1, implicit undef %eflags JMP_1 %bb.2 diff --git a/test/CodeGen/X86/build-vector-128.ll b/test/CodeGen/X86/build-vector-128.ll new file mode 100644 index 0000000000000..8c3a6790ffa6c --- /dev/null +++ b/test/CodeGen/X86/build-vector-128.ll @@ -0,0 +1,428 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE-32 --check-prefix=SSE2-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE-64 --check-prefix=SSE2-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE-32 --check-prefix=SSE41-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE-64 --check-prefix=SSE41-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64 + +define <2 x double> @test_buildvector_v2f64(double %a0, double %a1) { +; SSE-32-LABEL: test_buildvector_v2f64: +; SSE-32: # BB#0: +; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0 +; SSE-32-NEXT: retl +; +; SSE-64-LABEL: test_buildvector_v2f64: +; SSE-64: # BB#0: +; SSE-64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v2f64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v2f64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-64-NEXT: retq + %ins0 = insertelement <2 x double> undef, double %a0, i32 0 + %ins1 = insertelement <2 x double> %ins0, double %a1, i32 1 + ret <2 x double> %ins1 +} + +define <4 x float> @test_buildvector_v4f32(float %a0, float %a1, float %a2, float %a3) { +; SSE-32-LABEL: test_buildvector_v4f32: +; SSE-32: # BB#0: +; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0 +; SSE-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v4f32: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-64-NEXT: retq +; +; SSE41-64-LABEL: test_buildvector_v4f32: +; SSE41-64: # BB#0: +; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v4f32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups 
{{[0-9]+}}(%esp), %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v4f32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; AVX-64-NEXT: retq + %ins0 = insertelement <4 x float> undef, float %a0, i32 0 + %ins1 = insertelement <4 x float> %ins0, float %a1, i32 1 + %ins2 = insertelement <4 x float> %ins1, float %a2, i32 2 + %ins3 = insertelement <4 x float> %ins2, float %a3, i32 3 + ret <4 x float> %ins3 +} + +define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) { +; SSE2-32-LABEL: test_buildvector_v2i64: +; SSE2-32: # BB#0: +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-32-NEXT: retl +; +; SSE-64-LABEL: test_buildvector_v2i64: +; SSE-64: # BB#0: +; SSE-64-NEXT: movq %rsi, %xmm1 +; SSE-64-NEXT: movq %rdi, %xmm0 +; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-64-NEXT: retq +; +; SSE41-32-LABEL: test_buildvector_v2i64: +; SSE41-32: # BB#0: +; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE41-32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrd $2, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: retl +; +; AVX-32-LABEL: test_buildvector_v2i64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v2i64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovq %rsi, %xmm0 +; AVX-64-NEXT: vmovq %rdi, %xmm1 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX-64-NEXT: retq + %ins0 = insertelement <2 x i64> undef, i64 %a0, i32 0 + %ins1 = insertelement <2 x i64> %ins0, i64 %a1, i32 1 + ret <2 x i64> %ins1 +} + +define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) { +; SSE-32-LABEL: test_buildvector_v4i32: +; SSE-32: # BB#0: +; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0 +; SSE-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v4i32: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: movd %ecx, %xmm0 +; SSE2-64-NEXT: movd %esi, %xmm1 +; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-64-NEXT: movd %edx, %xmm2 +; SSE2-64-NEXT: movd %edi, %xmm0 +; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-64-NEXT: retq +; +; SSE41-64-LABEL: test_buildvector_v4i32: +; SSE41-64: # BB#0: +; SSE41-64-NEXT: movd %edi, %xmm0 +; SSE41-64-NEXT: pinsrd $1, %esi, %xmm0 +; SSE41-64-NEXT: pinsrd $2, %edx, %xmm0 +; SSE41-64-NEXT: pinsrd $3, %ecx, %xmm0 +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v4i32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v4i32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrd 
$1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <4 x i32> undef, i32 %f0, i32 0 + %ins1 = insertelement <4 x i32> %ins0, i32 %f1, i32 1 + %ins2 = insertelement <4 x i32> %ins1, i32 %f2, i32 2 + %ins3 = insertelement <4 x i32> %ins2, i32 %f3, i32 3 + ret <4 x i32> %ins3 +} + +define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) { +; SSE2-32-LABEL: test_buildvector_v8i16: +; SSE2-32: # BB#0: +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-32-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v8i16: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: movd %ecx, %xmm0 +; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-64-NEXT: movd %r9d, %xmm1 +; SSE2-64-NEXT: movd %esi, %xmm2 +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-64-NEXT: movd %edx, %xmm1 +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-64-NEXT: movd %r8d, %xmm3 +; SSE2-64-NEXT: movd %edi, %xmm0 +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-64-NEXT: retq +; +; SSE41-32-LABEL: test_buildvector_v8i16: +; SSE41-32: # BB#0: +; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE41-32-NEXT: pinsrw $1, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $2, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $4, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $6, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $7, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: retl +; +; SSE41-64-LABEL: test_buildvector_v8i16: +; 
SSE41-64: # BB#0: +; SSE41-64-NEXT: movd %edi, %xmm0 +; SSE41-64-NEXT: pinsrw $1, %esi, %xmm0 +; SSE41-64-NEXT: pinsrw $2, %edx, %xmm0 +; SSE41-64-NEXT: pinsrw $3, %ecx, %xmm0 +; SSE41-64-NEXT: pinsrw $4, %r8d, %xmm0 +; SSE41-64-NEXT: pinsrw $5, %r9d, %xmm0 +; SSE41-64-NEXT: pinsrw $6, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrw $7, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v8i16: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v8i16: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0 + %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1 + %ins2 = insertelement <8 x i16> %ins1, i16 %a2, i32 2 + %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3 + %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4 + %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5 + %ins6 = insertelement <8 x i16> %ins5, i16 %a6, i32 6 + %ins7 = insertelement <8 x i16> %ins6, i16 %a7, i32 7 + ret <8 x i16> %ins7 +} + +define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) { +; SSE2-32-LABEL: test_buildvector_v16i8: +; SSE2-32: # BB#0: +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v16i8: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-64-NEXT: movd %ecx, %xmm0 +; SSE2-64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-64-NEXT: movd %r9d, %xmm1 +; SSE2-64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-64-NEXT: movd %esi, %xmm2 +; SSE2-64-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-64-NEXT: movd %edx, %xmm3 +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-64-NEXT: movd %r8d, %xmm1 +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-64-NEXT: movd %edi, %xmm0 +; SSE2-64-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-64-NEXT: retq +; +; SSE41-32-LABEL: test_buildvector_v16i8: +; SSE41-32: # BB#0: +; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE41-32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $2, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $3, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $5, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $6, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $7, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $9, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $10, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $11, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $12, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $13, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $14, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $15, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: retl +; +; SSE41-64-LABEL: test_buildvector_v16i8: +; SSE41-64: # BB#0: +; SSE41-64-NEXT: movd %edi, %xmm0 +; SSE41-64-NEXT: pinsrb $1, %esi, %xmm0 +; SSE41-64-NEXT: pinsrb $2, %edx, %xmm0 +; SSE41-64-NEXT: pinsrb $3, %ecx, %xmm0 +; SSE41-64-NEXT: pinsrb $4, %r8d, %xmm0 +; SSE41-64-NEXT: pinsrb $5, %r9d, %xmm0 +; SSE41-64-NEXT: pinsrb $6, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $7, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $8, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $9, 
{{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $10, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $11, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $12, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $13, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $14, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $15, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v16i8: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v16i8: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <16 x i8> undef, i8 %a0, i32 0 + %ins1 = insertelement <16 x i8> %ins0, i8 %a1, i32 1 + %ins2 = insertelement <16 x i8> %ins1, i8 %a2, i32 2 + %ins3 = insertelement <16 x i8> %ins2, i8 %a3, i32 3 + %ins4 = insertelement <16 x i8> %ins3, i8 %a4, i32 4 + %ins5 = insertelement <16 x i8> %ins4, i8 %a5, i32 5 + %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6 + %ins7 = insertelement <16 x i8> %ins6, i8 %a7, i32 7 + %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8 + %ins9 = insertelement <16 x i8> %ins8, i8 %a9, i32 9 + %ins10 = insertelement <16 x i8> %ins9, i8 %a10, i32 10 + %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11 + %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12 + %ins13 = insertelement <16 x i8> %ins12, i8 %a13, i32 13 + %ins14 = insertelement <16 x i8> %ins13, i8 %a14, i32 14 + %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15 + ret <16 x i8> %ins15 +} diff --git a/test/CodeGen/X86/build-vector-256.ll b/test/CodeGen/X86/build-vector-256.ll new file mode 100644 index 0000000000000..1ced1fc3a3822 --- /dev/null +++ b/test/CodeGen/X86/build-vector-256.ll @@ -0,0 +1,434 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64 + +define <4 x double> @test_buildvector_v4f64(double %a0, double %a1, double %a2, double %a3) { +; AVX-32-LABEL: test_buildvector_v4f64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v4f64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <4 x double> undef, double %a0, i32 0 + %ins1 = insertelement <4 x double> %ins0, double %a1, i32 1 + %ins2 = insertelement <4 x double> %ins1, double %a2, i32 2 + %ins3 = insertelement <4 x double> %ins2, double %a3, i32 3 + ret <4 x double> %ins3 +} + +define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) { +; AVX-32-LABEL: test_buildvector_v8f32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v8f32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x float> undef, float %a0, i32 0 + %ins1 = insertelement <8 x float> %ins0, float %a1, i32 1 + %ins2 = insertelement <8 x float> %ins1, float %a2, i32 2 + %ins3 = insertelement <8 x float> %ins2, float %a3, i32 3 + %ins4 = insertelement <8 x float> %ins3, float %a4, i32 4 + %ins5 = insertelement <8 x float> %ins4, float %a5, i32 5 + %ins6 = insertelement <8 x float> %ins5, float %a6, i32 6 + %ins7 = insertelement <8 x float> %ins6, float %a7, i32 7 + ret <8 x float> %ins7 +} + +define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) { +; AVX1-32-LABEL: test_buildvector_v4i64: +; AVX1-32: # BB#0: +; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v4i64: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovq %rcx, %xmm0 +; AVX1-64-NEXT: vmovq %rdx, %xmm1 +; AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-64-NEXT: vmovq %rsi, %xmm1 +; AVX1-64-NEXT: vmovq %rdi, %xmm2 +; 
AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-32-LABEL: test_buildvector_v4i64: +; AVX2-32: # BB#0: +; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-32-NEXT: retl +; +; AVX2-64-LABEL: test_buildvector_v4i64: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovq %rcx, %xmm0 +; AVX2-64-NEXT: vmovq %rdx, %xmm1 +; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-64-NEXT: vmovq %rsi, %xmm1 +; AVX2-64-NEXT: vmovq %rdi, %xmm2 +; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <4 x i64> undef, i64 %a0, i32 0 + %ins1 = insertelement <4 x i64> %ins0, i64 %a1, i32 1 + %ins2 = insertelement <4 x i64> %ins1, i64 %a2, i32 2 + %ins3 = insertelement <4 x i64> %ins2, i64 %a3, i32 3 + ret <4 x i64> %ins3 +} + +define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) { +; AVX-32-LABEL: test_buildvector_v8i32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0 +; AVX-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v8i32: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovd %edi, %xmm0 +; AVX1-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX1-64-NEXT: vmovd %r8d, %xmm1 +; AVX1-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-64-LABEL: test_buildvector_v8i32: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovd %edi, %xmm0 +; AVX2-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX2-64-NEXT: vmovd %r8d, %xmm1 +; AVX2-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <8 x i32> undef, i32 %a0, i32 0 + %ins1 = insertelement <8 x i32> %ins0, i32 %a1, i32 1 + %ins2 = insertelement <8 x i32> %ins1, i32 %a2, i32 2 + %ins3 = insertelement <8 x i32> %ins2, i32 %a3, i32 3 + %ins4 = insertelement <8 x i32> %ins3, i32 %a4, i32 4 + %ins5 = insertelement <8 x i32> %ins4, i32 %a5, i32 5 + %ins6 = insertelement <8 x i32> %ins5, i32 %a6, i32 6 + %ins7 = insertelement <8 x i32> %ins6, i32 %a7, i32 7 + ret <8 x i32> %ins7 +} + +define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) { +; AVX1-32-LABEL: test_buildvector_v16i16: +; AVX1-32: # BB#0: +; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; 
AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v16i16: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovd %edi, %xmm0 +; AVX1-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-32-LABEL: test_buildvector_v16i16: +; AVX2-32: # BB#0: +; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-32-NEXT: retl +; +; AVX2-64-LABEL: test_buildvector_v16i16: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovd %edi, %xmm0 +; AVX2-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 
+; AVX2-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0 + %ins1 = insertelement <16 x i16> %ins0, i16 %a1, i32 1 + %ins2 = insertelement <16 x i16> %ins1, i16 %a2, i32 2 + %ins3 = insertelement <16 x i16> %ins2, i16 %a3, i32 3 + %ins4 = insertelement <16 x i16> %ins3, i16 %a4, i32 4 + %ins5 = insertelement <16 x i16> %ins4, i16 %a5, i32 5 + %ins6 = insertelement <16 x i16> %ins5, i16 %a6, i32 6 + %ins7 = insertelement <16 x i16> %ins6, i16 %a7, i32 7 + %ins8 = insertelement <16 x i16> %ins7, i16 %a8, i32 8 + %ins9 = insertelement <16 x i16> %ins8, i16 %a9, i32 9 + %ins10 = insertelement <16 x i16> %ins9, i16 %a10, i32 10 + %ins11 = insertelement <16 x i16> %ins10, i16 %a11, i32 11 + %ins12 = insertelement <16 x i16> %ins11, i16 %a12, i32 12 + %ins13 = insertelement <16 x i16> %ins12, i16 %a13, i32 13 + %ins14 = insertelement <16 x i16> %ins13, i16 %a14, i32 14 + %ins15 = insertelement <16 x i16> %ins14, i16 %a15, i32 15 + ret <16 x i16> %ins15 +} + +define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) { +; AVX1-32-LABEL: test_buildvector_v32i8: +; AVX1-32: # BB#0: +; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: 
vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v32i8: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovd %edi, %xmm0 +; AVX1-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-32-LABEL: test_buildvector_v32i8: +; AVX2-32: # BB#0: +; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), 
%xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-32-NEXT: retl +; +; AVX2-64-LABEL: test_buildvector_v32i8: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovd %edi, %xmm0 +; AVX2-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0 + %ins1 = insertelement <32 x i8> %ins0, i8 %a1, i32 1 + %ins2 = insertelement <32 x i8> %ins1, i8 %a2, i32 2 + %ins3 = insertelement <32 x i8> %ins2, i8 %a3, i32 3 + %ins4 = insertelement <32 x i8> %ins3, i8 %a4, i32 4 + %ins5 = insertelement <32 x i8> %ins4, i8 %a5, i32 5 + %ins6 = insertelement <32 x i8> %ins5, i8 %a6, i32 6 + %ins7 = insertelement <32 x i8> %ins6, i8 %a7, i32 7 + %ins8 = insertelement <32 x i8> %ins7, i8 %a8, i32 8 + %ins9 = insertelement <32 x i8> %ins8, i8 %a9, i32 9 + %ins10 = insertelement <32 x i8> %ins9, i8 %a10, i32 10 + %ins11 = insertelement <32 x i8> %ins10, i8 %a11, i32 11 + %ins12 = insertelement <32 x i8> %ins11, i8 %a12, i32 12 + %ins13 = insertelement 
<32 x i8> %ins12, i8 %a13, i32 13 + %ins14 = insertelement <32 x i8> %ins13, i8 %a14, i32 14 + %ins15 = insertelement <32 x i8> %ins14, i8 %a15, i32 15 + %ins16 = insertelement <32 x i8> %ins15, i8 %a16, i32 16 + %ins17 = insertelement <32 x i8> %ins16, i8 %a17, i32 17 + %ins18 = insertelement <32 x i8> %ins17, i8 %a18, i32 18 + %ins19 = insertelement <32 x i8> %ins18, i8 %a19, i32 19 + %ins20 = insertelement <32 x i8> %ins19, i8 %a20, i32 20 + %ins21 = insertelement <32 x i8> %ins20, i8 %a21, i32 21 + %ins22 = insertelement <32 x i8> %ins21, i8 %a22, i32 22 + %ins23 = insertelement <32 x i8> %ins22, i8 %a23, i32 23 + %ins24 = insertelement <32 x i8> %ins23, i8 %a24, i32 24 + %ins25 = insertelement <32 x i8> %ins24, i8 %a25, i32 25 + %ins26 = insertelement <32 x i8> %ins25, i8 %a26, i32 26 + %ins27 = insertelement <32 x i8> %ins26, i8 %a27, i32 27 + %ins28 = insertelement <32 x i8> %ins27, i8 %a28, i32 28 + %ins29 = insertelement <32 x i8> %ins28, i8 %a29, i32 29 + %ins30 = insertelement <32 x i8> %ins29, i8 %a30, i32 30 + %ins31 = insertelement <32 x i8> %ins30, i8 %a31, i32 31 + ret <32 x i8> %ins31 +} diff --git a/test/CodeGen/X86/build-vector-512.ll b/test/CodeGen/X86/build-vector-512.ll new file mode 100644 index 0000000000000..21737cca93a10 --- /dev/null +++ b/test/CodeGen/X86/build-vector-512.ll @@ -0,0 +1,712 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX512F-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX512F-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX512BW-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX512BW-64 + +define <8 x double> @test_buildvector_v8f64(double %a0, double %a1, double %a2, double %a3, double %a4, double %a5, double %a6, double %a7) { +; AVX-32-LABEL: test_buildvector_v8f64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v8f64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm4[0],xmm5[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX-64-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x double> undef, double %a0, i32 0 + %ins1 = insertelement <8 x double> %ins0, double %a1, i32 1 + %ins2 = insertelement <8 x double> %ins1, double %a2, i32 2 + %ins3 = insertelement <8 x double> %ins2, double %a3, i32 3 + %ins4 = insertelement <8 x double> %ins3, double %a4, i32 4 + %ins5 = insertelement <8 x double> %ins4, double %a5, i32 5 + %ins6 = insertelement <8 x double> %ins5, double %a6, i32 6 + %ins7 = insertelement <8 x double> %ins6, double %a7, i32 7 + ret <8 x double> %ins7 +} + +define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15) { +; AVX-32-LABEL: test_buildvector_v16f32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups 
{{[0-9]+}}(%esp), %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v16f32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0] +; AVX-64-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX-64-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <16 x float> undef, float %a0, i32 0 + %ins1 = insertelement <16 x float> %ins0, float %a1, i32 1 + %ins2 = insertelement <16 x float> %ins1, float %a2, i32 2 + %ins3 = insertelement <16 x float> %ins2, float %a3, i32 3 + %ins4 = insertelement <16 x float> %ins3, float %a4, i32 4 + %ins5 = insertelement <16 x float> %ins4, float %a5, i32 5 + %ins6 = insertelement <16 x float> %ins5, float %a6, i32 6 + %ins7 = insertelement <16 x float> %ins6, float %a7, i32 7 + %ins8 = insertelement <16 x float> %ins7, float %a8, i32 8 + %ins9 = insertelement <16 x float> %ins8, float %a9, i32 9 + %ins10 = insertelement <16 x float> %ins9, float %a10, i32 10 + %ins11 = insertelement <16 x float> %ins10, float %a11, i32 11 + %ins12 = insertelement <16 x float> %ins11, float %a12, i32 12 + %ins13 = insertelement <16 x float> %ins12, float %a13, i32 13 + %ins14 = insertelement <16 x float> %ins13, float %a14, i32 14 + %ins15 = insertelement <16 x float> %ins14, float %a15, i32 15 + ret <16 x float> %ins15 +} + +define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) { +; AVX-32-LABEL: test_buildvector_v8i64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: 
test_buildvector_v8i64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovq %rcx, %xmm0 +; AVX-64-NEXT: vmovq %rdx, %xmm1 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX-64-NEXT: vmovq %rsi, %xmm1 +; AVX-64-NEXT: vmovq %rdi, %xmm2 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX-64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX-64-NEXT: vmovq %r9, %xmm1 +; AVX-64-NEXT: vmovq %r8, %xmm2 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX-64-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX-64-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x i64> undef, i64 %a0, i32 0 + %ins1 = insertelement <8 x i64> %ins0, i64 %a1, i32 1 + %ins2 = insertelement <8 x i64> %ins1, i64 %a2, i32 2 + %ins3 = insertelement <8 x i64> %ins2, i64 %a3, i32 3 + %ins4 = insertelement <8 x i64> %ins3, i64 %a4, i32 4 + %ins5 = insertelement <8 x i64> %ins4, i64 %a5, i32 5 + %ins6 = insertelement <8 x i64> %ins5, i64 %a6, i32 6 + %ins7 = insertelement <8 x i64> %ins6, i64 %a7, i32 7 + ret <8 x i64> %ins7 +} + +define <16 x i32> @test_buildvector_v16i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) { +; AVX-32-LABEL: test_buildvector_v16i32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v16i32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: vmovd %r8d, %xmm1 +; AVX-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-64-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX-64-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX-64-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <16 x i32> undef, i32 %a0, i32 0 + %ins1 = insertelement <16 x i32> %ins0, i32 %a1, i32 1 + %ins2 = insertelement <16 x i32> %ins1, i32 %a2, i32 2 + %ins3 = insertelement <16 x i32> %ins2, i32 %a3, i32 3 + %ins4 = insertelement <16 x i32> %ins3, i32 %a4, i32 4 + %ins5 = insertelement <16 x i32> %ins4, i32 %a5, i32 5 + %ins6 = insertelement <16 x i32> %ins5, i32 %a6, i32 6 + %ins7 = insertelement <16 x i32> %ins6, i32 %a7, i32 7 + %ins8 = insertelement <16 x i32> %ins7, i32 %a8, i32 8 + %ins9 = insertelement <16 x i32> %ins8, i32 %a9, i32 9 + %ins10 = insertelement <16 x i32> %ins9, i32 %a10, i32 10 + %ins11 = insertelement <16 x i32> %ins10, i32 %a11, i32 11 + %ins12 = insertelement <16 x i32> %ins11, i32 %a12, i32 12 + %ins13 = insertelement <16 x i32> %ins12, i32 %a13, i32 13 + %ins14 = insertelement <16 x i32> %ins13, i32 %a14, i32 14 + %ins15 = insertelement <16 x i32> %ins14, i32 %a15, i32 15 + ret <16 x i32> %ins15 +} + +define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 
%a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15, i16 %a16, i16 %a17, i16 %a18, i16 %a19, i16 %a20, i16 %a21, i16 %a22, i16 %a23, i16 %a24, i16 %a25, i16 %a26, i16 %a27, i16 %a28, i16 %a29, i16 %a30, i16 %a31) { +; AVX512F-32-LABEL: test_buildvector_v32i16: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512F-32-NEXT: retl +; +; AVX512F-64-LABEL: test_buildvector_v32i16: +; AVX512F-64: # BB#0: +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $7, 
{{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX512F-64-NEXT: vmovd %edi, %xmm0 +; AVX512F-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512F-64-NEXT: retq +; +; AVX512BW-32-LABEL: test_buildvector_v32i16: +; AVX512BW-32: # BB#0: +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-32-NEXT: retl +; +; AVX512BW-64-LABEL: test_buildvector_v32i16: +; AVX512BW-64: # BB#0: +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 
+; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-64-NEXT: vmovd %edi, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $1, %esi, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $4, %r8d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $5, %r9d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-64-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-64-NEXT: retq + %ins0 = insertelement <32 x i16> undef, i16 %a0, i32 0 + %ins1 = insertelement <32 x i16> %ins0, i16 %a1, i32 1 + %ins2 = insertelement <32 x i16> %ins1, i16 %a2, i32 2 + %ins3 = insertelement <32 x i16> %ins2, i16 %a3, i32 3 + %ins4 = insertelement <32 x i16> %ins3, i16 %a4, i32 4 + %ins5 = insertelement <32 x i16> %ins4, i16 %a5, i32 5 + %ins6 = insertelement <32 x i16> %ins5, i16 %a6, i32 6 + %ins7 = insertelement <32 x i16> %ins6, i16 %a7, i32 7 + %ins8 = insertelement <32 x i16> %ins7, i16 %a8, i32 8 + %ins9 = insertelement <32 x i16> %ins8, i16 %a9, i32 9 + %ins10 = insertelement <32 x i16> %ins9, i16 %a10, i32 10 + %ins11 = insertelement <32 x i16> %ins10, i16 %a11, i32 11 + %ins12 = insertelement <32 x i16> %ins11, i16 %a12, i32 12 + %ins13 = insertelement <32 x i16> %ins12, i16 %a13, i32 13 + %ins14 = insertelement <32 x i16> %ins13, i16 %a14, i32 14 + %ins15 = insertelement <32 x i16> %ins14, i16 %a15, i32 15 + %ins16 = insertelement <32 x i16> %ins15, i16 %a16, i32 16 + %ins17 = insertelement <32 x i16> %ins16, i16 %a17, i32 17 + %ins18 = insertelement <32 x i16> %ins17, i16 %a18, i32 18 + %ins19 = insertelement <32 x i16> %ins18, i16 %a19, i32 19 + %ins20 = insertelement <32 x i16> %ins19, i16 %a20, i32 20 + %ins21 = insertelement <32 x i16> %ins20, i16 %a21, i32 21 + %ins22 = insertelement <32 x i16> %ins21, i16 %a22, i32 22 + %ins23 = insertelement <32 x i16> %ins22, i16 %a23, i32 23 + %ins24 = insertelement <32 x 
i16> %ins23, i16 %a24, i32 24 + %ins25 = insertelement <32 x i16> %ins24, i16 %a25, i32 25 + %ins26 = insertelement <32 x i16> %ins25, i16 %a26, i32 26 + %ins27 = insertelement <32 x i16> %ins26, i16 %a27, i32 27 + %ins28 = insertelement <32 x i16> %ins27, i16 %a28, i32 28 + %ins29 = insertelement <32 x i16> %ins28, i16 %a29, i32 29 + %ins30 = insertelement <32 x i16> %ins29, i16 %a30, i32 30 + %ins31 = insertelement <32 x i16> %ins30, i16 %a31, i32 31 + ret <32 x i16> %ins31 +} + +define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31, i8 %a32, i8 %a33, i8 %a34, i8 %a35, i8 %a36, i8 %a37, i8 %a38, i8 %a39, i8 %a40, i8 %a41, i8 %a42, i8 %a43, i8 %a44, i8 %a45, i8 %a46, i8 %a47, i8 %a48, i8 %a49, i8 %a50, i8 %a51, i8 %a52, i8 %a53, i8 %a54, i8 %a55, i8 %a56, i8 %a57, i8 %a58, i8 %a59, i8 %a60, i8 %a61, i8 %a62, i8 %a63) { +; AVX512F-32-LABEL: test_buildvector_v64i8: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), 
%xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512F-32-NEXT: retl +; +; AVX512F-64-LABEL: test_buildvector_v64i8: +; AVX512F-64: # BB#0: +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb 
$10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX512F-64-NEXT: vmovd %edi, %xmm0 +; AVX512F-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512F-64-NEXT: retq +; +; AVX512BW-32-LABEL: test_buildvector_v64i8: +; AVX512BW-32: # BB#0: +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; 
AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-32-NEXT: retl +; +; AVX512BW-64-LABEL: test_buildvector_v64i8: +; AVX512BW-64: # BB#0: +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 
+; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-64-NEXT: vmovd %edi, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $1, %esi, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $2, %edx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $4, %r8d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $5, %r9d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $10, 
{{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-64-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-64-NEXT: retq + %ins0 = insertelement <64 x i8> undef, i8 %a0, i32 0 + %ins1 = insertelement <64 x i8> %ins0, i8 %a1, i32 1 + %ins2 = insertelement <64 x i8> %ins1, i8 %a2, i32 2 + %ins3 = insertelement <64 x i8> %ins2, i8 %a3, i32 3 + %ins4 = insertelement <64 x i8> %ins3, i8 %a4, i32 4 + %ins5 = insertelement <64 x i8> %ins4, i8 %a5, i32 5 + %ins6 = insertelement <64 x i8> %ins5, i8 %a6, i32 6 + %ins7 = insertelement <64 x i8> %ins6, i8 %a7, i32 7 + %ins8 = insertelement <64 x i8> %ins7, i8 %a8, i32 8 + %ins9 = insertelement <64 x i8> %ins8, i8 %a9, i32 9 + %ins10 = insertelement <64 x i8> %ins9, i8 %a10, i32 10 + %ins11 = insertelement <64 x i8> %ins10, i8 %a11, i32 11 + %ins12 = insertelement <64 x i8> %ins11, i8 %a12, i32 12 + %ins13 = insertelement <64 x i8> %ins12, i8 %a13, i32 13 + %ins14 = insertelement <64 x i8> %ins13, i8 %a14, i32 14 + %ins15 = insertelement <64 x i8> %ins14, i8 %a15, i32 15 + %ins16 = insertelement <64 x i8> %ins15, i8 %a16, i32 16 + %ins17 = insertelement <64 x i8> %ins16, i8 %a17, i32 17 + %ins18 = insertelement <64 x i8> %ins17, i8 %a18, i32 18 + %ins19 = insertelement <64 x i8> %ins18, i8 %a19, i32 19 + %ins20 = insertelement <64 x i8> %ins19, i8 %a20, i32 20 + %ins21 = insertelement <64 x i8> %ins20, i8 %a21, i32 21 + %ins22 = insertelement <64 x i8> %ins21, i8 %a22, i32 22 + %ins23 = insertelement <64 x i8> %ins22, i8 %a23, i32 23 + %ins24 = insertelement <64 x i8> %ins23, i8 %a24, i32 24 + %ins25 = insertelement <64 x i8> %ins24, i8 %a25, i32 25 + %ins26 = insertelement <64 x i8> %ins25, i8 %a26, i32 26 + %ins27 = insertelement <64 x i8> %ins26, i8 %a27, i32 27 + %ins28 = insertelement <64 x i8> %ins27, i8 %a28, i32 28 + %ins29 = insertelement <64 x i8> %ins28, i8 %a29, i32 29 + %ins30 = insertelement <64 x i8> %ins29, i8 %a30, i32 30 + %ins31 = insertelement <64 x i8> %ins30, i8 %a31, i32 31 + %ins32 = insertelement <64 x i8> %ins31, i8 %a32, i32 32 + %ins33 = insertelement <64 x i8> %ins32, i8 %a33, i32 33 + %ins34 = insertelement <64 x i8> %ins33, i8 %a34, i32 34 + %ins35 = insertelement <64 x i8> %ins34, i8 %a35, i32 35 + %ins36 = insertelement <64 x i8> %ins35, i8 %a36, i32 36 + %ins37 = insertelement <64 x i8> %ins36, i8 %a37, i32 37 + %ins38 = insertelement <64 x i8> %ins37, i8 %a38, i32 38 + %ins39 = insertelement <64 x i8> %ins38, i8 %a39, i32 39 + %ins40 = insertelement <64 x i8> %ins39, i8 %a40, i32 40 + %ins41 = insertelement <64 x i8> %ins40, i8 %a41, i32 41 + %ins42 = insertelement <64 x i8> %ins41, i8 %a42, i32 42 + %ins43 = insertelement <64 x i8> %ins42, i8 %a43, i32 43 + %ins44 = insertelement <64 x i8> %ins43, i8 %a44, i32 44 + %ins45 = insertelement <64 x i8> %ins44, i8 %a45, i32 45 + %ins46 = insertelement <64 x i8> %ins45, i8 %a46, i32 46 + %ins47 = insertelement <64 x i8> %ins46, i8 %a47, i32 47 + %ins48 = insertelement <64 x i8> %ins47, i8 %a48, i32 48 + %ins49 = insertelement <64 x i8> %ins48, i8 %a49, i32 49 + %ins50 = insertelement <64 x i8> %ins49, i8 %a50, i32 50 + %ins51 = insertelement <64 x i8> %ins50, i8 %a51, i32 51 + %ins52 = insertelement <64 x 
i8> %ins51, i8 %a52, i32 52 + %ins53 = insertelement <64 x i8> %ins52, i8 %a53, i32 53 + %ins54 = insertelement <64 x i8> %ins53, i8 %a54, i32 54 + %ins55 = insertelement <64 x i8> %ins54, i8 %a55, i32 55 + %ins56 = insertelement <64 x i8> %ins55, i8 %a56, i32 56 + %ins57 = insertelement <64 x i8> %ins56, i8 %a57, i32 57 + %ins58 = insertelement <64 x i8> %ins57, i8 %a58, i32 58 + %ins59 = insertelement <64 x i8> %ins58, i8 %a59, i32 59 + %ins60 = insertelement <64 x i8> %ins59, i8 %a60, i32 60 + %ins61 = insertelement <64 x i8> %ins60, i8 %a61, i32 61 + %ins62 = insertelement <64 x i8> %ins61, i8 %a62, i32 62 + %ins63 = insertelement <64 x i8> %ins62, i8 %a63, i32 63 + ret <64 x i8> %ins63 +} diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll index ac8f790a2ead6..887abe99f6ed8 100644 --- a/test/CodeGen/X86/combine-abs.ll +++ b/test/CodeGen/X86/combine-abs.ll @@ -1,5 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL ; fold (abs c1) -> c2 define <4 x i32> @combine_v4i32_abs_constant() { @@ -27,10 +29,10 @@ define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) { ; CHECK-NEXT: vpabsw %xmm0, %xmm0 ; CHECK-NEXT: retq %a1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a) - %n2 = sub <8 x i16> zeroinitializer, %a1 - %c2 = icmp slt <8 x i16> %a1, zeroinitializer - %a2 = select <8 x i1> %c2, <8 x i16> %n2, <8 x i16> %a1 - ret <8 x i16> %a2 + %s2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15> + %a2 = add <8 x i16> %a1, %s2 + %x2 = xor <8 x i16> %a2, %s2 + ret <8 x i16> %x2 } define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) { @@ -46,17 +48,29 @@ define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) { } define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) { -; CHECK-LABEL: combine_v4i64_abs_abs: -; CHECK: # BB#0: -; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1 -; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] -; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1 -; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] -; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: retq +; AVX2-LABEL: combine_v4i64_abs_abs: +; AVX2: # BB#0: +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1 +; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1 +; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_v4i64_abs_abs: +; AVX512F: # BB#0: +; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def> +; AVX512F-NEXT: vpabsq %zmm0, %zmm0 +; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill> +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: combine_v4i64_abs_abs: +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpabsq %ymm0, %ymm0 +; AVX512VL-NEXT: retq %n1 = sub <4 x 
i64> zeroinitializer, %a %b1 = icmp slt <4 x i64> %a, zeroinitializer %a1 = select <4 x i1> %b1, <4 x i64> %n1, <4 x i64> %a diff --git a/test/CodeGen/X86/commuted-blend-mask.ll b/test/CodeGen/X86/commuted-blend-mask.ll index e6322cbb7a14b..37830509d5a27 100644 --- a/test/CodeGen/X86/commuted-blend-mask.ll +++ b/test/CodeGen/X86/commuted-blend-mask.ll @@ -1,4 +1,5 @@ -; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s ; When commuting the operands of a SSE blend, make sure that the resulting blend ; mask can be encoded as a imm8. @@ -7,7 +8,7 @@ ; pblendw $4294967103, %xmm1, %xmm0 define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b) { - ;CHECK: pblendw $63, %xmm1, %xmm0 +; CHECK: pblendw $63, %xmm1, %xmm0 %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 6, i32 3> ret <4 x i32> %shuffle } diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll index b7031a817e82d..bbfc2ead04c69 100644 --- a/test/CodeGen/X86/ctpop-combine.ll +++ b/test/CodeGen/X86/ctpop-combine.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=corei7 | FileCheck %s +declare i8 @llvm.ctpop.i8(i8) nounwind readnone declare i64 @llvm.ctpop.i64(i64) nounwind readnone define i32 @test1(i64 %x) nounwind readnone { @@ -48,3 +49,16 @@ define i32 @test3(i64 %x) nounwind readnone { %conv = zext i1 %cmp to i32 ret i32 %conv } + +define i8 @test4(i8 %x) nounwind readnone { +; CHECK-LABEL: test4: +; CHECK: # BB#0: +; CHECK-NEXT: andl $127, %edi +; CHECK-NEXT: popcntw %di, %ax +; CHECK-NEXT: # kill: %AL<def> %AL<kill> %AX<kill> +; CHECK-NEXT: retq + %x2 = and i8 %x, 127 + %count = tail call i8 @llvm.ctpop.i8(i8 %x2) + %and = and i8 %count, 7 + ret i8 %and +} diff --git a/test/CodeGen/X86/dbg-baseptr.ll b/test/CodeGen/X86/dbg-baseptr.ll index f69c78af73677..fb0da1b50d11c 100644 --- a/test/CodeGen/X86/dbg-baseptr.ll +++ b/test/CodeGen/X86/dbg-baseptr.ll @@ -16,12 +16,12 @@ define i32 @f0(%struct.s* byval align 8 %input) !dbg !8 { ; CHECK-LABEL: f1: ; CHECK: DEBUG_VALUE: f:input <- [%RBP+16] -define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 { +define i32 @f1(%struct.s* byval align 8 %input) !dbg !19 { %val = load i64, i64* @glob ; this alloca should force FP usage. %stackspace = alloca i32, i64 %val, align 1 store i32* %stackspace, i32** @ptr - call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18 + call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !20, metadata !17), !dbg !21 ret i32 42 } @@ -37,11 +37,11 @@ define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 { ; The parameter should still be referenced through RBP though. 
; CHECK-NOT: DEBUG_VALUE: f:input <- [%RBX ; CHECK: DEBUG_VALUE: f:input <- [%RBP+16] -define i32 @f2(%struct.s* byval align 8 %input) !dbg !8 { +define i32 @f2(%struct.s* byval align 8 %input) !dbg !22 { %val = load i64, i64* @glob %stackspace = alloca i32, i64 %val, align 64 store i32* %stackspace, i32** @ptr - call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18 + call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !23, metadata !17), !dbg !24 ret i32 42 } @@ -73,3 +73,10 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) !17 = !DIExpression() !18 = !DILocation(line: 5, scope: !8) + +!19 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5) +!20 = !DILocalVariable(name: "input", arg: 1, scope: !19, file: !3, line: 5, type: !9) +!21 = !DILocation(line: 5, scope: !19) +!22 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5) +!23 = !DILocalVariable(name: "input", arg: 1, scope: !22, file: !3, line: 5, type: !9) +!24 = !DILocation(line: 5, scope: !22) diff --git a/test/CodeGen/X86/eflags-copy-expansion.mir b/test/CodeGen/X86/eflags-copy-expansion.mir index 36044b4d20594..28f47c3c2496a 100644 --- a/test/CodeGen/X86/eflags-copy-expansion.mir +++ b/test/CodeGen/X86/eflags-copy-expansion.mir @@ -25,7 +25,6 @@ liveins: body: | bb.0.entry: liveins: %edi - successors: %bb.1.false NOOP implicit-def %al ; The bug was triggered only when LivePhysReg is used, which diff --git a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll index e86d094ac341f..f9ecf707810b3 100644 --- a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll +++ b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll @@ -35,8 +35,8 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) { ; CHECK-NEXT: pop ; CHECK-NEXT: ret -define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !4 { - call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !5, metadata !6), !dbg !7 +define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 { + call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !9, metadata !6), !dbg !10 ret void } @@ -64,3 +64,6 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) !5 = !DILocalVariable(name: "w", scope: !4) !6 = !DIExpression() !7 = !DILocation(line: 210, column: 12, scope: !4) +!8 = distinct !DISubprogram(name: "withDebug", unit: !0) +!9 = !DILocalVariable(name: "w", scope: !8) +!10 = !DILocation(line: 210, column: 12, scope: !8) diff --git a/test/CodeGen/X86/implicit-null-checks.mir b/test/CodeGen/X86/implicit-null-checks.mir index 39bfedaa7814a..d0ba057fa009c 100644 --- a/test/CodeGen/X86/implicit-null-checks.mir +++ b/test/CodeGen/X86/implicit-null-checks.mir @@ -384,14 +384,12 @@ liveins: body: | bb.0.entry: - successors: %bb.3.is_null, %bb.1.not_null liveins: %esi, %rdi TEST64rr %rdi, %rdi, implicit-def %eflags JE_1 %bb.3.is_null, implicit %eflags bb.1.not_null: - successors: %bb.4.ret_100, %bb.2.ret_200 liveins: %esi, %rdi %eax = MOV32ri 2200000 @@ -427,7 +425,6 @@ liveins: body: | bb.0.entry: - successors: %bb.3.is_null, %bb.1.not_null liveins: %esi, %rdi, %rdx %eax = MOV32rm killed %rdx, 1, _, 0, _ :: (volatile load 4 from %ir.ptr) @@ -435,7 +432,6 @@ body: | JE_1 %bb.3.is_null, implicit %eflags bb.1.not_null: - successors: 
%bb.4.ret_100, %bb.2.ret_200 liveins: %esi, %rdi %eax = MOV32ri 2200000 @@ -444,7 +440,6 @@ body: | JE_1 %bb.4.ret_100, implicit %eflags bb.2.ret_200: - successors: %bb.3.is_null %eax = MOV32ri 200 @@ -472,14 +467,12 @@ liveins: body: | bb.0.entry: - successors: %bb.3.is_null, %bb.1.not_null liveins: %esi, %rdi TEST64rr %rdi, %rdi, implicit-def %eflags JE_1 %bb.3.is_null, implicit %eflags bb.1.not_null: - successors: %bb.4.ret_100, %bb.2.ret_200 liveins: %esi, %rdi %eax = MOV32ri 2200000 @@ -515,14 +508,12 @@ liveins: body: | bb.0.entry: - successors: %bb.3.is_null, %bb.1.not_null liveins: %rsi, %rdi TEST64rr %rdi, %rdi, implicit-def %eflags JE_1 %bb.3.is_null, implicit %eflags bb.1.not_null: - successors: %bb.4.ret_100, %bb.2.ret_200 liveins: %rsi, %rdi %rdi = MOV64ri 5000 @@ -557,14 +548,12 @@ liveins: body: | bb.0.entry: - successors: %bb.3.is_null, %bb.1.not_null liveins: %rsi, %rdi, %rdx TEST64rr %rdi, %rdi, implicit-def %eflags JE_1 %bb.3.is_null, implicit %eflags bb.1.not_null: - successors: %bb.4.ret_100, %bb.2.ret_200 liveins: %rsi, %rdi, %rdx %rbx = MOV64rr %rdx @@ -603,7 +592,6 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx', # CHECK: CALL64pcrel32 body: | bb.0.entry: - successors: %bb.2.leave, %bb.1.stay liveins: %rdi, %rbx frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp @@ -645,7 +633,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -680,7 +667,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -712,7 +698,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000) liveins: %rsi, %rdi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -745,7 +730,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000) liveins: %rsi, %rdi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -779,7 +763,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -810,7 +793,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -842,7 +824,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -874,7 +855,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -910,7 +890,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -941,7 +920,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -974,7 +952,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1006,7 +983,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1042,7 +1018,6 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', 
'%bx', '%ebp', '%ebx', '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ] body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rbx frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp @@ -1082,7 +1057,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1116,7 +1090,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1149,7 +1122,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1182,7 +1154,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1214,7 +1185,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1246,7 +1216,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags @@ -1279,7 +1248,6 @@ liveins: - { reg: '%rsi' } body: | bb.0.entry: - successors: %bb.2.is_null, %bb.1.not_null liveins: %rdi, %rsi TEST64rr %rdi, %rdi, implicit-def %eflags diff --git a/test/CodeGen/X86/invalid-liveness.mir b/test/CodeGen/X86/invalid-liveness.mir index ca862472ba86b..c1da65e0be698 100644 --- a/test/CodeGen/X86/invalid-liveness.mir +++ b/test/CodeGen/X86/invalid-liveness.mir @@ -16,12 +16,10 @@ registers: - { id: 0, class: gr32 } body: | bb.0: - successors: %bb.2, %bb.3 JG_1 %bb.2, implicit %eflags JMP_1 %bb.3 bb.2: - successors: %bb.3 %0 = IMPLICIT_DEF JMP_1 %bb.3 diff --git a/test/CodeGen/X86/machine-region-info.mir b/test/CodeGen/X86/machine-region-info.mir index 0998fe97c2353..78823a3eb0068 100644 --- a/test/CodeGen/X86/machine-region-info.mir +++ b/test/CodeGen/X86/machine-region-info.mir @@ -4,67 +4,48 @@ name: fun body: | bb.0: - successors: %bb.1, %bb.7 - CMP32ri8 %edi, 40, implicit-def %eflags JNE_1 %bb.7, implicit killed %eflags JMP_1 %bb.1 bb.1: - successors: %bb.2, %bb.11 - CMP32ri8 %edi, 1, implicit-def %eflags JNE_1 %bb.11, implicit killed %eflags JMP_1 %bb.2 bb.2: - successors: %bb.3, %bb.5 - CMP32ri8 %edi, 2, implicit-def %eflags JNE_1 %bb.5, implicit killed %eflags JMP_1 %bb.3 bb.3: - successors: %bb.4, %bb.5 - CMP32ri8 %edi, 90, implicit-def %eflags JNE_1 %bb.5, implicit killed %eflags JMP_1 %bb.4 bb.4: - successors: %bb.5 bb.5: - successors: %bb.6, %bb.11 - CMP32ri8 %edi, 4, implicit-def %eflags JNE_1 %bb.11, implicit killed %eflags JMP_1 %bb.6 bb.6: - successors: %bb.11 - JMP_1 %bb.11 bb.7: - successors: %bb.9, %bb.8 - CMP32ri8 %edi, 5, implicit-def %eflags JE_1 %bb.9, implicit killed %eflags JMP_1 %bb.8 bb.8: - successors: %bb.9 bb.9: - successors: %bb.11, %bb.10 - CMP32ri8 %edi, 6, implicit-def %eflags JE_1 %bb.11, implicit killed %eflags JMP_1 %bb.10 bb.10: - successors: %bb.11 bb.11: RET 0 @@ -74,10 +55,10 @@ body: | # CHECK: Region tree: # CHECK-NEXT: [0] BB#0 => <Function Return> # CHECK-NEXT: [1] BB#0 => BB#11 +# CHECK-NEXT: [2] BB#7 => BB#9 +# CHECK-NEXT: [2] BB#9 => BB#11 # CHECK-NEXT: [2] BB#1 => BB#11 # CHECK-NEXT: [3] BB#2 => BB#5 # CHECK-NEXT: [4] BB#3 => BB#5 # CHECK-NEXT: [3] BB#5 => BB#11 -# CHECK-NEXT: [2] BB#7 => BB#9 -# CHECK-NEXT: [2] BB#9 => BB#11 # CHECK-NEXT: End region tree 
diff --git a/test/CodeGen/X86/ms-inline-asm-avx512.ll b/test/CodeGen/X86/ms-inline-asm-avx512.ll new file mode 100644 index 0000000000000..be60f5bca1619 --- /dev/null +++ b/test/CodeGen/X86/ms-inline-asm-avx512.ll @@ -0,0 +1,24 @@ +; RUN: llc < %s | FileCheck %s + +; Generated from clang/test/CodeGen/ms-inline-asm-avx512.c + +target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-windows-msvc" + +; Function Attrs: noinline nounwind +define void @ignore_fe_size() #0 { +entry: + %c = alloca i8, align 1 + call void asm sideeffect inteldialect "vaddps xmm1, xmm2, $1{1to4}\0A\09vaddps xmm1, xmm2, $2\0A\09mov eax, $3\0A\09mov $0, rax", "=*m,*m,*m,*m,~{eax},~{xmm1},~{dirflag},~{fpsr},~{flags}"(i8* %c, i8* %c, i8* %c, i8* %c) #1 + ret void +} + +; CHECK-LABEL: ignore_fe_size: +; CHECK: vaddps 7(%rsp){1to4}, %xmm2, %xmm1 +; CHECK: vaddps 7(%rsp), %xmm2, %xmm1 +; CHECK: movl 7(%rsp), %eax +; CHECK: movq %rax, 7(%rsp) +; CHECK: retq + +attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind } diff --git a/test/CodeGen/X86/pr27681.mir b/test/CodeGen/X86/pr27681.mir index 3e931b182e4e9..002761bc1e687 100644 --- a/test/CodeGen/X86/pr27681.mir +++ b/test/CodeGen/X86/pr27681.mir @@ -25,7 +25,6 @@ stack: - { id: 2, type: spill-slot, offset: -32, size: 4, alignment: 4 } body: | bb.0: - successors: %bb.1 liveins: %ebp, %ebx, %edi, %esi frame-setup PUSH32r killed %ebp, implicit-def %esp, implicit %esp @@ -41,7 +40,6 @@ body: | %edx = MOV32ri 6 bb.1: - successors: %bb.3, %bb.2 liveins: %eax, %ebp, %ebx, %ecx, %edi, %edx %ebp = SHR32rCL killed %ebp, implicit-def dead %eflags, implicit %cl @@ -66,7 +64,6 @@ body: | JE_1 %bb.3, implicit %eflags bb.2: - successors: %bb.3 liveins: %cl, %eax, %ebp, %esi OR32mr %esp, 1, _, 8, _, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1) diff --git a/test/CodeGen/X86/pr32907.ll b/test/CodeGen/X86/pr32907.ll new file mode 100644 index 0000000000000..bc03fbe068439 --- /dev/null +++ b/test/CodeGen/X86/pr32907.ll @@ -0,0 +1,54 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42 +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 + +define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) { +; SSE-LABEL: PR32907: +; SSE: # BB#0: # %entry +; SSE-NEXT: psubq %xmm1, %xmm0 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE-NEXT: pxor %xmm1, %xmm1 +; SSE-NEXT: psubq 
%xmm0, %xmm1 +; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: pandn %xmm0, %xmm2 +; SSE-NEXT: por %xmm2, %xmm1 +; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX2-LABEL: PR32907: +; AVX2: # BB#0: # %entry +; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm2 +; AVX2-NEXT: vpandn %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: PR32907: +; AVX512: # BB#0: # %entry +; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $63, %zmm0, %zmm1 +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm2 +; AVX512-NEXT: vpandn %xmm0, %xmm1, %xmm0 +; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq +entry: + %sub13.i = sub <2 x i64> %astype.i, %astype6.i + %x.lobit.i.i = ashr <2 x i64> %sub13.i, <i64 63, i64 63> + %sub.i.i = sub <2 x i64> zeroinitializer, %sub13.i + %0 = xor <2 x i64> %x.lobit.i.i, <i64 -1, i64 -1> + %1 = and <2 x i64> %sub13.i, %0 + %2 = and <2 x i64> %x.lobit.i.i, %sub.i.i + %cond.i.i = or <2 x i64> %1, %2 + ret <2 x i64> %cond.i.i +} diff --git a/test/CodeGen/X86/pre-coalesce.mir b/test/CodeGen/X86/pre-coalesce.mir index 11805fe090b42..17d447dd097b9 100644 --- a/test/CodeGen/X86/pre-coalesce.mir +++ b/test/CodeGen/X86/pre-coalesce.mir @@ -83,8 +83,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.4(0x30000000), %bb.1.while.body.preheader(0x50000000) - %0 = MOV64rm %rip, 1, _, @b, _ :: (dereferenceable load 8 from @b) %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0) TEST8rr %12, %12, implicit-def %eflags @@ -92,17 +90,12 @@ body: | JNE_1 %bb.1.while.body.preheader, implicit killed %eflags bb.4: - successors: %bb.3.while.end(0x80000000) - %10 = COPY %11 JMP_1 %bb.3.while.end bb.1.while.body.preheader: - successors: %bb.2.while.body(0x80000000) bb.2.while.body: - successors: %bb.3.while.end(0x04000000), %bb.2.while.body(0x7c000000) - %8 = MOVSX32rr8 %12 %10 = COPY %11 %10 = SHL32ri %10, 5, implicit-def dead %eflags diff --git a/test/CodeGen/X86/regcall-no-plt.ll b/test/CodeGen/X86/regcall-no-plt.ll new file mode 100644 index 0000000000000..d525448b60ca8 --- /dev/null +++ b/test/CodeGen/X86/regcall-no-plt.ll @@ -0,0 +1,44 @@ +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic < %s | FileCheck %s +; RUN: llc -mtriple=x86_64-freebsd -relocation-model=pic < %s | FileCheck %s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; According to x86-64 psABI, xmm0-xmm7 can be used to pass function parameters. +;; However regcall calling convention uses also xmm8-xmm15 to pass function +;; parameters which violates x86-64 psABI. +;; Detail info about it can be found at: +;; https://sourceware.org/bugzilla/show_bug.cgi?id=21265 +;; +;; We encounter the violation symptom when using PIC with lazy binding +;; optimization. +;; In that case the PLT mechanism as described in x86_64 psABI will +;; not preserve xmm8-xmm15 registers and will lead to miscompilation. +;; +;; The agreed solution is to disable PLT for regcall calling convention for +;; SystemV using ELF format. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +declare void @lazy() +declare x86_regcallcc void @regcall_not_lazy() + +; CHECK-LABEL: foo: +; CHECK: callq lazy@PLT +; CHECK: callq *regcall_not_lazy@GOTPCREL(%rip) +define void @foo() nounwind { + call void @lazy() + call void @regcall_not_lazy() + ret void +} + +; CHECK-LABEL: tail_call_regcall: +; CHECK: jmpq *regcall_not_lazy@GOTPCREL(%rip) +define void @tail_call_regcall() nounwind { + tail call void @regcall_not_lazy() + ret void +} + +; CHECK-LABEL: tail_call_regular: +; CHECK: jmp lazy +define void @tail_call_regular() nounwind { + tail call void @lazy() + ret void +} diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll index d053c63dcdb37..a3ba589758009 100644 --- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll +++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll @@ -392,8 +392,10 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind { ; AVX512BW-LABEL: shuffle_v32i16_to_v8i16: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0 +; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] ; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax -; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1 +; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 ; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2 ; AVX512BW-NEXT: vmovd %xmm2, %eax ; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 @@ -416,8 +418,10 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind { ; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16: ; AVX512BWVL: # BB#0: ; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0 +; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512BWVL-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] ; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1 +; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 ; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2 ; AVX512BWVL-NEXT: vmovd %xmm2, %eax ; AVX512BWVL-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 diff --git a/test/CodeGen/X86/stack-folding-int-avx512.ll b/test/CodeGen/X86/stack-folding-int-avx512.ll index 04a7d11590147..38e19efb71326 100644 --- a/test/CodeGen/X86/stack-folding-int-avx512.ll +++ b/test/CodeGen/X86/stack-folding-int-avx512.ll @@ -204,8 +204,8 @@ define <64 x i8> @stack_fold_pabsb_maskz(<64 x i8> %a0, i64 %mask) { } define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) { - ;check-label: stack_fold_pabsd - ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte folded reload + ;CHECK-LABEL: stack_fold_pabsd + ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> undef, i16 -1) ret <16 x i32> %2 @@ -213,16 +213,16 @@ define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) { declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16) nounwind readnone define <16 x i32> @stack_fold_pabsd_mask(<16 x i32> %passthru, <16 x i32> %a0, i16 %mask) { - ;check-label: stack_fold_pabsd - ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte 
folded reload + ;CHECK-LABEL: stack_fold_pabsd_mask + ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) ret <16 x i32> %2 } define <16 x i32> @stack_fold_pabsd_maskz(<16 x i32> %a0, i16 %mask) { - ;check-label: stack_fold_pabsd - ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte folded reload + ;CHECK-LABEL: stack_fold_pabsd_maskz + ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> zeroinitializer, i16 %mask) ret <16 x i32> %2 diff --git a/test/CodeGen/X86/vec_partial.ll b/test/CodeGen/X86/vec_partial.ll index e5ac81add7f6b..ee15c2af6dd2e 100644 --- a/test/CodeGen/X86/vec_partial.ll +++ b/test/CodeGen/X86/vec_partial.ll @@ -1,12 +1,18 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 ; PR11580 define <3 x float> @addf3(<3 x float> %x) { -; CHECK-LABEL: addf3: -; CHECK: # BB#0: # %entry -; CHECK-NEXT: addps {{.*}}(%rip), %xmm0 -; CHECK-NEXT: retq +; X86-LABEL: addf3: +; X86: # BB#0: # %entry +; X86-NEXT: addps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: addf3: +; X64: # BB#0: # %entry +; X64-NEXT: addps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq entry: %add = fadd <3 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> ret <3 x float> %add @@ -14,9 +20,13 @@ entry: ; PR11580 define <4 x float> @cvtf3_f4(<3 x float> %x) { -; CHECK-LABEL: cvtf3_f4: -; CHECK: # BB#0: # %entry -; CHECK-NEXT: retq +; X86-LABEL: cvtf3_f4: +; X86: # BB#0: # %entry +; X86-NEXT: retl +; +; X64-LABEL: cvtf3_f4: +; X64: # BB#0: # %entry +; X64-NEXT: retq entry: %extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> ret <4 x float> %extractVec @@ -24,9 +34,13 @@ entry: ; PR11580 define <3 x float> @cvtf4_f3(<4 x float> %x) { -; CHECK-LABEL: cvtf4_f3: -; CHECK: # BB#0: # %entry -; CHECK-NEXT: retq +; X86-LABEL: cvtf4_f3: +; X86: # BB#0: # %entry +; X86-NEXT: retl +; +; X64-LABEL: cvtf4_f3: +; X64: # BB#0: # %entry +; X64-NEXT: retq entry: %extractVec = shufflevector <4 x float> %x, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> ret <3 x float> %extractVec diff --git a/test/CodeGen/X86/vec_reassociate.ll b/test/CodeGen/X86/vec_reassociate.ll index 0d3373528f583..5234b0c8a77cd 100644 --- a/test/CodeGen/X86/vec_reassociate.ll +++ b/test/CodeGen/X86/vec_reassociate.ll @@ -1,10 +1,17 @@ -; 
RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @add_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: paddd %xmm1, %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: add_4i32: +; X86: # BB#0: +; X86-NEXT: paddd %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_4i32: +; X64: # BB#0: +; X64-NEXT: paddd %xmm1, %xmm0 +; X64-NEXT: retq %1 = add <4 x i32> %a0, <i32 1, i32 -2, i32 3, i32 -4> %2 = add <4 x i32> %a1, <i32 -1, i32 2, i32 -3, i32 4> %3 = add <4 x i32> %1, %2 @@ -12,10 +19,15 @@ define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @add_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: paddd %xmm1, %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: add_4i32_commute: +; X86: # BB#0: +; X86-NEXT: paddd %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_4i32_commute: +; X64: # BB#0: +; X64-NEXT: paddd %xmm1, %xmm0 +; X64-NEXT: retq %1 = add <4 x i32> <i32 1, i32 -2, i32 3, i32 -4>, %a0 %2 = add <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, %a1 %3 = add <4 x i32> %1, %2 @@ -23,11 +35,17 @@ define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @mul_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: pmulld %xmm1, %xmm0 - ;CHECK-NEXT: pmulld .LCPI2_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: mul_4i32: +; X86: # BB#0: +; X86-NEXT: pmulld %xmm1, %xmm0 +; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: mul_4i32: +; X64: # BB#0: +; X64-NEXT: pmulld %xmm1, %xmm0 +; X64-NEXT: pmulld {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4> %2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1> %3 = mul <4 x i32> %1, %2 @@ -35,11 +53,17 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @mul_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: pmulld %xmm1, %xmm0 - ;CHECK-NEXT: pmulld .LCPI3_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: mul_4i32_commute: +; X86: # BB#0: +; X86-NEXT: pmulld %xmm1, %xmm0 +; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: mul_4i32_commute: +; X64: # BB#0: +; X64-NEXT: pmulld %xmm1, %xmm0 +; X64-NEXT: pmulld {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0 %2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1 %3 = mul <4 x i32> %1, %2 @@ -47,11 +71,17 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @and_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: andps %xmm1, %xmm0 - ;CHECK-NEXT: andps .LCPI4_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: and_4i32: +; X86: # BB#0: +; X86-NEXT: andps %xmm1, %xmm0 +; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: and_4i32: +; X64: # BB#0: +; X64-NEXT: andps %xmm1, %xmm0 +; X64-NEXT: andps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3> %2 = and <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1> %3 = and <4 x i32> %1, %2 @@ -59,11 +89,17 @@ define <4 x i32> 
@and_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @and_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: andps %xmm1, %xmm0 - ;CHECK-NEXT: andps .LCPI5_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: and_4i32_commute: +; X86: # BB#0: +; X86-NEXT: andps %xmm1, %xmm0 +; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: and_4i32_commute: +; X64: # BB#0: +; X64-NEXT: andps %xmm1, %xmm0 +; X64-NEXT: andps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = and <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 %2 = and <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1 %3 = and <4 x i32> %1, %2 @@ -71,11 +107,17 @@ define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @or_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: orps %xmm1, %xmm0 - ;CHECK-NEXT: orps .LCPI6_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: or_4i32: +; X86: # BB#0: +; X86-NEXT: orps %xmm1, %xmm0 +; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: or_4i32: +; X64: # BB#0: +; X64-NEXT: orps %xmm1, %xmm0 +; X64-NEXT: orps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = or <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3> %2 = or <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1> %3 = or <4 x i32> %1, %2 @@ -83,23 +125,35 @@ define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @or_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: orps %xmm1, %xmm0 - ;CHECK-NEXT: orps .LCPI7_0(%rip), %xmm0 - ;CHECK-NEXT: retq - %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 +; X86-LABEL: or_4i32_commute: +; X86: # BB#0: +; X86-NEXT: orps %xmm1, %xmm0 +; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: or_4i32_commute: +; X64: # BB#0: +; X64-NEXT: orps %xmm1, %xmm0 +; X64-NEXT: orps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 %2 = or <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1 %3 = or <4 x i32> %1, %2 ret <4 x i32> %3 } define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @xor_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: xorps %xmm1, %xmm0 - ;CHECK-NEXT: xorps .LCPI8_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: xor_4i32: +; X86: # BB#0: +; X86-NEXT: xorps %xmm1, %xmm0 +; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: xor_4i32: +; X64: # BB#0: +; X64-NEXT: xorps %xmm1, %xmm0 +; X64-NEXT: xorps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = xor <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3> %2 = xor <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1> %3 = xor <4 x i32> %1, %2 @@ -107,11 +161,17 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @xor_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: xorps %xmm1, %xmm0 - ;CHECK-NEXT: xorps .LCPI9_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: xor_4i32_commute: +; X86: # BB#0: +; X86-NEXT: xorps %xmm1, %xmm0 +; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: xor_4i32_commute: +; X64: # BB#0: +; X64-NEXT: xorps %xmm1, %xmm0 +; X64-NEXT: xorps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = xor <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 %2 = xor <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1 %3 = xor <4 x i32> %1, %2 diff --git a/test/CodeGen/X86/vector-lzcnt-512.ll b/test/CodeGen/X86/vector-lzcnt-512.ll index 79d133bbfb8f3..88378eb51a27b 
100644 --- a/test/CodeGen/X86/vector-lzcnt-512.ll +++ b/test/CodeGen/X86/vector-lzcnt-512.ll @@ -1,39 +1,337 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CD +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CDBW +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512DQ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind { -; ALL-LABEL: testv8i64: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntq %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv8i64: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv8i64: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv8i64: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv8i64: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $32, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; 
AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 0) ret <8 x i64> %out } define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind { -; ALL-LABEL: testv8i64u: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntq %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv8i64u: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv8i64u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv8i64u: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv8i64u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $32, %zmm0, 
%zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 -1) ret <8 x i64> %out } define <16 x i32> @testv16i32(<16 x i32> %in) nounwind { -; ALL-LABEL: testv16i32: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntd %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv16i32: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv16i32: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv16i32: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2 +; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv16i32: +; AVX512DQ: 
## BB#0: +; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm5, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 0) ret <16 x i32> %out } define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind { -; ALL-LABEL: testv16i32u: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntd %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv16i32u: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv16i32u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv16i32u: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = 
[0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2 +; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv16i32u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm5, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 -1) ret <16 x i32> %out } @@ -52,20 +350,78 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind { ; AVX512CD-NEXT: vpsubw %ymm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv32i16: +; 
AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdw %zmm1, %ymm1 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv32i16: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, 
%ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm7 +; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpaddw %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm5 +; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm2 +; AVX512DQ-NEXT: vpaddb %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 0) ret <32 x i16> %out } @@ -84,20 +440,78 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind { ; AVX512CD-NEXT: vpsubw %ymm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv32i16u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdw %zmm1, %ymm1 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv32i16u: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 -; 
AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv32i16u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm7 +; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpaddw %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm5 +; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm2 +; AVX512DQ-NEXT: vpaddb %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 -1) ret <32 x i16> %out } @@ -128,32 +542,78 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind { ; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv64i8: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = 
xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdb %zmm1, %xmm1 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv64i8: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = 
xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandnq %zmm2, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm1, %zmm3, %zmm1 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpxor %ymm5, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpcmpeqb 
%ymm5, %ymm0, %ymm6 +; AVX512DQ-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0) ret <64 x i8> %out } @@ -184,32 +644,78 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind { ; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv64i8u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdb %zmm1, %xmm1 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv64i8u: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 
$1, %zmm0, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandnq %zmm2, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm1, %zmm3, %zmm1 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; 
AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv64i8u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpxor %ymm5, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm6 +; AVX512DQ-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1) ret <64 x i8> %out } diff --git a/test/CodeGen/X86/vector-shuffle-variable-128.ll b/test/CodeGen/X86/vector-shuffle-variable-128.ll index 87fd4a7bf6b9b..bde8a16d2a5a6 100644 --- a/test/CodeGen/X86/vector-shuffle-variable-128.ll +++ b/test/CodeGen/X86/vector-shuffle-variable-128.ll @@ -1303,70 +1303,39 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> % ; SSE41-NEXT: andl $7, %r8d ; SSE41-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSE41-NEXT: andl $7, %r9d -; SSE41-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; SSE41-NEXT: movd %eax, %xmm1 -; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm1 -; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm1 -; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm1 -; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm1 -; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm1 ; SSE41-NEXT: pxor %xmm0, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7] +; SSE41-NEXT: pinsrw $0, -40(%rsp,%rdi,2), %xmm0 +; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0 +; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm0 +; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm0 +; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm0 +; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm0 ; SSE41-NEXT: retq ; -; AVX1-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: -; AVX1: # BB#0: -; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> -; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> -; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> -; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> -; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> -; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> -; AVX1-NEXT: andl $7, %edi -; AVX1-NEXT: andl $7, %esi -; AVX1-NEXT: andl $7, %edx -; AVX1-NEXT: andl $7, %ecx -; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: andl $7, %r8d -; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: andl $7, %r9d -; AVX1-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $5, 
-24(%rsp,%r9,2), %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7] -; AVX1-NEXT: retq -; -; AVX2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: -; AVX2: # BB#0: -; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> -; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> -; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> -; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> -; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> -; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> -; AVX2-NEXT: andl $7, %edi -; AVX2-NEXT: andl $7, %esi -; AVX2-NEXT: andl $7, %edx -; AVX2-NEXT: andl $7, %ecx -; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: andl $7, %r8d -; AVX2-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: andl $7, %r9d -; AVX2-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0 -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3] -; AVX2-NEXT: retq +; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: +; AVX: # BB#0: +; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> +; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> +; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> +; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> +; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> +; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> +; AVX-NEXT: andl $7, %edi +; AVX-NEXT: andl $7, %esi +; AVX-NEXT: andl $7, %edx +; AVX-NEXT: andl $7, %ecx +; AVX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) +; AVX-NEXT: andl $7, %r8d +; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp) +; AVX-NEXT: andl $7, %r9d +; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $0, -40(%rsp,%rdi,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0 +; AVX-NEXT: retq %x0 = extractelement <8 x i16> %x, i16 %i0 %y1 = extractelement <8 x i16> %y, i16 %i1 %x2 = extractelement <8 x i16> %x, i16 %i2 diff --git a/test/CodeGen/X86/win64_eh_leaf.ll b/test/CodeGen/X86/win64_eh_leaf.ll index 21a423ab36a9b..35d55a9073754 100644 --- a/test/CodeGen/X86/win64_eh_leaf.ll +++ b/test/CodeGen/X86/win64_eh_leaf.ll @@ -29,3 +29,12 @@ entry: ; and no unwind info in the object file. 
 ; READOBJ-NOT: leaf_func
 }
+
+define void @naked_func() naked {
+  call void asm sideeffect "ret", ""()
+  unreachable
+}
+; ASM-LABEL: naked_func:
+; ASM-NOT: .seh_
+; ASM: ret
+; ASM-NOT: .seh_
diff --git a/test/CodeGen/X86/xray-attribute-instrumentation.ll b/test/CodeGen/X86/xray-attribute-instrumentation.ll
index c52ccf9356bc5..7c60327d2c304 100644
--- a/test/CodeGen/X86/xray-attribute-instrumentation.ll
+++ b/test/CodeGen/X86/xray-attribute-instrumentation.ll
@@ -15,10 +15,17 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
 }
 ; CHECK: .p2align 4, 0x90
 ; CHECK-NEXT: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_0
 ; CHECK-NEXT: .section {{.*}}xray_instr_map
 ; CHECK-LABEL: Lxray_synthetic_0:
 ; CHECK: .quad {{.*}}xray_sled_0
 ; CHECK: .quad {{.*}}xray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_end0
+
 ; We test multiple returns in a single function to make sure we're getting all
 ; of them with XRay instrumentation.
@@ -46,8 +53,14 @@ NotEqual:
 }
 ; CHECK: .p2align 4, 0x90
 ; CHECK-NEXT: .quad {{.*}}xray_synthetic_1
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_1
 ; CHECK-NEXT: .section {{.*}}xray_instr_map
 ; CHECK-LABEL: Lxray_synthetic_1:
 ; CHECK: .quad {{.*}}xray_sled_2
 ; CHECK: .quad {{.*}}xray_sled_3
 ; CHECK: .quad {{.*}}xray_sled_4
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .quad {{.*}}xray_synthetic_1
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_end1
diff --git a/test/CodeGen/X86/xray-custom-log.ll b/test/CodeGen/X86/xray-custom-log.ll
new file mode 100644
index 0000000000000..63625d44b4cb2
--- /dev/null
+++ b/test/CodeGen/X86/xray-custom-log.ll
@@ -0,0 +1,23 @@
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define i32 @fn() nounwind noinline uwtable "function-instrument"="xray-always" {
+  %eventptr = alloca i8
+  %eventsize = alloca i32
+  store i32 3, i32* %eventsize
+  %val = load i32, i32* %eventsize
+  call void @llvm.xray.customevent(i8* %eventptr, i32 %val)
+  ; CHECK-LABEL: Lxray_event_sled_0:
+  ; CHECK-NEXT: .ascii "\353\024
+  ; CHECK-NEXT: pushq %rax
+  ; CHECK-NEXT: movq {{.*}}, %rdi
+  ; CHECK-NEXT: movq {{.*}}, %rsi
+  ; CHECK-NEXT: movabsq $__xray_CustomEvent, %rax
+  ; CHECK-NEXT: callq *%rax
+  ; CHECK-NEXT: popq %rax
+  ret i32 0
+}
+; CHECK: .section {{.*}}xray_instr_map
+; CHECK-LABEL: Lxray_synthetic_0:
+; CHECK: .quad {{.*}}xray_event_sled_0
+
+declare void @llvm.xray.customevent(i8*, i32)
diff --git a/test/CodeGen/X86/xray-loop-detection.ll b/test/CodeGen/X86/xray-loop-detection.ll
new file mode 100644
index 0000000000000..3cd6b4aa6f8c4
--- /dev/null
+++ b/test/CodeGen/X86/xray-loop-detection.ll
@@ -0,0 +1,23 @@
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-darwin-unknown < %s | FileCheck %s
+
+define i32 @foo(i32 %i) nounwind noinline uwtable "xray-instruction-threshold"="1" {
+entry:
+  br label %Test
+Test:
+  %indvar = phi i32 [0, %entry], [%nextindvar, %Inc]
+  %cond = icmp eq i32 %indvar, %i
+  br i1 %cond, label %Exit, label %Inc
+Inc:
+  %nextindvar = add i32 %indvar, 1
+  br label %Test
+Exit:
+  %retval = phi i32 [%indvar, %Test]
+  ret i32 %retval
+}
+
+; CHECK-LABEL: xray_sled_0:
+; CHECK-NEXT: .ascii "\353\t"
+; CHECK-NEXT: nopw 512(%rax,%rax)
+; CHECK-LABEL: Ltmp0:
+
diff --git a/test/CodeGen/X86/xray-tail-call-sled.ll b/test/CodeGen/X86/xray-tail-call-sled.ll
index ece786a5e809b..b12c78a77b203 100644
--- a/test/CodeGen/X86/xray-tail-call-sled.ll
+++ b/test/CodeGen/X86/xray-tail-call-sled.ll
@@ -14,11 +14,17 @@ define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-alway
 ; CHECK-NEXT: nopw %cs:512(%rax,%rax)
 }
 ; CHECK: .p2align 4, 0x90
-; CHECK-NEXT: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_0{{.*}}
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_0{{.*}}
 ; CHECK-NEXT: .section {{.*}}xray_instr_map
 ; CHECK-LABEL: Lxray_synthetic_0:
 ; CHECK: .quad {{.*}}xray_sled_0
 ; CHECK: .quad {{.*}}xray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK-NEXT: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_end0

 define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
 ; CHECK: .p2align 1, 0x90
@@ -36,7 +42,13 @@ define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-alway
   ret i32 %retval
 }
 ; CHECK: .p2align 4, 0x90
-; CHECK-NEXT: .quad {{.*}}xray_synthetic_1
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_1{{.*}}
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_1{{.*}}
 ; CHECK-LABEL: Lxray_synthetic_1:
 ; CHECK: .quad {{.*}}xray_sled_2
 ; CHECK: .quad {{.*}}xray_sled_3
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .quad {{.*}}xray_synthetic_1
+; CHECK: .quad {{.*}}xray_synthetic_end1