author    Dimitry Andric <dim@FreeBSD.org>    2017-05-08 17:12:57 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2017-05-08 17:12:57 +0000
commit    c46e6a5940c50058e00c0c5f9123fd82e338d29a (patch)
tree      89a719d723035c54a190b1f81d329834f1f93336 /test
parent    148779df305667b6942fee7e758fdf81a6498f38 (diff)
Diffstat (limited to 'test')
141 files changed, 10119 insertions, 1181 deletions
diff --git a/test/Analysis/CostModel/X86/bitreverse.ll b/test/Analysis/CostModel/X86/bitreverse.ll
index 2eb63babdc343..8d5e1421eb829 100644
--- a/test/Analysis/CostModel/X86/bitreverse.ll
+++ b/test/Analysis/CostModel/X86/bitreverse.ll
@@ -79,7 +79,7 @@ define <4 x i64> @var_bitreverse_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v4i64':
; SSE2: Found an estimated cost of 58 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
@@ -101,7 +101,7 @@ define <8 x i32> @var_bitreverse_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v8i32':
; SSE2: Found an estimated cost of 54 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
@@ -123,7 +123,7 @@ define <16 x i16> @var_bitreverse_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v16i16':
; SSE2: Found an estimated cost of 54 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
@@ -145,7 +145,7 @@ define <32 x i8> @var_bitreverse_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v32i8':
; SSE2: Found an estimated cost of 40 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
diff --git a/test/Analysis/CostModel/X86/ctbits-cost.ll b/test/Analysis/CostModel/X86/ctbits-cost.ll
index 8c7fa9d73151d..aaf092c7b1d75 100644
--- a/test/Analysis/CostModel/X86/ctbits-cost.ll
+++ b/test/Analysis/CostModel/X86/ctbits-cost.ll
@@ -69,7 +69,7 @@ define <4 x i64> @var_ctpop_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v4i64':
; SSE2: Found an estimated cost of 24 for instruction: %ctpop
; SSE42: Found an estimated cost of 14 for instruction: %ctpop
-; AVX1: Found an estimated cost of 14 for instruction: %ctpop
+; AVX1: Found an estimated cost of 16 for instruction: %ctpop
; AVX2: Found an estimated cost of 7 for instruction: %ctpop
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a)
ret <4 x i64> %ctpop
@@ -88,7 +88,7 @@ define <8 x i32> @var_ctpop_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v8i32':
; SSE2: Found an estimated cost of 30 for instruction: %ctpop
; SSE42: Found an estimated cost of 22 for instruction: %ctpop
-; AVX1: Found an estimated cost of 22 for instruction: %ctpop
+; AVX1: Found an estimated cost of 24 for instruction: %ctpop
; AVX2: Found an estimated cost of 11 for instruction: %ctpop
%ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %a)
ret <8 x i32> %ctpop
@@ -107,7 +107,7 @@ define <16 x i16> @var_ctpop_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v16i16':
; SSE2: Found an estimated cost of 26 for instruction: %ctpop
; SSE42: Found an estimated cost of 18 for instruction: %ctpop
-; AVX1: Found an estimated cost of 18 for instruction: %ctpop
+; AVX1: Found an estimated cost of 20 for instruction: %ctpop
; AVX2: Found an estimated cost of 9 for instruction: %ctpop
%ctpop = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %a)
ret <16 x i16> %ctpop
@@ -126,7 +126,7 @@ define <32 x i8> @var_ctpop_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v32i8':
; SSE2: Found an estimated cost of 20 for instruction: %ctpop
; SSE42: Found an estimated cost of 12 for instruction: %ctpop
-; AVX1: Found an estimated cost of 12 for instruction: %ctpop
+; AVX1: Found an estimated cost of 14 for instruction: %ctpop
; AVX2: Found an estimated cost of 6 for instruction: %ctpop
%ctpop = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %a)
ret <32 x i8> %ctpop
@@ -229,7 +229,7 @@ define <4 x i64> @var_ctlz_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v4i64':
; SSE2: Found an estimated cost of 50 for instruction: %ctlz
; SSE42: Found an estimated cost of 46 for instruction: %ctlz
-; AVX1: Found an estimated cost of 46 for instruction: %ctlz
+; AVX1: Found an estimated cost of 48 for instruction: %ctlz
; AVX2: Found an estimated cost of 23 for instruction: %ctlz
%ctlz = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 0)
ret <4 x i64> %ctlz
@@ -239,7 +239,7 @@ define <4 x i64> @var_ctlz_v4i64u(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v4i64u':
; SSE2: Found an estimated cost of 50 for instruction: %ctlz
; SSE42: Found an estimated cost of 46 for instruction: %ctlz
-; AVX1: Found an estimated cost of 46 for instruction: %ctlz
+; AVX1: Found an estimated cost of 48 for instruction: %ctlz
; AVX2: Found an estimated cost of 23 for instruction: %ctlz
%ctlz = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 1)
ret <4 x i64> %ctlz
@@ -267,7 +267,7 @@ define <8 x i32> @var_ctlz_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i32':
; SSE2: Found an estimated cost of 52 for instruction: %ctlz
; SSE42: Found an estimated cost of 36 for instruction: %ctlz
-; AVX1: Found an estimated cost of 36 for instruction: %ctlz
+; AVX1: Found an estimated cost of 38 for instruction: %ctlz
; AVX2: Found an estimated cost of 18 for instruction: %ctlz
%ctlz = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 0)
ret <8 x i32> %ctlz
@@ -277,7 +277,7 @@ define <8 x i32> @var_ctlz_v8i32u(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i32u':
; SSE2: Found an estimated cost of 52 for instruction: %ctlz
; SSE42: Found an estimated cost of 36 for instruction: %ctlz
-; AVX1: Found an estimated cost of 36 for instruction: %ctlz
+; AVX1: Found an estimated cost of 38 for instruction: %ctlz
; AVX2: Found an estimated cost of 18 for instruction: %ctlz
%ctlz = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 1)
ret <8 x i32> %ctlz
@@ -305,7 +305,7 @@ define <16 x i16> @var_ctlz_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i16':
; SSE2: Found an estimated cost of 40 for instruction: %ctlz
; SSE42: Found an estimated cost of 28 for instruction: %ctlz
-; AVX1: Found an estimated cost of 28 for instruction: %ctlz
+; AVX1: Found an estimated cost of 30 for instruction: %ctlz
; AVX2: Found an estimated cost of 14 for instruction: %ctlz
%ctlz = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 0)
ret <16 x i16> %ctlz
@@ -315,7 +315,7 @@ define <16 x i16> @var_ctlz_v16i16u(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i16u':
; SSE2: Found an estimated cost of 40 for instruction: %ctlz
; SSE42: Found an estimated cost of 28 for instruction: %ctlz
-; AVX1: Found an estimated cost of 28 for instruction: %ctlz
+; AVX1: Found an estimated cost of 30 for instruction: %ctlz
; AVX2: Found an estimated cost of 14 for instruction: %ctlz
%ctlz = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 1)
ret <16 x i16> %ctlz
@@ -343,7 +343,7 @@ define <32 x i8> @var_ctlz_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v32i8':
; SSE2: Found an estimated cost of 34 for instruction: %ctlz
; SSE42: Found an estimated cost of 18 for instruction: %ctlz
-; AVX1: Found an estimated cost of 18 for instruction: %ctlz
+; AVX1: Found an estimated cost of 20 for instruction: %ctlz
; AVX2: Found an estimated cost of 9 for instruction: %ctlz
%ctlz = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 0)
ret <32 x i8> %ctlz
@@ -353,7 +353,7 @@ define <32 x i8> @var_ctlz_v32i8u(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v32i8u':
; SSE2: Found an estimated cost of 34 for instruction: %ctlz
; SSE42: Found an estimated cost of 18 for instruction: %ctlz
-; AVX1: Found an estimated cost of 18 for instruction: %ctlz
+; AVX1: Found an estimated cost of 20 for instruction: %ctlz
; AVX2: Found an estimated cost of 9 for instruction: %ctlz
%ctlz = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 1)
ret <32 x i8> %ctlz
@@ -456,7 +456,7 @@ define <4 x i64> @var_cttz_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v4i64':
; SSE2: Found an estimated cost of 28 for instruction: %cttz
; SSE42: Found an estimated cost of 20 for instruction: %cttz
-; AVX1: Found an estimated cost of 20 for instruction: %cttz
+; AVX1: Found an estimated cost of 22 for instruction: %cttz
; AVX2: Found an estimated cost of 10 for instruction: %cttz
%cttz = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %a, i1 0)
ret <4 x i64> %cttz
@@ -466,7 +466,7 @@ define <4 x i64> @var_cttz_v4i64u(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v4i64u':
; SSE2: Found an estimated cost of 28 for instruction: %cttz
; SSE42: Found an estimated cost of 20 for instruction: %cttz
-; AVX1: Found an estimated cost of 20 for instruction: %cttz
+; AVX1: Found an estimated cost of 22 for instruction: %cttz
; AVX2: Found an estimated cost of 10 for instruction: %cttz
%cttz = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %a, i1 1)
ret <4 x i64> %cttz
@@ -494,7 +494,7 @@ define <8 x i32> @var_cttz_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i32':
; SSE2: Found an estimated cost of 36 for instruction: %cttz
; SSE42: Found an estimated cost of 28 for instruction: %cttz
-; AVX1: Found an estimated cost of 28 for instruction: %cttz
+; AVX1: Found an estimated cost of 30 for instruction: %cttz
; AVX2: Found an estimated cost of 14 for instruction: %cttz
%cttz = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %a, i1 0)
ret <8 x i32> %cttz
@@ -504,7 +504,7 @@ define <8 x i32> @var_cttz_v8i32u(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i32u':
; SSE2: Found an estimated cost of 36 for instruction: %cttz
; SSE42: Found an estimated cost of 28 for instruction: %cttz
-; AVX1: Found an estimated cost of 28 for instruction: %cttz
+; AVX1: Found an estimated cost of 30 for instruction: %cttz
; AVX2: Found an estimated cost of 14 for instruction: %cttz
%cttz = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %a, i1 1)
ret <8 x i32> %cttz
@@ -532,7 +532,7 @@ define <16 x i16> @var_cttz_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i16':
; SSE2: Found an estimated cost of 32 for instruction: %cttz
; SSE42: Found an estimated cost of 24 for instruction: %cttz
-; AVX1: Found an estimated cost of 24 for instruction: %cttz
+; AVX1: Found an estimated cost of 26 for instruction: %cttz
; AVX2: Found an estimated cost of 12 for instruction: %cttz
%cttz = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %a, i1 0)
ret <16 x i16> %cttz
@@ -542,7 +542,7 @@ define <16 x i16> @var_cttz_v16i16u(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i16u':
; SSE2: Found an estimated cost of 32 for instruction: %cttz
; SSE42: Found an estimated cost of 24 for instruction: %cttz
-; AVX1: Found an estimated cost of 24 for instruction: %cttz
+; AVX1: Found an estimated cost of 26 for instruction: %cttz
; AVX2: Found an estimated cost of 12 for instruction: %cttz
%cttz = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %a, i1 1)
ret <16 x i16> %cttz
@@ -570,7 +570,7 @@ define <32 x i8> @var_cttz_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v32i8':
; SSE2: Found an estimated cost of 26 for instruction: %cttz
; SSE42: Found an estimated cost of 18 for instruction: %cttz
-; AVX1: Found an estimated cost of 18 for instruction: %cttz
+; AVX1: Found an estimated cost of 20 for instruction: %cttz
; AVX2: Found an estimated cost of 9 for instruction: %cttz
%cttz = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %a, i1 0)
ret <32 x i8> %cttz
@@ -580,7 +580,7 @@ define <32 x i8> @var_cttz_v32i8u(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v32i8u':
; SSE2: Found an estimated cost of 26 for instruction: %cttz
; SSE42: Found an estimated cost of 18 for instruction: %cttz
-; AVX1: Found an estimated cost of 18 for instruction: %cttz
+; AVX1: Found an estimated cost of 20 for instruction: %cttz
; AVX2: Found an estimated cost of 9 for instruction: %cttz
%cttz = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %a, i1 1)
ret <32 x i8> %cttz
diff --git a/test/Analysis/ScalarEvolution/ZeroStep.ll b/test/Analysis/ScalarEvolution/ZeroStep.ll
new file mode 100644
index 0000000000000..fc6ed018e9033
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/ZeroStep.ll
@@ -0,0 +1,18 @@
+; RUN: opt -analyze -scalar-evolution < %s -o - -S | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Test that SCEV is capable of figuring out value of 'IV' that actually does not change.
+; CHECK: Classifying expressions for: @foo
+; CHECK: %iv.i = phi i64
+; CHECK: -5 U: [-5,-4) S: [-5,-4) Exits: -5 LoopDispositions: { %loop: Invariant }
+define void @foo() {
+entry:
+ br label %loop
+
+loop:
+ %iv.i = phi i64 [ -5, %entry ], [ %iv.next.i, %loop ]
+ %iv.next.i = add nsw i64 %iv.i, 0
+ br label %loop
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 02848021dbc09..ac3d4b17f739f 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1541,3 +1541,12 @@ define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2)
%res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
ret <16 x i8> %res
}
+
+; CHECK-LABEL: test_constant_vector
+; CHECK: [[UNDEF:%[0-9]+]](s16) = IMPLICIT_DEF
+; CHECK: [[F:%[0-9]+]](s16) = G_FCONSTANT half 0xH3C00
+; CHECK: [[M:%[0-9]+]](<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
+; CHECK: %d0 = COPY [[M]](<4 x s16>)
+define <4 x half> @test_constant_vector() {
+ ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
index 5a76661180f22..e01bd2a9f7c85 100644
--- a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
+++ b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
@@ -12,33 +12,33 @@ entry:
store i32 %in, i32* %in.addr, align 4
call void @llvm.dbg.declare(metadata i32* %in.addr, metadata !11, metadata !12), !dbg !13
call void @llvm.dbg.declare(metadata i32 %in, metadata !11, metadata !12), !dbg !13
- ret void, !dbg !14
+ ret void, !dbg !13
}
; CHECK-LABEL: name: debug_declare_vla
-; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !11, !12, debug-location !13
-define void @debug_declare_vla(i32 %in) #0 !dbg !7 {
+; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !15, !12, debug-location !16
+define void @debug_declare_vla(i32 %in) #0 !dbg !14 {
entry:
%vla.addr = alloca i32, i32 %in
- call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !11, metadata !12), !dbg !13
- ret void, !dbg !14
+ call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !15, metadata !12), !dbg !16
+ ret void, !dbg !16
}
; CHECK-LABEL: name: debug_value
; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
-define void @debug_value(i32 %in) #0 !dbg !7 {
+define void @debug_value(i32 %in) #0 !dbg !17 {
%addr = alloca i32
-; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !11, metadata !12), !dbg !13
+; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !18, metadata !12), !dbg !19
store i32 %in, i32* %addr
-; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !11, !15, debug-location !13
- call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !11, metadata !15), !dbg !13
-; CHECK: DBG_VALUE 123, 0, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !11, metadata !12), !dbg !13
-; CHECK: DBG_VALUE float 1.000000e+00, 0, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !11, metadata !12), !dbg !13
-; CHECK: DBG_VALUE _, 0, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !11, metadata !12), !dbg !13
+; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !18, !20, debug-location !19
+ call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !18, metadata !20), !dbg !19
+; CHECK: DBG_VALUE 123, 0, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !18, metadata !12), !dbg !19
+; CHECK: DBG_VALUE float 1.000000e+00, 0, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !18, metadata !12), !dbg !19
+; CHECK: DBG_VALUE _, 0, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !18, metadata !12), !dbg !19
ret void
}
@@ -64,5 +64,10 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
!11 = !DILocalVariable(name: "in", arg: 1, scope: !7, file: !1, line: 1, type: !10)
!12 = !DIExpression()
!13 = !DILocation(line: 1, column: 14, scope: !7)
-!14 = !DILocation(line: 2, column: 1, scope: !7)
-!15 = !DIExpression(DW_OP_deref)
+!14 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!15 = !DILocalVariable(name: "in", arg: 1, scope: !14, file: !1, line: 1, type: !10)
+!16 = !DILocation(line: 1, column: 14, scope: !14)
+!17 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!18 = !DILocalVariable(name: "in", arg: 1, scope: !17, file: !1, line: 1, type: !10)
+!19 = !DILocation(line: 1, column: 14, scope: !17)
+!20 = !DIExpression(DW_OP_deref)
diff --git a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
index 2f36ec8d2aaa9..790cd6517dd3a 100644
--- a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
+++ b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
@@ -9,8 +9,8 @@
ret void
}
- define void @test_dbg_value_dead(i32 %a) !dbg !5 {
- call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !9), !dbg !10
+ define void @test_dbg_value_dead(i32 %a) !dbg !11 {
+ call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !12, metadata !9), !dbg !13
ret void
}
@@ -30,6 +30,9 @@
!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!9 = !DIExpression()
!10 = !DILocation(line: 1, column: 1, scope: !5)
+ !11 = distinct !DISubprogram(name: "test_dbg_value", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+ !12 = !DILocalVariable(name: "in", arg: 1, scope: !11, file: !1, line: 1, type: !8)
+ !13 = !DILocation(line: 1, column: 1, scope: !11)
...
---
diff --git a/test/CodeGen/AArch64/fadd-combines.ll b/test/CodeGen/AArch64/fadd-combines.ll
new file mode 100644
index 0000000000000..c106f293ccffb
--- /dev/null
+++ b/test/CodeGen/AArch64/fadd-combines.ll
@@ -0,0 +1,78 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: test1:
+; CHECK: fadd d1, d1, d1
+; CHECK: fsub d0, d0, d1
+define double @test1(double %a, double %b) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %b, -2.000000e+00
+ %add1 = fadd double %a, %mul
+ ret double %add1
+}
+
+; DAGCombine will canonicalize 'a - 2.0*b' to 'a + -2.0*b'
+; CHECK-LABEL: test2:
+; CHECK: fadd d1, d1, d1
+; CHECK: fsub d0, d0, d1
+define double @test2(double %a, double %b) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %b, 2.000000e+00
+ %add1 = fsub double %a, %mul
+ ret double %add1
+}
+
+; CHECK-LABEL: test3:
+; CHECK: fmul d0, d0, d1
+; CHECK: fadd d1, d2, d2
+; CHECK: fsub d0, d0, d1
+define double @test3(double %a, double %b, double %c) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %a, %b
+ %mul1 = fmul double %c, 2.000000e+00
+ %sub = fsub double %mul, %mul1
+ ret double %sub
+}
+
+; CHECK-LABEL: test4:
+; CHECK: fmul d0, d0, d1
+; CHECK: fadd d1, d2, d2
+; CHECK: fsub d0, d0, d1
+define double @test4(double %a, double %b, double %c) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %a, %b
+ %mul1 = fmul double %c, -2.000000e+00
+ %add2 = fadd double %mul, %mul1
+ ret double %add2
+}
+
+; CHECK-LABEL: test5:
+; CHECK: fadd v1.4s, v1.4s, v1.4s
+; CHECK: fsub v0.4s, v0.4s, v1.4s
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+ %mul = fmul <4 x float> %b, <float -2.0, float -2.0, float -2.0, float -2.0>
+ %add = fadd <4 x float> %a, %mul
+ ret <4 x float> %add
+}
+
+; CHECK-LABEL: test6:
+; CHECK: fadd v1.4s, v1.4s, v1.4s
+; CHECK: fsub v0.4s, v0.4s, v1.4s
+define <4 x float> @test6(<4 x float> %a, <4 x float> %b) {
+ %mul = fmul <4 x float> %b, <float 2.0, float 2.0, float 2.0, float 2.0>
+ %add = fsub <4 x float> %a, %mul
+ ret <4 x float> %add
+}
+
+; Don't fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B)) if the fmul has
+; multiple uses.
+; CHECK-LABEL: test7:
+; CHECK: fmul
+define double @test7(double %a, double %b) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %b, -2.000000e+00
+ %add1 = fadd double %a, %mul
+ call void @use(double %mul)
+ ret double %add1
+}
+
+declare void @use(double)
diff --git a/test/CodeGen/AArch64/loh.mir b/test/CodeGen/AArch64/loh.mir
index 1d08ebdc5790a..6e4bb5cfaee6d 100644
--- a/test/CodeGen/AArch64/loh.mir
+++ b/test/CodeGen/AArch64/loh.mir
@@ -180,7 +180,6 @@ body: |
%x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5
bb.13:
- successors: %bb.14
; Cannot produce a LOH for multiple users
; CHECK-NOT: MCLOH_AdrpAdd
%x10 = ADRP target-flags(aarch64-page) @g0
diff --git a/test/CodeGen/AArch64/machine-copy-remove.mir b/test/CodeGen/AArch64/machine-copy-remove.mir
index 6f2d3a3009b02..50c03ddb40374 100644
--- a/test/CodeGen/AArch64/machine-copy-remove.mir
+++ b/test/CodeGen/AArch64/machine-copy-remove.mir
@@ -7,20 +7,16 @@ name: test1
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
%x0 = COPY %x1
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -38,20 +34,16 @@ name: test2
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
%x1 = COPY %x0
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -69,7 +61,6 @@ name: test3
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x0 = COPY %x1
@@ -77,13 +68,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -101,7 +89,6 @@ name: test4
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x1 = COPY %x0
@@ -109,13 +96,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -133,7 +117,6 @@ name: test5
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x1 = COPY %x0
@@ -141,13 +124,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -165,7 +145,6 @@ name: test6
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x2 = COPY %x0
@@ -173,13 +152,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -197,7 +173,6 @@ name: test7
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x2 = COPY %x0
@@ -206,13 +181,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -232,14 +204,12 @@ name: test8
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
%x1 = COPY %x0
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
liveins: %x0, %x2
%x0, %x1 = LDPXi %x2, 0
@@ -248,7 +218,6 @@ body: |
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -267,20 +236,17 @@ name: test9
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
CBNZX %x0, %bb.2
bb.1:
- successors: %bb.3
liveins: %x0, %x2
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.1, %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -304,7 +270,6 @@ name: test10
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -312,7 +277,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -332,7 +296,6 @@ name: test11
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
@@ -340,7 +303,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7, implicit-def %x0
@@ -360,7 +322,6 @@ name: test12
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
@@ -368,7 +329,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -388,7 +348,6 @@ name: test13
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -396,7 +355,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7, implicit-def %x0
@@ -413,7 +371,6 @@ name: test14
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1, %x2
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -423,7 +380,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -440,7 +396,6 @@ name: test15
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1, %x2
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -448,7 +403,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1, %x2
%w0 = LDRWui %x1, 0
@@ -467,7 +421,6 @@ name: test16
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri %w0, 7, 0, implicit-def %nzcv
@@ -476,7 +429,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w2 = MOVi32imm 7
@@ -493,7 +445,6 @@ name: test17
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %w0 = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -501,7 +452,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -520,14 +470,12 @@ name: test18
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
CBNZX killed %x0, %bb.2
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%x0 = MOVi64imm 4252017623040
@@ -547,7 +495,6 @@ name: test19
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
@@ -555,7 +502,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm -1
@@ -575,7 +521,6 @@ name: test20
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
@@ -583,7 +528,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%x0 = MOVi64imm -1
@@ -603,7 +547,6 @@ name: test21
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
@@ -611,7 +554,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm -1
@@ -629,7 +571,6 @@ name: test22
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
@@ -637,7 +578,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%x0 = MOVi64imm -1
@@ -654,7 +594,6 @@ name: test23
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri killed %w0, 1, 12, implicit-def %nzcv
@@ -662,7 +601,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 4096
diff --git a/test/CodeGen/AArch64/machine-sink-zr.mir b/test/CodeGen/AArch64/machine-sink-zr.mir
index 535fba0dc63bc..2cf2bc488237f 100644
--- a/test/CodeGen/AArch64/machine-sink-zr.mir
+++ b/test/CodeGen/AArch64/machine-sink-zr.mir
@@ -17,7 +17,6 @@ body: |
; CHECK-LABEL: bb.0:
; CHECK-NOT: COPY %wzr
bb.0:
- successors: %bb.3, %bb.1
liveins: %w0
%0 = COPY %w0
@@ -28,13 +27,9 @@ body: |
; CHECK: COPY %wzr
bb.1:
- successors: %bb.2
-
B %bb.2
bb.2:
- successors: %bb.3, %bb.2
-
%2 = PHI %0, %bb.1, %4, %bb.2
%w0 = COPY %1
%3 = SUBSWri %2, 1, 0, implicit-def dead %nzcv
diff --git a/test/CodeGen/AArch64/regcoal-physreg.mir b/test/CodeGen/AArch64/regcoal-physreg.mir
index 813106366968d..f88b7482acacf 100644
--- a/test/CodeGen/AArch64/regcoal-physreg.mir
+++ b/test/CodeGen/AArch64/regcoal-physreg.mir
@@ -93,7 +93,6 @@ body: |
name: func1
body: |
bb.0:
- successors: %bb.1, %bb.2
; Cannot coalesce physreg because we have reads on other CFG paths (we
; currently abort for any control flow)
; CHECK-NOT: %fp = SUBXri
@@ -117,7 +116,6 @@ body: |
name: func2
body: |
bb.0:
- successors: %bb.1, %bb.2
; We can coalesce copies from physreg to vreg across multiple blocks.
; CHECK-NOT: COPY
; CHECK: CBZX undef %x0, %bb.1
diff --git a/test/CodeGen/AArch64/xray-attribute-instrumentation.ll b/test/CodeGen/AArch64/xray-attribute-instrumentation.ll
index d0f5f40e156c9..38b62a72a20f5 100644
--- a/test/CodeGen/AArch64/xray-attribute-instrumentation.ll
+++ b/test/CodeGen/AArch64/xray-attribute-instrumentation.ll
@@ -26,6 +26,7 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
}
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_0
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .xword .Lxray_sled_0
diff --git a/test/CodeGen/AArch64/xray-tail-call-sled.ll b/test/CodeGen/AArch64/xray-tail-call-sled.ll
index 6ada3ce8d551b..fb89950b99c84 100644
--- a/test/CodeGen/AArch64/xray-tail-call-sled.ll
+++ b/test/CodeGen/AArch64/xray-tail-call-sled.ll
@@ -29,10 +29,16 @@ define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-alway
}
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_0
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .xword .Lxray_sled_0
; CHECK: .xword .Lxray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section xray_fn_idx,{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_synthetic_end0
define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
; CHECK: .p2align 2
@@ -63,7 +69,13 @@ define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-alway
}
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_1
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_1:
; CHECK: .xword .Lxray_sled_2
; CHECK: .xword .Lxray_sled_3
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section xray_fn_idx,{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .xword .Lxray_synthetic_end1
diff --git a/test/CodeGen/AMDGPU/detect-dead-lanes.mir b/test/CodeGen/AMDGPU/detect-dead-lanes.mir index 32e6f7cc0cdc7..3148b9b8ff9db 100644 --- a/test/CodeGen/AMDGPU/detect-dead-lanes.mir +++ b/test/CodeGen/AMDGPU/detect-dead-lanes.mir @@ -294,7 +294,6 @@ registers: - { id: 5, class: sreg_128 } body: | bb.0: - successors: %bb.1 S_NOP 0, implicit-def %0 S_NOP 0, implicit-def %1 S_NOP 0, implicit-def %2 @@ -302,7 +301,6 @@ body: | S_BRANCH %bb.1 bb.1: - successors: %bb.1, %bb.2 %4 = PHI %3, %bb.0, %5, %bb.1 ; let's swiffle some lanes around for fun... @@ -348,7 +346,6 @@ registers: - { id: 6, class: sreg_128 } body: | bb.0: - successors: %bb.1 S_NOP 0, implicit-def %0 S_NOP 0, implicit-def %1 S_NOP 0, implicit-def dead %2 @@ -357,7 +354,6 @@ body: | S_BRANCH %bb.1 bb.1: - successors: %bb.1, %bb.2 %5 = PHI %4, %bb.0, %6, %bb.1 ; rotate lanes, but skip sub2 lane... @@ -396,13 +392,11 @@ registers: - { id: 3, class: sreg_128 } body: | bb.0: - successors: %bb.1 S_NOP 0, implicit-def %0 %1 = REG_SEQUENCE %0, %subreg.sub0 S_BRANCH %bb.1 bb.1: - successors: %bb.1, %bb.2 %2 = PHI %1, %bb.0, %3, %bb.1 ; rotate subreg lanes, skipping sub1 diff --git a/test/CodeGen/AMDGPU/fmuladd.f32.ll b/test/CodeGen/AMDGPU/fmuladd.f32.ll index fb605dd2e4bd4..e422550266924 100644 --- a/test/CodeGen/AMDGPU/fmuladd.f32.ll +++ b/test/CodeGen/AMDGPU/fmuladd.f32.ll @@ -191,8 +191,8 @@ define amdgpu_kernel void @fadd_b_a_a_f32(float addrspace(1)* %out, ; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]] -; GCN-DENORM-SLOWFMA: v_mul_f32_e32 [[TMP:v[0-9]+]], -2.0, [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] +; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] +; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] ; SI-DENORM: buffer_store_dword [[RESULT]] ; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] @@ -251,8 +251,8 @@ define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, ; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], -[[R1]], 2.0, [[R2]] -; GCN-DENORM-SLOWFMA: v_mul_f32_e32 [[TMP:v[0-9]+]], -2.0, [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] +; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] +; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] ; SI-DENORM: buffer_store_dword [[RESULT]] ; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] diff --git a/test/CodeGen/AMDGPU/inserted-wait-states.mir b/test/CodeGen/AMDGPU/inserted-wait-states.mir index 1479303712d0f..c6fe6debd225a 100644 --- a/test/CodeGen/AMDGPU/inserted-wait-states.mir +++ b/test/CodeGen/AMDGPU/inserted-wait-states.mir @@ -77,19 +77,16 @@ name: div_fmas body: | bb.0: - successors: %bb.1 %vcc = S_MOV_B64 0 %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec S_BRANCH %bb.1 bb.1: - successors: %bb.2 implicit %vcc = V_CMP_EQ_I32_e32 %vgpr1, %vgpr2, implicit %exec %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec S_BRANCH %bb.2 bb.2: - successors: %bb.3 %vcc = V_CMP_EQ_I32_e64 %vgpr1, %vgpr2, implicit %exec %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec S_BRANCH %bb.3 @@ -130,19 +127,16 @@ name: s_getreg body: | bb.0: - successors: %bb.1 S_SETREG_B32 %sgpr0, 1 %sgpr1 = S_GETREG_B32 1 S_BRANCH %bb.1 bb.1: - successors: %bb.2 S_SETREG_IMM32_B32 0, 1 %sgpr1 = S_GETREG_B32 1 S_BRANCH %bb.2 bb.2: - successors: 
%bb.3 S_SETREG_B32 %sgpr0, 1 %sgpr1 = S_MOV_B32 0 %sgpr2 = S_GETREG_B32 1 @@ -178,13 +172,11 @@ name: s_setreg body: | bb.0: - successors: %bb.1 S_SETREG_B32 %sgpr0, 1 S_SETREG_B32 %sgpr1, 1 S_BRANCH %bb.1 bb.1: - successors: %bb.2 S_SETREG_B32 %sgpr0, 64 S_SETREG_B32 %sgpr1, 128 S_BRANCH %bb.2 @@ -237,7 +229,6 @@ name: vmem_gt_8dw_store body: | bb.0: - successors: %bb.1 BUFFER_STORE_DWORD_OFFSET %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec %vgpr3 = V_MOV_B32_e32 0, implicit %exec BUFFER_STORE_DWORDX3_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec @@ -310,19 +301,16 @@ name: readwrite_lane body: | bb.0: - successors: %bb.1 %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0 S_BRANCH %bb.1 bb.1: - successors: %bb.2 %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0 S_BRANCH %bb.2 bb.2: - successors: %bb.3 %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo S_BRANCH %bb.3 @@ -352,7 +340,6 @@ name: rfe body: | bb.0: - successors: %bb.1 S_SETREG_B32 %sgpr0, 3 S_RFE_B64 %sgpr2_sgpr3 S_BRANCH %bb.1 @@ -382,7 +369,6 @@ name: s_mov_fed_b32 body: | bb.0: - successors: %bb.1 %sgpr0 = S_MOV_FED_B32 %sgpr0 %sgpr0 = S_MOV_B32 %sgpr0 S_BRANCH %bb.1 @@ -423,19 +409,16 @@ name: s_movrel body: | bb.0: - successors: %bb.1 %m0 = S_MOV_B32 0 %sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0 S_BRANCH %bb.1 bb.1: - successors: %bb.2 %m0 = S_MOV_B32 0 %sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0 S_BRANCH %bb.2 bb.2: - successors: %bb.3 %m0 = S_MOV_B32 0 %sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0 S_BRANCH %bb.3 @@ -475,19 +458,16 @@ name: v_interp body: | bb.0: - successors: %bb.1 %m0 = S_MOV_B32 0 %vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec S_BRANCH %bb.1 bb.1: - successors: %bb.2 %m0 = S_MOV_B32 0 %vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec S_BRANCH %bb.2 bb.2: - successors: %bb.3 %m0 = S_MOV_B32 0 %vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec S_BRANCH %bb.3 diff --git a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir index bc1dafe0ea1e2..67642282f75b0 100644 --- a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir +++ b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir @@ -53,7 +53,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.2.if, %bb.1.else liveins: %sgpr0_sgpr1 %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) @@ -62,7 +61,6 @@ body: | S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc bb.1.else: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 100, implicit %exec @@ -71,7 +69,6 @@ body: | S_BRANCH %bb.3.done bb.2.if: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 9, implicit %exec diff --git a/test/CodeGen/AMDGPU/lds-size.ll b/test/CodeGen/AMDGPU/lds-size.ll index c65817abd489d..ff78c3bcb18cf 100644 --- a/test/CodeGen/AMDGPU/lds-size.ll +++ b/test/CodeGen/AMDGPU/lds-size.ll @@ -1,4 +1,5 @@ ; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=ALL -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefix=ALL -check-prefix=HSA %s ; RUN: llc 
-march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=ALL -check-prefix=EG %s ; This test makes sure we do not double count global values when they are @@ -11,6 +12,9 @@ ; EG-NEXT: .long 1 ; ALL: {{^}}test: +; HSA: granulated_lds_size = 0 +; HSA: workgroup_group_segment_byte_size = 4 + ; GCN: ; LDSByteSize: 4 bytes/workgroup (compile time only) @lds = internal unnamed_addr addrspace(3) global i32 undef, align 4 diff --git a/test/CodeGen/AMDGPU/liveness.mir b/test/CodeGen/AMDGPU/liveness.mir index 48762e3f2ab42..6fd8466492d08 100644 --- a/test/CodeGen/AMDGPU/liveness.mir +++ b/test/CodeGen/AMDGPU/liveness.mir @@ -16,13 +16,11 @@ registers: - { id: 0, class: sreg_64 } body: | bb.0: - successors: %bb.1, %bb.2 S_NOP 0, implicit-def undef %0.sub0 S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc S_BRANCH %bb.2 bb.1: - successors: %bb.2 S_NOP 0, implicit-def %0.sub1 S_NOP 0, implicit %0.sub1 S_BRANCH %bb.2 diff --git a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll b/test/CodeGen/AMDGPU/local-stack-slot-bug.ll deleted file mode 100644 index d3e0f0be4b5f3..0000000000000 --- a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll +++ /dev/null @@ -1,26 +0,0 @@ -; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s - -; This used to fail due to a v_add_i32 instruction with an illegal immediate -; operand that was created during Local Stack Slot Allocation. Test case derived -; from https://bugs.freedesktop.org/show_bug.cgi?id=96602 -; -; CHECK-LABEL: {{^}}main: - -; CHECK-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200 -; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}} -; CHECK-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0 -; CHECK-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]] - -; CHECK-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]] -; CHECK-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]] - -; CHECK: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen -; CHECK: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen -define amdgpu_ps float @main(i32 %idx) { -main_body: - %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, 
float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx - %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx - %r = fadd float %v1, %v2 - ret float %r -} diff --git a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir index 2de6b59e59e96..b5dc9d9dac841 100644 --- a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir +++ b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir @@ -176,7 +176,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -189,7 +188,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -236,7 +234,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -248,7 +245,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -295,7 +291,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -307,7 +302,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -356,7 +350,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -370,7 +363,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -418,7 +410,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr6 = S_MOV_B32 -1 @@ -433,7 +424,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7 %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 
0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`) @@ -480,7 +470,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -494,7 +483,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -544,7 +532,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -557,7 +544,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1, %sgpr2_sgpr3 S_SLEEP 0, implicit %sgpr2_sgpr3 %sgpr7 = S_MOV_B32 61440 @@ -606,7 +592,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -618,7 +603,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -665,7 +649,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -677,7 +660,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 @@ -724,7 +706,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.main_body: - successors: %bb.1.if, %bb.2.end liveins: %vgpr0 %sgpr0_sgpr1 = COPY %exec @@ -736,7 +717,6 @@ body: | S_BRANCH %bb.1.if bb.1.if: - successors: %bb.2.end liveins: %sgpr0_sgpr1 %sgpr7 = S_MOV_B32 61440 diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs.mir b/test/CodeGen/AMDGPU/rename-independent-subregs.mir index fc2e4426ba48f..31ad26e769796 100644 --- a/test/CodeGen/AMDGPU/rename-independent-subregs.mir +++ b/test/CodeGen/AMDGPU/rename-independent-subregs.mir @@ -49,7 +49,6 @@ registers: - { id: 1, class: sreg_128 } body: | bb.0: - successors: %bb.1, %bb.2 S_NOP 0, implicit-def undef %0.sub2 S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc S_BRANCH %bb.2 diff --git a/test/CodeGen/AMDGPU/scratch-simple.ll b/test/CodeGen/AMDGPU/scratch-simple.ll new file mode 100644 index 0000000000000..60b9b56a48d1f --- /dev/null +++ b/test/CodeGen/AMDGPU/scratch-simple.ll @@ -0,0 +1,103 @@ +; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s +; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s + +; This used to fail due to a v_add_i32 instruction with an illegal immediate +; operand that was created during Local Stack Slot Allocation. 
Test case derived +; from https://bugs.freedesktop.org/show_bug.cgi?id=96602 +; +; GCN-LABEL: {{^}}ps_main: + +; GCN-DAG: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200 +; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}} +; GCN-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0 +; GCN-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]] + +; GCN-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]] +; GCN-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]] + +; GCN: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_ps float @ps_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, 
float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}vs_main: +; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_vs float @vs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, 
float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}cs_main: +; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_cs float @cs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}hs_main: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], 
s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_hs float @hs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}gs_main: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s0 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, 
{{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +define amdgpu_gs float @gs_main(i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %r = fadd float %v1, %v2 + ret float %r +} + +; GCN-LABEL: {{^}}hs_ir_uses_scratch_offset: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s6 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: s_mov_b32 s2, s5 +define amdgpu_hs <{i32, i32, i32, float}> 
@hs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %f = fadd float %v1, %v2 + %r1 = insertvalue <{i32, i32, i32, float}> undef, i32 %swo, 2 + %r2 = insertvalue <{i32, i32, i32, float}> %r1, float %f, 3 + ret <{i32, i32, i32, float}> %r2 +} + +; GCN-LABEL: {{^}}gs_ir_uses_scratch_offset: +; SI: s_mov_b32 [[SWO:s[0-9]+]], s6 +; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5 +; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: 
buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen +; GCN: s_mov_b32 s2, s5 +define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) { + %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx + %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx + %f = fadd float %v1, %v2 + %r1 = insertvalue <{i32, i32, i32, float}> undef, i32 %swo, 2 + %r2 = insertvalue <{i32, i32, i32, float}> %r1, float %f, 3 + ret <{i32, i32, i32, float}> %r2 +} diff --git a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir 
b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir index 20052e865a54e..18176de53793b 100644 --- a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir +++ b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir @@ -20,12 +20,10 @@ body: | ; GCN: V_ADD_I32 bb.0: liveins: %vgpr0 - successors: %bb.1 %7 = COPY %vgpr0 %8 = S_MOV_B32 0 bb.1: - successors: %bb.1, %bb.2 %0 = PHI %8, %bb.0, %0, %bb.1, %2, %bb.2 %9 = V_MOV_B32_e32 9, implicit %exec %10 = V_CMP_EQ_U32_e64 %7, %9, implicit %exec @@ -33,7 +31,6 @@ body: | S_BRANCH %bb.1 bb.2: - successors: %bb.1 SI_END_CF %1, implicit-def %exec, implicit-def %scc, implicit %exec %11 = S_MOV_B32 1 %2 = S_ADD_I32 %0, %11, implicit-def %scc diff --git a/test/CodeGen/AMDGPU/subreg-intervals.mir b/test/CodeGen/AMDGPU/subreg-intervals.mir index c477fe9bc6d34..62816da25b2c4 100644 --- a/test/CodeGen/AMDGPU/subreg-intervals.mir +++ b/test/CodeGen/AMDGPU/subreg-intervals.mir @@ -31,17 +31,14 @@ registers: - { id: 0, class: sreg_64 } body: | bb.0: - successors: %bb.1, %bb.2 S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc S_BRANCH %bb.2 bb.1: - successors: %bb.3 S_NOP 0, implicit-def undef %0.sub0 S_BRANCH %bb.3 bb.2: - successors: %bb.3 S_NOP 0, implicit-def %0 S_BRANCH %bb.3 diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir index 5e5465800c3a3..6eb937e71b1b6 100644 --- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir +++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir @@ -75,7 +75,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.2.if, %bb.1.else liveins: %sgpr0_sgpr1 %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`) @@ -86,7 +85,6 @@ body: | S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc bb.2.if: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 9, implicit %exec @@ -95,7 +93,6 @@ body: | S_BRANCH %bb.3.done bb.1.else: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 100, implicit %exec @@ -141,7 +138,6 @@ frameInfo: hasMustTailInVarArgFunc: false body: | bb.0.entry: - successors: %bb.2.if, %bb.1.else liveins: %sgpr0_sgpr1 %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) @@ -150,7 +146,6 @@ body: | S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc bb.2.if: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 9, implicit %exec @@ -159,7 +154,6 @@ body: | S_BRANCH %bb.3.done bb.1.else: - successors: %bb.3.done liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 %vgpr0 = V_MOV_B32_e32 100, implicit %exec diff --git a/test/CodeGen/AMDGPU/waitcnt-looptest.ll b/test/CodeGen/AMDGPU/waitcnt-looptest.ll new file mode 100644 index 0000000000000..2a3ce4dfd191b --- /dev/null +++ b/test/CodeGen/AMDGPU/waitcnt-looptest.ll @@ -0,0 +1,146 @@ +; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s + +; Check that the waitcnt insertion algorithm correctly propagates wait counts +; from before a loop to the loop header. 
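+; As an illustrative sketch only (not literal output checked below): if the
+; code before the loop leaves a vector load outstanding, the s_waitcnt placed
+; in the loop header must still cover that load on the first trip, e.g.
+;   buffer_load_dword v0, ...   ; issued before the loop, vmcnt pending
+; BB0_1:                        ; loop header
+;   s_waitcnt vmcnt(0)          ; must also cover the pre-loop load
+;   v_cmp_eq_f32_e32 vcc, v0, v1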
+ +; GCN-LABEL: {{^}}testKernel +; GCN: BB0_1: +; GCN: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_f32_e64 +; GCN: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_f32_e32 +; GCN: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_f32_e32 + +@data_generic = addrspace(1) global [100 x float] [float 0.000000e+00, float 0x3FB99999A0000000, float 0x3FC99999A0000000, float 0x3FD3333340000000, float 0x3FD99999A0000000, float 5.000000e-01, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000, float 0x3FECCCCCC0000000, float 1.000000e+00, float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000, float 1.500000e+00, float 0x3FF99999A0000000, float 0x3FFB333340000000, float 0x3FFCCCCCC0000000, float 0x3FFE666660000000, float 2.000000e+00, float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000, float 2.500000e+00, float 0x4004CCCCC0000000, float 0x40059999A0000000, float 0x4006666660000000, float 0x4007333340000000, float 3.000000e+00, float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000, float 3.500000e+00, float 0x400CCCCCC0000000, float 0x400D9999A0000000, float 0x400E666660000000, float 0x400F333340000000, float 4.000000e+00, float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000, float 4.500000e+00, float 0x4012666660000000, float 0x4012CCCCC0000000, float 0x4013333340000000, float 0x40139999A0000000, float 5.000000e+00, float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000, float 5.500000e+00, float 0x4016666660000000, float 0x4016CCCCC0000000, float 0x4017333340000000, float 0x40179999A0000000, float 6.000000e+00, float 0x4018666660000000, float 0x4018CCCCC0000000, float 0x4019333340000000, float 0x40199999A0000000, float 6.500000e+00, float 0x401A666660000000, float 0x401ACCCCC0000000, float 0x401B333340000000, float 0x401B9999A0000000, float 7.000000e+00, float 0x401C666660000000, float 0x401CCCCCC0000000, float 0x401D333340000000, float 0x401D9999A0000000, float 7.500000e+00, float 0x401E666660000000, float 0x401ECCCCC0000000, float 0x401F333340000000, float 0x401F9999A0000000, float 8.000000e+00, float 0x4020333340000000, float 0x4020666660000000, float 0x40209999A0000000, float 0x4020CCCCC0000000, float 8.500000e+00, float 0x4021333340000000, float 0x4021666660000000, float 0x40219999A0000000, float 0x4021CCCCC0000000, float 9.000000e+00, float 0x4022333340000000, float 0x4022666660000000, float 0x40229999A0000000, float 0x4022CCCCC0000000, float 9.500000e+00, float 0x4023333340000000, float 0x4023666660000000, float 0x40239999A0000000, float 0x4023CCCCC0000000], align 4 +@data_reference = addrspace(1) global [100 x float] [float 0.000000e+00, float 0x3FB99999A0000000, float 0x3FC99999A0000000, float 0x3FD3333340000000, float 0x3FD99999A0000000, float 5.000000e-01, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000, float 0x3FECCCCCC0000000, float 1.000000e+00, float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000, float 1.500000e+00, float 0x3FF99999A0000000, float 0x3FFB333340000000, float 0x3FFCCCCCC0000000, float 0x3FFE666660000000, float 2.000000e+00, float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000, float 2.500000e+00, float 0x4004CCCCC0000000, float 0x40059999A0000000, float 
0x4006666660000000, float 0x4007333340000000, float 3.000000e+00, float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000, float 3.500000e+00, float 0x400CCCCCC0000000, float 0x400D9999A0000000, float 0x400E666660000000, float 0x400F333340000000, float 4.000000e+00, float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000, float 4.500000e+00, float 0x4012666660000000, float 0x4012CCCCC0000000, float 0x4013333340000000, float 0x40139999A0000000, float 5.000000e+00, float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000, float 5.500000e+00, float 0x4016666660000000, float 0x4016CCCCC0000000, float 0x4017333340000000, float 0x40179999A0000000, float 6.000000e+00, float 0x4018666660000000, float 0x4018CCCCC0000000, float 0x4019333340000000, float 0x40199999A0000000, float 6.500000e+00, float 0x401A666660000000, float 0x401ACCCCC0000000, float 0x401B333340000000, float 0x401B9999A0000000, float 7.000000e+00, float 0x401C666660000000, float 0x401CCCCCC0000000, float 0x401D333340000000, float 0x401D9999A0000000, float 7.500000e+00, float 0x401E666660000000, float 0x401ECCCCC0000000, float 0x401F333340000000, float 0x401F9999A0000000, float 8.000000e+00, float 0x4020333340000000, float 0x4020666660000000, float 0x40209999A0000000, float 0x4020CCCCC0000000, float 8.500000e+00, float 0x4021333340000000, float 0x4021666660000000, float 0x40219999A0000000, float 0x4021CCCCC0000000, float 9.000000e+00, float 0x4022333340000000, float 0x4022666660000000, float 0x40229999A0000000, float 0x4022CCCCC0000000, float 9.500000e+00, float 0x4023333340000000, float 0x4023666660000000, float 0x40239999A0000000, float 0x4023CCCCC0000000], align 4 + +define amdgpu_kernel void @testKernel(i32 addrspace(1)* nocapture %arg) local_unnamed_addr #0 { +bb: + store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> addrspace(4)* bitcast (float addrspace(4)* getelementptr ([100 x float], [100 x float] addrspace(4)* addrspacecast ([100 x float] addrspace(1)* @data_generic to [100 x float] addrspace(4)*), i64 0, i64 4) to <2 x float> addrspace(4)*), align 4 + store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> addrspace(4)* bitcast (float addrspace(4)* getelementptr ([100 x float], [100 x float] addrspace(4)* addrspacecast ([100 x float] addrspace(1)* @data_reference to [100 x float] addrspace(4)*), i64 0, i64 4) to <2 x float> addrspace(4)*), align 4 + br label %bb18 + +bb1: ; preds = %bb18 + %tmp = tail call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() + %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = tail call i32 @llvm.amdgcn.workgroup.id.x() + %tmp4 = getelementptr inbounds i8, i8 addrspace(2)* %tmp, i64 4 + %tmp5 = bitcast i8 addrspace(2)* %tmp4 to i16 addrspace(2)* + %tmp6 = load i16, i16 addrspace(2)* %tmp5, align 4 + %tmp7 = zext i16 %tmp6 to i32 + %tmp8 = mul i32 %tmp3, %tmp7 + %tmp9 = add i32 %tmp8, %tmp2 + %tmp10 = tail call i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() + %tmp11 = zext i32 %tmp9 to i64 + %tmp12 = bitcast i8 addrspace(2)* %tmp10 to i64 addrspace(2)* + %tmp13 = load i64, i64 addrspace(2)* %tmp12, align 8 + %tmp14 = add i64 %tmp13, %tmp11 + %tmp15 = zext i1 %tmp99 to i32 + %tmp16 = and i64 %tmp14, 4294967295 + %tmp17 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp16 + store i32 %tmp15, i32 addrspace(1)* %tmp17, align 4 + ret void + +bb18: ; preds = %bb18, %bb + %tmp19 = phi i64 [ 0, %bb ], [ 
%tmp102, %bb18 ] + %tmp20 = phi i32 [ 0, %bb ], [ %tmp100, %bb18 ] + %tmp21 = phi i1 [ true, %bb ], [ %tmp99, %bb18 ] + %tmp22 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp19 + %tmp23 = load float, float addrspace(1)* %tmp22, align 4 + %tmp24 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp19 + %tmp25 = load float, float addrspace(1)* %tmp24, align 4 + %tmp26 = fcmp oeq float %tmp23, %tmp25 + %tmp27 = and i1 %tmp21, %tmp26 + %tmp28 = or i32 %tmp20, 1 + %tmp29 = sext i32 %tmp28 to i64 + %tmp30 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp29 + %tmp31 = load float, float addrspace(1)* %tmp30, align 4 + %tmp32 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp29 + %tmp33 = load float, float addrspace(1)* %tmp32, align 4 + %tmp34 = fcmp oeq float %tmp31, %tmp33 + %tmp35 = and i1 %tmp27, %tmp34 + %tmp36 = add nuw nsw i32 %tmp20, 2 + %tmp37 = sext i32 %tmp36 to i64 + %tmp38 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp37 + %tmp39 = load float, float addrspace(1)* %tmp38, align 4 + %tmp40 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp37 + %tmp41 = load float, float addrspace(1)* %tmp40, align 4 + %tmp42 = fcmp oeq float %tmp39, %tmp41 + %tmp43 = and i1 %tmp35, %tmp42 + %tmp44 = add nuw nsw i32 %tmp20, 3 + %tmp45 = sext i32 %tmp44 to i64 + %tmp46 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp45 + %tmp47 = load float, float addrspace(1)* %tmp46, align 4 + %tmp48 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp45 + %tmp49 = load float, float addrspace(1)* %tmp48, align 4 + %tmp50 = fcmp oeq float %tmp47, %tmp49 + %tmp51 = and i1 %tmp43, %tmp50 + %tmp52 = add nuw nsw i32 %tmp20, 4 + %tmp53 = sext i32 %tmp52 to i64 + %tmp54 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp53 + %tmp55 = load float, float addrspace(1)* %tmp54, align 4 + %tmp56 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp53 + %tmp57 = load float, float addrspace(1)* %tmp56, align 4 + %tmp58 = fcmp oeq float %tmp55, %tmp57 + %tmp59 = and i1 %tmp51, %tmp58 + %tmp60 = add nuw nsw i32 %tmp20, 5 + %tmp61 = sext i32 %tmp60 to i64 + %tmp62 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp61 + %tmp63 = load float, float addrspace(1)* %tmp62, align 4 + %tmp64 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp61 + %tmp65 = load float, float addrspace(1)* %tmp64, align 4 + %tmp66 = fcmp oeq float %tmp63, %tmp65 + %tmp67 = and i1 %tmp59, %tmp66 + %tmp68 = add nuw nsw i32 %tmp20, 6 + %tmp69 = sext i32 %tmp68 to i64 + %tmp70 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp69 + %tmp71 = load float, float addrspace(1)* %tmp70, align 4 + %tmp72 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp69 + %tmp73 = load float, float addrspace(1)* %tmp72, align 4 + %tmp74 = fcmp oeq float %tmp71, %tmp73 + %tmp75 = and i1 %tmp67, %tmp74 + %tmp76 = add nuw nsw i32 %tmp20, 7 + %tmp77 = sext i32 %tmp76 to i64 + %tmp78 = getelementptr inbounds [100 x float], 
[100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp77 + %tmp79 = load float, float addrspace(1)* %tmp78, align 4 + %tmp80 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp77 + %tmp81 = load float, float addrspace(1)* %tmp80, align 4 + %tmp82 = fcmp oeq float %tmp79, %tmp81 + %tmp83 = and i1 %tmp75, %tmp82 + %tmp84 = add nuw nsw i32 %tmp20, 8 + %tmp85 = sext i32 %tmp84 to i64 + %tmp86 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp85 + %tmp87 = load float, float addrspace(1)* %tmp86, align 4 + %tmp88 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp85 + %tmp89 = load float, float addrspace(1)* %tmp88, align 4 + %tmp90 = fcmp oeq float %tmp87, %tmp89 + %tmp91 = and i1 %tmp83, %tmp90 + %tmp92 = add nuw nsw i32 %tmp20, 9 + %tmp93 = sext i32 %tmp92 to i64 + %tmp94 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp93 + %tmp95 = load float, float addrspace(1)* %tmp94, align 4 + %tmp96 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp93 + %tmp97 = load float, float addrspace(1)* %tmp96, align 4 + %tmp98 = fcmp oeq float %tmp95, %tmp97 + %tmp99 = and i1 %tmp91, %tmp98 + %tmp100 = add nuw nsw i32 %tmp20, 10 + %tmp101 = icmp eq i32 %tmp100, 100 + %tmp102 = sext i32 %tmp100 to i64 + br i1 %tmp101, label %bb1, label %bb18 +} + +; Function Attrs: nounwind readnone speculatable +declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1 + +; Function Attrs: nounwind readnone speculatable +declare i32 @llvm.amdgcn.workitem.id.x() #1 + +; Function Attrs: nounwind readnone speculatable +declare i32 @llvm.amdgcn.workgroup.id.x() #1 + +; Function Attrs: nounwind readnone speculatable +declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #1 + +attributes #0 = { "target-cpu"="fiji" "target-features"="-flat-for-global" } +attributes #1 = { nounwind readnone speculatable } diff --git a/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/test/CodeGen/ARM/ARMLoadStoreDBG.mir index 0e6f80bfb48bc..cf5388ac1ccb9 100644 --- a/test/CodeGen/ARM/ARMLoadStoreDBG.mir +++ b/test/CodeGen/ARM/ARMLoadStoreDBG.mir @@ -118,7 +118,6 @@ stack: - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' } body: | bb.0.entry: - successors: %bb.1, %bb.2.if.end liveins: %r0, %r1, %r2, %r3, %lr, %r7 DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28 diff --git a/test/CodeGen/ARM/acle-intrinsics-v5.ll b/test/CodeGen/ARM/acle-intrinsics-v5.ll new file mode 100644 index 0000000000000..407bea1488630 --- /dev/null +++ b/test/CodeGen/ARM/acle-intrinsics-v5.ll @@ -0,0 +1,110 @@ +; RUN: llc -O1 -mtriple=armv5te-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=armv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv6t2-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7em-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv8m.main-none-none-eabi -mattr=+dsp %s -o - | FileCheck %s +define i32 @smulbb(i32 %a, i32 %b) { +; CHECK-LABEL: smulbb +; CHECK: smulbb r0, r0, r1 + %tmp = call i32 @llvm.arm.smulbb(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smulbt(i32 %a, i32 %b) { +; CHECK-LABEL: smulbt +; CHECK: smulbt r0, r0, r1 + %tmp = call i32 
@llvm.arm.smulbt(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smultb(i32 %a, i32 %b) { +; CHECK-LABEL: smultb +; CHECK: smultb r0, r0, r1 + %tmp = call i32 @llvm.arm.smultb(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smultt(i32 %a, i32 %b) { +; CHECK-LABEL: smultt +; CHECK: smultt r0, r0, r1 + %tmp = call i32 @llvm.arm.smultt(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smulwb(i32 %a, i32 %b) { +; CHECK-LABEL: smulwb +; CHECK: smulwb r0, r0, r1 + %tmp = call i32 @llvm.arm.smulwb(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smulwt(i32 %a, i32 %b) { +; CHECK-LABEL: smulwt +; CHECK: smulwt r0, r0, r1 + %tmp = call i32 @llvm.arm.smulwt(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @acc_mults(i32 %a, i32 %b, i32 %acc) { +; CHECK-LABEL: acc_mults +; CHECK: smlabb r2, r0, r1, r2 +; CHECK: smlabt r2, r0, r1, r2 +; CHECK: smlatb r2, r0, r1, r2 +; CHECK: smlatt r2, r0, r1, r2 +; CHECK: smlawb r2, r0, r1, r2 +; CHECK: smlawt r0, r0, r1, r2 + %acc1 = call i32 @llvm.arm.smlabb(i32 %a, i32 %b, i32 %acc) + %acc2 = call i32 @llvm.arm.smlabt(i32 %a, i32 %b, i32 %acc1) + %acc3 = call i32 @llvm.arm.smlatb(i32 %a, i32 %b, i32 %acc2) + %acc4 = call i32 @llvm.arm.smlatt(i32 %a, i32 %b, i32 %acc3) + %acc5 = call i32 @llvm.arm.smlawb(i32 %a, i32 %b, i32 %acc4) + %acc6 = call i32 @llvm.arm.smlawt(i32 %a, i32 %b, i32 %acc5) + ret i32 %acc6 +} + +define i32 @qadd(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qadd +; CHECK: qadd r0, r0, r1 + %tmp = call i32 @llvm.arm.qadd(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsub(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsub +; CHECK: qsub r0, r0, r1 + %tmp = call i32 @llvm.arm.qsub(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qdadd(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qdadd +; CHECK: qdadd r0, r0, r1 + %dbl = call i32 @llvm.arm.qadd(i32 %a, i32 %a) + %add = call i32 @llvm.arm.qadd(i32 %dbl, i32 %b) + ret i32 %add +} + +define i32 @qdsub(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qdsub +; CHECK: qdsub r0, r0, r1 + %dbl = call i32 @llvm.arm.qadd(i32 %b, i32 %b) + %add = call i32 @llvm.arm.qsub(i32 %a, i32 %dbl) + ret i32 %add +} + +declare i32 @llvm.arm.smulbb(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smulbt(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smultb(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smultt(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smulwb(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smulwt(i32 %a, i32 %b) nounwind readnone +declare i32 @llvm.arm.smlabb(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlabt(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlatb(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlatt(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlawb(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlawt(i32, i32, i32) nounwind +declare i32 @llvm.arm.qadd(i32, i32) nounwind +declare i32 @llvm.arm.qsub(i32, i32) nounwind diff --git a/test/CodeGen/ARM/acle-intrinsics.ll b/test/CodeGen/ARM/acle-intrinsics.ll new file mode 100644 index 0000000000000..0c20744e126bd --- /dev/null +++ b/test/CodeGen/ARM/acle-intrinsics.ll @@ -0,0 +1,481 @@ +; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=armv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv6t2-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv7em-none-none-eabi %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=thumbv8m.main-none-none-eabi -mattr=+dsp %s 
-o - | FileCheck %s
+
+
+; upper-bound of the immediate argument
+define i32 @ssat1(i32 %a) nounwind {
+; CHECK-LABEL: ssat1
+; CHECK: ssat r0, #32, r0
+  %tmp = call i32 @llvm.arm.ssat(i32 %a, i32 32)
+  ret i32 %tmp
+}
+
+; lower-bound of the immediate argument
+define i32 @ssat2(i32 %a) nounwind {
+; CHECK-LABEL: ssat2
+; CHECK: ssat r0, #1, r0
+  %tmp = call i32 @llvm.arm.ssat(i32 %a, i32 1)
+  ret i32 %tmp
+}
+
+; upper-bound of the immediate argument
+define i32 @usat1(i32 %a) nounwind {
+; CHECK-LABEL: usat1
+; CHECK: usat r0, #31, r0
+  %tmp = call i32 @llvm.arm.usat(i32 %a, i32 31)
+  ret i32 %tmp
+}
+
+; lower-bound of the immediate argument
+define i32 @usat2(i32 %a) nounwind {
+; CHECK-LABEL: usat2
+; CHECK: usat r0, #0, r0
+  %tmp = call i32 @llvm.arm.usat(i32 %a, i32 0)
+  ret i32 %tmp
+}
+
+define i32 @ssat16(i32 %a) nounwind {
+; CHECK-LABEL: ssat16
+; CHECK: ssat16 r0, #1, r0
+; CHECK: ssat16 r0, #16, r0
+  %tmp = call i32 @llvm.arm.ssat16(i32 %a, i32 1)
+  %tmp2 = call i32 @llvm.arm.ssat16(i32 %tmp, i32 16)
+  ret i32 %tmp2
+}
+
+define i32 @usat16(i32 %a) nounwind {
+; CHECK-LABEL: usat16
+; CHECK: usat16 r0, #0, r0
+; CHECK: usat16 r0, #15, r0
+  %tmp = call i32 @llvm.arm.usat16(i32 %a, i32 0)
+  %tmp2 = call i32 @llvm.arm.usat16(i32 %tmp, i32 15)
+  ret i32 %tmp2
+}
+
+define i32 @pack_unpack(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: pack_unpack
+; CHECK: sxtab16 r0, r0, r1
+; CHECK: sxtb16 r0, r0
+; CHECK: uxtab16 r0, r1, r0
+; CHECK: uxtb16 r0, r0
+  %tmp = call i32 @llvm.arm.sxtab16(i32 %a, i32 %b)
+  %tmp1 = call i32 @llvm.arm.sxtb16(i32 %tmp)
+  %tmp2 = call i32 @llvm.arm.uxtab16(i32 %b, i32 %tmp1)
+  %tmp3 = call i32 @llvm.arm.uxtb16(i32 %tmp2)
+  ret i32 %tmp3
+}
+
+define i32 @sel(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sel
+; CHECK: sel r0, r0, r1
+  %tmp = call i32 @llvm.arm.sel(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @qadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qadd8
+; CHECK: qadd8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.qadd8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @qsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qsub8
+; CHECK: qsub8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.qsub8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @sadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sadd8
+; CHECK: sadd8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.sadd8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @shadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shadd8
+; CHECK: shadd8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.shadd8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @shsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shsub8
+; CHECK: shsub8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.shsub8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @ssub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ssub8
+; CHECK: ssub8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.ssub8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @uadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uadd8
+; CHECK: uadd8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.uadd8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @uhadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhadd8
+; CHECK: uhadd8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.uhadd8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @uhsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhsub8
+; CHECK: uhsub8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.uhsub8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @uqadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqadd8
+; CHECK: uqadd8 r0, r0, r1
+  %tmp = call i32 @llvm.arm.uqadd8(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+define i32 @uqsub8(i32
%a, i32 %b) nounwind { +; CHECK-LABEL: uqsub8 +; CHECK: uqsub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqsub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usub8(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: usub8 +; CHECK: usub8 r0, r0, r1 + %tmp = call i32 @llvm.arm.usub8(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usad(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: usad +; CHECK: usad8 r0, r0, r1 +; CHECK: usada8 r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.usad8(i32 %a, i32 %b) + %tmp1 = call i32 @llvm.arm.usada8(i32 %tmp, i32 %b, i32 %c) + ret i32 %tmp1 +} + +define i32 @qadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qadd16 +; CHECK: qadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.qadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qasx +; CHECK: qasx r0, r0, r1 + %tmp = call i32 @llvm.arm.qasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsax +; CHECK: qsax r0, r0, r1 + %tmp = call i32 @llvm.arm.qsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @qsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: qsub16 +; CHECK: qsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.qsub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @sadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: sadd16 +; CHECK: sadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.sadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @sasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: sasx +; CHECK: sasx r0, r0, r1 + %tmp = call i32 @llvm.arm.sasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shadd16 +; CHECK: shadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.shadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shasx +; CHECK: shasx r0, r0, r1 + %tmp = call i32 @llvm.arm.shasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shsax +; CHECK: shsax r0, r0, r1 + %tmp = call i32 @llvm.arm.shsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @shsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: shsub16 +; CHECK: shsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.shsub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @ssax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: ssax +; CHECK: ssax r0, r0, r1 + %tmp = call i32 @llvm.arm.ssax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @ssub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: ssub16 +; CHECK: ssub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.ssub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uadd16 +; CHECK: uadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uasx +; CHECK: uasx r0, r0, r1 + %tmp = call i32 @llvm.arm.uasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhadd16 +; CHECK: uhadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uhadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhasx +; CHECK: uhasx r0, r0, r1 + %tmp = call i32 @llvm.arm.uhasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhsax +; CHECK: uhsax r0, r0, r1 + %tmp = call i32 @llvm.arm.uhsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uhsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uhsub16 +; CHECK: uhsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uhsub16(i32 %a, i32 
%b) + ret i32 %tmp +} + +define i32 @uqadd16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqadd16 +; CHECK: uqadd16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqadd16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqasx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqasx +; CHECK: uqasx r0, r0, r1 + %tmp = call i32 @llvm.arm.uqasx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqsax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqsax +; CHECK: uqsax r0, r0, r1 + %tmp = call i32 @llvm.arm.uqsax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @uqsub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: uqsub16 +; CHECK: uqsub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.uqsub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usax(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: usax +; CHECK: usax r0, r0, r1 + %tmp = call i32 @llvm.arm.usax(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @usub16(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: usub16 +; CHECK: usub16 r0, r0, r1 + %tmp = call i32 @llvm.arm.usub16(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smlad(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smlad +; CHECK: smlad r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smlad(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i32 @smladx(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smladx +; CHECK: smladx r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smladx(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i64 @smlald(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlald +; CHECK: smlald r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlald(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i64 @smlaldx(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlaldx +; CHECK: smlaldx r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlaldx(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i32 @smlsd(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smlsd +; CHECK: smlsd r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smlsd(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i32 @smlsdx(i32 %a, i32 %b, i32 %c) nounwind { +; CHECK-LABEL: smlsdx +; CHECK: smlsdx r0, r0, r1, r2 + %tmp = call i32 @llvm.arm.smlsdx(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +define i64 @smlsld(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlsld +; CHECK: smlsld r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlsld(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i64 @smlsldx(i32 %a, i32 %b, i64 %c) nounwind { +; CHECK-LABEL: smlsldx +; CHECK: smlsldx r2, r3, r0, r1 + %tmp = call i64 @llvm.arm.smlsldx(i32 %a, i32 %b, i64 %c) + ret i64 %tmp +} + +define i32 @smuad(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: smuad +; CHECK: smuad r0, r0, r1 + %tmp = call i32 @llvm.arm.smuad(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smuadx(i32 %a, i32 %b) nounwind { +;CHECK-LABEL: smuadx +; CHECK: smuadx r0, r0, r1 + %tmp = call i32 @llvm.arm.smuadx(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smusd(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: smusd +; CHECK: smusd r0, r0, r1 + %tmp = call i32 @llvm.arm.smusd(i32 %a, i32 %b) + ret i32 %tmp +} + +define i32 @smusdx(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: smusdx +; CHECK: smusdx r0, r0, r1 + %tmp = call i32 @llvm.arm.smusdx(i32 %a, i32 %b) + ret i32 %tmp +} +declare i32 @llvm.arm.ssat(i32, i32) nounwind readnone +declare i32 @llvm.arm.usat(i32, i32) nounwind readnone +declare i32 @llvm.arm.ssat16(i32, i32) nounwind +declare i32 @llvm.arm.usat16(i32, i32) nounwind +declare i32 @llvm.arm.sxtab16(i32, i32) +declare i32 @llvm.arm.sxtb16(i32) +declare i32 @llvm.arm.uxtab16(i32, i32) +declare i32 
@llvm.arm.uxtb16(i32) +declare i32 @llvm.arm.sel(i32, i32) nounwind +declare i32 @llvm.arm.qadd8(i32, i32) nounwind +declare i32 @llvm.arm.qsub8(i32, i32) nounwind +declare i32 @llvm.arm.sadd8(i32, i32) nounwind +declare i32 @llvm.arm.shadd8(i32, i32) nounwind +declare i32 @llvm.arm.shsub8(i32, i32) nounwind +declare i32 @llvm.arm.ssub8(i32, i32) nounwind +declare i32 @llvm.arm.uadd8(i32, i32) nounwind +declare i32 @llvm.arm.uhadd8(i32, i32) nounwind +declare i32 @llvm.arm.uhsub8(i32, i32) nounwind +declare i32 @llvm.arm.uqadd8(i32, i32) nounwind +declare i32 @llvm.arm.uqsub8(i32, i32) nounwind +declare i32 @llvm.arm.usub8(i32, i32) nounwind +declare i32 @llvm.arm.usad8(i32, i32) nounwind readnone +declare i32 @llvm.arm.usada8(i32, i32, i32) nounwind readnone +declare i32 @llvm.arm.qadd16(i32, i32) nounwind +declare i32 @llvm.arm.qasx(i32, i32) nounwind +declare i32 @llvm.arm.qsax(i32, i32) nounwind +declare i32 @llvm.arm.qsub16(i32, i32) nounwind +declare i32 @llvm.arm.sadd16(i32, i32) nounwind +declare i32 @llvm.arm.sasx(i32, i32) nounwind +declare i32 @llvm.arm.shadd16(i32, i32) nounwind +declare i32 @llvm.arm.shasx(i32, i32) nounwind +declare i32 @llvm.arm.shsax(i32, i32) nounwind +declare i32 @llvm.arm.shsub16(i32, i32) nounwind +declare i32 @llvm.arm.ssax(i32, i32) nounwind +declare i32 @llvm.arm.ssub16(i32, i32) nounwind +declare i32 @llvm.arm.uadd16(i32, i32) nounwind +declare i32 @llvm.arm.uasx(i32, i32) nounwind +declare i32 @llvm.arm.usax(i32, i32) nounwind +declare i32 @llvm.arm.uhadd16(i32, i32) nounwind +declare i32 @llvm.arm.uhasx(i32, i32) nounwind +declare i32 @llvm.arm.uhsax(i32, i32) nounwind +declare i32 @llvm.arm.uhsub16(i32, i32) nounwind +declare i32 @llvm.arm.uqadd16(i32, i32) nounwind +declare i32 @llvm.arm.uqasx(i32, i32) nounwind +declare i32 @llvm.arm.uqsax(i32, i32) nounwind +declare i32 @llvm.arm.uqsub16(i32, i32) nounwind +declare i32 @llvm.arm.usub16(i32, i32) nounwind +declare i32 @llvm.arm.smlad(i32, i32, i32) nounwind +declare i32 @llvm.arm.smladx(i32, i32, i32) nounwind +declare i64 @llvm.arm.smlald(i32, i32, i64) nounwind +declare i64 @llvm.arm.smlaldx(i32, i32, i64) nounwind +declare i32 @llvm.arm.smlsd(i32, i32, i32) nounwind +declare i32 @llvm.arm.smlsdx(i32, i32, i32) nounwind +declare i64 @llvm.arm.smlsld(i32, i32, i64) nounwind +declare i64 @llvm.arm.smlsldx(i32, i32, i64) nounwind +declare i32 @llvm.arm.smuad(i32, i32) nounwind +declare i32 @llvm.arm.smuadx(i32, i32) nounwind +declare i32 @llvm.arm.smusd(i32, i32) nounwind +declare i32 @llvm.arm.smusdx(i32, i32) nounwind diff --git a/test/CodeGen/ARM/alloca-align.ll b/test/CodeGen/ARM/alloca-align.ll new file mode 100644 index 0000000000000..3bba156f0ee06 --- /dev/null +++ b/test/CodeGen/ARM/alloca-align.ll @@ -0,0 +1,24 @@ +; RUN: llc -o - %s | FileCheck %s +target triple="arm--" + +@glob = external global i32* + +declare void @bar(i32*, [20000 x i8]* byval) + +; CHECK-LABEL: foo: +; We should see the stack getting additional alignment +; CHECK: sub sp, sp, #16 +; CHECK: bic sp, sp, #31 +; And a base pointer getting used. 
+; CHECK: mov r6, sp +; Which is passed to the call +; CHECK: add [[REG:r[0-9]+]], r6, #19456 +; CHECK: add r0, [[REG]], #536 +; CHECK: bl bar +define void @foo([20000 x i8]* %addr) { + %tmp = alloca [4 x i32], align 32 + %tmp0 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0 + call void @bar(i32* %tmp0, [20000 x i8]* byval %addr) + ret void +} + diff --git a/test/CodeGen/ARM/cmp1-peephole-thumb.mir b/test/CodeGen/ARM/cmp1-peephole-thumb.mir index 5ace58fd06584..3e87ced0ee57d 100644 --- a/test/CodeGen/ARM/cmp1-peephole-thumb.mir +++ b/test/CodeGen/ARM/cmp1-peephole-thumb.mir @@ -55,7 +55,6 @@ frameInfo: # CHECK-NOT: tCMPi8 body: | bb.0.entry: - successors: %bb.1.entry(0x40000000), %bb.2.entry(0x40000000) liveins: %r0, %r1 %1 = COPY %r1 @@ -67,8 +66,6 @@ body: | tBcc %bb.2.entry, 0, %cpsr bb.1.entry: - successors: %bb.2.entry(0x80000000) - bb.2.entry: %5 = PHI %4, %bb.1.entry, %3, %bb.0.entry diff --git a/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/test/CodeGen/ARM/cmp2-peephole-thumb.mir index 6e9ca70f1741d..a31086d2113eb 100644 --- a/test/CodeGen/ARM/cmp2-peephole-thumb.mir +++ b/test/CodeGen/ARM/cmp2-peephole-thumb.mir @@ -76,7 +76,6 @@ stack: # CHECK-NEXT: tCMPi8 body: | bb.0.entry: - successors: %bb.1.if.then(0x40000000), %bb.2.if.end(0x40000000) liveins: %r0, %r1 %1 = COPY %r1 @@ -88,15 +87,11 @@ body: | tB %bb.1.if.then, 14, _ bb.1.if.then: - successors: %bb.3.return(0x80000000) - %4, %cpsr = tMOVi8 42, 14, _ tSTRspi killed %4, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval) tB %bb.3.return, 14, _ bb.2.if.end: - successors: %bb.3.return(0x80000000) - %3, %cpsr = tMOVi8 1, 14, _ tSTRspi killed %3, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval) diff --git a/test/CodeGen/ARM/dbg-range-extension.mir b/test/CodeGen/ARM/dbg-range-extension.mir index 466f693969489..a79607705c1c7 100644 --- a/test/CodeGen/ARM/dbg-range-extension.mir +++ b/test/CodeGen/ARM/dbg-range-extension.mir @@ -209,7 +209,6 @@ stack: - { id: 5, type: spill-slot, offset: -24, size: 4, alignment: 4, callee-saved-register: '%r4' } body: | bb.0.entry: - successors: %bb.5.if.end, %bb.1.if.then liveins: %r0, %r4, %r5, %r6, %r7, %r11, %lr %sp = frame-setup STMDB_UPD %sp, 14, _, killed %r4, killed %r5, killed %r6, killed %r7, killed %r11, killed %lr @@ -232,7 +231,6 @@ body: | Bcc %bb.5.if.end, 0, killed %cpsr bb.1.if.then: - successors: %bb.3.for.cond liveins: %r4, %r5 %r0 = MOVi 12, 14, _, _, debug-location !26 @@ -245,7 +243,6 @@ body: | B %bb.3.for.cond bb.2.for.body: - successors: %bb.3.for.cond liveins: %r4, %r5, %r6, %r7 %r1 = ADDrr %r5, %r7, 14, _, _, debug-location !36 @@ -255,7 +252,6 @@ body: | DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28 bb.3.for.cond: - successors: %bb.2.for.body, %bb.4.for.cond.cleanup liveins: %r4, %r5, %r6, %r7 DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28 @@ -263,7 +259,6 @@ body: | Bcc %bb.2.for.body, 11, killed %cpsr, debug-location !33 bb.4.for.cond.cleanup: - successors: %bb.5.if.end liveins: %r4, %r5, %r6 %r0 = MOVr %r5, 14, _, _, debug-location !34 diff --git a/test/CodeGen/ARM/sat-arith.ll b/test/CodeGen/ARM/sat-arith.ll deleted file mode 100644 index 4844ed1bd21e2..0000000000000 --- a/test/CodeGen/ARM/sat-arith.ll +++ /dev/null @@ -1,63 +0,0 @@ -; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s -check-prefix=ARM -check-prefix=CHECK -; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s -check-prefix=THUMB -check-prefix=CHECK - -; CHECK-LABEL: qadd -define i32 @qadd() nounwind { -; 
CHECK-DAG: mov{{s?}} [[R0:.*]], #8 -; CHECK-DAG: mov{{s?}} [[R1:.*]], #128 -; CHECK-ARM: qadd [[R0]], [[R1]], [[R0]] -; CHECK-THRUMB: qadd [[R0]], [[R0]], [[R1]] - %tmp = call i32 @llvm.arm.qadd(i32 128, i32 8) - ret i32 %tmp -} - -; CHECK-LABEL: qsub -define i32 @qsub() nounwind { -; CHECK-DAG: mov{{s?}} [[R0:.*]], #8 -; CHECK-DAG: mov{{s?}} [[R1:.*]], #128 -; CHECK-ARM: qsub [[R0]], [[R1]], [[R0]] -; CHECK-THRUMB: qadd [[R0]], [[R1]], [[R0]] - %tmp = call i32 @llvm.arm.qsub(i32 128, i32 8) - ret i32 %tmp -} - -; upper-bound of the immediate argument -; CHECK-LABEL: ssat1 -define i32 @ssat1() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: ssat [[R1:.*]], #32, [[R0]] - %tmp = call i32 @llvm.arm.ssat(i32 128, i32 32) - ret i32 %tmp -} - -; lower-bound of the immediate argument -; CHECK-LABEL: ssat2 -define i32 @ssat2() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: ssat [[R1:.*]], #1, [[R0]] - %tmp = call i32 @llvm.arm.ssat(i32 128, i32 1) - ret i32 %tmp -} - -; upper-bound of the immediate argument -; CHECK-LABEL: usat1 -define i32 @usat1() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: usat [[R1:.*]], #31, [[R0]] - %tmp = call i32 @llvm.arm.usat(i32 128, i32 31) - ret i32 %tmp -} - -; lower-bound of the immediate argument -; CHECK-LABEL: usat2 -define i32 @usat2() nounwind { -; CHECK: mov{{s?}} [[R0:.*]], #128 -; CHECK: usat [[R1:.*]], #0, [[R0]] - %tmp = call i32 @llvm.arm.usat(i32 128, i32 0) - ret i32 %tmp -} - -declare i32 @llvm.arm.qadd(i32, i32) nounwind -declare i32 @llvm.arm.qsub(i32, i32) nounwind -declare i32 @llvm.arm.ssat(i32, i32) nounwind readnone -declare i32 @llvm.arm.usat(i32, i32) nounwind readnone diff --git a/test/CodeGen/ARM/vabs.ll b/test/CodeGen/ARM/vabs.ll index 38c6d6c28aedf..4295b32d25fc7 100644 --- a/test/CodeGen/ARM/vabs.ll +++ b/test/CodeGen/ARM/vabs.ll @@ -8,6 +8,22 @@ define <8 x i8> @vabss8(<8 x i8>* %A) nounwind { ret <8 x i8> %tmp2 } +define <8 x i8> @vabss8_fold(<8 x i8>* %A) nounwind { +; CHECK-LABEL: vabss8_fold: +; CHECK: vldr d16, .LCPI1_0 +; CHECK: .LCPI1_0: +; CHECK-NEXT: .byte 128 @ 0x80 +; CHECK-NEXT: .byte 127 @ 0x7f +; CHECK-NEXT: .byte 1 @ 0x1 +; CHECK-NEXT: .byte 0 @ 0x0 +; CHECK-NEXT: .byte 1 @ 0x1 +; CHECK-NEXT: .byte 127 @ 0x7f +; CHECK-NEXT: .byte 128 @ 0x80 +; CHECK-NEXT: .byte 1 @ 0x1 + %tmp1 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> <i8 -128, i8 -127, i8 -1, i8 0, i8 1, i8 127, i8 128, i8 255>) + ret <8 x i8> %tmp1 +} + define <4 x i16> @vabss16(<4 x i16>* %A) nounwind { ;CHECK-LABEL: vabss16: ;CHECK: vabs.s16 @@ -16,6 +32,18 @@ define <4 x i16> @vabss16(<4 x i16>* %A) nounwind { ret <4 x i16> %tmp2 } +define <4 x i16> @vabss16_fold() nounwind { +; CHECK-LABEL: vabss16_fold: +; CHECK: vldr d16, .LCPI3_0 +; CHECK: .LCPI3_0: +; CHECK-NEXT: .short 32768 @ 0x8000 +; CHECK-NEXT: .short 32767 @ 0x7fff +; CHECK-NEXT: .short 255 @ 0xff +; CHECK-NEXT: .short 32768 @ 0x8000 + %tmp1 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> <i16 -32768, i16 -32767, i16 255, i16 32768>) + ret <4 x i16> %tmp1 +} + define <2 x i32> @vabss32(<2 x i32>* %A) nounwind { ;CHECK-LABEL: vabss32: ;CHECK: vabs.s32 @@ -24,6 +52,16 @@ define <2 x i32> @vabss32(<2 x i32>* %A) nounwind { ret <2 x i32> %tmp2 } +define <2 x i32> @vabss32_fold() nounwind { +; CHECK-LABEL: vabss32_fold: +; CHECK: vldr d16, .LCPI5_0 +; CHECK: .LCPI5_0: +; CHECK-NEXT: .long 2147483647 @ 0x7fffffff +; CHECK-NEXT: .long 2147483648 @ 0x80000000 + %tmp1 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> <i32 -2147483647, i32 2147483648>) + ret <2 x i32> 
%tmp1 +} + define <2 x float> @vabsf32(<2 x float>* %A) nounwind { ;CHECK-LABEL: vabsf32: ;CHECK: vabs.f32 diff --git a/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll b/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll index 93c3cb14fb738..5e3c45c3454d8 100644 --- a/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll +++ b/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll @@ -25,7 +25,13 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" } ; CHECK: .p2align 4 ; CHECK-NEXT: .long {{.*}}Lxray_synthetic_0 +; CHECK-NEXT: .long {{.*}}Lxray_fn_idx_synth_0 ; CHECK-NEXT: .section {{.*}}xray_instr_map{{.*}} ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .long {{.*}}Lxray_sled_0 ; CHECK: .long {{.*}}Lxray_sled_1 +; CHECK-LABEL: Lxray_synthetic_end0: +; CHECK: .section {{.*}}xray_fn_idx{{.*}} +; CHECK-LABEL: Lxray_fn_idx_synth_0: +; CHECK: .long {{.*}}Lxray_synthetic_0 +; CHECK-NEXT: .long {{.*}}Lxray_synthetic_end0 diff --git a/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll b/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll index d14590b886794..739151fbdd5e5 100644 --- a/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll +++ b/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll @@ -25,7 +25,14 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" } ; CHECK: .p2align 4 ; CHECK-NEXT: .long {{.*}}Lxray_synthetic_0 +; CHECK-NEXT: .long {{.*}}Lxray_fn_idx_synth_0 ; CHECK-NEXT: .section {{.*}}xray_instr_map{{.*}} ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .long {{.*}}Lxray_sled_0 ; CHECK: .long {{.*}}Lxray_sled_1 +; CHECK-LABEL: Lxray_synthetic_end0: +; CHECK: .section {{.*}}xray_fn_idx{{.*}} +; CHECK-LABEL: Lxray_fn_idx_synth_0: +; CHECK: .long {{.*}}xray_synthetic_0 +; CHECK-NEXT: .long {{.*}}xray_synthetic_end0 + diff --git a/test/CodeGen/BPF/dwarfdump.ll b/test/CodeGen/BPF/dwarfdump.ll index 7ae64dfb56827..6a6913011e644 100644 --- a/test/CodeGen/BPF/dwarfdump.ll +++ b/test/CodeGen/BPF/dwarfdump.ll @@ -1,5 +1,7 @@ ; RUN: llc -O2 -march=bpfel %s -o %t -filetype=obj ; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s +; RUN: llc -O2 -march=bpfeb %s -o %t -filetype=obj +; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s source_filename = "testprog.c" target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128" diff --git a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir new file mode 100644 index 0000000000000..a746d826265b6 --- /dev/null +++ b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir @@ -0,0 +1,59 @@ +# RUN: llc -march=hexagon -run-pass branch-folder -run-pass if-converter -verify-machineinstrs %s -o - | FileCheck %s + +# The hoisting of common instructions from successors could cause registers +# to no longer be live-in in the successor blocks. The liveness was updated +# to include potential new live-in registers, but not to remove registers +# that were no longer live-in. +# This could cause the if-converter to generate incorrect code. +# +# In this testcase, the "r1 = A2_sxth r0<kill>" was hoisted, and since r0 +# was killed, it was no longer live-in in either successor. 
The if-converter +# then created code where the first predicated instruction has an incorrect +# implicit use of r0: +# +# BB#0: +# Live Ins: %R0 +# %R1<def> = A2_sxth %R0<kill> ; hoisted, kills r0 +# A2_nop %P0<imp-def> +# %R0<def> = C2_cmoveit %P0, 2, %R0<imp-use> ; predicated A2_tfrsi +# %R0<def> = C2_cmoveif %P0, 1, %R0<imp-use> ; predicated A2_tfrsi +# %R0<def> = A2_add %R0<kill>, %R1<kill> +# J2_jumpr %R31, %PC<imp-def,dead> +# + +# CHECK: %r1 = A2_sxth killed %r0 +# CHECK: %r0 = C2_cmoveit %p0, 2 +# CHECK-NOT: implicit-def %r0 +# CHECK: %r0 = C2_cmoveif %p0, 1, implicit %r0 + +--- +name: fred +tracksRegLiveness: true + +body: | + bb.0: + liveins: %r0 + successors: %bb.1, %bb.2 + + A2_nop implicit-def %p0 + J2_jumpt killed %p0, %bb.2, implicit-def dead %pc + + bb.1: + successors: %bb.3 + liveins: %r0 + %r1 = A2_sxth killed %r0 + %r0 = A2_tfrsi 1 + J2_jump %bb.3, implicit-def %pc + + bb.2: + successors: %bb.3 + liveins: %r0 + %r1 = A2_sxth killed %r0 + %r0 = A2_tfrsi 2 + + bb.3: + liveins: %r0, %r1 + %r0 = A2_add killed %r0, killed %r1 + J2_jumpr %r31, implicit-def dead %pc +... + diff --git a/test/CodeGen/Hexagon/rdf-cover-use.ll b/test/CodeGen/Hexagon/rdf-cover-use.ll new file mode 100644 index 0000000000000..4f3de0868aa6e --- /dev/null +++ b/test/CodeGen/Hexagon/rdf-cover-use.ll @@ -0,0 +1,38 @@ +; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s + +; Check for sane output. +; CHECK: vmpyweh + +target triple = "hexagon" + +declare i32 @llvm.hexagon.S2.clb(i32) #0 +declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) #0 +declare i32 @llvm.hexagon.S2.vrndpackwh(i64) #0 +declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64) #0 + +define i64 @fred(i32 %a0, i32 %a1) local_unnamed_addr #1 { +b2: + br i1 undef, label %b15, label %b3 + +b3: ; preds = %b2 + %v4 = tail call i32 @llvm.hexagon.S2.clb(i32 %a1) #0 + %v5 = add nsw i32 %v4, -32 + %v6 = zext i32 %v5 to i64 + %v7 = shl nuw i64 %v6, 32 + %v8 = or i64 %v7, 0 + %v9 = tail call i32 @llvm.hexagon.S2.asl.r.r(i32 %a0, i32 0) + %v10 = tail call i32 @llvm.hexagon.S2.vrndpackwh(i64 %v8) + %v11 = sext i32 %v9 to i64 + %v12 = sext i32 %v10 to i64 + %v13 = tail call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %v11, i64 %v12) + %v14 = and i64 %v13, 4294967295 + br label %b15 + +b15: ; preds = %b3, %b2 + %v16 = phi i64 [ %v14, %b3 ], [ 0, %b2 ] + %v17 = or i64 0, %v16 + ret i64 %v17 +} + +attributes #0 = { nounwind readnone } +attributes #1 = { nounwind "target-cpu"="hexagonv55" } diff --git a/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/test/CodeGen/Hexagon/swp-matmul-bitext.ll index 9c425ae6a0988..3b26d141238ad 100644 --- a/test/CodeGen/Hexagon/swp-matmul-bitext.ll +++ b/test/CodeGen/Hexagon/swp-matmul-bitext.ll @@ -1,17 +1,16 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-bsb-sched=0 -enable-pipeliner < %s | FileCheck %s -; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-pipeliner < %s | FileCheck %s ; From coremark. Test that we pipeline the matrix multiplication bitextract ; function. The pipelined code should have two packets. 
; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: -; CHECK: = extractu([[REG2:(r[0-9]+)]], -; CHECK: = extractu([[REG2]], -; CHECK: [[REG0:(r[0-9]+)]] = memh -; CHECK: [[REG1:(r[0-9]+)]] = memh +; CHECK: [[REG0:(r[0-9]+)]] = mpyi([[REG1:(r[0-9]+)]],[[REG2:(r[0-9]+)]]) ; CHECK: += mpyi -; CHECK: [[REG2]] = mpyi([[REG0]],[[REG1]]) +; CHECK: [[REG1:(r[0-9]+)]] = memh +; CHECK: = extractu([[REG0:(r[0-9]+)]], +; CHECK: = extractu([[REG0]], +; CHECK: [[REG2:(r[0-9]+)]] = memh ; CHECK: endloop0 %union_h2_sem_t = type { i32 } diff --git a/test/CodeGen/MIR/Generic/branch-probabilities.ll b/test/CodeGen/MIR/Generic/branch-probabilities.ll deleted file mode 100644 index 8d119316b1345..0000000000000 --- a/test/CodeGen/MIR/Generic/branch-probabilities.ll +++ /dev/null @@ -1,28 +0,0 @@ -; RUN: llc -stop-after machine-sink %s -o %t.mir -; RUN: FileCheck %s < %t.mir -; RUN: llc %t.mir -run-pass machine-sink -; Check that branch probabilities are printed in a format that can then be parsed. -; This test fails on powerpc because of an undefined physical register use in the MIR. See PR31062. -; XFAIL: powerpc - -declare void @foo() -declare void @bar() - -define void @test(i1 %c) { -; CHECK-LABEL: name: test -entry: - br i1 %c, label %then, label %else - -then: - call void @foo() - br label %end -; CHECK: successors: %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}), %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}) - -else: - call void @bar() - br label %end -; CHECK: successors: %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}) - -end: - ret void -} diff --git a/test/CodeGen/MIR/X86/auto-successor.mir b/test/CodeGen/MIR/X86/auto-successor.mir new file mode 100644 index 0000000000000..23b4f91b3b604 --- /dev/null +++ b/test/CodeGen/MIR/X86/auto-successor.mir @@ -0,0 +1,61 @@ +# RUN: llc -mtriple=x86_64-- -o - %s -run-pass=none -verify-machineinstrs -simplify-mir | FileCheck %s +--- +# We shouldn't need any explicit successor lists in these examples +# CHECK-LABEL: name: func0 +# CHECK: bb.0: +# CHECK-NOT: successors +# CHECK: JE_1 %bb.1, implicit undef %eflags +# CHECK: JMP_1 %bb.3 +# CHECK: bb.1: +# CHECK-NOT: successors +# CHECK: bb.2: +# CHECK-NOT: successors +# CHECK: JE_1 %bb.1, implicit undef %eflags +# CHECK: bb.3: +# CHECK: RETQ undef %eax +name: func0 +body: | + bb.0: + JE_1 %bb.1, implicit undef %eflags + JMP_1 %bb.3 + + bb.1: + + bb.2: + JE_1 %bb.1, implicit undef %eflags + + bb.3: + JE_1 %bb.4, implicit undef %eflags ; condjump+fallthrough to same block + + bb.4: + RETQ undef %eax +... +--- +# Some cases that need explicit successors: +# CHECK-LABEL: name: func1 +name: func1 +body: | + bb.0: + ; CHECK: bb.0: + ; CHECK: successors: %bb.3, %bb.1 + successors: %bb.3, %bb.1 ; different order than operands + JE_1 %bb.1, implicit undef %eflags + JMP_1 %bb.3 + + bb.1: + ; CHECK: bb.1: + ; CHECK: successors: %bb.2, %bb.1 + successors: %bb.2, %bb.1 ; different order (fallthrough variant) + JE_1 %bb.1, implicit undef %eflags + + bb.2: + ; CHECK: bb.2: + ; CHECK: successors: %bb.1(0x60000000), %bb.3(0x20000000) + successors: %bb.1(3), %bb.3(1) ; branch probabilities not normalized + JE_1 %bb.1, implicit undef %eflags + + bb.3: + ; CHECK: bb.3: + ; CHECK: RETQ undef %eax + RETQ undef %eax +... 
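A note on the hexadecimal successor probabilities checked in auto-successor.mir above and in the branch-probabilities.mir diff that follows: MIR prints each branch probability as a 32-bit fixed-point numerator over a denominator of 1 << 31, and normalizes the raw weights so that a block's successor numerators sum to exactly 0x80000000. A minimal C sketch of that scaling arithmetic (an illustration of the encoding only, not LLVM's implementation):

#include <stdio.h>
#include <stdint.h>

int main(void) {
    const uint64_t D = 1u << 31;                  /* denominator: probability 1.0 */
    uint32_t weights[2][2] = { {3, 1}, {4, 1} };  /* raw weights from the two tests */
    for (int t = 0; t < 2; ++t) {
        uint64_t total = weights[t][0] + weights[t][1];
        /* Scale the first weight with round-to-nearest... */
        uint32_t first = (uint32_t)((weights[t][0] * D + total / 2) / total);
        /* ...and give the remainder to the second so the pair sums to D. */
        uint32_t last = (uint32_t)(D - first);
        printf("%u:%u -> 0x%08x, 0x%08x\n", weights[t][0], weights[t][1], first, last);
    }
    return 0;
}

Running it prints 3:1 -> 0x60000000, 0x20000000 (the unnormalized case in auto-successor.mir above) and 4:1 -> 0x66666666, 0x1999999a (the pair checked in the next diff).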
diff --git a/test/CodeGen/MIR/X86/branch-probabilities.mir b/test/CodeGen/MIR/X86/branch-probabilities.mir new file mode 100644 index 0000000000000..4aacd2d5cef1d --- /dev/null +++ b/test/CodeGen/MIR/X86/branch-probabilities.mir @@ -0,0 +1,18 @@ +# RUN: llc -o - %s -mtriple=x86_64-- -run-pass=none | FileCheck %s +--- +# Check that branch probabilities are printed correctly as hex numbers. +# CHECK-LABEL: name: test +# CHECK: bb.0: +# CHECK-NEXT: successors: %bb.1(0x66666666), %bb.2(0x1999999a) +name: test +body: | + bb.0: + successors: %bb.1(4), %bb.2(1) + JE_1 %bb.2, implicit undef %eflags + + bb.1: + NOOP + + bb.2: + RETQ undef %eax +... diff --git a/test/CodeGen/MIR/X86/successor-basic-blocks.mir b/test/CodeGen/MIR/X86/successor-basic-blocks.mir index 395272bb23c02..ffeb04af9e40d 100644 --- a/test/CodeGen/MIR/X86/successor-basic-blocks.mir +++ b/test/CodeGen/MIR/X86/successor-basic-blocks.mir @@ -32,7 +32,6 @@ name: foo body: | ; CHECK-LABEL: bb.0.entry: - ; CHECK: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000) ; CHECK-LABEL: bb.1.less: bb.0.entry: successors: %bb.1.less, %bb.2.exit diff --git a/test/CodeGen/PowerPC/restore-r30.ll b/test/CodeGen/PowerPC/restore-r30.ll new file mode 100644 index 0000000000000..216d5a709340d --- /dev/null +++ b/test/CodeGen/PowerPC/restore-r30.ll @@ -0,0 +1,30 @@ +; RUN: llc -march=ppc32 -relocation-model=pic < %s | FileCheck %s + +; The load restoring r30 at the end of the function was placed out of order +; relative to its uses as the PIC base pointer. +; This was because the r30 operand was not marked as "def" which allowed +; the post-RA scheduler to move it over other uses of r30. + +; CHECK-LABEL: fred +; CHECK: lwz 30, 24(1) +; R30 should not appear in an instruction after it's been restored. +; CHECK-NOT: 30, + +target datalayout = "E-m:e-p:32:32-i64:64-n32" +target triple = "powerpc" + +define double @fred(i64 %a) #0 { +entry: + %0 = lshr i64 %a, 32 + %conv = trunc i64 %0 to i32 + %conv1 = sitofp i32 %conv to double + %mul = fmul double %conv1, 0x41F0000000000000 + %and = and i64 %a, 4294967295 + %or = or i64 %and, 4841369599423283200 + %sub = fadd double %mul, 0xC330000000000000 + %1 = bitcast i64 %or to double + %add = fadd double %sub, %1 + ret double %add +} + +attributes #0 = { norecurse nounwind readnone "target-cpu"="ppc" "use-soft-float"="false" } diff --git a/test/CodeGen/SystemZ/copy-physreg-128.ll b/test/CodeGen/SystemZ/copy-physreg-128.ll new file mode 100644 index 0000000000000..408316140605e --- /dev/null +++ b/test/CodeGen/SystemZ/copy-physreg-128.ll @@ -0,0 +1,68 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -join-liveintervals=false -verify-machineinstrs | FileCheck %s +; +; Check that copyPhysReg() properly adds impl-use operands of the super +; register while lowering a COPY of a GR128 bit reg. 
+ +define void @autogen_SD5585(i32*, i64) { +; CHECK: .text +BB: + %L5 = load i1, i1* undef + %I8 = insertelement <8 x i64> undef, i64 %1, i32 3 + %I21 = insertelement <8 x i64> zeroinitializer, i64 475435, i32 5 + br label %CF290 + +CF290: ; preds = %CF290, %BB + %B29 = urem <8 x i64> %I8, %I21 + %Cmp31 = icmp sge i1 undef, undef + br i1 %Cmp31, label %CF290, label %CF296 + +CF296: ; preds = %CF290 + %FC36 = sitofp <8 x i64> %B29 to <8 x double> + br label %CF302 + +CF302: ; preds = %CF307, %CF296 + %Shuff49 = shufflevector <8 x i64> undef, <8 x i64> zeroinitializer, <8 x i32> <i32 undef, i32 9, i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5> + %L69 = load i16, i16* undef + br label %CF307 + +CF307: ; preds = %CF302 + %Cmp84 = icmp ne i16 undef, %L69 + br i1 %Cmp84, label %CF302, label %CF301 + +CF301: ; preds = %CF307 + %B126 = or i32 514315, undef + br label %CF280 + +CF280: ; preds = %CF280, %CF301 + %I139 = insertelement <8 x i64> %Shuff49, i64 undef, i32 2 + %B155 = udiv <8 x i64> %I8, %I139 + %Cmp157 = icmp ne i64 -1, undef + br i1 %Cmp157, label %CF280, label %CF281 + +CF281: ; preds = %CF280 + %Cmp164 = icmp slt i1 %L5, %Cmp84 + br label %CF282 + +CF282: ; preds = %CF304, %CF281 + br label %CF289 + +CF289: ; preds = %CF289, %CF282 + store i32 %B126, i32* %0 + %Cmp219 = icmp slt i64 undef, undef + br i1 %Cmp219, label %CF289, label %CF304 + +CF304: ; preds = %CF289 + %Cmp234 = icmp ult i64 0, undef + br i1 %Cmp234, label %CF282, label %CF283 + +CF283: ; preds = %CF308, %CF283, %CF304 + %E251 = extractelement <8 x i64> %B155, i32 0 + br i1 undef, label %CF283, label %CF308 + +CF308: ; preds = %CF283 + store i1 %Cmp164, i1* undef + br i1 undef, label %CF283, label %CF293 + +CF293: ; preds = %CF308 + ret void +} diff --git a/test/CodeGen/X86/2014-08-29-CompactUnwind.ll b/test/CodeGen/X86/2014-08-29-CompactUnwind.ll index e7e8bb724fc05..f6d6bd3ed6f74 100644 --- a/test/CodeGen/X86/2014-08-29-CompactUnwind.ll +++ b/test/CodeGen/X86/2014-08-29-CompactUnwind.ll @@ -24,7 +24,7 @@ target triple = "x86_64-apple-macosx10.9.0" ; CHECK-NOT: {{compact encoding:.*0x0309f800}} ; CHECK: {{compact encoding:.*0x030df800}} -define void @__asan_report_error() #0 { +define void @__asan_report_error(i64 %step) #0 { %str.i = alloca i64, align 8 %stack = alloca [256 x i64], align 8 br label %print_shadow_bytes.exit.i @@ -37,7 +37,7 @@ print_shadow_bytes.exit.i: ; preds = %print_shadow_bytes.exit.i, %0 %reg17 = shl i64 %iv.i, 1 %reg19 = inttoptr i64 %reg17 to i8* call void (i64*, i8*, ...) 
@append(i64* %str.i, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str2, i64 0, i64 0), i8* %reg16, i8* %reg19) - %iv.next.i = add nsw i64 %iv.i, 0 + %iv.next.i = add nsw i64 %iv.i, %step br label %print_shadow_bytes.exit.i } diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll new file mode 100644 index 0000000000000..bc5b0152b24ae --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/gep.ll @@ -0,0 +1,136 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL +; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 + +define i32* @test_gep_i8(i32 *%arr, i8 %ind) { +; X64_GISEL-LABEL: test_gep_i8: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: movsbq %sil, %rcx +; X64_GISEL-NEXT: imulq %rax, %rcx +; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i8: +; X64: # BB#0: +; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> +; X64-NEXT: movsbq %sil, %rax +; X64-NEXT: leaq (%rdi,%rax,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i8 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i8_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i8_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $80, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i8_const: +; X64: # BB#0: +; X64-NEXT: leaq 80(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i8 20 + ret i32* %arrayidx +} + +define i32* @test_gep_i16(i32 *%arr, i16 %ind) { +; X64_GISEL-LABEL: test_gep_i16: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: movswq %si, %rcx +; X64_GISEL-NEXT: imulq %rax, %rcx +; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i16: +; X64: # BB#0: +; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> +; X64-NEXT: movswq %si, %rax +; X64-NEXT: leaq (%rdi,%rax,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i16 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i16_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i16_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $80, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i16_const: +; X64: # BB#0: +; X64-NEXT: leaq 80(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i16 20 + ret i32* %arrayidx +} + +define i32* @test_gep_i32(i32 *%arr, i32 %ind) { +; X64_GISEL-LABEL: test_gep_i32: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: movslq %esi, %rcx +; X64_GISEL-NEXT: imulq %rax, %rcx +; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i32: +; X64: # BB#0: +; X64-NEXT: movslq %esi, %rax +; X64-NEXT: leaq (%rdi,%rax,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i32 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i32_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i32_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $20, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i32_const: +; X64: # BB#0: +; X64-NEXT: leaq 20(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i32 5 + ret i32* %arrayidx +} + +define i32* @test_gep_i64(i32 *%arr, i64 %ind) { +; X64_GISEL-LABEL: 
test_gep_i64: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $4, %rax +; X64_GISEL-NEXT: imulq %rsi, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i64: +; X64: # BB#0: +; X64-NEXT: leaq (%rdi,%rsi,4), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i64 %ind + ret i32* %arrayidx +} + +define i32* @test_gep_i64_const(i32 *%arr) { +; X64_GISEL-LABEL: test_gep_i64_const: +; X64_GISEL: # BB#0: +; X64_GISEL-NEXT: movq $20, %rax +; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax +; X64_GISEL-NEXT: retq +; +; X64-LABEL: test_gep_i64_const: +; X64: # BB#0: +; X64-NEXT: leaq 20(%rdi), %rax +; X64-NEXT: retq + %arrayidx = getelementptr i32, i32* %arr, i64 5 + ret i32* %arrayidx +} + diff --git a/test/CodeGen/X86/GlobalISel/legalize-gep.mir b/test/CodeGen/X86/GlobalISel/legalize-gep.mir new file mode 100644 index 0000000000000..4fdb9b910ad78 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-gep.mir @@ -0,0 +1,101 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s + +--- | + define void @test_gep_i8() { + %arrayidx = getelementptr i32, i32* undef, i8 5 + ret void + } + + define void @test_gep_i16() { + %arrayidx = getelementptr i32, i32* undef, i16 5 + ret void + } + + define void @test_gep_i32() { + %arrayidx = getelementptr i32, i32* undef, i32 5 + ret void + } + + define void @test_gep_i64() { + %arrayidx = getelementptr i32, i32* undef, i64 5 + ret void + } +... +--- +name: test_gep_i8 +# CHECK-LABEL: name: test_gep_i8 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s8) = G_CONSTANT i8 20 +# CHECK-NEXT: %3(s32) = G_SEXT %1(s8) +# CHECK-NEXT: %2(p0) = G_GEP %0, %3(s32) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s8) = G_CONSTANT i8 20 + %2(p0) = G_GEP %0, %1(s8) + RET 0 +... +--- +name: test_gep_i16 +# CHECK-LABEL: name: test_gep_i16 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s16) = G_CONSTANT i16 20 +# CHECK-NEXT: %3(s32) = G_SEXT %1(s16) +# CHECK-NEXT: %2(p0) = G_GEP %0, %3(s32) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s16) = G_CONSTANT i16 20 + %2(p0) = G_GEP %0, %1(s16) + RET 0 +... +--- +name: test_gep_i32 +# CHECK-LABEL: name: test_gep_i32 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s32) = G_CONSTANT i32 20 +# CHECK-NEXT: %2(p0) = G_GEP %0, %1(s32) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s32) = G_CONSTANT i32 20 + %2(p0) = G_GEP %0, %1(s32) + RET 0 +... +--- +name: test_gep_i64 +# CHECK-LABEL: name: test_gep_i64 +legalized: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: %0(p0) = IMPLICIT_DEF +# CHECK-NEXT: %1(s64) = G_CONSTANT i64 20 +# CHECK-NEXT: %2(p0) = G_GEP %0, %1(s64) +# CHECK-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s64) = G_CONSTANT i64 20 + %2(p0) = G_GEP %0, %1(s64) + RET 0 +... 
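One detail worth spelling out from the legalize-gep.mir test above: indices narrower than the legal offset width are widened with G_SEXT rather than G_ZEXT, because a GEP index is a signed offset. A hypothetical C analogue of why sign extension is the correct widening for a negative narrow index (the array and values here are made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void) {
    int32_t arr[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    int32_t *p = &arr[4];
    int8_t idx = -3;                      /* narrow, negative index */
    /* Sign extension keeps the offset negative: arr[4 - 3] == arr[1]. */
    printf("%d\n", *(p + (int64_t)idx));
    /* Zero extension would turn -3 into 253 and index far past the
       object (arr[4 + 253], undefined behavior), so it is only shown
       here in a comment. */
    return 0;
}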
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir new file mode 100644 index 0000000000000..0d66a63841071 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir @@ -0,0 +1,115 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s + +--- | + define i16 @test_mul_i16(i16 %arg1, i16 %arg2) { + %ret = mul i16 %arg1, %arg2 + ret i16 %ret + } + + define i32 @test_mul_i32(i32 %arg1, i32 %arg2) { + %ret = mul i32 %arg1, %arg2 + ret i32 %ret + } + + define i64 @test_mul_i64(i64 %arg1, i64 %arg2) { + %ret = mul i64 %arg1, %arg2 + ret i64 %ret + } + +... +--- +name: test_mul_i16 +# CHECK-LABEL: name: test_mul_i16 +alignment: 4 +legalized: false +regBankSelected: false +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: _ } +# CHECK-NEXT: - { id: 1, class: _ } +# CHECK-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: body: | +# CHECK-NEXT: bb.0 (%ir-block.0): +# CHECK-NEXT: %0(s16) = COPY %edi +# CHECK-NEXT: %1(s16) = COPY %esi +# CHECK-NEXT: %2(s16) = G_MUL %0, %1 +# CHECK-NEXT: %ax = COPY %2(s16) +# CHECK-NEXT: RET 0, implicit %ax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s16) = COPY %edi + %1(s16) = COPY %esi + %2(s16) = G_MUL %0, %1 + %ax = COPY %2(s16) + RET 0, implicit %ax + +... +--- +name: test_mul_i32 +# CHECK-LABEL: name: test_mul_i32 +alignment: 4 +legalized: false +regBankSelected: false +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: _ } +# CHECK-NEXT: - { id: 1, class: _ } +# CHECK-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: body: | +# CHECK-NEXT: bb.0 (%ir-block.0): +# CHECK-NEXT: %0(s32) = COPY %edi +# CHECK-NEXT: %1(s32) = COPY %esi +# CHECK-NEXT: %2(s32) = G_MUL %0, %1 +# CHECK-NEXT: %eax = COPY %2(s32) +# CHECK-NEXT: RET 0, implicit %eax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s32) = COPY %edi + %1(s32) = COPY %esi + %2(s32) = G_MUL %0, %1 + %eax = COPY %2(s32) + RET 0, implicit %eax + +... +--- +name: test_mul_i64 +# CHECK-LABEL: name: test_mul_i64 +alignment: 4 +legalized: false +regBankSelected: false +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: _ } +# CHECK-NEXT: - { id: 1, class: _ } +# CHECK-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# CHECK: body: | +# CHECK-NEXT: bb.0 (%ir-block.0): +# CHECK-NEXT: %0(s64) = COPY %rdi +# CHECK-NEXT: %1(s64) = COPY %rsi +# CHECK-NEXT: %2(s64) = G_MUL %0, %1 +# CHECK-NEXT: %rax = COPY %2(s64) +# CHECK-NEXT: RET 0, implicit %rax +body: | + bb.1 (%ir-block.0): + liveins: %rdi, %rsi + + %0(s64) = COPY %rdi + %1(s64) = COPY %rsi + %2(s64) = G_MUL %0, %1 + %rax = COPY %2(s64) + RET 0, implicit %rax + +... 
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir new file mode 100644 index 0000000000000..be62832b008a0 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir @@ -0,0 +1,111 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL +--- | + define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #0 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #1 { + %ret = mul <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret + } + + attributes #0 = { "target-features"="+sse4.1" } + attributes #1 = { "target-features"="+sse4.1,+avx512vl,+avx512f,+avx512dq" } + +... +--- +name: test_mul_v8i16 +# ALL-LABEL: name: test_mul_v8i16 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s16>) = COPY %xmm0 +# ALL-NEXT: %1(<8 x s16>) = COPY %xmm1 +# ALL-NEXT: %2(<8 x s16>) = G_MUL %0, %1 +# ALL-NEXT: %xmm0 = COPY %2(<8 x s16>) +# ALL-NEXT: RET 0, implicit %xmm0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v4i32 +# ALL-LABEL: name: test_mul_v4i32 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<4 x s32>) = COPY %xmm0 +# ALL-NEXT: %1(<4 x s32>) = COPY %xmm1 +# ALL-NEXT: %2(<4 x s32>) = G_MUL %0, %1 +# ALL-NEXT: %xmm0 = COPY %2(<4 x s32>) +# ALL-NEXT: RET 0, implicit %xmm0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v2i64 +# ALL-LABEL: name: test_mul_v2i64 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<2 x s64>) = COPY %xmm0 +# ALL-NEXT: %1(<2 x s64>) = COPY %xmm1 +# ALL-NEXT: %2(<2 x s64>) = G_MUL %0, %1 +# ALL-NEXT: %xmm0 = COPY %2(<2 x s64>) +# ALL-NEXT: RET 0, implicit %xmm0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = COPY %xmm0 + %1(<2 x s64>) = COPY %xmm1 + %2(<2 x s64>) = G_MUL %0, %1 + %xmm0 = COPY %2(<2 x s64>) + RET 0, implicit %xmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir new file mode 100644 index 0000000000000..d99303c3ba3b4 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir @@ -0,0 +1,111 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL +--- | + define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #0 { + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #0 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #1 { + %ret = mul <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret + } + + attributes #0 = { "target-features"="+avx2" } + attributes #1 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" } + +... +--- +name: test_mul_v16i16 +# ALL-LABEL: name: test_mul_v16i16 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s16>) = COPY %ymm0 +# ALL-NEXT: %1(<16 x s16>) = COPY %ymm1 +# ALL-NEXT: %2(<16 x s16>) = G_MUL %0, %1 +# ALL-NEXT: %ymm0 = COPY %2(<16 x s16>) +# ALL-NEXT: RET 0, implicit %ymm0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_MUL %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v8i32 +# ALL-LABEL: name: test_mul_v8i32 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s32>) = COPY %ymm0 +# ALL-NEXT: %1(<8 x s32>) = COPY %ymm1 +# ALL-NEXT: %2(<8 x s32>) = G_MUL %0, %1 +# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>) +# ALL-NEXT: RET 0, implicit %ymm0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_MUL %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v4i64 +# ALL-LABEL: name: test_mul_v4i64 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<4 x s64>) = COPY %ymm0 +# ALL-NEXT: %1(<4 x s64>) = COPY %ymm1 +# ALL-NEXT: %2(<4 x s64>) = G_MUL %0, %1 +# ALL-NEXT: %ymm0 = COPY %2(<4 x s64>) +# ALL-NEXT: RET 0, implicit %ymm0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = COPY %ymm0 + %1(<4 x s64>) = COPY %ymm1 + %2(<4 x s64>) = G_MUL %0, %1 + %ymm0 = COPY %2(<4 x s64>) + RET 0, implicit %ymm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir new file mode 100644 index 0000000000000..24eefd30c2ac8 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir @@ -0,0 +1,113 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL + +--- | + define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 { + %ret = mul <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret + } + + define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 { + %ret = mul <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret + } + + define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #2 { + %ret = mul <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret + } + + attributes #0 = { "target-features"="+avx512f,+avx512bw" } + attributes #1 = { "target-features"="+avx512f" } + attributes #2 = { "target-features"="+avx512f,+avx512dq" } + +... +--- +name: test_mul_v32i16 +# ALL-LABEL: name: test_mul_v32i16 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<32 x s16>) = COPY %zmm0 +# ALL-NEXT: %1(<32 x s16>) = COPY %zmm1 +# ALL-NEXT: %2(<32 x s16>) = G_MUL %0, %1 +# ALL-NEXT: %zmm0 = COPY %2(<32 x s16>) +# ALL-NEXT: RET 0, implicit %zmm0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = COPY %zmm0 + %1(<32 x s16>) = COPY %zmm1 + %2(<32 x s16>) = G_MUL %0, %1 + %zmm0 = COPY %2(<32 x s16>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v16i32 +# ALL-LABEL: name: test_mul_v16i32 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s32>) = COPY %zmm0 +# ALL-NEXT: %1(<16 x s32>) = COPY %zmm1 +# ALL-NEXT: %2(<16 x s32>) = G_MUL %0, %1 +# ALL-NEXT: %zmm0 = COPY %2(<16 x s32>) +# ALL-NEXT: RET 0, implicit %zmm0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = COPY %zmm0 + %1(<16 x s32>) = COPY %zmm1 + %2(<16 x s32>) = G_MUL %0, %1 + %zmm0 = COPY %2(<16 x s32>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v8i64 +# ALL-LABEL: name: test_mul_v8i64 +alignment: 4 +legalized: false +regBankSelected: false +# ALL: registers: +# ALL-NEXT: - { id: 0, class: _ } +# ALL-NEXT: - { id: 1, class: _ } +# ALL-NEXT: - { id: 2, class: _ } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s64>) = COPY %zmm0 +# ALL-NEXT: %1(<8 x s64>) = COPY %zmm1 +# ALL-NEXT: %2(<8 x s64>) = G_MUL %0, %1 +# ALL-NEXT: %zmm0 = COPY %2(<8 x s64>) +# ALL-NEXT: RET 0, implicit %zmm0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = COPY %zmm0 + %1(<8 x s64>) = COPY %zmm1 + %2(<8 x s64>) = G_MUL %0, %1 + %zmm0 = COPY %2(<8 x s64>) + RET 0, implicit %zmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/test/CodeGen/X86/GlobalISel/mul-scalar.ll new file mode 100644 index 0000000000000..529e81c43304b --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/mul-scalar.ll @@ -0,0 +1,39 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 + +;TODO: instruction selection not supported yet +;define i8 @test_mul_i8(i8 %arg1, i8 %arg2) { +; %ret = mul i8 %arg1, %arg2 +; ret i8 %ret +;} + +define i16 @test_mul_i16(i16 %arg1, i16 %arg2) { +; X64-LABEL: test_mul_i16: +; X64: # BB#0: +; X64-NEXT: imulw %di, %si +; X64-NEXT: movl %esi, %eax +; X64-NEXT: retq + %ret = mul i16 %arg1, %arg2 + ret i16 %ret +} + +define i32 @test_mul_i32(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_mul_i32: +; X64: # BB#0: +; X64-NEXT: imull %edi, %esi +; X64-NEXT: movl %esi, %eax +; X64-NEXT: retq + %ret = mul i32 %arg1, %arg2 + ret i32 %ret +} + +define i64 @test_mul_i64(i64 %arg1, i64 %arg2) { +; X64-LABEL: test_mul_i64: +; X64: # BB#0: +; X64-NEXT: imulq %rdi, %rsi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: retq + %ret = mul i64 %arg1, %arg2 + ret i64 %ret +} + diff --git a/test/CodeGen/X86/GlobalISel/mul-vec.ll b/test/CodeGen/X86/GlobalISel/mul-vec.ll new file mode 100644 index 0000000000000..83615a718528f --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/mul-vec.ll @@ -0,0 +1,84 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=SKX + +define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { +; SKX-LABEL: test_mul_v8i16: +; SKX: # BB#0: +; SKX-NEXT: vpmullw %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret +} + +define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { +; SKX-LABEL: test_mul_v4i32: +; SKX: # BB#0: +; SKX-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret +} + +define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) { +; SKX-LABEL: test_mul_v2i64: +; SKX: # BB#0: +; SKX-NEXT: vpmullq %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = mul <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret +} + +define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) { +; SKX-LABEL: test_mul_v16i16: +; SKX: # BB#0: +; SKX-NEXT: vpmullw %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret +} + +define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) { +; SKX-LABEL: test_mul_v8i32: +; SKX: # BB#0: +; SKX-NEXT: vpmulld %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret +} + +define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) { +; SKX-LABEL: test_mul_v4i64: +; SKX: # BB#0: +; SKX-NEXT: vpmullq %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = mul <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret +} + +define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) { +; SKX-LABEL: test_mul_v32i16: +; SKX: # BB#0: +; SKX-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = mul <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret +} + +define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) { +; SKX-LABEL: test_mul_v16i32: +; SKX: # BB#0: +; SKX-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = mul <16 x i32> %arg1, %arg2 + ret <16 x i32> 
%ret +} + +define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) { +; SKX-LABEL: test_mul_v8i64: +; SKX: # BB#0: +; SKX-NEXT: vpmullq %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = mul <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret +} + diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir new file mode 100644 index 0000000000000..446db56b992c5 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir @@ -0,0 +1,31 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY + +--- | + define void @test_mul_vec256() { + ret void + } +... +--- +name: test_mul_vec256 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_mul_vec256 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<8 x s32>) = IMPLICIT_DEF + %1(<8 x s32>) = G_MUL %0, %0 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir new file mode 100644 index 0000000000000..f824ee12dcfb8 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir @@ -0,0 +1,33 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY + +--- | + + define void @test_mul_vec512() { + ret void + } + +... +--- +name: test_mul_vec512 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_mul_vec512 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<16 x s32>) = IMPLICIT_DEF + %1(<16 x s32>) = G_MUL %0, %0 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir index 8e04239041a87..3a65a9003773c 100644 --- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir +++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir @@ -27,6 +27,10 @@ ret i64 %ret } + define void @test_mul_gpr() { + ret void + } + define float @test_add_float(float %arg1, float %arg2) { %ret = fadd float %arg1, %arg2 ret float %ret @@ -110,6 +114,12 @@ ret void } + define void @test_gep() { + %p1 = getelementptr i32, i32* undef, i32 5 + %p2 = getelementptr i32, i32* undef, i64 5 + ret void + } + ... --- name: test_add_i8 @@ -220,6 +230,45 @@ body: | ... 
--- +name: test_mul_gpr +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_mul_gpr +# CHECK: registers: +# CHECK: - { id: 0, class: gpr } +# CHECK: - { id: 1, class: gpr } +# CHECK: - { id: 2, class: gpr } +# CHECK: - { id: 3, class: gpr } +# CHECK: - { id: 4, class: gpr } +# CHECK: - { id: 5, class: gpr } +# CHECK: - { id: 6, class: gpr } +# CHECK: - { id: 7, class: gpr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } + - { id: 4, class: _ } + - { id: 5, class: _ } + - { id: 6, class: _ } + - { id: 7, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(s64) = IMPLICIT_DEF + %1(s32) = IMPLICIT_DEF + %2(s16) = IMPLICIT_DEF + %3(s8) = IMPLICIT_DEF + %4(s64) = G_MUL %0, %0 + %5(s32) = G_MUL %1, %1 + %6(s16) = G_MUL %2, %2 + %7(s8) = G_MUL %3, %3 + RET 0 +... +--- name: test_add_float alignment: 4 legalized: true @@ -660,3 +709,29 @@ body: | RET 0 ... +--- +name: test_gep +legalized: true +# CHECK-LABEL: name: test_gep +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gpr } +# CHECK-NEXT: - { id: 1, class: gpr } +# CHECK-NEXT: - { id: 2, class: gpr } +# CHECK-NEXT: - { id: 3, class: gpr } +# CHECK-NEXT: - { id: 4, class: gpr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } + - { id: 4, class: _ } +body: | + bb.0 (%ir-block.0): + %0(p0) = IMPLICIT_DEF + %1(s32) = G_CONSTANT i32 20 + %2(p0) = G_GEP %0, %1(s32) + %3(s64) = G_CONSTANT i64 20 + %4(p0) = G_GEP %0, %3(s64) + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/select-gep.mir b/test/CodeGen/X86/GlobalISel/select-gep.mir new file mode 100644 index 0000000000000..2c89b7057c3d2 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-gep.mir @@ -0,0 +1,37 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK + +--- | + define i32* @test_gep_i32(i32* %arr) { + %arrayidx = getelementptr i32, i32* %arr, i32 5 + ret i32* %arrayidx + } +... +--- +name: test_gep_i32 +alignment: 4 +legalized: true +regBankSelected: true +selected: false +# CHECK-LABEL: name: test_gep_i32 +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr64_nosp } +# CHECK-NEXT: - { id: 2, class: gr64 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# CHECK: body: +# CHECK: %1 = MOV64ri32 20 +# CHECK-NEXT: %2 = LEA64r %0, 1, %1, 0, _ +body: | + bb.1 (%ir-block.0): + liveins: %rdi + + %0(p0) = COPY %rdi + %1(s64) = G_CONSTANT i64 20 + %2(p0) = G_GEP %0, %1(s64) + %rax = COPY %2(p0) + RET 0, implicit %rax + +... diff --git a/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir b/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir new file mode 100644 index 0000000000000..34a77acc2d1e9 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir @@ -0,0 +1,112 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL + +--- | + define i16 @test_mul_i16(i16 %arg1, i16 %arg2) { + %ret = mul i16 %arg1, %arg2 + ret i16 %ret + } + + define i32 @test_mul_i32(i32 %arg1, i32 %arg2) { + %ret = mul i32 %arg1, %arg2 + ret i32 %ret + } + + define i64 @test_mul_i64(i64 %arg1, i64 %arg2) { + %ret = mul i64 %arg1, %arg2 + ret i64 %ret + } + +... 
+--- +name: test_mul_i16 +# ALL-LABEL: name: test_mul_i16 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr16 } +# ALL-NEXT: - { id: 1, class: gr16 } +# ALL-NEXT: - { id: 2, class: gr16 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: body: | +# ALL: %0 = COPY %di +# ALL-NEXT: %1 = COPY %si +# ALL-NEXT: %2 = IMUL16rr %0, %1, implicit-def %eflags +# ALL-NEXT: %ax = COPY %2 +# ALL-NEXT: RET 0, implicit %ax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s16) = COPY %edi + %1(s16) = COPY %esi + %2(s16) = G_MUL %0, %1 + %ax = COPY %2(s16) + RET 0, implicit %ax + +... +--- +name: test_mul_i32 +# ALL-LABEL: name: test_mul_i32 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr32 } +# ALL-NEXT: - { id: 1, class: gr32 } +# ALL-NEXT: - { id: 2, class: gr32 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: body: | +# ALL: %0 = COPY %edi +# ALL-NEXT: %1 = COPY %esi +# ALL-NEXT: %2 = IMUL32rr %0, %1, implicit-def %eflags +# ALL-NEXT: %eax = COPY %2 +# ALL-NEXT: RET 0, implicit %eax +body: | + bb.1 (%ir-block.0): + liveins: %edi, %esi + + %0(s32) = COPY %edi + %1(s32) = COPY %esi + %2(s32) = G_MUL %0, %1 + %eax = COPY %2(s32) + RET 0, implicit %eax + +... +--- +name: test_mul_i64 +# ALL-LABEL: name: test_mul_i64 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: gr64 } +# ALL-NEXT: - { id: 1, class: gr64 } +# ALL-NEXT: - { id: 2, class: gr64 } +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } +# ALL: body: | +# ALL: %0 = COPY %rdi +# ALL-NEXT: %1 = COPY %rsi +# ALL-NEXT: %2 = IMUL64rr %0, %1, implicit-def %eflags +# ALL-NEXT: %rax = COPY %2 +# ALL-NEXT: RET 0, implicit %rax +body: | + bb.1 (%ir-block.0): + liveins: %rdi, %rsi + + %0(s64) = COPY %rdi + %1(s64) = COPY %rsi + %2(s64) = G_MUL %0, %1 + %rax = COPY %2(s64) + RET 0, implicit %rax + +... 
diff --git a/test/CodeGen/X86/GlobalISel/select-mul-vec.mir b/test/CodeGen/X86/GlobalISel/select-mul-vec.mir new file mode 100644 index 0000000000000..5f8ab1e4f1896 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-mul-vec.mir @@ -0,0 +1,480 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + +--- | + define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <8 x i16> @test_mul_v8i16_avx(<8 x i16> %arg1, <8 x i16> %arg2) #1 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <8 x i16> @test_mul_v8i16_avx512bwvl(<8 x i16> %arg1, <8 x i16> %arg2) #2 { + %ret = mul <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #3 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <4 x i32> @test_mul_v4i32_avx(<4 x i32> %arg1, <4 x i32> %arg2) #1 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <4 x i32> @test_mul_v4i32_avx512vl(<4 x i32> %arg1, <4 x i32> %arg2) #4 { + %ret = mul <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #5 { + %ret = mul <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret + } + + define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #6 { + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <16 x i16> @test_mul_v16i16_avx512bwvl(<16 x i16> %arg1, <16 x i16> %arg2) #2 { + %ret = mul <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #6 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <8 x i32> @test_mul_v8i32_avx512vl(<8 x i32> %arg1, <8 x i32> %arg2) #4 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #5 { + %ret = mul <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret + } + + define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #7 { + %ret = mul <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret + } + + define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #8 { + %ret = mul <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret + } + + define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #9 { + %ret = mul <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret + } + + attributes #0 = { "target-features"="+sse2" } + attributes #1 = { "target-features"="+avx" } + attributes #2 = { "target-features"="+avx512vl,+avx512f,+avx512bw" } + attributes #3 = { "target-features"="+sse4.1" } + attributes #4 = { "target-features"="+avx512vl,+avx512f" } + attributes #5 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" } + attributes #6 = { "target-features"="+avx2" } + attributes #7 = { "target-features"="+avx512f,+avx512bw" } + attributes #8 = { "target-features"="+avx512f" } + attributes #9 = { "target-features"="+avx512f,+avx512dq" } + +... 
+--- +name: test_mul_v8i16 +# CHECK-LABEL: name: test_mul_v8i16 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = PMULLWrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v8i16_avx +# CHECK-LABEL: name: test_mul_v8i16_avx +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v8i16_avx512bwvl +# CHECK-LABEL: name: test_mul_v8i16_avx512bwvl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128x } +# CHECK-NEXT: - { id: 1, class: vr128x } +# CHECK-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_MUL %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v4i32 +# CHECK-LABEL: name: test_mul_v4i32 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = PMULLDrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v4i32_avx +# CHECK-LABEL: name: test_mul_v4i32_avx +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128 } +# CHECK-NEXT: - { id: 1, class: vr128 } +# CHECK-NEXT: - { id: 2, class: vr128 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... 
+--- +name: test_mul_v4i32_avx512vl +# CHECK-LABEL: name: test_mul_v4i32_avx512vl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128x } +# CHECK-NEXT: - { id: 1, class: vr128x } +# CHECK-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_MUL %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v2i64 +# CHECK-LABEL: name: test_mul_v2i64 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr128x } +# CHECK-NEXT: - { id: 1, class: vr128x } +# CHECK-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLQZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = COPY %xmm0 + %1(<2 x s64>) = COPY %xmm1 + %2(<2 x s64>) = G_MUL %0, %1 + %xmm0 = COPY %2(<2 x s64>) + RET 0, implicit %xmm0 + +... +--- +name: test_mul_v16i16 +# CHECK-LABEL: name: test_mul_v16i16 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256 } +# CHECK-NEXT: - { id: 1, class: vr256 } +# CHECK-NEXT: - { id: 2, class: vr256 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWYrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_MUL %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v16i16_avx512bwvl +# CHECK-LABEL: name: test_mul_v16i16_avx512bwvl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256x } +# CHECK-NEXT: - { id: 1, class: vr256x } +# CHECK-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_MUL %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v8i32 +# CHECK-LABEL: name: test_mul_v8i32 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256 } +# CHECK-NEXT: - { id: 1, class: vr256 } +# CHECK-NEXT: - { id: 2, class: vr256 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDYrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_MUL %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... 
+--- +name: test_mul_v8i32_avx512vl +# CHECK-LABEL: name: test_mul_v8i32_avx512vl +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256x } +# CHECK-NEXT: - { id: 1, class: vr256x } +# CHECK-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_MUL %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v4i64 +# CHECK-LABEL: name: test_mul_v4i64 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr256x } +# CHECK-NEXT: - { id: 1, class: vr256x } +# CHECK-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLQZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = COPY %ymm0 + %1(<4 x s64>) = COPY %ymm1 + %2(<4 x s64>) = G_MUL %0, %1 + %ymm0 = COPY %2(<4 x s64>) + RET 0, implicit %ymm0 + +... +--- +name: test_mul_v32i16 +# CHECK-LABEL: name: test_mul_v32i16 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr512 } +# CHECK-NEXT: - { id: 1, class: vr512 } +# CHECK-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLWZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = COPY %zmm0 + %1(<32 x s16>) = COPY %zmm1 + %2(<32 x s16>) = G_MUL %0, %1 + %zmm0 = COPY %2(<32 x s16>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v16i32 +# CHECK-LABEL: name: test_mul_v16i32 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr512 } +# CHECK-NEXT: - { id: 1, class: vr512 } +# CHECK-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLDZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = COPY %zmm0 + %1(<16 x s32>) = COPY %zmm1 + %2(<16 x s32>) = G_MUL %0, %1 + %zmm0 = COPY %2(<16 x s32>) + RET 0, implicit %zmm0 + +... +--- +name: test_mul_v8i64 +# CHECK-LABEL: name: test_mul_v8i64 +alignment: 4 +legalized: true +regBankSelected: true +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: vr512 } +# CHECK-NEXT: - { id: 1, class: vr512 } +# CHECK-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# CHECK: %2 = VPMULLQZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = COPY %zmm0 + %1(<8 x s64>) = COPY %zmm1 + %2(<8 x s64>) = G_MUL %0, %1 + %zmm0 = COPY %2(<8 x s64>) + RET 0, implicit %zmm0 + +... 
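Ahead of the addcarry.ll diff below: the new readd test implements a 256-bit addition as four 64-bit limbs chained through i128 zero-extends, which the CHECK lines expect to lower to an addq followed by adcq carry adds. A rough C equivalent of that limb-and-carry pattern (a sketch using the Clang/GCC unsigned __int128 extension; the struct mirrors the test's %S type, and everything here is illustrative rather than code from the commit):

#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t v[4]; } S;    /* mirrors %S = type { [4 x i64] } */

S readd(const S *a, S b) {
    S r;
    unsigned __int128 carry = 0;
    for (int i = 0; i < 4; ++i) {
        /* Widen both limbs to 128 bits, add, keep the low 64 bits, and
           carry the high part into the next limb -- the same
           zext/add/trunc/lshr chain the test spells out limb by limb. */
        unsigned __int128 sum = carry + a->v[i] + b.v[i];
        r.v[i] = (uint64_t)sum;
        carry = sum >> 64;
    }
    return r;
}

int main(void) {
    S a = {{ UINT64_MAX, UINT64_MAX, 0, 0 }};
    S b = {{ 1, 0, 0, 0 }};
    S r = readd(&a, b);                 /* carries ripple: 0, 0, 1, 0 */
    printf("%llu %llu %llu %llu\n", (unsigned long long)r.v[0],
           (unsigned long long)r.v[1], (unsigned long long)r.v[2],
           (unsigned long long)r.v[3]);
    return 0;
}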
diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll index 5e95cd832789b..be550e3fe2d16 100644 --- a/test/CodeGen/X86/addcarry.ll +++ b/test/CodeGen/X86/addcarry.ll @@ -204,3 +204,70 @@ entry: %6 = add i64 %4, %5 ret i64 %6 } + +%S = type { [4 x i64] } + +define %S @readd(%S* nocapture readonly %this, %S %arg.b) { +; CHECK-LABEL: readd: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: addq (%rsi), %rdx +; CHECK-NEXT: movq 8(%rsi), %r10 +; CHECK-NEXT: adcq $0, %r10 +; CHECK-NEXT: sbbq %rax, %rax +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: addq %rcx, %r10 +; CHECK-NEXT: adcq 16(%rsi), %rax +; CHECK-NEXT: sbbq %rcx, %rcx +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: addq %r8, %rax +; CHECK-NEXT: adcq 24(%rsi), %rcx +; CHECK-NEXT: addq %r9, %rcx +; CHECK-NEXT: movq %rdx, (%rdi) +; CHECK-NEXT: movq %r10, 8(%rdi) +; CHECK-NEXT: movq %rax, 16(%rdi) +; CHECK-NEXT: movq %rcx, 24(%rdi) +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: retq +entry: + %0 = extractvalue %S %arg.b, 0 + %.elt6 = extractvalue [4 x i64] %0, 1 + %.elt8 = extractvalue [4 x i64] %0, 2 + %.elt10 = extractvalue [4 x i64] %0, 3 + %.elt = extractvalue [4 x i64] %0, 0 + %1 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 0 + %2 = load i64, i64* %1, align 8 + %3 = zext i64 %2 to i128 + %4 = zext i64 %.elt to i128 + %5 = add nuw nsw i128 %3, %4 + %6 = trunc i128 %5 to i64 + %7 = lshr i128 %5, 64 + %8 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1 + %9 = load i64, i64* %8, align 8 + %10 = zext i64 %9 to i128 + %11 = add nuw nsw i128 %7, %10 + %12 = zext i64 %.elt6 to i128 + %13 = add nuw nsw i128 %11, %12 + %14 = trunc i128 %13 to i64 + %15 = lshr i128 %13, 64 + %16 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2 + %17 = load i64, i64* %16, align 8 + %18 = zext i64 %17 to i128 + %19 = add nuw nsw i128 %15, %18 + %20 = zext i64 %.elt8 to i128 + %21 = add nuw nsw i128 %19, %20 + %22 = lshr i128 %21, 64 + %23 = trunc i128 %21 to i64 + %24 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 3 + %25 = load i64, i64* %24, align 8 + %26 = zext i64 %25 to i128 + %27 = add nuw nsw i128 %22, %26 + %28 = zext i64 %.elt10 to i128 + %29 = add nuw nsw i128 %27, %28 + %30 = trunc i128 %29 to i64 + %31 = insertvalue [4 x i64] undef, i64 %6, 0 + %32 = insertvalue [4 x i64] %31, i64 %14, 1 + %33 = insertvalue [4 x i64] %32, i64 %23, 2 + %34 = insertvalue [4 x i64] %33, i64 %30, 3 + %35 = insertvalue %S undef, [4 x i64] %34, 0 + ret %S %35 +} diff --git a/test/CodeGen/X86/avx-isa-check.ll b/test/CodeGen/X86/avx-isa-check.ll index dffc8078e44f9..5d66dfde0bc67 100644 --- a/test/CodeGen/X86/avx-isa-check.ll +++ b/test/CodeGen/X86/avx-isa-check.ll @@ -680,3 +680,8 @@ define <4 x double> @_inreg4xdouble(double %a) { %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer ret <4 x double> %c } + +define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #0 { + %ret = mul <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret +} diff --git a/test/CodeGen/X86/avx1-logical-load-folding.ll b/test/CodeGen/X86/avx1-logical-load-folding.ll index 90e00c965391e..7073eb2247632 100644 --- a/test/CodeGen/X86/avx1-logical-load-folding.ll +++ b/test/CodeGen/X86/avx1-logical-load-folding.ll @@ -1,10 +1,26 @@ -; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s - -target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-apple-macosx10.9.0" +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -O3 
-disable-peephole -mtriple=i686-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -O3 -disable-peephole -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X64 ; Function Attrs: nounwind ssp uwtable define void @test1(float* %A, float* %C) #0 { +; X86-LABEL: test1: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vandps LCPI0_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test1: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -13,12 +29,27 @@ define void @test1(float* %A, float* %C) #0 { %tmp6 = extractelement <8 x float> %tmp5, i32 0 store float %tmp6, float* %C ret void - - ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0 } ; Function Attrs: nounwind ssp uwtable define void @test2(float* %A, float* %C) #0 { +; X86-LABEL: test2: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vorps LCPI1_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test2: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -27,12 +58,27 @@ define void @test2(float* %A, float* %C) #0 { %tmp6 = extractelement <8 x float> %tmp5, i32 0 store float %tmp6, float* %C ret void - - ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0 } ; Function Attrs: nounwind ssp uwtable define void @test3(float* %A, float* %C) #0 { +; X86-LABEL: test3: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vxorps LCPI2_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test3: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -41,11 +87,26 @@ define void @test3(float* %A, float* %C) #0 { %tmp6 = extractelement <8 x float> %tmp5, i32 0 store float %tmp6, float* %C ret void - - ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0 } define void @test4(float* %A, float* %C) #0 { +; X86-LABEL: test4: +; X86: ## BB#0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: vmovaps (%ecx), %ymm0 +; X86-NEXT: vandnps LCPI3_0, %ymm0, %ymm0 +; X86-NEXT: vmovss %xmm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: test4: +; X64: ## BB#0: +; X64-NEXT: vmovaps (%rdi), %ymm0 +; X64-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vmovss %xmm0, (%rsi) +; X64-NEXT: vzeroupper +; X64-NEXT: retq %tmp1 = bitcast float* %A to <8 x float>* %tmp2 = load <8 x float>, <8 x 
float>* %tmp1, align 32 %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32> @@ -55,6 +116,4 @@ define void @test4(float* %A, float* %C) #0 { %tmp7 = extractelement <8 x float> %tmp6, i32 0 store float %tmp7, float * %C ret void - - ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0 } diff --git a/test/CodeGen/X86/avx2-schedule.ll b/test/CodeGen/X86/avx2-schedule.ll new file mode 100644 index 0000000000000..042bc217b97cf --- /dev/null +++ b/test/CodeGen/X86/avx2-schedule.ll @@ -0,0 +1,338 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1 + +define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) { +; HASWELL-LABEL: test_pabsb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pabsb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0) + %2 = load <32 x i8>, <32 x i8> *%a1, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2) + %4 = or <32 x i8> %1, %3 + ret <32 x i8> %4 +} +declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone + +define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) { +; HASWELL-LABEL: test_pabsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pabsd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0) + %2 = load <8 x i32>, <8 x i32> *%a1, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2) + %4 = or <8 x i32> %1, %3 + ret <8 x i32> %4 +} +declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone + +define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) { +; HASWELL-LABEL: test_pabsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pabsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0) + %2 = load <16 x i16>, <16 x i16> *%a1, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2) + %4 = or <16 x i16> %1, %3 + ret <16 x i16> %4 +} +declare <16 x i16> 
@llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone + +define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; HASWELL-LABEL: test_paddb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <32 x i8> %a0, %a1 + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = add <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; HASWELL-LABEL: test_paddd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <8 x i32> %a0, %a1 + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = add <8 x i32> %1, %2 + ret <8 x i32> %3 +} + +define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_paddq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = add <4 x i64> %1, %2 + ret <4 x i64> %3 +} + +define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; HASWELL-LABEL: test_paddw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_paddw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = add <16 x i16> %a0, %a1 + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = add <16 x i16> %1, %2 + ret <16 x i16> %3 +} + +define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_pand: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pand: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = and <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = and <4 x i64> %1, %2 + %4 = add <4 x i64> %3, %a1 + ret <4 x i64> %4 +} + +define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_pandn: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpandn %ymm1, %ymm0, 
%ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pandn: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1> + %2 = and <4 x i64> %a1, %1 + %3 = load <4 x i64>, <4 x i64> *%a2, align 32 + %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1> + %5 = and <4 x i64> %3, %4 + %6 = add <4 x i64> %2, %5 + ret <4 x i64> %6 +} + +define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; HASWELL-LABEL: test_pmulld: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00] +; HASWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pmulld: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = mul <8 x i32> %a0, %a1 + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = mul <8 x i32> %1, %2 + ret <8 x i32> %3 +} + +define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; HASWELL-LABEL: test_pmullw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pmullw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = mul <16 x i16> %a0, %a1 + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = mul <16 x i16> %1, %2 + ret <16 x i16> %3 +} + +define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_por: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_por: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = or <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = or <4 x i64> %1, %2 + %4 = add <4 x i64> %3, %a1 + ret <4 x i64> %4 +} + +define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; HASWELL-LABEL: test_psubb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <32 x i8> %a0, %a1 + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = sub <32 x i8> %1, %2 + ret <32 x i8> %3 +} + +define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; HASWELL-LABEL: test_psubd: +; 
HASWELL: # BB#0: +; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <8 x i32> %a0, %a1 + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = sub <8 x i32> %1, %2 + ret <8 x i32> %3 +} + +define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_psubq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = sub <4 x i64> %1, %2 + ret <4 x i64> %3 +} + +define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; HASWELL-LABEL: test_psubw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_psubw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = sub <16 x i16> %a0, %a1 + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = sub <16 x i16> %1, %2 + ret <16 x i16> %3 +} + +define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; HASWELL-LABEL: test_pxor: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33] +; HASWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [1:1.00] +; +; ZNVER1-LABEL: test_pxor: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: retq # sched: [4:1.00] + %1 = xor <4 x i64> %a0, %a1 + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = xor <4 x i64> %1, %2 + %4 = add <4 x i64> %3, %a1 + ret <4 x i64> %4 +} + +!0 = !{i32 1} diff --git a/test/CodeGen/X86/avx512vl-arith.ll b/test/CodeGen/X86/avx512vl-arith.ll index ef01d8656dac1..9c056cdee1966 100644 --- a/test/CodeGen/X86/avx512vl-arith.ll +++ b/test/CodeGen/X86/avx512vl-arith.ll @@ -1,36 +1,42 @@ -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl| FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding | FileCheck %s ; 256-bit -; CHECK-LABEL: vpaddq256_test -; CHECK: vpaddq %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpaddq256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i64> %i, %j ret <4 x i64> %x } -; CHECK-LABEL: vpaddq256_fold_test -; CHECK: vpaddq 
(%rdi), %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind { +; CHECK-LABEL: vpaddq256_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <4 x i64>, <4 x i64>* %j, align 4 %x = add <4 x i64> %i, %tmp ret <4 x i64> %x } -; CHECK-LABEL: vpaddq256_broadcast_test -; CHECK: vpaddq LCP{{.*}}(%rip){1to4}, %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind { +; CHECK-LABEL: vpaddq256_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI2_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i64> %i, <i64 1, i64 1, i64 1, i64 1> ret <4 x i64> %x } -; CHECK-LABEL: vpaddq256_broadcast2_test -; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind { +; CHECK-LABEL: vpaddq256_broadcast2_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %j = load i64, i64* %j.ptr %j.0 = insertelement <4 x i64> undef, i64 %j, i32 0 %j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer @@ -38,55 +44,68 @@ define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind ret <4 x i64> %x } -; CHECK-LABEL: vpaddd256_test -; CHECK: vpaddd %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpaddd256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <8 x i32> %i, %j ret <8 x i32> %x } -; CHECK-LABEL: vpaddd256_fold_test -; CHECK: vpaddd (%rdi), %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind { +; CHECK-LABEL: vpaddd256_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <8 x i32>, <8 x i32>* %j, align 4 %x = add <8 x i32> %i, %tmp ret <8 x i32> %x } -; CHECK-LABEL: vpaddd256_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind { +; CHECK-LABEL: vpaddd256_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI6_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ret <8 x i32> %x } -; CHECK-LABEL: vpaddd256_mask_test -; CHECK: vpaddd %ymm{{.*%k[1-7].*}} -; CHECK: ret define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_mask_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1] +; CHECK-NEXT: retq 
## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, %j %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_maskz_test -; CHECK: vpaddd %ymm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_maskz_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, %j %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_mask_fold_test -; CHECK: vpaddd (%rdi), %ymm{{.*%k[1-7]}} -; CHECK: ret define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_mask_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %j = load <8 x i32>, <8 x i32>* %j.ptr %x = add <8 x i32> %i, %j @@ -94,20 +113,27 @@ define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_mask_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]}}} -; CHECK: ret define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_mask_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI10_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i ret <8 x i32> %r } -; CHECK-LABEL: vpaddd256_maskz_fold_test -; CHECK: vpaddd (%rdi), %ymm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_maskz_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %j = load <8 x i32>, <8 x i32>* %j.ptr %x = add <8 x i32> %i, %j @@ -115,96 +141,111 @@ define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 ret <8 x i32> %r } -; CHECK-LABEL: 
vpaddd256_maskz_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd256_maskz_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI12_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer ret <8 x i32> %r } -; CHECK-LABEL: vpsubq256_test -; CHECK: vpsubq %ymm{{.*}} -; CHECK: ret define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpsubq256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfb,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <4 x i64> %i, %j ret <4 x i64> %x } -; CHECK-LABEL: vpsubd256_test -; CHECK: vpsubd %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpsubd256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <8 x i32> %i, %j ret <8 x i32> %x } -; CHECK-LABEL: vpmulld256_test -; CHECK: vpmulld %ymm{{.*}} -; CHECK: ret define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) { +; CHECK-LABEL: vpmulld256_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpmulld %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x40,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = mul <8 x i32> %i, %j ret <8 x i32> %x } -; CHECK-LABEL: test_vaddpd_256 -; CHECK: vaddpd{{.*}} -; CHECK: ret define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) { +; CHECK-LABEL: test_vaddpd_256: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0] +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <4 x double> %x, %y ret <4 x double> %add.i } -; CHECK-LABEL: test_fold_vaddpd_256 -; CHECK: vaddpd LCP{{.*}}(%rip){{.*}} -; CHECK: ret define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) { +; CHECK-LABEL: test_fold_vaddpd_256: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI17_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <4 x double> %y, <double 4.500000e+00, double 3.400000e+00, double 4.500000e+00, double 5.600000e+00> ret <4 x double> %add.i } -; CHECK-LABEL: test_broadcast_vaddpd_256 -; CHECK: LCP{{.*}}(%rip){1to8}, %ymm0, %ymm0 -; CHECK: ret define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind { +; CHECK-LABEL: test_broadcast_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI18_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: 
retq ## encoding: [0xc3] %b = fadd <8 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000> ret <8 x float> %b } -; CHECK-LABEL: test_mask_vaddps_256 -; CHECK: vaddps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fadd <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vmulps_256 -; CHECK: vmulps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fmul <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vminps_256 -; CHECK: vminps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %cmp_res = fcmp olt <8 x float> %i, %j %min = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j @@ -212,12 +253,13 @@ define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, ret <8 x float> %r } -; CHECK-LABEL: test_mask_vmaxps_256 -; CHECK: vmaxps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> 
%j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %cmp_res = fcmp ogt <8 x float> %i, %j %max = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j @@ -225,48 +267,52 @@ define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, ret <8 x float> %r } -; CHECK-LABEL: test_mask_vsubps_256 -; CHECK: vsubps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fsub <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vdivps_256 -; CHECK: vdivps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, - <8 x float> %j, <8 x i32> %mask1) - nounwind readnone { +define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivps_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <8 x i32> %mask1, zeroinitializer %x = fdiv <8 x float> %i, %j %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst ret <8 x float> %r } -; CHECK-LABEL: test_mask_vmulpd_256 -; CHECK: vmulpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fmul <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: 
test_mask_vminpd_256 -; CHECK: vminpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %cmp_res = fcmp olt <4 x double> %i, %j %min = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j @@ -274,12 +320,13 @@ define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, ret <4 x double> %r } -; CHECK-LABEL: test_mask_vmaxpd_256 -; CHECK: vmaxpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %cmp_res = fcmp ogt <4 x double> %i, %j %max = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j @@ -287,59 +334,65 @@ define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, ret <4 x double> %r } -; CHECK-LABEL: test_mask_vsubpd_256 -; CHECK: vsubpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fsub <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: test_mask_vdivpd_256 -; CHECK: vdivpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, 
%ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fdiv <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: test_mask_vaddpd_256 -; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double> %j, <4 x i64> %mask1) - nounwind readnone { +define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fadd <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst ret <4 x double> %r } -; CHECK-LABEL: test_maskz_vaddpd_256 -; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}}} -; CHECK: ret -define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, - <4 x i64> %mask1) nounwind readnone { +define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_maskz_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %x = fadd <4 x double> %i, %j %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer ret <4 x double> %r } -; CHECK-LABEL: test_mask_fold_vaddpd_256 -; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}.*}} -; CHECK: ret -define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, - <4 x double>* %j, <4 x i64> %mask1) - nounwind { +define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_fold_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load <4 x double>, <4 x double>* %j %x = fadd <4 x double> %i, %tmp @@ -347,11 +400,13 @@ define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> % ret <4 x double> %r } -; CHECK-LABEL: test_maskz_fold_vaddpd_256 -; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}.*}} -; CHECK: ret -define <4 x double> 
@test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, - <4 x i64> %mask1) nounwind { +define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_fold_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load <4 x double>, <4 x double>* %j %x = fadd <4 x double> %i, %tmp @@ -359,43 +414,46 @@ define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* % ret <4 x double> %r } -; CHECK-LABEL: test_broadcast2_vaddpd_256 -; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}} -; CHECK: ret define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind { +; CHECK-LABEL: test_broadcast2_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load double, double* %j %b = insertelement <4 x double> undef, double %tmp, i32 0 - %c = shufflevector <4 x double> %b, <4 x double> undef, - <4 x i32> zeroinitializer + %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer %x = fadd <4 x double> %c, %i ret <4 x double> %x } -; CHECK-LABEL: test_mask_broadcast_vaddpd_256 -; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]}.*}} -; CHECK: ret -define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, - double* %j, <4 x i64> %mask1) nounwind { +define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_broadcast_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm0, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc0] +; CHECK-NEXT: vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %b = insertelement <4 x double> undef, double %tmp, i32 0 - %c = shufflevector <4 x double> %b, <4 x double> undef, - <4 x i32> zeroinitializer + %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer %x = fadd <4 x double> %c, %i %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %i ret <4 x double> %r } -; CHECK-LABEL: test_maskz_broadcast_vaddpd_256 -; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret -define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, - <4 x i64> %mask1) nounwind { +define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_broadcast_vaddpd_256: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07] +; 
CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %b = insertelement <4 x double> undef, double %tmp, i32 0 - %c = shufflevector <4 x double> %b, <4 x double> undef, - <4 x i32> zeroinitializer + %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer %x = fadd <4 x double> %c, %i %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer ret <4 x double> %r @@ -403,27 +461,30 @@ define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j ; 128-bit -; CHECK-LABEL: vpaddq128_test -; CHECK: vpaddq %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpaddq128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <2 x i64> %i, %j ret <2 x i64> %x } -; CHECK-LABEL: vpaddq128_fold_test -; CHECK: vpaddq (%rdi), %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind { +; CHECK-LABEL: vpaddq128_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <2 x i64>, <2 x i64>* %j, align 4 %x = add <2 x i64> %i, %tmp ret <2 x i64> %x } -; CHECK-LABEL: vpaddq128_broadcast2_test -; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind { +; CHECK-LABEL: vpaddq128_broadcast2_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xd4,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load i64, i64* %j %j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0 %j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1 @@ -431,55 +492,68 @@ define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind { ret <2 x i64> %x } -; CHECK-LABEL: vpaddd128_test -; CHECK: vpaddd %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpaddd128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i32> %i, %j ret <4 x i32> %x } -; CHECK-LABEL: vpaddd128_fold_test -; CHECK: vpaddd (%rdi), %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind { +; CHECK-LABEL: vpaddd128_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load <4 x i32>, <4 x i32>* %j, align 4 %x = add <4 x i32> %i, %tmp ret <4 x i32> %x } -; CHECK-LABEL: vpaddd128_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind { +; CHECK-LABEL: vpaddd128_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI42_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1> ret <4 x i32> %x } -; CHECK-LABEL: vpaddd128_mask_test -; CHECK: vpaddd %xmm{{.*%k[1-7].*}} -; CHECK: ret define <4 x i32> 
@vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_mask_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, %j %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_maskz_test -; CHECK: vpaddd %xmm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_maskz_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, %j %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_mask_fold_test -; CHECK: vpaddd (%rdi), %xmm{{.*%k[1-7]}} -; CHECK: ret define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_mask_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %j = load <4 x i32>, <4 x i32>* %j.ptr %x = add <4 x i32> %i, %j @@ -487,20 +561,27 @@ define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_mask_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]}}} -; CHECK: ret define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_mask_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI46_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1> %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_maskz_fold_test -; CHECK: vpaddd (%rdi), %xmm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_maskz_fold_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: 
[0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %j = load <4 x i32>, <4 x i32>* %j.ptr %x = add <4 x i32> %i, %j @@ -508,96 +589,111 @@ define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 ret <4 x i32> %r } -; CHECK-LABEL: vpaddd128_maskz_broadcast_test -; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]} {z}}} -; CHECK: ret define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: vpaddd128_maskz_broadcast_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI48_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1> %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer ret <4 x i32> %r } -; CHECK-LABEL: vpsubq128_test -; CHECK: vpsubq %xmm{{.*}} -; CHECK: ret define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone { +; CHECK-LABEL: vpsubq128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <2 x i64> %i, %j ret <2 x i64> %x } -; CHECK-LABEL: vpsubd128_test -; CHECK: vpsubd %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone { +; CHECK-LABEL: vpsubd128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = sub <4 x i32> %i, %j ret <4 x i32> %x } -; CHECK-LABEL: vpmulld128_test -; CHECK: vpmulld %xmm{{.*}} -; CHECK: ret define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) { +; CHECK-LABEL: vpmulld128_test: +; CHECK: ## BB#0: +; CHECK-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x40,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %x = mul <4 x i32> %i, %j ret <4 x i32> %x } -; CHECK-LABEL: test_vaddpd_128 -; CHECK: vaddpd{{.*}} -; CHECK: ret define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) { +; CHECK-LABEL: test_vaddpd_128: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0] +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <2 x double> %x, %y ret <2 x double> %add.i } -; CHECK-LABEL: test_fold_vaddpd_128 -; CHECK: vaddpd LCP{{.*}}(%rip){{.*}} -; CHECK: ret define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) { +; CHECK-LABEL: test_fold_vaddpd_128: +; CHECK: ## BB#0: ## %entry +; CHECK-NEXT: vaddpd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI53_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] entry: %add.i = fadd <2 x double> %y, <double 4.500000e+00, double 3.400000e+00> ret <2 x double> %add.i } -; CHECK-LABEL: test_broadcast_vaddpd_128 -; CHECK: LCP{{.*}}(%rip){1to4}, %xmm0, 
%xmm0 -; CHECK: ret define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind { +; CHECK-LABEL: test_broadcast_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x58,0x05,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI54_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: retq ## encoding: [0xc3] %b = fadd <4 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000> ret <4 x float> %b } -; CHECK-LABEL: test_mask_vaddps_128 -; CHECK: vaddps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fadd <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst ret <4 x float> %r } -; CHECK-LABEL: test_mask_vmulps_128 -; CHECK: vmulps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fmul <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst ret <4 x float> %r } -; CHECK-LABEL: test_mask_vminps_128 -; CHECK: vminps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vminps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %cmp_res = fcmp olt <4 x float> %i, %j %min = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j @@ -605,12 +701,13 @@ define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, ret <4 x float> %r } -; CHECK-LABEL: test_mask_vmaxps_128 -; CHECK: vmaxps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, 
%xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %cmp_res = fcmp ogt <4 x float> %i, %j %max = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j @@ -618,12 +715,13 @@ define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, ret <4 x float> %r } -; CHECK-LABEL: test_mask_vsubps_128 -; CHECK: vsubps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fsub <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst @@ -631,36 +729,39 @@ define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, } -; CHECK-LABEL: test_mask_vdivps_128 -; CHECK: vdivps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, - <4 x float> %j, <4 x i32> %mask1) - nounwind readnone { +define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivps_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <4 x i32> %mask1, zeroinitializer %x = fdiv <4 x float> %i, %j %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst ret <4 x float> %r } -; CHECK-LABEL: test_mask_vmulpd_128 -; CHECK: vmulpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmulpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: 
[0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmulpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x59,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fmul <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_mask_vminpd_128 -; CHECK: vminpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vminpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vminpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5d,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %cmp_res = fcmp olt <2 x double> %i, %j %min = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j @@ -668,12 +769,13 @@ define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, ret <2 x double> %r } -; CHECK-LABEL: test_mask_vmaxpd_128 -; CHECK: vmaxpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vmaxpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vmaxpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5f,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %cmp_res = fcmp ogt <2 x double> %i, %j %max = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j @@ -681,46 +783,52 @@ define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, ret <2 x double> %r } -; CHECK-LABEL: test_mask_vsubpd_128 -; CHECK: vsubpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vsubpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vsubpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5c,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fsub <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_mask_vdivpd_128 -; CHECK: vdivpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x 
double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vdivpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vdivpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5e,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fdiv <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_mask_vaddpd_128 -; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}} -; CHECK: ret -define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double> %j, <2 x i64> %mask1) - nounwind readnone { +define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { +; CHECK-LABEL: test_mask_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] +; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] +; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fadd <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } -; CHECK-LABEL: test_maskz_vaddpd_128 -; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}}} -; CHECK: ret define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j, +; CHECK-LABEL: test_maskz_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] <2 x i64> %mask1) nounwind readnone { %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fadd <2 x double> %i, %j @@ -728,12 +836,13 @@ define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j, ret <2 x double> %r } -; CHECK-LABEL: test_mask_fold_vaddpd_128 -; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}.*}} -; CHECK: ret -define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, - <2 x double>* %j, <2 x i64> %mask1) - nounwind { +define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_fold_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] +; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04] +; CHECK-NEXT: vaddpd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load <2 x double>, <2 x double>* %j %x = fadd <2 x double> %i, %tmp @@ -741,11 +850,13 @@ define <2 x double> 
@test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> % ret <2 x double> %r } -; CHECK-LABEL: test_maskz_fold_vaddpd_128 -; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}.*}} -; CHECK: ret -define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, - <2 x i64> %mask1) nounwind { +define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_fold_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load <2 x double>, <2 x double>* %j %x = fadd <2 x double> %i, %tmp @@ -753,10 +864,11 @@ define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* % ret <2 x double> %r } -; CHECK-LABEL: test_broadcast2_vaddpd_128 -; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}} -; CHECK: ret define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind { +; CHECK-LABEL: test_broadcast2_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1 @@ -764,12 +876,14 @@ define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nou ret <2 x double> %x } -; CHECK-LABEL: test_mask_broadcast_vaddpd_128 -; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]}.*}} -; CHECK: ret -define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, - double* %j, <2 x i64> %mask1) - nounwind { +define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, double* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_mask_broadcast_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0] +; CHECK-NEXT: vpcmpneqq %xmm0, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xc8,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm1, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x19,0x58,0x0f] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 @@ -779,11 +893,13 @@ define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x doub ret <2 x double> %r } -; CHECK-LABEL: test_maskz_broadcast_vaddpd_128 -; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]} {z}.*}} -; CHECK: ret -define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, - <2 x i64> %mask1) nounwind { +define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, <2 x i64> %mask1) nounwind { +; CHECK-LABEL: test_maskz_broadcast_vaddpd_128: +; CHECK: ## BB#0: +; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] +; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04] +; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} 
## encoding: [0x62,0xf1,0xfd,0x99,0x58,0x07] +; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 diff --git a/test/CodeGen/X86/branchfolding-undef.mir b/test/CodeGen/X86/branchfolding-undef.mir index 0da167b332579..1a7dfb941875f 100644 --- a/test/CodeGen/X86/branchfolding-undef.mir +++ b/test/CodeGen/X86/branchfolding-undef.mir @@ -16,7 +16,6 @@ name: func tracksRegLiveness: true body: | bb.0: - successors: %bb.1, %bb.2 JE_1 %bb.1, implicit undef %eflags JMP_1 %bb.2 diff --git a/test/CodeGen/X86/build-vector-128.ll b/test/CodeGen/X86/build-vector-128.ll new file mode 100644 index 0000000000000..8c3a6790ffa6c --- /dev/null +++ b/test/CodeGen/X86/build-vector-128.ll @@ -0,0 +1,428 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE-32 --check-prefix=SSE2-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE-64 --check-prefix=SSE2-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE-32 --check-prefix=SSE41-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE-64 --check-prefix=SSE41-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64 + +define <2 x double> @test_buildvector_v2f64(double %a0, double %a1) { +; SSE-32-LABEL: test_buildvector_v2f64: +; SSE-32: # BB#0: +; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0 +; SSE-32-NEXT: retl +; +; SSE-64-LABEL: test_buildvector_v2f64: +; SSE-64: # BB#0: +; SSE-64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v2f64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v2f64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-64-NEXT: retq + %ins0 = insertelement <2 x double> undef, double %a0, i32 0 + %ins1 = insertelement <2 x double> %ins0, double %a1, i32 1 + ret <2 x double> %ins1 +} + +define <4 x float> @test_buildvector_v4f32(float %a0, float %a1, float %a2, float %a3) { +; SSE-32-LABEL: test_buildvector_v4f32: +; SSE-32: # BB#0: +; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0 +; SSE-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v4f32: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-64-NEXT: retq +; +; SSE41-64-LABEL: test_buildvector_v4f32: +; SSE41-64: # BB#0: +; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v4f32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups 
{{[0-9]+}}(%esp), %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v4f32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; AVX-64-NEXT: retq + %ins0 = insertelement <4 x float> undef, float %a0, i32 0 + %ins1 = insertelement <4 x float> %ins0, float %a1, i32 1 + %ins2 = insertelement <4 x float> %ins1, float %a2, i32 2 + %ins3 = insertelement <4 x float> %ins2, float %a3, i32 3 + ret <4 x float> %ins3 +} + +define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) { +; SSE2-32-LABEL: test_buildvector_v2i64: +; SSE2-32: # BB#0: +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-32-NEXT: retl +; +; SSE-64-LABEL: test_buildvector_v2i64: +; SSE-64: # BB#0: +; SSE-64-NEXT: movq %rsi, %xmm1 +; SSE-64-NEXT: movq %rdi, %xmm0 +; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-64-NEXT: retq +; +; SSE41-32-LABEL: test_buildvector_v2i64: +; SSE41-32: # BB#0: +; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE41-32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrd $2, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: retl +; +; AVX-32-LABEL: test_buildvector_v2i64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v2i64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovq %rsi, %xmm0 +; AVX-64-NEXT: vmovq %rdi, %xmm1 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX-64-NEXT: retq + %ins0 = insertelement <2 x i64> undef, i64 %a0, i32 0 + %ins1 = insertelement <2 x i64> %ins0, i64 %a1, i32 1 + ret <2 x i64> %ins1 +} + +define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) { +; SSE-32-LABEL: test_buildvector_v4i32: +; SSE-32: # BB#0: +; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0 +; SSE-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v4i32: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: movd %ecx, %xmm0 +; SSE2-64-NEXT: movd %esi, %xmm1 +; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-64-NEXT: movd %edx, %xmm2 +; SSE2-64-NEXT: movd %edi, %xmm0 +; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-64-NEXT: retq +; +; SSE41-64-LABEL: test_buildvector_v4i32: +; SSE41-64: # BB#0: +; SSE41-64-NEXT: movd %edi, %xmm0 +; SSE41-64-NEXT: pinsrd $1, %esi, %xmm0 +; SSE41-64-NEXT: pinsrd $2, %edx, %xmm0 +; SSE41-64-NEXT: pinsrd $3, %ecx, %xmm0 +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v4i32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v4i32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrd 
$1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <4 x i32> undef, i32 %f0, i32 0 + %ins1 = insertelement <4 x i32> %ins0, i32 %f1, i32 1 + %ins2 = insertelement <4 x i32> %ins1, i32 %f2, i32 2 + %ins3 = insertelement <4 x i32> %ins2, i32 %f3, i32 3 + ret <4 x i32> %ins3 +} + +define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) { +; SSE2-32-LABEL: test_buildvector_v8i16: +; SSE2-32: # BB#0: +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-32-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v8i16: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: movd %ecx, %xmm0 +; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-64-NEXT: movd %r9d, %xmm1 +; SSE2-64-NEXT: movd %esi, %xmm2 +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-64-NEXT: movd %edx, %xmm1 +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-64-NEXT: movd %r8d, %xmm3 +; SSE2-64-NEXT: movd %edi, %xmm0 +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-64-NEXT: retq +; +; SSE41-32-LABEL: test_buildvector_v8i16: +; SSE41-32: # BB#0: +; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE41-32-NEXT: pinsrw $1, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $2, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $4, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $6, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrw $7, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: retl +; +; SSE41-64-LABEL: test_buildvector_v8i16: +; 
SSE41-64: # BB#0: +; SSE41-64-NEXT: movd %edi, %xmm0 +; SSE41-64-NEXT: pinsrw $1, %esi, %xmm0 +; SSE41-64-NEXT: pinsrw $2, %edx, %xmm0 +; SSE41-64-NEXT: pinsrw $3, %ecx, %xmm0 +; SSE41-64-NEXT: pinsrw $4, %r8d, %xmm0 +; SSE41-64-NEXT: pinsrw $5, %r9d, %xmm0 +; SSE41-64-NEXT: pinsrw $6, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrw $7, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v8i16: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v8i16: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0 + %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1 + %ins2 = insertelement <8 x i16> %ins1, i16 %a2, i32 2 + %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3 + %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4 + %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5 + %ins6 = insertelement <8 x i16> %ins5, i16 %a6, i32 6 + %ins7 = insertelement <8 x i16> %ins6, i16 %a7, i32 7 + ret <8 x i16> %ins7 +} + +define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) { +; SSE2-32-LABEL: test_buildvector_v16i8: +; SSE2-32: # BB#0: +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-32-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-32-NEXT: retl +; +; SSE2-64-LABEL: test_buildvector_v16i8: +; SSE2-64: # BB#0: +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-64-NEXT: movd %ecx, %xmm0 +; SSE2-64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-64-NEXT: movd %r9d, %xmm1 +; SSE2-64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-64-NEXT: movd %esi, %xmm2 +; SSE2-64-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-64-NEXT: movd %edx, %xmm3 +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; SSE2-64-NEXT: movd %r8d, %xmm1 +; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-64-NEXT: movd %edi, %xmm0 +; SSE2-64-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-64-NEXT: retq +; +; SSE41-32-LABEL: test_buildvector_v16i8: +; SSE41-32: # BB#0: +; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE41-32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $2, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $3, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $5, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $6, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $7, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $9, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $10, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $11, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $12, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $13, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $14, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: pinsrb $15, {{[0-9]+}}(%esp), %xmm0 +; SSE41-32-NEXT: retl +; +; SSE41-64-LABEL: test_buildvector_v16i8: +; SSE41-64: # BB#0: +; SSE41-64-NEXT: movd %edi, %xmm0 +; SSE41-64-NEXT: pinsrb $1, %esi, %xmm0 +; SSE41-64-NEXT: pinsrb $2, %edx, %xmm0 +; SSE41-64-NEXT: pinsrb $3, %ecx, %xmm0 +; SSE41-64-NEXT: pinsrb $4, %r8d, %xmm0 +; SSE41-64-NEXT: pinsrb $5, %r9d, %xmm0 +; SSE41-64-NEXT: pinsrb $6, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $7, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $8, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $9, 
{{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $10, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $11, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $12, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $13, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $14, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: pinsrb $15, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-64-NEXT: retq +; +; AVX-32-LABEL: test_buildvector_v16i8: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v16i8: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <16 x i8> undef, i8 %a0, i32 0 + %ins1 = insertelement <16 x i8> %ins0, i8 %a1, i32 1 + %ins2 = insertelement <16 x i8> %ins1, i8 %a2, i32 2 + %ins3 = insertelement <16 x i8> %ins2, i8 %a3, i32 3 + %ins4 = insertelement <16 x i8> %ins3, i8 %a4, i32 4 + %ins5 = insertelement <16 x i8> %ins4, i8 %a5, i32 5 + %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6 + %ins7 = insertelement <16 x i8> %ins6, i8 %a7, i32 7 + %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8 + %ins9 = insertelement <16 x i8> %ins8, i8 %a9, i32 9 + %ins10 = insertelement <16 x i8> %ins9, i8 %a10, i32 10 + %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11 + %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12 + %ins13 = insertelement <16 x i8> %ins12, i8 %a13, i32 13 + %ins14 = insertelement <16 x i8> %ins13, i8 %a14, i32 14 + %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15 + ret <16 x i8> %ins15 +} diff --git a/test/CodeGen/X86/build-vector-256.ll b/test/CodeGen/X86/build-vector-256.ll new file mode 100644 index 0000000000000..1ced1fc3a3822 --- /dev/null +++ b/test/CodeGen/X86/build-vector-256.ll @@ -0,0 +1,434 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64 + +define <4 x double> @test_buildvector_v4f64(double %a0, double %a1, double %a2, double %a3) { +; AVX-32-LABEL: test_buildvector_v4f64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v4f64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <4 x double> undef, double %a0, i32 0 + %ins1 = insertelement <4 x double> %ins0, double %a1, i32 1 + %ins2 = insertelement <4 x double> %ins1, double %a2, i32 2 + %ins3 = insertelement <4 x double> %ins2, double %a3, i32 3 + ret <4 x double> %ins3 +} + +define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) { +; AVX-32-LABEL: test_buildvector_v8f32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v8f32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x float> undef, float %a0, i32 0 + %ins1 = insertelement <8 x float> %ins0, float %a1, i32 1 + %ins2 = insertelement <8 x float> %ins1, float %a2, i32 2 + %ins3 = insertelement <8 x float> %ins2, float %a3, i32 3 + %ins4 = insertelement <8 x float> %ins3, float %a4, i32 4 + %ins5 = insertelement <8 x float> %ins4, float %a5, i32 5 + %ins6 = insertelement <8 x float> %ins5, float %a6, i32 6 + %ins7 = insertelement <8 x float> %ins6, float %a7, i32 7 + ret <8 x float> %ins7 +} + +define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) { +; AVX1-32-LABEL: test_buildvector_v4i64: +; AVX1-32: # BB#0: +; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v4i64: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovq %rcx, %xmm0 +; AVX1-64-NEXT: vmovq %rdx, %xmm1 +; AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-64-NEXT: vmovq %rsi, %xmm1 +; AVX1-64-NEXT: vmovq %rdi, %xmm2 +; 
AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-32-LABEL: test_buildvector_v4i64: +; AVX2-32: # BB#0: +; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-32-NEXT: retl +; +; AVX2-64-LABEL: test_buildvector_v4i64: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovq %rcx, %xmm0 +; AVX2-64-NEXT: vmovq %rdx, %xmm1 +; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-64-NEXT: vmovq %rsi, %xmm1 +; AVX2-64-NEXT: vmovq %rdi, %xmm2 +; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <4 x i64> undef, i64 %a0, i32 0 + %ins1 = insertelement <4 x i64> %ins0, i64 %a1, i32 1 + %ins2 = insertelement <4 x i64> %ins1, i64 %a2, i32 2 + %ins3 = insertelement <4 x i64> %ins2, i64 %a3, i32 3 + ret <4 x i64> %ins3 +} + +define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) { +; AVX-32-LABEL: test_buildvector_v8i32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0 +; AVX-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v8i32: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovd %edi, %xmm0 +; AVX1-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX1-64-NEXT: vmovd %r8d, %xmm1 +; AVX1-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-64-LABEL: test_buildvector_v8i32: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovd %edi, %xmm0 +; AVX2-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX2-64-NEXT: vmovd %r8d, %xmm1 +; AVX2-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <8 x i32> undef, i32 %a0, i32 0 + %ins1 = insertelement <8 x i32> %ins0, i32 %a1, i32 1 + %ins2 = insertelement <8 x i32> %ins1, i32 %a2, i32 2 + %ins3 = insertelement <8 x i32> %ins2, i32 %a3, i32 3 + %ins4 = insertelement <8 x i32> %ins3, i32 %a4, i32 4 + %ins5 = insertelement <8 x i32> %ins4, i32 %a5, i32 5 + %ins6 = insertelement <8 x i32> %ins5, i32 %a6, i32 6 + %ins7 = insertelement <8 x i32> %ins6, i32 %a7, i32 7 + ret <8 x i32> %ins7 +} + +define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) { +; AVX1-32-LABEL: test_buildvector_v16i16: +; AVX1-32: # BB#0: +; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; 
AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v16i16: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovd %edi, %xmm0 +; AVX1-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-32-LABEL: test_buildvector_v16i16: +; AVX2-32: # BB#0: +; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-32-NEXT: retl +; +; AVX2-64-LABEL: test_buildvector_v16i16: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovd %edi, %xmm0 +; AVX2-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero 
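+; (Plain comments like these are ignored by FileCheck, so they are safe
+; between CHECK-NEXT lines. A minimal sketch of how blocks like the one
+; above are maintained: they are regenerated, not hand-written. After a
+; lowering change, rerunning the updater named in this file's header
+; refreshes every prefix's checks in place, along the lines of
+;   utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc \
+;     test/CodeGen/X86/build-vector-256.ll
+; where <build>/bin/llc is a placeholder for a locally built llc; the
+; --llc-binary flag is assumed here, with the script defaulting to the
+; 'llc' found on PATH. The script re-runs each RUN line and rewrites the
+; corresponding CHECK block from the actual llc output.)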
+; AVX2-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0 + %ins1 = insertelement <16 x i16> %ins0, i16 %a1, i32 1 + %ins2 = insertelement <16 x i16> %ins1, i16 %a2, i32 2 + %ins3 = insertelement <16 x i16> %ins2, i16 %a3, i32 3 + %ins4 = insertelement <16 x i16> %ins3, i16 %a4, i32 4 + %ins5 = insertelement <16 x i16> %ins4, i16 %a5, i32 5 + %ins6 = insertelement <16 x i16> %ins5, i16 %a6, i32 6 + %ins7 = insertelement <16 x i16> %ins6, i16 %a7, i32 7 + %ins8 = insertelement <16 x i16> %ins7, i16 %a8, i32 8 + %ins9 = insertelement <16 x i16> %ins8, i16 %a9, i32 9 + %ins10 = insertelement <16 x i16> %ins9, i16 %a10, i32 10 + %ins11 = insertelement <16 x i16> %ins10, i16 %a11, i32 11 + %ins12 = insertelement <16 x i16> %ins11, i16 %a12, i32 12 + %ins13 = insertelement <16 x i16> %ins12, i16 %a13, i32 13 + %ins14 = insertelement <16 x i16> %ins13, i16 %a14, i32 14 + %ins15 = insertelement <16 x i16> %ins14, i16 %a15, i32 15 + ret <16 x i16> %ins15 +} + +define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) { +; AVX1-32-LABEL: test_buildvector_v32i8: +; AVX1-32: # BB#0: +; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: 
vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-32-NEXT: retl +; +; AVX1-64-LABEL: test_buildvector_v32i8: +; AVX1-64: # BB#0: +; AVX1-64-NEXT: vmovd %edi, %xmm0 +; AVX1-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX1-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-64-NEXT: retq +; +; AVX2-32-LABEL: test_buildvector_v32i8: +; AVX2-32: # BB#0: +; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), 
%xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-32-NEXT: retl +; +; AVX2-64-LABEL: test_buildvector_v32i8: +; AVX2-64: # BB#0: +; AVX2-64-NEXT: vmovd %edi, %xmm0 +; AVX2-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX2-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-64-NEXT: retq + %ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0 + %ins1 = insertelement <32 x i8> %ins0, i8 %a1, i32 1 + %ins2 = insertelement <32 x i8> %ins1, i8 %a2, i32 2 + %ins3 = insertelement <32 x i8> %ins2, i8 %a3, i32 3 + %ins4 = insertelement <32 x i8> %ins3, i8 %a4, i32 4 + %ins5 = insertelement <32 x i8> %ins4, i8 %a5, i32 5 + %ins6 = insertelement <32 x i8> %ins5, i8 %a6, i32 6 + %ins7 = insertelement <32 x i8> %ins6, i8 %a7, i32 7 + %ins8 = insertelement <32 x i8> %ins7, i8 %a8, i32 8 + %ins9 = insertelement <32 x i8> %ins8, i8 %a9, i32 9 + %ins10 = insertelement <32 x i8> %ins9, i8 %a10, i32 10 + %ins11 = insertelement <32 x i8> %ins10, i8 %a11, i32 11 + %ins12 = insertelement <32 x i8> %ins11, i8 %a12, i32 12 + %ins13 = insertelement 
<32 x i8> %ins12, i8 %a13, i32 13 + %ins14 = insertelement <32 x i8> %ins13, i8 %a14, i32 14 + %ins15 = insertelement <32 x i8> %ins14, i8 %a15, i32 15 + %ins16 = insertelement <32 x i8> %ins15, i8 %a16, i32 16 + %ins17 = insertelement <32 x i8> %ins16, i8 %a17, i32 17 + %ins18 = insertelement <32 x i8> %ins17, i8 %a18, i32 18 + %ins19 = insertelement <32 x i8> %ins18, i8 %a19, i32 19 + %ins20 = insertelement <32 x i8> %ins19, i8 %a20, i32 20 + %ins21 = insertelement <32 x i8> %ins20, i8 %a21, i32 21 + %ins22 = insertelement <32 x i8> %ins21, i8 %a22, i32 22 + %ins23 = insertelement <32 x i8> %ins22, i8 %a23, i32 23 + %ins24 = insertelement <32 x i8> %ins23, i8 %a24, i32 24 + %ins25 = insertelement <32 x i8> %ins24, i8 %a25, i32 25 + %ins26 = insertelement <32 x i8> %ins25, i8 %a26, i32 26 + %ins27 = insertelement <32 x i8> %ins26, i8 %a27, i32 27 + %ins28 = insertelement <32 x i8> %ins27, i8 %a28, i32 28 + %ins29 = insertelement <32 x i8> %ins28, i8 %a29, i32 29 + %ins30 = insertelement <32 x i8> %ins29, i8 %a30, i32 30 + %ins31 = insertelement <32 x i8> %ins30, i8 %a31, i32 31 + ret <32 x i8> %ins31 +} diff --git a/test/CodeGen/X86/build-vector-512.ll b/test/CodeGen/X86/build-vector-512.ll new file mode 100644 index 0000000000000..21737cca93a10 --- /dev/null +++ b/test/CodeGen/X86/build-vector-512.ll @@ -0,0 +1,712 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX512F-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX512F-64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX512BW-32 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX512BW-64 + +define <8 x double> @test_buildvector_v8f64(double %a0, double %a1, double %a2, double %a3, double %a4, double %a5, double %a6, double %a7) { +; AVX-32-LABEL: test_buildvector_v8f64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v8f64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm4[0],xmm5[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX-64-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x double> undef, double %a0, i32 0 + %ins1 = insertelement <8 x double> %ins0, double %a1, i32 1 + %ins2 = insertelement <8 x double> %ins1, double %a2, i32 2 + %ins3 = insertelement <8 x double> %ins2, double %a3, i32 3 + %ins4 = insertelement <8 x double> %ins3, double %a4, i32 4 + %ins5 = insertelement <8 x double> %ins4, double %a5, i32 5 + %ins6 = insertelement <8 x double> %ins5, double %a6, i32 6 + %ins7 = insertelement <8 x double> %ins6, double %a7, i32 7 + ret <8 x double> %ins7 +} + +define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15) { +; AVX-32-LABEL: test_buildvector_v16f32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups 
{{[0-9]+}}(%esp), %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v16f32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0] +; AVX-64-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3] +; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0] +; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX-64-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <16 x float> undef, float %a0, i32 0 + %ins1 = insertelement <16 x float> %ins0, float %a1, i32 1 + %ins2 = insertelement <16 x float> %ins1, float %a2, i32 2 + %ins3 = insertelement <16 x float> %ins2, float %a3, i32 3 + %ins4 = insertelement <16 x float> %ins3, float %a4, i32 4 + %ins5 = insertelement <16 x float> %ins4, float %a5, i32 5 + %ins6 = insertelement <16 x float> %ins5, float %a6, i32 6 + %ins7 = insertelement <16 x float> %ins6, float %a7, i32 7 + %ins8 = insertelement <16 x float> %ins7, float %a8, i32 8 + %ins9 = insertelement <16 x float> %ins8, float %a9, i32 9 + %ins10 = insertelement <16 x float> %ins9, float %a10, i32 10 + %ins11 = insertelement <16 x float> %ins10, float %a11, i32 11 + %ins12 = insertelement <16 x float> %ins11, float %a12, i32 12 + %ins13 = insertelement <16 x float> %ins12, float %a13, i32 13 + %ins14 = insertelement <16 x float> %ins13, float %a14, i32 14 + %ins15 = insertelement <16 x float> %ins14, float %a15, i32 15 + ret <16 x float> %ins15 +} + +define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) { +; AVX-32-LABEL: test_buildvector_v8i64: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: 
test_buildvector_v8i64: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovq %rcx, %xmm0 +; AVX-64-NEXT: vmovq %rdx, %xmm1 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX-64-NEXT: vmovq %rsi, %xmm1 +; AVX-64-NEXT: vmovq %rdi, %xmm2 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX-64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX-64-NEXT: vmovq %r9, %xmm1 +; AVX-64-NEXT: vmovq %r8, %xmm2 +; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX-64-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX-64-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <8 x i64> undef, i64 %a0, i32 0 + %ins1 = insertelement <8 x i64> %ins0, i64 %a1, i32 1 + %ins2 = insertelement <8 x i64> %ins1, i64 %a2, i32 2 + %ins3 = insertelement <8 x i64> %ins2, i64 %a3, i32 3 + %ins4 = insertelement <8 x i64> %ins3, i64 %a4, i32 4 + %ins5 = insertelement <8 x i64> %ins4, i64 %a5, i32 5 + %ins6 = insertelement <8 x i64> %ins5, i64 %a6, i32 6 + %ins7 = insertelement <8 x i64> %ins6, i64 %a7, i32 7 + ret <8 x i64> %ins7 +} + +define <16 x i32> @test_buildvector_v16i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) { +; AVX-32-LABEL: test_buildvector_v16i32: +; AVX-32: # BB#0: +; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0 +; AVX-32-NEXT: retl +; +; AVX-64-LABEL: test_buildvector_v16i32: +; AVX-64: # BB#0: +; AVX-64-NEXT: vmovd %edi, %xmm0 +; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0 +; AVX-64-NEXT: vmovd %r8d, %xmm1 +; AVX-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-64-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX-64-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX-64-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX-64-NEXT: retq + %ins0 = insertelement <16 x i32> undef, i32 %a0, i32 0 + %ins1 = insertelement <16 x i32> %ins0, i32 %a1, i32 1 + %ins2 = insertelement <16 x i32> %ins1, i32 %a2, i32 2 + %ins3 = insertelement <16 x i32> %ins2, i32 %a3, i32 3 + %ins4 = insertelement <16 x i32> %ins3, i32 %a4, i32 4 + %ins5 = insertelement <16 x i32> %ins4, i32 %a5, i32 5 + %ins6 = insertelement <16 x i32> %ins5, i32 %a6, i32 6 + %ins7 = insertelement <16 x i32> %ins6, i32 %a7, i32 7 + %ins8 = insertelement <16 x i32> %ins7, i32 %a8, i32 8 + %ins9 = insertelement <16 x i32> %ins8, i32 %a9, i32 9 + %ins10 = insertelement <16 x i32> %ins9, i32 %a10, i32 10 + %ins11 = insertelement <16 x i32> %ins10, i32 %a11, i32 11 + %ins12 = insertelement <16 x i32> %ins11, i32 %a12, i32 12 + %ins13 = insertelement <16 x i32> %ins12, i32 %a13, i32 13 + %ins14 = insertelement <16 x i32> %ins13, i32 %a14, i32 14 + %ins15 = insertelement <16 x i32> %ins14, i32 %a15, i32 15 + ret <16 x i32> %ins15 +} + +define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 
%a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15, i16 %a16, i16 %a17, i16 %a18, i16 %a19, i16 %a20, i16 %a21, i16 %a22, i16 %a23, i16 %a24, i16 %a25, i16 %a26, i16 %a27, i16 %a28, i16 %a29, i16 %a30, i16 %a31) { +; AVX512F-32-LABEL: test_buildvector_v32i16: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512F-32-NEXT: retl +; +; AVX512F-64-LABEL: test_buildvector_v32i16: +; AVX512F-64: # BB#0: +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrw $7, 
{{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX512F-64-NEXT: vmovd %edi, %xmm0 +; AVX512F-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512F-64-NEXT: retq +; +; AVX512BW-32-LABEL: test_buildvector_v32i16: +; AVX512BW-32: # BB#0: +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-32-NEXT: retl +; +; AVX512BW-64-LABEL: test_buildvector_v32i16: +; AVX512BW-64: # BB#0: +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero 
+; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-64-NEXT: vmovd %edi, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $1, %esi, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $4, %r8d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $5, %r9d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-64-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-64-NEXT: retq + %ins0 = insertelement <32 x i16> undef, i16 %a0, i32 0 + %ins1 = insertelement <32 x i16> %ins0, i16 %a1, i32 1 + %ins2 = insertelement <32 x i16> %ins1, i16 %a2, i32 2 + %ins3 = insertelement <32 x i16> %ins2, i16 %a3, i32 3 + %ins4 = insertelement <32 x i16> %ins3, i16 %a4, i32 4 + %ins5 = insertelement <32 x i16> %ins4, i16 %a5, i32 5 + %ins6 = insertelement <32 x i16> %ins5, i16 %a6, i32 6 + %ins7 = insertelement <32 x i16> %ins6, i16 %a7, i32 7 + %ins8 = insertelement <32 x i16> %ins7, i16 %a8, i32 8 + %ins9 = insertelement <32 x i16> %ins8, i16 %a9, i32 9 + %ins10 = insertelement <32 x i16> %ins9, i16 %a10, i32 10 + %ins11 = insertelement <32 x i16> %ins10, i16 %a11, i32 11 + %ins12 = insertelement <32 x i16> %ins11, i16 %a12, i32 12 + %ins13 = insertelement <32 x i16> %ins12, i16 %a13, i32 13 + %ins14 = insertelement <32 x i16> %ins13, i16 %a14, i32 14 + %ins15 = insertelement <32 x i16> %ins14, i16 %a15, i32 15 + %ins16 = insertelement <32 x i16> %ins15, i16 %a16, i32 16 + %ins17 = insertelement <32 x i16> %ins16, i16 %a17, i32 17 + %ins18 = insertelement <32 x i16> %ins17, i16 %a18, i32 18 + %ins19 = insertelement <32 x i16> %ins18, i16 %a19, i32 19 + %ins20 = insertelement <32 x i16> %ins19, i16 %a20, i32 20 + %ins21 = insertelement <32 x i16> %ins20, i16 %a21, i32 21 + %ins22 = insertelement <32 x i16> %ins21, i16 %a22, i32 22 + %ins23 = insertelement <32 x i16> %ins22, i16 %a23, i32 23 + %ins24 = insertelement <32 x 
i16> %ins23, i16 %a24, i32 24 + %ins25 = insertelement <32 x i16> %ins24, i16 %a25, i32 25 + %ins26 = insertelement <32 x i16> %ins25, i16 %a26, i32 26 + %ins27 = insertelement <32 x i16> %ins26, i16 %a27, i32 27 + %ins28 = insertelement <32 x i16> %ins27, i16 %a28, i32 28 + %ins29 = insertelement <32 x i16> %ins28, i16 %a29, i32 29 + %ins30 = insertelement <32 x i16> %ins29, i16 %a30, i32 30 + %ins31 = insertelement <32 x i16> %ins30, i16 %a31, i32 31 + ret <32 x i16> %ins31 +} + +define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31, i8 %a32, i8 %a33, i8 %a34, i8 %a35, i8 %a36, i8 %a37, i8 %a38, i8 %a39, i8 %a40, i8 %a41, i8 %a42, i8 %a43, i8 %a44, i8 %a45, i8 %a46, i8 %a47, i8 %a48, i8 %a49, i8 %a50, i8 %a51, i8 %a52, i8 %a53, i8 %a54, i8 %a55, i8 %a56, i8 %a57, i8 %a58, i8 %a59, i8 %a60, i8 %a61, i8 %a62, i8 %a63) { +; AVX512F-32-LABEL: test_buildvector_v64i8: +; AVX512F-32: # BB#0: +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), 
%xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512F-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512F-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512F-32-NEXT: retl +; +; AVX512F-64-LABEL: test_buildvector_v64i8: +; AVX512F-64: # BB#0: +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb 
$10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512F-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX512F-64-NEXT: vmovd %edi, %xmm0 +; AVX512F-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512F-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512F-64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512F-64-NEXT: retq +; +; AVX512BW-32-LABEL: test_buildvector_v64i8: +; AVX512BW-32: # BB#0: +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0 +; 
AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2 +; AVX512BW-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-32-NEXT: retl +; +; AVX512BW-64-LABEL: test_buildvector_v64i8: +; AVX512BW-64: # BB#0: +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0 
+; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX512BW-64-NEXT: vmovd %edi, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $1, %esi, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $2, %edx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $4, %r8d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $5, %r9d, %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1 +; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $10, 
{{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX512BW-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512BW-64-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512BW-64-NEXT: retq + %ins0 = insertelement <64 x i8> undef, i8 %a0, i32 0 + %ins1 = insertelement <64 x i8> %ins0, i8 %a1, i32 1 + %ins2 = insertelement <64 x i8> %ins1, i8 %a2, i32 2 + %ins3 = insertelement <64 x i8> %ins2, i8 %a3, i32 3 + %ins4 = insertelement <64 x i8> %ins3, i8 %a4, i32 4 + %ins5 = insertelement <64 x i8> %ins4, i8 %a5, i32 5 + %ins6 = insertelement <64 x i8> %ins5, i8 %a6, i32 6 + %ins7 = insertelement <64 x i8> %ins6, i8 %a7, i32 7 + %ins8 = insertelement <64 x i8> %ins7, i8 %a8, i32 8 + %ins9 = insertelement <64 x i8> %ins8, i8 %a9, i32 9 + %ins10 = insertelement <64 x i8> %ins9, i8 %a10, i32 10 + %ins11 = insertelement <64 x i8> %ins10, i8 %a11, i32 11 + %ins12 = insertelement <64 x i8> %ins11, i8 %a12, i32 12 + %ins13 = insertelement <64 x i8> %ins12, i8 %a13, i32 13 + %ins14 = insertelement <64 x i8> %ins13, i8 %a14, i32 14 + %ins15 = insertelement <64 x i8> %ins14, i8 %a15, i32 15 + %ins16 = insertelement <64 x i8> %ins15, i8 %a16, i32 16 + %ins17 = insertelement <64 x i8> %ins16, i8 %a17, i32 17 + %ins18 = insertelement <64 x i8> %ins17, i8 %a18, i32 18 + %ins19 = insertelement <64 x i8> %ins18, i8 %a19, i32 19 + %ins20 = insertelement <64 x i8> %ins19, i8 %a20, i32 20 + %ins21 = insertelement <64 x i8> %ins20, i8 %a21, i32 21 + %ins22 = insertelement <64 x i8> %ins21, i8 %a22, i32 22 + %ins23 = insertelement <64 x i8> %ins22, i8 %a23, i32 23 + %ins24 = insertelement <64 x i8> %ins23, i8 %a24, i32 24 + %ins25 = insertelement <64 x i8> %ins24, i8 %a25, i32 25 + %ins26 = insertelement <64 x i8> %ins25, i8 %a26, i32 26 + %ins27 = insertelement <64 x i8> %ins26, i8 %a27, i32 27 + %ins28 = insertelement <64 x i8> %ins27, i8 %a28, i32 28 + %ins29 = insertelement <64 x i8> %ins28, i8 %a29, i32 29 + %ins30 = insertelement <64 x i8> %ins29, i8 %a30, i32 30 + %ins31 = insertelement <64 x i8> %ins30, i8 %a31, i32 31 + %ins32 = insertelement <64 x i8> %ins31, i8 %a32, i32 32 + %ins33 = insertelement <64 x i8> %ins32, i8 %a33, i32 33 + %ins34 = insertelement <64 x i8> %ins33, i8 %a34, i32 34 + %ins35 = insertelement <64 x i8> %ins34, i8 %a35, i32 35 + %ins36 = insertelement <64 x i8> %ins35, i8 %a36, i32 36 + %ins37 = insertelement <64 x i8> %ins36, i8 %a37, i32 37 + %ins38 = insertelement <64 x i8> %ins37, i8 %a38, i32 38 + %ins39 = insertelement <64 x i8> %ins38, i8 %a39, i32 39 + %ins40 = insertelement <64 x i8> %ins39, i8 %a40, i32 40 + %ins41 = insertelement <64 x i8> %ins40, i8 %a41, i32 41 + %ins42 = insertelement <64 x i8> %ins41, i8 %a42, i32 42 + %ins43 = insertelement <64 x i8> %ins42, i8 %a43, i32 43 + %ins44 = insertelement <64 x i8> %ins43, i8 %a44, i32 44 + %ins45 = insertelement <64 x i8> %ins44, i8 %a45, i32 45 + %ins46 = insertelement <64 x i8> %ins45, i8 %a46, i32 46 + %ins47 = insertelement <64 x i8> %ins46, i8 %a47, i32 47 + %ins48 = insertelement <64 x i8> %ins47, i8 %a48, i32 48 + %ins49 = insertelement <64 x i8> %ins48, i8 %a49, i32 49 + %ins50 = insertelement <64 x i8> %ins49, i8 %a50, i32 50 + %ins51 = insertelement <64 x i8> %ins50, i8 %a51, i32 51 + %ins52 = insertelement <64 x 
i8> %ins51, i8 %a52, i32 52 + %ins53 = insertelement <64 x i8> %ins52, i8 %a53, i32 53 + %ins54 = insertelement <64 x i8> %ins53, i8 %a54, i32 54 + %ins55 = insertelement <64 x i8> %ins54, i8 %a55, i32 55 + %ins56 = insertelement <64 x i8> %ins55, i8 %a56, i32 56 + %ins57 = insertelement <64 x i8> %ins56, i8 %a57, i32 57 + %ins58 = insertelement <64 x i8> %ins57, i8 %a58, i32 58 + %ins59 = insertelement <64 x i8> %ins58, i8 %a59, i32 59 + %ins60 = insertelement <64 x i8> %ins59, i8 %a60, i32 60 + %ins61 = insertelement <64 x i8> %ins60, i8 %a61, i32 61 + %ins62 = insertelement <64 x i8> %ins61, i8 %a62, i32 62 + %ins63 = insertelement <64 x i8> %ins62, i8 %a63, i32 63 + ret <64 x i8> %ins63 +} diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll index ac8f790a2ead6..887abe99f6ed8 100644 --- a/test/CodeGen/X86/combine-abs.ll +++ b/test/CodeGen/X86/combine-abs.ll @@ -1,5 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL ; fold (abs c1) -> c2 define <4 x i32> @combine_v4i32_abs_constant() { @@ -27,10 +29,10 @@ define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) { ; CHECK-NEXT: vpabsw %xmm0, %xmm0 ; CHECK-NEXT: retq %a1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a) - %n2 = sub <8 x i16> zeroinitializer, %a1 - %c2 = icmp slt <8 x i16> %a1, zeroinitializer - %a2 = select <8 x i1> %c2, <8 x i16> %n2, <8 x i16> %a1 - ret <8 x i16> %a2 + %s2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15> + %a2 = add <8 x i16> %a1, %s2 + %x2 = xor <8 x i16> %a2, %s2 + ret <8 x i16> %x2 } define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) { @@ -46,17 +48,29 @@ define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) { } define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) { -; CHECK-LABEL: combine_v4i64_abs_abs: -; CHECK: # BB#0: -; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1 -; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] -; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1 -; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] -; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: retq +; AVX2-LABEL: combine_v4i64_abs_abs: +; AVX2: # BB#0: +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1 +; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1 +; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_v4i64_abs_abs: +; AVX512F: # BB#0: +; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def> +; AVX512F-NEXT: vpabsq %zmm0, %zmm0 +; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill> +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: combine_v4i64_abs_abs: +; AVX512VL: # BB#0: +; AVX512VL-NEXT: vpabsq %ymm0, %ymm0 +; AVX512VL-NEXT: retq %n1 = sub <4 x 
i64> zeroinitializer, %a %b1 = icmp slt <4 x i64> %a, zeroinitializer %a1 = select <4 x i1> %b1, <4 x i64> %n1, <4 x i64> %a diff --git a/test/CodeGen/X86/commuted-blend-mask.ll b/test/CodeGen/X86/commuted-blend-mask.ll index e6322cbb7a14b..37830509d5a27 100644 --- a/test/CodeGen/X86/commuted-blend-mask.ll +++ b/test/CodeGen/X86/commuted-blend-mask.ll @@ -1,4 +1,5 @@ -; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s ; When commuting the operands of a SSE blend, make sure that the resulting blend ; mask can be encoded as a imm8. @@ -7,7 +8,7 @@ ; pblendw $4294967103, %xmm1, %xmm0 define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b) { - ;CHECK: pblendw $63, %xmm1, %xmm0 +; CHECK: pblendw $63, %xmm1, %xmm0 %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 6, i32 3> ret <4 x i32> %shuffle } diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll index b7031a817e82d..bbfc2ead04c69 100644 --- a/test/CodeGen/X86/ctpop-combine.ll +++ b/test/CodeGen/X86/ctpop-combine.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=corei7 | FileCheck %s +declare i8 @llvm.ctpop.i8(i8) nounwind readnone declare i64 @llvm.ctpop.i64(i64) nounwind readnone define i32 @test1(i64 %x) nounwind readnone { @@ -48,3 +49,16 @@ define i32 @test3(i64 %x) nounwind readnone { %conv = zext i1 %cmp to i32 ret i32 %conv } + +define i8 @test4(i8 %x) nounwind readnone { +; CHECK-LABEL: test4: +; CHECK: # BB#0: +; CHECK-NEXT: andl $127, %edi +; CHECK-NEXT: popcntw %di, %ax +; CHECK-NEXT: # kill: %AL<def> %AL<kill> %AX<kill> +; CHECK-NEXT: retq + %x2 = and i8 %x, 127 + %count = tail call i8 @llvm.ctpop.i8(i8 %x2) + %and = and i8 %count, 7 + ret i8 %and +} diff --git a/test/CodeGen/X86/dbg-baseptr.ll b/test/CodeGen/X86/dbg-baseptr.ll index f69c78af73677..fb0da1b50d11c 100644 --- a/test/CodeGen/X86/dbg-baseptr.ll +++ b/test/CodeGen/X86/dbg-baseptr.ll @@ -16,12 +16,12 @@ define i32 @f0(%struct.s* byval align 8 %input) !dbg !8 { ; CHECK-LABEL: f1: ; CHECK: DEBUG_VALUE: f:input <- [%RBP+16] -define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 { +define i32 @f1(%struct.s* byval align 8 %input) !dbg !19 { %val = load i64, i64* @glob ; this alloca should force FP usage. %stackspace = alloca i32, i64 %val, align 1 store i32* %stackspace, i32** @ptr - call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18 + call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !20, metadata !17), !dbg !21 ret i32 42 } @@ -37,11 +37,11 @@ define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 { ; The parameter should still be referenced through RBP though. 
; CHECK-NOT: DEBUG_VALUE: f:input <- [%RBX
; CHECK: DEBUG_VALUE: f:input <- [%RBP+16]
-define i32 @f2(%struct.s* byval align 8 %input) !dbg !8 {
+define i32 @f2(%struct.s* byval align 8 %input) !dbg !22 {
 %val = load i64, i64* @glob
 %stackspace = alloca i32, i64 %val, align 64
 store i32* %stackspace, i32** @ptr
- call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18
+ call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !23, metadata !17), !dbg !24
 ret i32 42
}
@@ -73,3 +73,10 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!17 = !DIExpression()
!18 = !DILocation(line: 5, scope: !8)
+
+!19 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!20 = !DILocalVariable(name: "input", arg: 1, scope: !19, file: !3, line: 5, type: !9)
+!21 = !DILocation(line: 5, scope: !19)
+!22 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!23 = !DILocalVariable(name: "input", arg: 1, scope: !22, file: !3, line: 5, type: !9)
+!24 = !DILocation(line: 5, scope: !22)
diff --git a/test/CodeGen/X86/eflags-copy-expansion.mir b/test/CodeGen/X86/eflags-copy-expansion.mir
index 36044b4d20594..28f47c3c2496a 100644
--- a/test/CodeGen/X86/eflags-copy-expansion.mir
+++ b/test/CodeGen/X86/eflags-copy-expansion.mir
@@ -25,7 +25,6 @@ liveins:
body: |
 bb.0.entry:
 liveins: %edi
-    successors: %bb.1.false
 NOOP implicit-def %al
 ; The bug was triggered only when LivePhysReg is used, which
diff --git a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
index e86d094ac341f..f9ecf707810b3 100644
--- a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
+++ b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
@@ -35,8 +35,8 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) {
; CHECK-NEXT: pop
; CHECK-NEXT: ret
-define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !4 {
- call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !5, metadata !6), !dbg !7
+define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 {
+ call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !9, metadata !6), !dbg !10
 ret void
}
@@ -64,3 +64,6 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!5 = !DILocalVariable(name: "w", scope: !4)
!6 = !DIExpression()
!7 = !DILocation(line: 210, column: 12, scope: !4)
+!8 = distinct !DISubprogram(name: "withDebug", unit: !0)
+!9 = !DILocalVariable(name: "w", scope: !8)
+!10 = !DILocation(line: 210, column: 12, scope: !8)
diff --git a/test/CodeGen/X86/implicit-null-checks.mir b/test/CodeGen/X86/implicit-null-checks.mir
index 39bfedaa7814a..d0ba057fa009c 100644
--- a/test/CodeGen/X86/implicit-null-checks.mir
+++ b/test/CodeGen/X86/implicit-null-checks.mir
@@ -384,14 +384,12 @@ liveins:
body: |
 bb.0.entry:
-    successors: %bb.3.is_null, %bb.1.not_null
 liveins: %esi, %rdi
 TEST64rr %rdi, %rdi, implicit-def %eflags
 JE_1 %bb.3.is_null, implicit %eflags
 bb.1.not_null:
-    successors: %bb.4.ret_100, %bb.2.ret_200
 liveins: %esi, %rdi
 %eax = MOV32ri 2200000
@@ -427,7 +425,6 @@ liveins:
body: |
 bb.0.entry:
-    successors: %bb.3.is_null, %bb.1.not_null
 liveins: %esi, %rdi, %rdx
 %eax = MOV32rm killed %rdx, 1, _, 0, _ :: (volatile load 4 from %ir.ptr)
@@ -435,7 +432,6 @@ body: |
 JE_1 %bb.3.is_null, implicit %eflags
 bb.1.not_null:
-    successors: %bb.4.ret_100, %bb.2.ret_200
 liveins: %esi, %rdi
 %eax = MOV32ri 2200000
@@ -444,7 +440,6 @@ body: |
 JE_1 %bb.4.ret_100, implicit %eflags
 bb.2.ret_200:
-    successors: %bb.3.is_null
 %eax = MOV32ri 200
@@ -472,14 +467,12 @@ liveins:
body: |
 bb.0.entry:
-    successors: %bb.3.is_null, %bb.1.not_null
 liveins: %esi, %rdi
 TEST64rr %rdi, %rdi, implicit-def %eflags
 JE_1 %bb.3.is_null, implicit %eflags
 bb.1.not_null:
-    successors: %bb.4.ret_100, %bb.2.ret_200
 liveins: %esi, %rdi
 %eax = MOV32ri 2200000
@@ -515,14 +508,12 @@ liveins:
body: |
 bb.0.entry:
-    successors: %bb.3.is_null, %bb.1.not_null
 liveins: %rsi, %rdi
 TEST64rr %rdi, %rdi, implicit-def %eflags
 JE_1 %bb.3.is_null, implicit %eflags
 bb.1.not_null:
-    successors: %bb.4.ret_100, %bb.2.ret_200
 liveins: %rsi, %rdi
 %rdi = MOV64ri 5000
@@ -557,14 +548,12 @@ liveins:
body: |
 bb.0.entry:
-    successors: %bb.3.is_null, %bb.1.not_null
 liveins: %rsi, %rdi, %rdx
 TEST64rr %rdi, %rdi, implicit-def %eflags
 JE_1 %bb.3.is_null, implicit %eflags
 bb.1.not_null:
-    successors: %bb.4.ret_100, %bb.2.ret_200
 liveins: %rsi, %rdi, %rdx
 %rbx = MOV64rr %rdx
@@ -603,7 +592,6 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
# CHECK: CALL64pcrel32
body: |
 bb.0.entry:
-    successors: %bb.2.leave, %bb.1.stay
 liveins: %rdi, %rbx
 frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
@@ -645,7 +633,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -680,7 +667,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -712,7 +698,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000)
 liveins: %rsi, %rdi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -745,7 +730,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000)
 liveins: %rsi, %rdi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -779,7 +763,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -810,7 +793,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -842,7 +824,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -874,7 +855,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -910,7 +890,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -941,7 +920,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -974,7 +952,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1006,7 +983,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1042,7 +1018,6 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
 '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rbx
 frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
@@ -1082,7 +1057,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1116,7 +1090,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1149,7 +1122,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1182,7 +1154,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1214,7 +1185,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1246,7 +1216,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1279,7 +1248,6 @@ liveins:
 - { reg: '%rsi' }
body: |
 bb.0.entry:
-    successors: %bb.2.is_null, %bb.1.not_null
 liveins: %rdi, %rsi
 TEST64rr %rdi, %rdi, implicit-def %eflags
diff --git a/test/CodeGen/X86/invalid-liveness.mir b/test/CodeGen/X86/invalid-liveness.mir
index ca862472ba86b..c1da65e0be698 100644
--- a/test/CodeGen/X86/invalid-liveness.mir
+++ b/test/CodeGen/X86/invalid-liveness.mir
@@ -16,12 +16,10 @@ registers:
 - { id: 0, class: gr32 }
body: |
 bb.0:
-    successors: %bb.2, %bb.3
 JG_1 %bb.2, implicit %eflags
 JMP_1 %bb.3
 bb.2:
-    successors: %bb.3
 %0 = IMPLICIT_DEF
 JMP_1 %bb.3
diff --git a/test/CodeGen/X86/machine-region-info.mir b/test/CodeGen/X86/machine-region-info.mir
index 0998fe97c2353..78823a3eb0068 100644
--- a/test/CodeGen/X86/machine-region-info.mir
+++ b/test/CodeGen/X86/machine-region-info.mir
@@ -4,67 +4,48 @@ name: fun
body: |
 bb.0:
-    successors: %bb.1, %bb.7
-
 CMP32ri8 %edi, 40, implicit-def %eflags
 JNE_1 %bb.7, implicit killed %eflags
 JMP_1 %bb.1
 bb.1:
-    successors: %bb.2, %bb.11
-
 CMP32ri8 %edi, 1, implicit-def %eflags
 JNE_1 %bb.11, implicit killed %eflags
 JMP_1 %bb.2
 bb.2:
-    successors: %bb.3, %bb.5
-
 CMP32ri8 %edi, 2, implicit-def %eflags
 JNE_1 %bb.5, implicit killed %eflags
 JMP_1 %bb.3
 bb.3:
-    successors: %bb.4, %bb.5
-
 CMP32ri8 %edi, 90, implicit-def %eflags
 JNE_1 %bb.5, implicit killed %eflags
 JMP_1 %bb.4
 bb.4:
-    successors: %bb.5
 bb.5:
-    successors: %bb.6, %bb.11
-
 CMP32ri8 %edi, 4, implicit-def %eflags
 JNE_1 %bb.11, implicit killed %eflags
 JMP_1 %bb.6
 bb.6:
-    successors: %bb.11
-
 JMP_1 %bb.11
 bb.7:
-    successors: %bb.9, %bb.8
-
 CMP32ri8 %edi, 5, implicit-def %eflags
 JE_1 %bb.9, implicit killed %eflags
 JMP_1 %bb.8
 bb.8:
-    successors: %bb.9
 bb.9:
-    successors: %bb.11, %bb.10
-
 CMP32ri8 %edi, 6, implicit-def %eflags
 JE_1 %bb.11, implicit killed %eflags
 JMP_1 %bb.10
 bb.10:
-    successors: %bb.11
 bb.11:
 RET 0
@@ -74,10 +55,10 @@ body: |
# CHECK: Region tree:
# CHECK-NEXT: [0] BB#0 => <Function Return>
# CHECK-NEXT: [1] BB#0 => BB#11
+# CHECK-NEXT: [2] BB#7 => BB#9
+# CHECK-NEXT: [2] BB#9 => BB#11
# CHECK-NEXT: [2] BB#1 => BB#11
# CHECK-NEXT: [3] BB#2 => BB#5
# CHECK-NEXT: [4] BB#3 => BB#5
# CHECK-NEXT: [3] BB#5 => BB#11
-# CHECK-NEXT: [2] BB#7 => BB#9
-# CHECK-NEXT: [2] BB#9 => BB#11
# CHECK-NEXT: End region tree
diff --git a/test/CodeGen/X86/ms-inline-asm-avx512.ll b/test/CodeGen/X86/ms-inline-asm-avx512.ll
new file mode 100644
index 0000000000000..be60f5bca1619
--- /dev/null
+++ b/test/CodeGen/X86/ms-inline-asm-avx512.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s | FileCheck %s
+
+; Generated from clang/test/CodeGen/ms-inline-asm-avx512.c
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+; Function Attrs: noinline nounwind
+define void @ignore_fe_size() #0 {
+entry:
+ %c = alloca i8, align 1
+ call void asm sideeffect inteldialect "vaddps xmm1, xmm2, $1{1to4}\0A\09vaddps xmm1, xmm2, $2\0A\09mov eax, $3\0A\09mov $0, rax", "=*m,*m,*m,*m,~{eax},~{xmm1},~{dirflag},~{fpsr},~{flags}"(i8* %c, i8* %c, i8* %c, i8* %c) #1
+ ret void
+}
+
+; CHECK-LABEL: ignore_fe_size:
+; CHECK: vaddps 7(%rsp){1to4}, %xmm2, %xmm1
+; CHECK: vaddps 7(%rsp), %xmm2, %xmm1
+; CHECK: movl 7(%rsp), %eax
+; CHECK: movq %rax, 7(%rsp)
+; CHECK: retq
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/X86/pr27681.mir b/test/CodeGen/X86/pr27681.mir
index 3e931b182e4e9..002761bc1e687 100644
--- a/test/CodeGen/X86/pr27681.mir
+++ b/test/CodeGen/X86/pr27681.mir
@@ -25,7 +25,6 @@ stack:
 - { id: 2, type: spill-slot, offset: -32, size: 4, alignment: 4 }
body: |
 bb.0:
-    successors: %bb.1
 liveins: %ebp, %ebx, %edi, %esi
 frame-setup PUSH32r killed %ebp, implicit-def %esp, implicit %esp
@@ -41,7 +40,6 @@ body: |
 %edx = MOV32ri 6
 bb.1:
-    successors: %bb.3, %bb.2
 liveins: %eax, %ebp, %ebx, %ecx, %edi, %edx
 %ebp = SHR32rCL killed %ebp, implicit-def dead %eflags, implicit %cl
@@ -66,7 +64,6 @@ body: |
 JE_1 %bb.3, implicit %eflags
 bb.2:
-    successors: %bb.3
 liveins: %cl, %eax, %ebp, %esi
 OR32mr %esp, 1, _, 8, _, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
diff --git a/test/CodeGen/X86/pr32907.ll b/test/CodeGen/X86/pr32907.ll
new file mode 100644
index 0000000000000..bc03fbe068439
--- /dev/null
+++ b/test/CodeGen/X86/pr32907.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
+
+define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
+; SSE-LABEL: PR32907:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: psubq %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psubq %xmm0, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: PR32907:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm2
+; AVX2-NEXT: vpandn %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: PR32907:
+; AVX512: # BB#0: # %entry
+; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $63, %zmm0, %zmm1
+; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm2
+; AVX512-NEXT: vpandn %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %sub13.i = sub <2 x i64> %astype.i, %astype6.i
+ %x.lobit.i.i = ashr <2 x i64> %sub13.i, <i64 63, i64 63>
+ %sub.i.i = sub <2 x i64> zeroinitializer, %sub13.i
+ %0 = xor <2 x i64> %x.lobit.i.i, <i64 -1, i64 -1>
+ %1 = and <2 x i64> %sub13.i, %0
+ %2 = and <2 x i64> %x.lobit.i.i, %sub.i.i
+ %cond.i.i = or <2 x i64> %1, %2
+ ret <2 x i64> %cond.i.i
+}
diff --git a/test/CodeGen/X86/pre-coalesce.mir b/test/CodeGen/X86/pre-coalesce.mir
index 11805fe090b42..17d447dd097b9 100644
--- a/test/CodeGen/X86/pre-coalesce.mir
+++ b/test/CodeGen/X86/pre-coalesce.mir
@@ -83,8 +83,6 @@ frameInfo:
 hasMustTailInVarArgFunc: false
body: |
 bb.0.entry:
-    successors: %bb.4(0x30000000), %bb.1.while.body.preheader(0x50000000)
-
 %0 = MOV64rm %rip, 1, _, @b, _ :: (dereferenceable load 8 from @b)
 %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
 TEST8rr %12, %12, implicit-def %eflags
@@ -92,17 +90,12 @@ body: |
 JNE_1 %bb.1.while.body.preheader, implicit killed %eflags
 bb.4:
-    successors: %bb.3.while.end(0x80000000)
-
 %10 = COPY %11
 JMP_1 %bb.3.while.end
 bb.1.while.body.preheader:
-    successors: %bb.2.while.body(0x80000000)
 bb.2.while.body:
-    successors: %bb.3.while.end(0x04000000), %bb.2.while.body(0x7c000000)
-
 %8 = MOVSX32rr8 %12
 %10 = COPY %11
 %10 = SHL32ri %10, 5, implicit-def dead %eflags
diff --git a/test/CodeGen/X86/regcall-no-plt.ll b/test/CodeGen/X86/regcall-no-plt.ll
new file mode 100644
index 0000000000000..d525448b60ca8
--- /dev/null
+++ b/test/CodeGen/X86/regcall-no-plt.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-freebsd -relocation-model=pic < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; According to the x86-64 psABI, xmm0-xmm7 can be used to pass function
+;; parameters. However, the regcall calling convention also uses xmm8-xmm15 to
+;; pass function parameters, which violates the x86-64 psABI.
+;; Detailed info about it can be found at:
+;; https://sourceware.org/bugzilla/show_bug.cgi?id=21265
+;;
+;; We encounter the violation symptom when using PIC with lazy binding
+;; optimization.
+;; In that case the PLT mechanism, as described in the x86-64 psABI, will
+;; not preserve the xmm8-xmm15 registers and will lead to miscompilation.
+;;
+;; The agreed solution is to disable the PLT for the regcall calling convention
+;; for SystemV targets using the ELF format.
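+;;
+;; For illustration only (an editor's sketch with hypothetical labels, not
+;; part of the original test): a classic lazy-binding PLT entry looks
+;; roughly like
+;;
+;;   regcall_fn@plt:
+;;     jmpq *regcall_fn@GOTPCREL(%rip) ;; first call falls through to the
+;;     pushq $reloc_index              ;; lazy resolver, which only saves
+;;     jmpq plt0                       ;; the psABI argument registers
+;;
+;; so a first call through such a stub may clobber live xmm8-xmm15 regcall
+;; arguments. Calling through the GOT entry directly, as the CHECK lines
+;; below expect, never enters the stub.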
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +declare void @lazy() +declare x86_regcallcc void @regcall_not_lazy() + +; CHECK-LABEL: foo: +; CHECK: callq lazy@PLT +; CHECK: callq *regcall_not_lazy@GOTPCREL(%rip) +define void @foo() nounwind { + call void @lazy() + call void @regcall_not_lazy() + ret void +} + +; CHECK-LABEL: tail_call_regcall: +; CHECK: jmpq *regcall_not_lazy@GOTPCREL(%rip) +define void @tail_call_regcall() nounwind { + tail call void @regcall_not_lazy() + ret void +} + +; CHECK-LABEL: tail_call_regular: +; CHECK: jmp lazy +define void @tail_call_regular() nounwind { + tail call void @lazy() + ret void +} diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll index d053c63dcdb37..a3ba589758009 100644 --- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll +++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll @@ -392,8 +392,10 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind { ; AVX512BW-LABEL: shuffle_v32i16_to_v8i16: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0 +; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] ; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax -; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1 +; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 ; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2 ; AVX512BW-NEXT: vmovd %xmm2, %eax ; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 @@ -416,8 +418,10 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind { ; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16: ; AVX512BWVL: # BB#0: ; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0 +; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512BWVL-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] ; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1 +; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 ; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2 ; AVX512BWVL-NEXT: vmovd %xmm2, %eax ; AVX512BWVL-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 diff --git a/test/CodeGen/X86/stack-folding-int-avx512.ll b/test/CodeGen/X86/stack-folding-int-avx512.ll index 04a7d11590147..38e19efb71326 100644 --- a/test/CodeGen/X86/stack-folding-int-avx512.ll +++ b/test/CodeGen/X86/stack-folding-int-avx512.ll @@ -204,8 +204,8 @@ define <64 x i8> @stack_fold_pabsb_maskz(<64 x i8> %a0, i64 %mask) { } define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) { - ;check-label: stack_fold_pabsd - ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte folded reload + ;CHECK-LABEL: stack_fold_pabsd + ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> undef, i16 -1) ret <16 x i32> %2 @@ -213,16 +213,16 @@ define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) { declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16) nounwind readnone define <16 x i32> @stack_fold_pabsd_mask(<16 x i32> %passthru, <16 x i32> %a0, i16 %mask) { - ;check-label: stack_fold_pabsd - ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte 
folded reload + ;CHECK-LABEL: stack_fold_pabsd_mask + ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) ret <16 x i32> %2 } define <16 x i32> @stack_fold_pabsd_maskz(<16 x i32> %a0, i16 %mask) { - ;check-label: stack_fold_pabsd - ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte folded reload + ;CHECK-LABEL: stack_fold_pabsd_maskz + ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() %2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> zeroinitializer, i16 %mask) ret <16 x i32> %2 diff --git a/test/CodeGen/X86/vec_partial.ll b/test/CodeGen/X86/vec_partial.ll index e5ac81add7f6b..ee15c2af6dd2e 100644 --- a/test/CodeGen/X86/vec_partial.ll +++ b/test/CodeGen/X86/vec_partial.ll @@ -1,12 +1,18 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 ; PR11580 define <3 x float> @addf3(<3 x float> %x) { -; CHECK-LABEL: addf3: -; CHECK: # BB#0: # %entry -; CHECK-NEXT: addps {{.*}}(%rip), %xmm0 -; CHECK-NEXT: retq +; X86-LABEL: addf3: +; X86: # BB#0: # %entry +; X86-NEXT: addps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: addf3: +; X64: # BB#0: # %entry +; X64-NEXT: addps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq entry: %add = fadd <3 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> ret <3 x float> %add @@ -14,9 +20,13 @@ entry: ; PR11580 define <4 x float> @cvtf3_f4(<3 x float> %x) { -; CHECK-LABEL: cvtf3_f4: -; CHECK: # BB#0: # %entry -; CHECK-NEXT: retq +; X86-LABEL: cvtf3_f4: +; X86: # BB#0: # %entry +; X86-NEXT: retl +; +; X64-LABEL: cvtf3_f4: +; X64: # BB#0: # %entry +; X64-NEXT: retq entry: %extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef> ret <4 x float> %extractVec @@ -24,9 +34,13 @@ entry: ; PR11580 define <3 x float> @cvtf4_f3(<4 x float> %x) { -; CHECK-LABEL: cvtf4_f3: -; CHECK: # BB#0: # %entry -; CHECK-NEXT: retq +; X86-LABEL: cvtf4_f3: +; X86: # BB#0: # %entry +; X86-NEXT: retl +; +; X64-LABEL: cvtf4_f3: +; X64: # BB#0: # %entry +; X64-NEXT: retq entry: %extractVec = shufflevector <4 x float> %x, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> ret <3 x float> %extractVec diff --git a/test/CodeGen/X86/vec_reassociate.ll b/test/CodeGen/X86/vec_reassociate.ll index 0d3373528f583..5234b0c8a77cd 100644 --- a/test/CodeGen/X86/vec_reassociate.ll +++ b/test/CodeGen/X86/vec_reassociate.ll @@ -1,10 +1,17 @@ -; 
RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @add_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: paddd %xmm1, %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: add_4i32: +; X86: # BB#0: +; X86-NEXT: paddd %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_4i32: +; X64: # BB#0: +; X64-NEXT: paddd %xmm1, %xmm0 +; X64-NEXT: retq %1 = add <4 x i32> %a0, <i32 1, i32 -2, i32 3, i32 -4> %2 = add <4 x i32> %a1, <i32 -1, i32 2, i32 -3, i32 4> %3 = add <4 x i32> %1, %2 @@ -12,10 +19,15 @@ define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @add_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: paddd %xmm1, %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: add_4i32_commute: +; X86: # BB#0: +; X86-NEXT: paddd %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: add_4i32_commute: +; X64: # BB#0: +; X64-NEXT: paddd %xmm1, %xmm0 +; X64-NEXT: retq %1 = add <4 x i32> <i32 1, i32 -2, i32 3, i32 -4>, %a0 %2 = add <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, %a1 %3 = add <4 x i32> %1, %2 @@ -23,11 +35,17 @@ define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @mul_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: pmulld %xmm1, %xmm0 - ;CHECK-NEXT: pmulld .LCPI2_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: mul_4i32: +; X86: # BB#0: +; X86-NEXT: pmulld %xmm1, %xmm0 +; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: mul_4i32: +; X64: # BB#0: +; X64-NEXT: pmulld %xmm1, %xmm0 +; X64-NEXT: pmulld {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4> %2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1> %3 = mul <4 x i32> %1, %2 @@ -35,11 +53,17 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @mul_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: pmulld %xmm1, %xmm0 - ;CHECK-NEXT: pmulld .LCPI3_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: mul_4i32_commute: +; X86: # BB#0: +; X86-NEXT: pmulld %xmm1, %xmm0 +; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: mul_4i32_commute: +; X64: # BB#0: +; X64-NEXT: pmulld %xmm1, %xmm0 +; X64-NEXT: pmulld {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0 %2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1 %3 = mul <4 x i32> %1, %2 @@ -47,11 +71,17 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @and_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: andps %xmm1, %xmm0 - ;CHECK-NEXT: andps .LCPI4_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: and_4i32: +; X86: # BB#0: +; X86-NEXT: andps %xmm1, %xmm0 +; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: and_4i32: +; X64: # BB#0: +; X64-NEXT: andps %xmm1, %xmm0 +; X64-NEXT: andps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3> %2 = and <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1> %3 = and <4 x i32> %1, %2 @@ -59,11 +89,17 @@ define <4 x i32> 
@and_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @and_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: andps %xmm1, %xmm0 - ;CHECK-NEXT: andps .LCPI5_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: and_4i32_commute: +; X86: # BB#0: +; X86-NEXT: andps %xmm1, %xmm0 +; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: and_4i32_commute: +; X64: # BB#0: +; X64-NEXT: andps %xmm1, %xmm0 +; X64-NEXT: andps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = and <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 %2 = and <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1 %3 = and <4 x i32> %1, %2 @@ -71,11 +107,17 @@ define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @or_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: orps %xmm1, %xmm0 - ;CHECK-NEXT: orps .LCPI6_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: or_4i32: +; X86: # BB#0: +; X86-NEXT: orps %xmm1, %xmm0 +; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: or_4i32: +; X64: # BB#0: +; X64-NEXT: orps %xmm1, %xmm0 +; X64-NEXT: orps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = or <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3> %2 = or <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1> %3 = or <4 x i32> %1, %2 @@ -83,23 +125,35 @@ define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @or_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: orps %xmm1, %xmm0 - ;CHECK-NEXT: orps .LCPI7_0(%rip), %xmm0 - ;CHECK-NEXT: retq - %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 +; X86-LABEL: or_4i32_commute: +; X86: # BB#0: +; X86-NEXT: orps %xmm1, %xmm0 +; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: or_4i32_commute: +; X64: # BB#0: +; X64-NEXT: orps %xmm1, %xmm0 +; X64-NEXT: orps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq + %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 %2 = or <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1 %3 = or <4 x i32> %1, %2 ret <4 x i32> %3 } define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @xor_4i32 - ;CHECK: # BB#0: - ;CHECK-NEXT: xorps %xmm1, %xmm0 - ;CHECK-NEXT: xorps .LCPI8_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: xor_4i32: +; X86: # BB#0: +; X86-NEXT: xorps %xmm1, %xmm0 +; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: xor_4i32: +; X64: # BB#0: +; X64-NEXT: xorps %xmm1, %xmm0 +; X64-NEXT: xorps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = xor <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3> %2 = xor <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1> %3 = xor <4 x i32> %1, %2 @@ -107,11 +161,17 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) { } define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { - ;CHECK-LABEL: @xor_4i32_commute - ;CHECK: # BB#0: - ;CHECK-NEXT: xorps %xmm1, %xmm0 - ;CHECK-NEXT: xorps .LCPI9_0(%rip), %xmm0 - ;CHECK-NEXT: retq +; X86-LABEL: xor_4i32_commute: +; X86: # BB#0: +; X86-NEXT: xorps %xmm1, %xmm0 +; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: xor_4i32_commute: +; X64: # BB#0: +; X64-NEXT: xorps %xmm1, %xmm0 +; X64-NEXT: xorps {{.*}}(%rip), %xmm0 +; X64-NEXT: retq %1 = xor <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0 %2 = xor <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1 %3 = xor <4 x i32> %1, %2 diff --git a/test/CodeGen/X86/vector-lzcnt-512.ll b/test/CodeGen/X86/vector-lzcnt-512.ll index 79d133bbfb8f3..88378eb51a27b 
100644 --- a/test/CodeGen/X86/vector-lzcnt-512.ll +++ b/test/CodeGen/X86/vector-lzcnt-512.ll @@ -1,39 +1,337 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CD +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CDBW +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512DQ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind { -; ALL-LABEL: testv8i64: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntq %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv8i64: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv8i64: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv8i64: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv8i64: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $32, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; 
AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 0) ret <8 x i64> %out } define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind { -; ALL-LABEL: testv8i64u: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntq %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv8i64u: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv8i64u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv8i64u: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv8i64u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrlq $32, %zmm0, 
%zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 -1) ret <8 x i64> %out } define <16 x i32> @testv16i32(<16 x i32> %in) nounwind { -; ALL-LABEL: testv16i32: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntd %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv16i32: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv16i32: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv16i32: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2 +; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv16i32: +; AVX512DQ: 
## BB#0: +; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm5, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 0) ret <16 x i32> %out } define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind { -; ALL-LABEL: testv16i32u: -; ALL: ## BB#0: -; ALL-NEXT: vplzcntd %zmm0, %zmm0 -; ALL-NEXT: retq +; AVX512CD-LABEL: testv16i32u: +; AVX512CD: ## BB#0: +; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CD-NEXT: retq +; +; AVX512CDBW-LABEL: testv16i32u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; +; AVX512BW-LABEL: testv16i32u: +; AVX512BW: ## BB#0: +; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = 
[0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2 +; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv16i32u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1 +; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $4, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm1 +; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpackuswb %ymm5, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5] +; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512DQ-NEXT: retq %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 -1) ret <16 x i32> %out } @@ -52,20 +350,78 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind { ; AVX512CD-NEXT: vpsubw %ymm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv32i16: +; 
AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdw %zmm1, %ymm1 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv32i16: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv32i16: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, 
%ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm7 +; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpaddw %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm5 +; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm2 +; AVX512DQ-NEXT: vpaddb %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 0) ret <32 x i16> %out } @@ -84,20 +440,78 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind { ; AVX512CD-NEXT: vpsubw %ymm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv32i16u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdw %zmm1, %ymm1 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv32i16u: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] -; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 -; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0 -; 
AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm1 +; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0 +; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv32i16u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm7 +; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5 +; AVX512DQ-NEXT: vpaddb %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpaddw %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm5 +; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm5 +; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm2 +; AVX512DQ-NEXT: vpaddb %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 -1) ret <32 x i16> %out } @@ -128,32 +542,78 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind { ; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv64i8: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = 
xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdb %zmm1, %xmm1 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv64i8: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = 
xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandnq %zmm2, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm1, %zmm3, %zmm1 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv64i8: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpxor %ymm5, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpcmpeqb 
%ymm5, %ymm0, %ymm6 +; AVX512DQ-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0) ret <64 x i8> %out } @@ -184,32 +644,78 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind { ; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512CD-NEXT: retq ; +; AVX512CDBW-LABEL: testv64i8u: +; AVX512CDBW: ## BB#0: +; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1 +; AVX512CDBW-NEXT: vpmovdb %zmm1, %xmm1 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX512CDBW-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 +; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512CDBW-NEXT: retq +; ; AVX512BW-LABEL: testv64i8u: ; AVX512BW: ## BB#0: -; AVX512BW-NEXT: vextracti64x4 
$1, %zmm0, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24] -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2 -; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0 -; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0 -; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandnq %zmm2, %zmm0, %zmm1 +; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] +; AVX512BW-NEXT: vpshufb %zmm1, %zmm3, %zmm1 +; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0 +; 
AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0 +; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq +; +; AVX512DQ-LABEL: testv64i8u: +; AVX512DQ: ## BB#0: +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3 +; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpxor %ymm5, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm6 +; AVX512DQ-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0 +; AVX512DQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3 +; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm2 +; AVX512DQ-NEXT: vpand %ymm2, %ymm3, %ymm2 +; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1 +; AVX512DQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1 +; AVX512DQ-NEXT: retq %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1) ret <64 x i8> %out } diff --git a/test/CodeGen/X86/vector-shuffle-variable-128.ll b/test/CodeGen/X86/vector-shuffle-variable-128.ll index 87fd4a7bf6b9b..bde8a16d2a5a6 100644 --- a/test/CodeGen/X86/vector-shuffle-variable-128.ll +++ b/test/CodeGen/X86/vector-shuffle-variable-128.ll @@ -1303,70 +1303,39 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> % ; SSE41-NEXT: andl $7, %r8d ; SSE41-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSE41-NEXT: andl $7, %r9d -; SSE41-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; SSE41-NEXT: movd %eax, %xmm1 -; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm1 -; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm1 -; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm1 -; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm1 -; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm1 ; SSE41-NEXT: pxor %xmm0, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7] +; SSE41-NEXT: pinsrw $0, -40(%rsp,%rdi,2), %xmm0 +; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0 +; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm0 +; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm0 +; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm0 +; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm0 ; SSE41-NEXT: retq ; -; AVX1-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: -; AVX1: # BB#0: -; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> -; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> -; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> -; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> -; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> -; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> -; AVX1-NEXT: andl $7, %edi -; AVX1-NEXT: andl $7, %esi -; AVX1-NEXT: andl $7, %edx -; AVX1-NEXT: andl $7, %ecx -; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: andl $7, %r8d -; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) -; AVX1-NEXT: andl $7, %r9d -; AVX1-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; AVX1-NEXT: vmovd %eax, %xmm0 -; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0 -; AVX1-NEXT: vpinsrw $5, 
-24(%rsp,%r9,2), %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7] -; AVX1-NEXT: retq -; -; AVX2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: -; AVX2: # BB#0: -; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> -; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> -; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> -; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> -; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> -; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> -; AVX2-NEXT: andl $7, %edi -; AVX2-NEXT: andl $7, %esi -; AVX2-NEXT: andl $7, %edx -; AVX2-NEXT: andl $7, %ecx -; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: andl $7, %r8d -; AVX2-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: andl $7, %r9d -; AVX2-NEXT: movzwl -40(%rsp,%rdi,2), %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0 -; AVX2-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0 -; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3] -; AVX2-NEXT: retq +; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16: +; AVX: # BB#0: +; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> +; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> +; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> +; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> +; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> +; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> +; AVX-NEXT: andl $7, %edi +; AVX-NEXT: andl $7, %esi +; AVX-NEXT: andl $7, %edx +; AVX-NEXT: andl $7, %ecx +; AVX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) +; AVX-NEXT: andl $7, %r8d +; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp) +; AVX-NEXT: andl $7, %r9d +; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $0, -40(%rsp,%rdi,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0 +; AVX-NEXT: retq %x0 = extractelement <8 x i16> %x, i16 %i0 %y1 = extractelement <8 x i16> %y, i16 %i1 %x2 = extractelement <8 x i16> %x, i16 %i2 diff --git a/test/CodeGen/X86/win64_eh_leaf.ll b/test/CodeGen/X86/win64_eh_leaf.ll index 21a423ab36a9b..35d55a9073754 100644 --- a/test/CodeGen/X86/win64_eh_leaf.ll +++ b/test/CodeGen/X86/win64_eh_leaf.ll @@ -29,3 +29,12 @@ entry: ; and no unwind info in the object file. 
; READOBJ-NOT: leaf_func } + +define void @naked_func() naked { + call void asm sideeffect "ret", ""() + unreachable +} +; ASM-LABEL: naked_func: +; ASM-NOT: .seh_ +; ASM: ret +; ASM-NOT: .seh_ diff --git a/test/CodeGen/X86/xray-attribute-instrumentation.ll b/test/CodeGen/X86/xray-attribute-instrumentation.ll index c52ccf9356bc5..7c60327d2c304 100644 --- a/test/CodeGen/X86/xray-attribute-instrumentation.ll +++ b/test/CodeGen/X86/xray-attribute-instrumentation.ll @@ -15,10 +15,17 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" } ; CHECK: .p2align 4, 0x90 ; CHECK-NEXT: .quad {{.*}}xray_synthetic_0 +; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_0 ; CHECK-NEXT: .section {{.*}}xray_instr_map ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .quad {{.*}}xray_sled_0 ; CHECK: .quad {{.*}}xray_sled_1 +; CHECK-LABEL: Lxray_synthetic_end0: +; CHECK: .section {{.*}}xray_fn_idx +; CHECK-LABEL: Lxray_fn_idx_synth_0: +; CHECK: .quad {{.*}}xray_synthetic_0 +; CHECK-NEXT: .quad {{.*}}xray_synthetic_end0 + ; We test multiple returns in a single function to make sure we're getting all ; of them with XRay instrumentation. @@ -46,8 +53,14 @@ NotEqual: } ; CHECK: .p2align 4, 0x90 ; CHECK-NEXT: .quad {{.*}}xray_synthetic_1 +; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_1 ; CHECK-NEXT: .section {{.*}}xray_instr_map ; CHECK-LABEL: Lxray_synthetic_1: ; CHECK: .quad {{.*}}xray_sled_2 ; CHECK: .quad {{.*}}xray_sled_3 ; CHECK: .quad {{.*}}xray_sled_4 +; CHECK-LABEL: Lxray_synthetic_end1: +; CHECK: .section {{.*}}xray_fn_idx +; CHECK-LABEL: Lxray_fn_idx_synth_1: +; CHECK: .quad {{.*}}xray_synthetic_1 +; CHECK-NEXT: .quad {{.*}}xray_synthetic_end1 diff --git a/test/CodeGen/X86/xray-custom-log.ll b/test/CodeGen/X86/xray-custom-log.ll new file mode 100644 index 0000000000000..63625d44b4cb2 --- /dev/null +++ b/test/CodeGen/X86/xray-custom-log.ll @@ -0,0 +1,23 @@ +; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s + +define i32 @fn() nounwind noinline uwtable "function-instrument"="xray-always" { + %eventptr = alloca i8 + %eventsize = alloca i32 + store i32 3, i32* %eventsize + %val = load i32, i32* %eventsize + call void @llvm.xray.customevent(i8* %eventptr, i32 %val) + ; CHECK-LABEL: Lxray_event_sled_0: + ; CHECK-NEXT: .ascii "\353\024 + ; CHECK-NEXT: pushq %rax + ; CHECK-NEXT: movq {{.*}}, %rdi + ; CHECK-NEXT: movq {{.*}}, %rsi + ; CHECK-NEXT: movabsq $__xray_CustomEvent, %rax + ; CHECK-NEXT: callq *%rax + ; CHECK-NEXT: popq %rax + ret i32 0 +} +; CHECK: .section {{.*}}xray_instr_map +; CHECK-LABEL: Lxray_synthetic_0: +; CHECK: .quad {{.*}}xray_event_sled_0 + +declare void @llvm.xray.customevent(i8*, i32) diff --git a/test/CodeGen/X86/xray-loop-detection.ll b/test/CodeGen/X86/xray-loop-detection.ll new file mode 100644 index 0000000000000..3cd6b4aa6f8c4 --- /dev/null +++ b/test/CodeGen/X86/xray-loop-detection.ll @@ -0,0 +1,23 @@ +; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s +; RUN: llc -filetype=asm -o - -mtriple=x86_64-darwin-unknown < %s | FileCheck %s + +define i32 @foo(i32 %i) nounwind noinline uwtable "xray-instruction-threshold"="1" { +entry: + br label %Test +Test: + %indvar = phi i32 [0, %entry], [%nextindvar, %Inc] + %cond = icmp eq i32 %indvar, %i + br i1 %cond, label %Exit, label %Inc +Inc: + %nextindvar = add i32 %indvar, 1 + br label %Test +Exit: + %retval = phi i32 [%indvar, %Test] + ret i32 %retval +} + +; CHECK-LABEL: xray_sled_0: +; CHECK-NEXT: .ascii "\353\t" +; CHECK-NEXT: nopw 512(%rax,%rax) +; CHECK-LABEL: 
Ltmp0: + diff --git a/test/CodeGen/X86/xray-tail-call-sled.ll b/test/CodeGen/X86/xray-tail-call-sled.ll index ece786a5e809b..b12c78a77b203 100644 --- a/test/CodeGen/X86/xray-tail-call-sled.ll +++ b/test/CodeGen/X86/xray-tail-call-sled.ll @@ -14,11 +14,17 @@ define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-alway ; CHECK-NEXT: nopw %cs:512(%rax,%rax) } ; CHECK: .p2align 4, 0x90 -; CHECK-NEXT: .quad {{.*}}xray_synthetic_0 +; CHECK-NEXT: .quad {{.*}}xray_synthetic_0{{.*}} +; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_0{{.*}} ; CHECK-NEXT: .section {{.*}}xray_instr_map ; CHECK-LABEL: Lxray_synthetic_0: ; CHECK: .quad {{.*}}xray_sled_0 ; CHECK: .quad {{.*}}xray_sled_1 +; CHECK-LABEL: Lxray_synthetic_end0: +; CHECK-NEXT: .section {{.*}}xray_fn_idx +; CHECK-LABEL: Lxray_fn_idx_synth_0: +; CHECK: .quad {{.*}}xray_synthetic_0 +; CHECK-NEXT: .quad {{.*}}xray_synthetic_end0 define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" { ; CHECK: .p2align 1, 0x90 @@ -36,7 +42,13 @@ define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-alway ret i32 %retval } ; CHECK: .p2align 4, 0x90 -; CHECK-NEXT: .quad {{.*}}xray_synthetic_1 +; CHECK-NEXT: .quad {{.*}}xray_synthetic_1{{.*}} +; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_1{{.*}} ; CHECK-LABEL: Lxray_synthetic_1: ; CHECK: .quad {{.*}}xray_sled_2 ; CHECK: .quad {{.*}}xray_sled_3 +; CHECK-LABEL: Lxray_synthetic_end1: +; CHECK: .section {{.*}}xray_fn_idx +; CHECK-LABEL: Lxray_fn_idx_synth_1: +; CHECK: .quad {{.*}}xray_synthetic_1 +; CHECK: .quad {{.*}}xray_synthetic_end1 diff --git a/test/DebugInfo/COFF/synthetic.ll b/test/DebugInfo/COFF/synthetic.ll new file mode 100644 index 0000000000000..7a2f3b87b9e63 --- /dev/null +++ b/test/DebugInfo/COFF/synthetic.ll @@ -0,0 +1,55 @@ +; RUN: llc -mtriple x86_64-unknown-windows-msvc -filetype asm -o - %s | FileCheck %s + +target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-windows-msvc" + +define dllexport void ()* @f() !dbg !6 { +entry: + ret void ()* null, !dbg !28 +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "<stdin>", directory: "/Users/compnerd/Source/llvm", checksumkind: CSK_MD5, checksum: "2851eea4f12e754f1a68c47a7045406a") +!2 = !{} +!3 = !{i32 2, !"CodeView", i32 1} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!6 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2) +!7 = !DISubroutineType(types: !8) +!8 = !{!9} +!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64) +!10 = !DICompositeType(tag: DW_TAG_structure_type, scope: !1, size: 256, flags: DIFlagAppleBlock, elements: !11) +!11 = !{!12, !14, !16, !17, !21} +!12 = !DIDerivedType(tag: DW_TAG_member, name: "__isa", scope: !1, file: !1, baseType: !13, size: 64) +!13 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64) +!14 = !DIDerivedType(tag: DW_TAG_member, name: "__flags", scope: !1, file: !1, baseType: !15, size: 32, offset: 64) +!15 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!16 = !DIDerivedType(tag: DW_TAG_member, name: "__reserved", scope: !1, file: !1, baseType: !15, size: 32, offset: 96) +!17 = !DIDerivedType(tag: DW_TAG_member, name: "__FuncPtr", scope: 
!1, file: !1, baseType: !18, size: 64, offset: 128) +!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !19, size: 64) +!19 = !DISubroutineType(types: !20) +!20 = !{null} +!21 = !DIDerivedType(tag: DW_TAG_member, name: "__descriptor", scope: !1, baseType: !22, size: 64, align: 64, offset: 192) +!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64) +!23 = !DICompositeType(tag: DW_TAG_structure_type, name: "__block_descriptor", scope: !1, size: 64, flags: DIFlagAppleBlock, elements: !24) +!24 = !{!25, !27} +!25 = !DIDerivedType(tag: DW_TAG_member, name: "reserved", scope: !1, file: !1, baseType: !26, size: 32) +!26 = !DIBasicType(name: "long unsigned int", size: 32, encoding: DW_ATE_unsigned) +!27 = !DIDerivedType(tag: DW_TAG_member, name: "Size", scope: !1, file: !1, baseType: !26, size: 32, offset: 32) +!28 = !DILocation(line: 1, scope: !6) + +; CHECK: # Struct +; CHECK: # TypeLeafKind: LF_STRUCTURE +; CHECK: # MemberCount: 0 +; CHECK: # Properties [ +; CHECK: # ForwardReference +; CHECK: # ] +; CHECK: # FieldList: 0x0 +; CHECK: # DerivedFrom: 0x0 +; CHECK: # VShape: 0x0 +; CHECK: # SizeOf: 0 +; CHECK: # Name: __block_descriptor +; CHECK: # } + diff --git a/test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64 b/test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64 Binary files differ new file mode 100644 index 0000000000000..ba352f51123d5 --- /dev/null +++ b/test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64 diff --git a/test/DebugInfo/dwarfdump-decompression-error.test b/test/DebugInfo/dwarfdump-decompression-error.test new file mode 100644 index 0000000000000..184833164dc9a --- /dev/null +++ b/test/DebugInfo/dwarfdump-decompression-error.test @@ -0,0 +1,15 @@ +REQUIRES: zlib + +// dwarfdump-decompression-error.elf-x86-64 was prepared using the following +// source code and invocation: +// test.cpp: +// int main() { return 0; } +// +// gcc test.cpp -o out -g -Wl,--compress-debug-sections,zlib +// +// After that, the resulting object was modified manually: one random byte in the +// compressed content of the .debug_info section was changed to 0xff, which breaks +// the normal decompression flow at run time.
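+//
+// A minimal sketch of that corruption step (not part of the original test,
+// and the offset is hypothetical -- the real offset of the compressed
+// .debug_info payload must be located first, e.g. with `readelf -S out`):
+//
+//   #include <stdio.h>
+//   int main(void) {
+//     FILE *f = fopen("out", "r+b");   /* object produced by the gcc line above */
+//     if (!f) return 1;
+//     fseek(f, 0x1234, SEEK_SET);      /* hypothetical offset into .debug_info */
+//     fputc(0xff, f);                  /* the single corrupted byte */
+//     return fclose(f);
+//   }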
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-decompression-error.elf-x86-64 2>&1 | FileCheck %s + +CHECK: error: failed to decompress '.debug_info', zlib error: Z_DATA_ERROR diff --git a/test/Linker/metadata-global.ll b/test/Linker/metadata-global.ll new file mode 100644 index 0000000000000..56d77e128bde8 --- /dev/null +++ b/test/Linker/metadata-global.ll @@ -0,0 +1,11 @@ +; RUN: llvm-link %s -S | FileCheck %s + +; CHECK-DAG: @a = global i32 0 +; CHECK-DAG: @b = global i32 0, !associated !0 + +; CHECK-DAG: !0 = !{i32* @b} + +@a = global i32 0 +@b = global i32 0, !associated !0 + +!0 = !{i32* @b} diff --git a/test/MC/AArch64/basic-a64-instructions.s b/test/MC/AArch64/basic-a64-instructions.s index 8a82c99eb8c1f..46b2397ec734e 100644 --- a/test/MC/AArch64/basic-a64-instructions.s +++ b/test/MC/AArch64/basic-a64-instructions.s @@ -1496,23 +1496,6 @@ _func: // Data-processing (2 source) //------------------------------------------------------------------------------ - crc32b w5, w7, w20 - crc32h w28, wzr, w30 - crc32w w0, w1, w2 - crc32x w7, w9, x20 - crc32cb w9, w5, w4 - crc32ch w13, w17, w25 - crc32cw wzr, w3, w5 - crc32cx w18, w16, xzr -// CHECK: crc32b w5, w7, w20 // encoding: [0xe5,0x40,0xd4,0x1a] -// CHECK: crc32h w28, wzr, w30 // encoding: [0xfc,0x47,0xde,0x1a] -// CHECK: crc32w w0, w1, w2 // encoding: [0x20,0x48,0xc2,0x1a] -// CHECK: crc32x w7, w9, x20 // encoding: [0x27,0x4d,0xd4,0x9a] -// CHECK: crc32cb w9, w5, w4 // encoding: [0xa9,0x50,0xc4,0x1a] -// CHECK: crc32ch w13, w17, w25 // encoding: [0x2d,0x56,0xd9,0x1a] -// CHECK: crc32cw wzr, w3, w5 // encoding: [0x7f,0x58,0xc5,0x1a] -// CHECK: crc32cx w18, w16, xzr // encoding: [0x12,0x5e,0xdf,0x9a] - udiv w0, w7, w10 udiv x9, x22, x4 sdiv w12, w21, w0 diff --git a/test/MC/AArch64/crc.s b/test/MC/AArch64/crc.s new file mode 100644 index 0000000000000..f0e4a5aa7531b --- /dev/null +++ b/test/MC/AArch64/crc.s @@ -0,0 +1,45 @@ +// RUN: llvm-mc -triple aarch64-- -mattr=+crc %s 2>&1 |\ +// RUN: FileCheck %s --check-prefix=CRC + +// RUN: not llvm-mc -triple aarch64-- %s 2>&1 |\ +// RUN: FileCheck %s --check-prefix=NOCRC +// RUN: not llvm-mc -triple aarch64-- -mcpu=cyclone %s 2>&1 |\ +// RUN: FileCheck %s --check-prefix=NOCRC + + crc32b w0, w1, w5 + crc32h w3, w5, w6 + crc32w w19, wzr, w20 + crc32x w3, w5, x20 + +// CRC: crc32b w0, w1, w5 +// CRC: crc32h w3, w5, w6 +// CRC: crc32w w19, wzr, w20 +// CRC: crc32x w3, w5, x20 + +// NOCRC: error: instruction requires: crc +// NOCRC: crc32b w0, w1, w5 +// NOCRC: error: instruction requires: crc +// NOCRC: crc32h w3, w5, w6 +// NOCRC: error: instruction requires: crc +// NOCRC: crc32w w19, wzr, w20 +// NOCRC: error: instruction requires: crc +// NOCRC: crc32x w3, w5, x20 + + crc32cb w5, w10, w15 + crc32ch w3, w5, w7 + crc32cw w11, w13, w17 + crc32cx w19, w23, x29 + +// CRC: crc32cb w5, w10, w15 +// CRC: crc32ch w3, w5, w7 +// CRC: crc32cw w11, w13, w17 +// CRC: crc32cx w19, w23, x29 + +// NOCRC: error: instruction requires: crc +// NOCRC: crc32cb w5, w10, w15 +// NOCRC: error: instruction requires: crc +// NOCRC: crc32ch w3, w5, w7 +// NOCRC: error: instruction requires: crc +// NOCRC: crc32cw w11, w13, w17 +// NOCRC: error: instruction requires: crc +// NOCRC: crc32cx w19, w23, x29 diff --git a/test/MC/AArch64/cyclone-crc.s b/test/MC/AArch64/cyclone-crc.s deleted file mode 100644 index 5786df51ddeb0..0000000000000 --- a/test/MC/AArch64/cyclone-crc.s +++ /dev/null @@ -1,27 +0,0 @@ -// RUN: not llvm-mc -triple arm64-apple-ios -mcpu=cyclone %s 2>&1 | FileCheck %s - - crc32b w0, w1, w5 - crc32h w3, w5, w6 - 
crc32w w19, wzr, w20 - crc32x w3, w5, x20 -CHECK: error: instruction requires: crc -CHECK: crc32b w0, w1, w5 -CHECK: error: instruction requires: crc -CHECK: crc32h w3, w5, w6 -CHECK: error: instruction requires: crc -CHECK: crc32w w19, wzr, w20 -CHECK: error: instruction requires: crc -CHECK: crc32x w3, w5, x20 - - crc32cb w5, w10, w15 - crc32ch w3, w5, w7 - crc32cw w11, w13, w17 - crc32cx w19, w23, x29 -CHECK: error: instruction requires: crc -CHECK: crc32cb w5, w10, w15 -CHECK: error: instruction requires: crc -CHECK: crc32ch w3, w5, w7 -CHECK: error: instruction requires: crc -CHECK: crc32cw w11, w13, w17 -CHECK: error: instruction requires: crc -CHECK: crc32cx w19, w23, x29 diff --git a/test/MC/AArch64/directive-arch-negative.s b/test/MC/AArch64/directive-arch-negative.s index 21fd90ebdf111..2991d2499ebfd 100644 --- a/test/MC/AArch64/directive-arch-negative.s +++ b/test/MC/AArch64/directive-arch-negative.s @@ -44,6 +44,12 @@ # CHECK: error: instruction requires: lse # CHECK: casa w5, w7, [x19] + .arch armv8+crypto + crc32b w0, w1, w2 + +# CHECK: error: instruction requires: crc +# CHECK: crc32b w0, w1, w2 + .arch armv8.1-a+nolse casa w5, w7, [x20] diff --git a/test/MC/ARM/ltorg-range.s b/test/MC/ARM/ltorg-range.s new file mode 100644 index 0000000000000..5c27d4cd0df26 --- /dev/null +++ b/test/MC/ARM/ltorg-range.s @@ -0,0 +1,27 @@ +@ RUN: llvm-mc -triple armv7-unknown-linux-gnueabi -filetype obj -o - %s \ +@ RUN: | llvm-objdump -d - | FileCheck %s + + ldr r0, =0x01020304 +@ CHECK: ldr + .ltorg +@ CHECK: 0x01020304 + ldr r0, =0x01020304 + ldr r0, =0x01020304 + ldr r0, =0x01020304 +@ CHECK: ldr +@ CHECK: ldr +@ CHECK: ldr + .ltorg +@ CHECK: 0x01020304 + .rep 1028 + .word 0 + .endr +@ CHECK: 0x00000000 + + ldr r0, =0x01020304 +@ CHECK: ldr + .ltorg +@ CHECK: 0x01020304 + .rep 1028 + .word 0 + .endr diff --git a/test/MC/ARM/negative-immediates-fail.s b/test/MC/ARM/negative-immediates-fail.s index dd45e4316389c..959e55eebb5a8 100644 --- a/test/MC/ARM/negative-immediates-fail.s +++ b/test/MC/ARM/negative-immediates-fail.s @@ -11,3 +11,8 @@ ADC r0, r1, #0xFFFFFE02 ADD.W r0, r0, #0xFF01FF01 # CHECK: error: immediate operand must be in the range [0,7] + +ORR r0, r1, #0xFFFFFF00 +# CHECK: error: instruction requires: thumb2 +ORN r0, r1, #0xFFFFFF00 +# CHECK: error: instruction requires: thumb2 diff --git a/test/MC/ARM/negative-immediates-thumb1-fail.s b/test/MC/ARM/negative-immediates-thumb1-fail.s index 0e8525ede9031..3648721203a0e 100644 --- a/test/MC/ARM/negative-immediates-thumb1-fail.s +++ b/test/MC/ARM/negative-immediates-thumb1-fail.s @@ -13,3 +13,8 @@ SUBs r1, r0, #0xFFFFFFF5 SUBs r0, #0xFFFFFEFF # CHECK: error: immediate operand must be in the range [0,255] + +ORRs r0, r1, #0xFFFFFF00 +# CHECK: error: instruction requires: thumb2 +ORNs r0, r1, #0xFFFFFF00 +# CHECK: error: instruction requires: thumb2 diff --git a/test/MC/ARM/negative-immediates.s b/test/MC/ARM/negative-immediates.s index aa3998163d883..38a6bbb1b7b43 100644 --- a/test/MC/ARM/negative-immediates.s +++ b/test/MC/ARM/negative-immediates.s @@ -98,6 +98,22 @@ # CHECK: and r0, r1, #16777472 @ encoding: [0x01,0xf0,0x01,0x20] # CHECK-DISABLED: error: instruction requires: NegativeImmediates # CHECK-DISABLED: BIC + ORR r0, r1, #0xFFFFFF00 +# CHECK-DISABLED: error: instruction requires: NegativeImmediates +# CHECK-DISABLED: ORR +# CHECK: orn r0, r1, #255 + ORR r0, r1, #0xFEFFFEFF +# CHECK: orn r0, r1, #16777472 @ encoding: [0x61,0xf0,0x01,0x20] +# CHECK-DISABLED: error: instruction requires: NegativeImmediates +# CHECK-DISABLED: 
ORR + ORN r0, r1, #0xFFFFFF00 +# CHECK: orr r0, r1, #255 +# CHECK-DISABLED: error: instruction requires: NegativeImmediates +# CHECK-DISABLED: ORN + ORN r0, r1, #0xFEFFFEFF +# CHECK: orr r0, r1, #16777472 @ encoding: [0x41,0xf0,0x01,0x20] +# CHECK-DISABLED: error: instruction requires: NegativeImmediates +# CHECK-DISABLED: ORN CMP r0, #0xFFFFFF01 # CHECK: cmn.w r0, #255 # CHECK-DISABLED: error: instruction requires: NegativeImmediates diff --git a/test/MC/AsmParser/altmacro_string.s b/test/MC/AsmParser/altmacro_string.s new file mode 100644 index 0000000000000..70012b2b85237 --- /dev/null +++ b/test/MC/AsmParser/altmacro_string.s @@ -0,0 +1,73 @@ +# RUN: llvm-mc -triple i386-linux-gnu %s| FileCheck %s + +# This test checks the altmacro string delimiters '<' and '>'. + +.altmacro + +# Test #1: +# You can delimit strings with matching angle brackets '<' '>'. +# If an argument begins with '<' and ends with '>', +# the argument is considered a string. + +# CHECK: simpleCheck: +.macro simple_check_0 name + \name: + addl $5,%eax +.endm + +simple_check_0 <simpleCheck> + +# Test #2: +# Apart from the new string marks '<..>', regular macro behavior is expected. + +# CHECK: simpleCheck0: +# CHECK: addl $0, %eax +.macro concat string1 string2 string3 + \string1\string2\string3: + addl $\string3, %eax +.endm + +concat <simple>,<Check>,<0> + +# Test #3: +# Altmacro mode must not affect the regular less-than/greater-than behavior. + +# CHECK: addl $1, %eax +# CHECK: addl $0, %eax + +.macro fun3 arg1 arg2 + addl $\arg1,%eax + addl $\arg2,%eax +.endm + +fun3 5<6 , 5>8 + +# Test #4: +# If a comma is present inside angle brackets, +# the comma is treated as a character and not as a separator. +# This test checks the ability to split the string into different +# arguments according to the use of the comma. +# Fun2 sees the comma as a character. +# Fun3 sees the comma as a separator. + +# CHECK: addl $5, %eax +# CHECK: addl $6, %eax +.macro fun2 arg + fun3 \arg +.endm + +fun2 <5,6> + +# Test #5: +# If an argument begins with '<' but there is no '>' to close it, +# regular macro behavior is expected. + +# CHECK: addl $4, %eax +.macro fun4 arg1 arg2 + .if \arg2\arg1 + addl $\arg2,%eax + .endif +.endm + +fun4 <5,4 +.noaltmacro diff --git a/test/MC/AsmParser/negative_altmacro_string.s b/test/MC/AsmParser/negative_altmacro_string.s new file mode 100644 index 0000000000000..81096c6cbdaa1 --- /dev/null +++ b/test/MC/AsmParser/negative_altmacro_string.s @@ -0,0 +1,29 @@ +# RUN: not llvm-mc -triple i386-linux-gnu %s 2>&1 | FileCheck %s + +# This test checks the altmacro string delimiters '<' and '>'. +# In this test we check the '.noaltmacro' directive. +# We expect '.altmacro' and '.noaltmacro' to act as on/off switches for the alternate macro mode. +# .noaltmacro returns the parser to regular macro handling. +# The default mode is ".noaltmacro".
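+#
+# As a quick illustration of the two modes (a sketch, not part of this
+# test's checked output):
+#
+#   .macro first_of first, second
+#     .byte \first
+#   .endm
+#   first_of <1,2>, 3   # altmacro mode: first is the string `1,2' -> .byte 1,2
+#   first_of 1, 2       # either mode:   first is `1'              -> .byte 1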
+ +# Test #1: default mode +# CHECK: error: unexpected token at start of statement +# CHECK-NEXT: <simpleCheck>: +.macro simple_check_0 name + \name: +.endm + +simple_check_0 <simpleCheck> + + +.altmacro +.noaltmacro + +# Test #2: Switching from alternate mode to default mode +# CHECK: error: unexpected token at start of statement +# CHECK-NEXT: <simpleCheck1>: +.macro simple_check_1 name + \name: +.endm + +simple_check_1 <simpleCheck1> diff --git a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt index 4d438e032e777..a2f9d24091ef2 100644 --- a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt +++ b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt @@ -1042,23 +1042,6 @@ # Data-processing (2 source) #------------------------------------------------------------------------------ -# CHECK: crc32b w5, w7, w20 -# CHECK: crc32h w28, wzr, w30 -# CHECK: crc32w w0, w1, w2 -# CHECK: crc32x w7, w9, x20 -# CHECK: crc32cb w9, w5, w4 -# CHECK: crc32ch w13, w17, w25 -# CHECK: crc32cw wzr, w3, w5 -# CHECK: crc32cx w18, w16, xzr -0xe5 0x40 0xd4 0x1a -0xfc 0x47 0xde 0x1a -0x20 0x48 0xc2 0x1a -0x27 0x4d 0xd4 0x9a -0xa9 0x50 0xc4 0x1a -0x2d 0x56 0xd9 0x1a -0x7f 0x58 0xc5 0x1a -0x12 0x5e 0xdf 0x9a - # CHECK: udiv w0, w7, w10 # CHECK: udiv x9, x22, x4 # CHECK: sdiv w12, w21, w0 diff --git a/test/ObjectYAML/wasm/name_section.yaml b/test/ObjectYAML/wasm/name_section.yaml new file mode 100644 index 0000000000000..0a4191dd05410 --- /dev/null +++ b/test/ObjectYAML/wasm/name_section.yaml @@ -0,0 +1,40 @@ +# RUN: yaml2obj %s | obj2yaml | FileCheck %s +--- !WASM +FileHeader: + Version: 0x00000001 +Sections: + - Type: TYPE + Signatures: + - ReturnType: I32 + ParamTypes: + - I32 + - Type: IMPORT + Imports: + - Module: foo + Field: a + Kind: FUNCTION + SigIndex: 0 + - Module: foo + Field: b + Kind: FUNCTION + SigIndex: 0 + - Module: foo + Field: c + Kind: FUNCTION + SigIndex: 0 + - Type: CUSTOM + Name: name + FunctionNames: + - Index: 1 + Name: foo + - Index: 0 + Name: bar +... +# CHECK: - Type: CUSTOM +# CHECK-NEXT: Name: name +# CHECK-NEXT: FunctionNames: +# CHECK-NEXT: - Index: 1 +# CHECK-NEXT: Name: foo +# CHECK-NEXT: - Index: 0 +# CHECK-NEXT: Name: bar +# CHECK: ... 
diff --git a/test/Other/new-pm-defaults.ll b/test/Other/new-pm-defaults.ll index a4a1c1f546c65..f712dc7b63caa 100644 --- a/test/Other/new-pm-defaults.ll +++ b/test/Other/new-pm-defaults.ll @@ -57,6 +57,8 @@ ; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}GlobalsAA ; CHECK-O-NEXT: Running analysis: GlobalsAA ; CHECK-O-NEXT: Running analysis: CallGraphAnalysis +; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}ProfileSummaryAnalysis +; CHECK-O-NEXT: Running analysis: ProfileSummaryAnalysis ; CHECK-O-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}LazyCallGraph{{.*}}> ; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy ; CHECK-O-NEXT: Running analysis: LazyCallGraphAnalysis diff --git a/test/Transforms/ArgumentPromotion/pr32917.ll b/test/Transforms/ArgumentPromotion/pr32917.ll new file mode 100644 index 0000000000000..a2aeac081cea0 --- /dev/null +++ b/test/Transforms/ArgumentPromotion/pr32917.ll @@ -0,0 +1,23 @@ +; RUN: opt < %s -argpromotion -S | FileCheck %s +; PR 32917 + +@b = common local_unnamed_addr global i32 0, align 4 +@a = common local_unnamed_addr global i32 0, align 4 + +define i32 @fn2() local_unnamed_addr { + %1 = load i32, i32* @b, align 4 + %2 = sext i32 %1 to i64 + %3 = inttoptr i64 %2 to i32* + call fastcc void @fn1(i32* %3) + ret i32 undef +} + +define internal fastcc void @fn1(i32* nocapture readonly) unnamed_addr { + %2 = getelementptr inbounds i32, i32* %0, i64 -1 + %3 = load i32, i32* %2, align 4 + store i32 %3, i32* @a, align 4 + ret void +} + +; CHECK: getelementptr {{.*}} -1 +; CHECK-NOT: getelementptr {{.*}} 4294967295 diff --git a/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll b/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll index b2442b8b173cf..c0d89d606d66e 100644 --- a/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll +++ b/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll @@ -32,52 +32,52 @@ bb2: ; preds = %bb1, %bb ret i32 %tmp3, !dbg !19 } -define i32 @bar_noinline(i32 %arg) local_unnamed_addr #1 !dbg !5 { +define i32 @bar_noinline(i32 %arg) local_unnamed_addr #1 !dbg !23 { bb: - %tmp = icmp slt i32 %arg, 0, !dbg !7 - br i1 %tmp, label %bb1, label %bb2, !dbg !8 + %tmp = icmp slt i32 %arg, 0, !dbg !24 + br i1 %tmp, label %bb1, label %bb2, !dbg !24 bb1: ; preds = %bb - tail call void (...) @foo() #0, !dbg !9 - tail call void (...) @foo() #0, !dbg !10 - tail call void (...) @foo() #0, !dbg !11 - br label %bb2, !dbg !18 + tail call void (...) @foo() #0, !dbg !24 + tail call void (...) @foo() #0, !dbg !24 + tail call void (...) @foo() #0, !dbg !24 + br label %bb2, !dbg !24 bb2: ; preds = %bb1, %bb %tmp3 = phi i32 [ 0, %bb1 ], [ 1, %bb ] - ret i32 %tmp3, !dbg !19 + ret i32 %tmp3, !dbg !24 } -define i32 @bar_alwaysinline(i32 %arg) local_unnamed_addr #2 !dbg !5 { +define i32 @bar_alwaysinline(i32 %arg) local_unnamed_addr #2 !dbg !25 { bb: - %tmp = icmp slt i32 %arg, 0, !dbg !7 - br i1 %tmp, label %bb1, label %bb2, !dbg !8 + %tmp = icmp slt i32 %arg, 0, !dbg !26 + br i1 %tmp, label %bb1, label %bb2, !dbg !26 bb1: ; preds = %bb - tail call void (...) @foo() #0, !dbg !9 - tail call void (...) @foo() #0, !dbg !10 - tail call void (...) @foo() #0, !dbg !11 - br label %bb2, !dbg !18 + tail call void (...) @foo() #0, !dbg !26 + tail call void (...) @foo() #0, !dbg !26 + tail call void (...) 
@foo() #0, !dbg !26 + br label %bb2, !dbg !26 bb2: ; preds = %bb1, %bb %tmp3 = phi i32 [ 0, %bb1 ], [ 1, %bb ] - ret i32 %tmp3, !dbg !19 + ret i32 %tmp3, !dbg !26 } -define i32 @bar_cold(i32 %arg) local_unnamed_addr #3 !dbg !5 { +define i32 @bar_cold(i32 %arg) local_unnamed_addr #3 !dbg !27 { bb: - %tmp = icmp slt i32 %arg, 0, !dbg !7 - br i1 %tmp, label %bb1, label %bb2, !dbg !8 + %tmp = icmp slt i32 %arg, 0, !dbg !28 + br i1 %tmp, label %bb1, label %bb2, !dbg !28 bb1: ; preds = %bb - tail call void (...) @foo() #0, !dbg !9 - tail call void (...) @foo() #0, !dbg !10 - tail call void (...) @foo() #0, !dbg !11 - br label %bb2, !dbg !18 + tail call void (...) @foo() #0, !dbg !28 + tail call void (...) @foo() #0, !dbg !28 + tail call void (...) @foo() #0, !dbg !28 + br label %bb2, !dbg !28 bb2: ; preds = %bb1, %bb %tmp3 = phi i32 [ 0, %bb1 ], [ 1, %bb ] - ret i32 %tmp3, !dbg !19 + ret i32 %tmp3, !dbg !28 } ; Function Attrs: nounwind @@ -130,3 +130,9 @@ attributes #3 = { cold nounwind } !20 = distinct !DISubprogram(name: "dummy_caller", scope: !1, file: !1, line: 19, type: !6, isLocal: false, isDefinition: true, scopeLine: 19, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2) !21 = !DILocation(line: 21, column: 11, scope: !20) !22 = !DILocation(line: 21, column: 4, scope: !20) +!23 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2) +!24 = !DILocation(line: 4, column: 6, scope: !23) +!25 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2) +!26 = !DILocation(line: 4, column: 6, scope: !25) +!27 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2) +!28 = !DILocation(line: 4, column: 6, scope: !27) diff --git a/test/Transforms/Inline/inline-hot-callsite.ll b/test/Transforms/Inline/inline-hot-callsite.ll index ebf4030d3d100..48fa3039741f6 100644 --- a/test/Transforms/Inline/inline-hot-callsite.ll +++ b/test/Transforms/Inline/inline-hot-callsite.ll @@ -1,16 +1,21 @@ -; RUN: opt < %s -inline -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s -; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s - ; This tests that a hot callsite gets the (higher) inlinehint-threshold even ; without inline hints and gets inlined because the cost is less than the ; inlinehint-threshold. A cold callee with an identical body does not get inlined because ; the cost exceeds the inline-threshold +; RUN: opt < %s -inline -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s +; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s + +; Run this with the default O2 pipeline to test that profile summary analysis +; is available during inlining.
+; RUN: opt < %s -passes='default<O2>' -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s + define i32 @callee1(i32 %x) { %x1 = add i32 %x, 1 %x2 = add i32 %x1, 1 %x3 = add i32 %x2, 1 call void @extern() + call void @extern() ret i32 %x3 } @@ -20,6 +25,7 @@ define i32 @callee2(i32 %x) { %x2 = add i32 %x1, 1 %x3 = add i32 %x2, 1 call void @extern() + call void @extern() ret i32 %x3 } diff --git a/test/Transforms/Inline/prof-update.ll b/test/Transforms/Inline/prof-update.ll index 38fcc7e459964..3fefa1c56ceab 100644 --- a/test/Transforms/Inline/prof-update.ll +++ b/test/Transforms/Inline/prof-update.ll @@ -3,6 +3,7 @@ declare void @ext(); declare void @ext1(); +@func = global void ()* null ; CHECK: define void @callee(i32 %n) !prof ![[ENTRY_COUNT:[0-9]*]] define void @callee(i32 %n) !prof !1 { @@ -17,12 +18,16 @@ cond_false: ; ext is cloned and updated. ; CHECK: call void @ext(), !prof ![[COUNT_CALLEE:[0-9]*]] call void @ext(), !prof !2 + %f = load void ()*, void ()** @func +; CHECK: call void %f(), !prof ![[COUNT_IND_CALLEE:[0-9]*]] + call void %f(), !prof !4 ret void } ; CHECK: define void @caller() define void @caller() { ; CHECK: call void @ext(), !prof ![[COUNT_CALLER:[0-9]*]] +; CHECK: call void %f.i(), !prof ![[COUNT_IND_CALLER:[0-9]*]] call void @callee(i32 15), !prof !3 ret void } @@ -32,8 +37,11 @@ define void @caller() { !1 = !{!"function_entry_count", i64 1000} !2 = !{!"branch_weights", i64 2000} !3 = !{!"branch_weights", i64 400} +!4 = !{!"VP", i32 0, i64 140, i64 111, i64 80, i64 222, i64 40, i64 333, i64 20} attributes #0 = { alwaysinline } ; CHECK: ![[ENTRY_COUNT]] = !{!"function_entry_count", i64 600} ; CHECK: ![[COUNT_CALLEE1]] = !{!"branch_weights", i64 2000} -; CHECK: ![[COUNT_CALLEE]] = !{!"branch_weights", i32 1200} -; CHECK: ![[COUNT_CALLER]] = !{!"branch_weights", i32 800} +; CHECK: ![[COUNT_CALLEE]] = !{!"branch_weights", i64 1200} +; CHECK: ![[COUNT_IND_CALLEE]] = !{!"VP", i32 0, i64 84, i64 111, i64 48, i64 222, i64 24, i64 333, i64 12} +; CHECK: ![[COUNT_CALLER]] = !{!"branch_weights", i64 800} +; CHECK: ![[COUNT_IND_CALLER]] = !{!"VP", i32 0, i64 56, i64 111, i64 32, i64 222, i64 16, i64 333, i64 8} diff --git a/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll b/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll deleted file mode 100644 index 9c989b9ecf8a4..0000000000000 --- a/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll +++ /dev/null @@ -1,14 +0,0 @@ -; RUN: opt < %s -instcombine -S | \ -; RUN: grep "ret i1 true" -; PR586 - -@g_07918478 = external global i32 ; <i32*> [#uses=1] - -define i1 @test() { - %tmp.0 = load i32, i32* @g_07918478 ; <i32> [#uses=2] - %tmp.1 = icmp ne i32 %tmp.0, 0 ; <i1> [#uses=1] - %tmp.4 = icmp ult i32 %tmp.0, 4111 ; <i1> [#uses=1] - %bothcond = or i1 %tmp.1, %tmp.4 ; <i1> [#uses=1] - ret i1 %bothcond -} - diff --git a/test/Transforms/InstCombine/AddOverFlow.ll b/test/Transforms/InstCombine/AddOverFlow.ll index a341cb042ccfe..91fa86e815798 100644 --- a/test/Transforms/InstCombine/AddOverFlow.ll +++ b/test/Transforms/InstCombine/AddOverFlow.ll @@ -95,6 +95,44 @@ define i16 @ripple_nsw2(i16 %x, i16 %y) { ret i16 %c } +; CHECK-LABEL: @ripple_nsw3 +; CHECK: add nsw i16 %a, %b +define i16 @ripple_nsw3(i16 %x, i16 %y) { + %a = and i16 %y, 43691 + %b = and i16 %x, 21843 + %c = add i16 %a, %b + ret i16 %c +} + +; Like the previous test, but flip %a and %b +; CHECK-LABEL: @ripple_nsw4 +; CHECK: add nsw i16 %b, %a +define i16 @ripple_nsw4(i16 %x, i16 %y) { + %a = and i16 %y, 43691 + 
%b = and i16 %x, 21843 + %c = add i16 %b, %a + ret i16 %c +} + +; CHECK-LABEL: @ripple_nsw5 +; CHECK: add nsw i16 %a, %b +define i16 @ripple_nsw5(i16 %x, i16 %y) { + %a = or i16 %y, 43691 + %b = or i16 %x, 54613 + %c = add i16 %a, %b + ret i16 %c +} + +; Like the previous test, but flip %a and %b +; CHECK-LABEL: @ripple_nsw6 +; CHECK: add nsw i16 %b, %a +define i16 @ripple_nsw6(i16 %x, i16 %y) { + %a = or i16 %y, 43691 + %b = or i16 %x, 54613 + %c = add i16 %b, %a + ret i16 %c +} + ; CHECK-LABEL: @ripple_no_nsw1 ; CHECK: add i32 %a, %x define i32 @ripple_no_nsw1(i32 %x, i32 %y) { @@ -116,3 +154,41 @@ define i16 @ripple_no_nsw2(i16 %x, i16 %y) { %c = add i16 %a, %b ret i16 %c } + +; CHECK-LABEL: @ripple_no_nsw3 +; CHECK: add i16 %a, %b +define i16 @ripple_no_nsw3(i16 %x, i16 %y) { + %a = and i16 %y, 43691 + %b = and i16 %x, 21845 + %c = add i16 %a, %b + ret i16 %c +} + +; Like the previous test, but flip %a and %b +; CHECK-LABEL: @ripple_no_nsw4 +; CHECK: add i16 %b, %a +define i16 @ripple_no_nsw4(i16 %x, i16 %y) { + %a = and i16 %y, 43691 + %b = and i16 %x, 21845 + %c = add i16 %b, %a + ret i16 %c +} + +; CHECK-LABEL: @ripple_no_nsw5 +; CHECK: add i16 %a, %b +define i16 @ripple_no_nsw5(i16 %x, i16 %y) { + %a = or i16 %y, 43689 + %b = or i16 %x, 54613 + %c = add i16 %a, %b + ret i16 %c +} + +; Like the previous test, but flip %a and %b +; CHECK-LABEL: @ripple_no_nsw6 +; CHECK: add i16 %b, %a +define i16 @ripple_no_nsw6(i16 %x, i16 %y) { + %a = or i16 %y, 43689 + %b = or i16 %x, 54613 + %c = add i16 %b, %a + ret i16 %c +} diff --git a/test/Transforms/InstCombine/and-or-icmps.ll b/test/Transforms/InstCombine/and-or-icmps.ll index 464f390f988fb..165f5d1bffed5 100644 --- a/test/Transforms/InstCombine/and-or-icmps.ll +++ b/test/Transforms/InstCombine/and-or-icmps.ll @@ -15,9 +15,7 @@ define i1 @PR1817_1(i32 %X) { define i1 @PR1817_2(i32 %X) { ; CHECK-LABEL: @PR1817_2( ; CHECK-NEXT: [[A:%.*]] = icmp slt i32 %X, 10 -; CHECK-NEXT: [[B:%.*]] = icmp ult i32 %X, 10 -; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] -; CHECK-NEXT: ret i1 [[C]] +; CHECK-NEXT: ret i1 [[A]] ; %A = icmp slt i32 %X, 10 %B = icmp ult i32 %X, 10 diff --git a/test/Transforms/InstCombine/debuginfo-dce.ll b/test/Transforms/InstCombine/debuginfo-dce.ll index 58e9d7d767e99..086743e80820b 100644 --- a/test/Transforms/InstCombine/debuginfo-dce.ll +++ b/test/Transforms/InstCombine/debuginfo-dce.ll @@ -37,60 +37,60 @@ entry: ret void, !dbg !21 } -define void @salvage_bitcast(%struct.entry* %queue) local_unnamed_addr #0 !dbg !14 { +define void @salvage_bitcast(%struct.entry* %queue) local_unnamed_addr #0 !dbg !22 { entry: %im_not_dead = alloca i8* - %0 = bitcast %struct.entry* %queue to i8*, !dbg !19 - %1 = bitcast %struct.entry* %queue to i8*, !dbg !19 - call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !18, metadata !20), !dbg !19 + %0 = bitcast %struct.entry* %queue to i8*, !dbg !23 + %1 = bitcast %struct.entry* %queue to i8*, !dbg !23 + call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !24, metadata !20), !dbg !23 ; CHECK: define void @salvage_bitcast ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0, ; CHECK-SAME: metadata ![[BITCAST_EXPR:[0-9]+]]) store i8* %1, i8** %im_not_dead, align 8 - ret void, !dbg !21 + ret void, !dbg !23 } -define void @salvage_gep0(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 { +define void @salvage_gep0(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !25 { entry: %im_not_dead = alloca 
%struct.entry** - %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19 - %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19 - call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !20), !dbg !19 + %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !26 + %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !26 + call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !27, metadata !20), !dbg !26 ; CHECK: define void @salvage_gep0 ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0, ; CHECK-SAME: metadata ![[GEP0_EXPR:[0-9]+]]) store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8 - ret void, !dbg !21 + ret void, !dbg !26 } -define void @salvage_gep1(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 { +define void @salvage_gep1(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !28 { entry: %im_not_dead = alloca %struct.entry** - %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19 - %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19 - call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32)), !dbg !19 + %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !29 + %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !29 + call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !30, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32)), !dbg !29 ; CHECK: define void @salvage_gep1 ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0, ; CHECK-SAME: metadata ![[GEP1_EXPR:[0-9]+]]) store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8 - ret void, !dbg !21 + ret void, !dbg !29 } -define void @salvage_gep2(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 { +define void @salvage_gep2(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !31 { entry: %im_not_dead = alloca %struct.entry** - %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19 - %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19 - call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !DIExpression(DW_OP_stack_value)), !dbg !19 + %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !32 + %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !32 + call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !33, metadata !DIExpression(DW_OP_stack_value)), !dbg !32 ; CHECK: define void @salvage_gep2 ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0, ; CHECK-SAME: metadata ![[GEP2_EXPR:[0-9]+]]) store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8 - ret void, !dbg !21 + ret void, !dbg !32 } ; CHECK: ![[LOAD_EXPR]] = !DIExpression(DW_OP_deref, DW_OP_plus, 0) @@ -132,3 +132,15 @@ attributes #1 = { nounwind readnone } !19 = !DILocation(line: 6, column: 17, scope: !14) !20 = !DIExpression(DW_OP_plus, 0) !21 = !DILocation(line: 11, column: 1, scope: !14) +!22 = distinct !DISubprogram(name: "scan", 
scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17) +!23 = !DILocation(line: 6, column: 17, scope: !22) +!24 = !DILocalVariable(name: "entry", scope: !22, file: !1, line: 6, type: !4) +!25 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17) +!26 = !DILocation(line: 6, column: 17, scope: !25) +!27 = !DILocalVariable(name: "entry", scope: !25, file: !1, line: 6, type: !4) +!28 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17) +!29 = !DILocation(line: 6, column: 17, scope: !28) +!30 = !DILocalVariable(name: "entry", scope: !28, file: !1, line: 6, type: !4) +!31 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17) +!32 = !DILocation(line: 6, column: 17, scope: !31) +!33 = !DILocalVariable(name: "entry", scope: !31, file: !1, line: 6, type: !4) diff --git a/test/Transforms/InstCombine/demand_shrink_nsw.ll b/test/Transforms/InstCombine/demand_shrink_nsw.ll index f491742951670..4f7d00e32aaf1 100644 --- a/test/Transforms/InstCombine/demand_shrink_nsw.ll +++ b/test/Transforms/InstCombine/demand_shrink_nsw.ll @@ -3,7 +3,7 @@ ; The constant at %v35 should be shrunk, but this must lead to the nsw flag of ; %v43 getting removed so that %v44 is not illegally optimized away. ; CHECK-LABEL: @foo -; CHECK: %v35 = add nuw i32 %v34, 1362915575 +; CHECK: %v35 = add nuw nsw i32 %v34, 1362915575 ; ... 
; CHECK: add nuw i32 %v42, 1533579450 ; CHECK-NEXT: %v44 = or i32 %v43, -2147483648 diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll index 9ae5eafdfccff..bfafd66ebb415 100644 --- a/test/Transforms/InstCombine/or.ll +++ b/test/Transforms/InstCombine/or.ll @@ -661,17 +661,6 @@ define i1 @test47(i8 signext %c) { ret i1 %or } -define i1 @test48(i64 %x, i1 %b) { -; CHECK-LABEL: @test48( -; CHECK-NEXT: ret i1 true -; - %1 = icmp ult i64 %x, 2305843009213693952 - %2 = icmp ugt i64 %x, 2305843009213693951 - %.b = or i1 %2, %b - %3 = or i1 %1, %.b - ret i1 %3 -} - define i32 @test49(i1 %C) { ; CHECK-LABEL: @test49( ; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 1019, i32 123 diff --git a/test/Transforms/InstCombine/strlen-1.ll b/test/Transforms/InstCombine/strlen-1.ll index f3287c0de35f1..1e0dfb6a3088f 100644 --- a/test/Transforms/InstCombine/strlen-1.ll +++ b/test/Transforms/InstCombine/strlen-1.ll @@ -64,13 +64,14 @@ define i1 @test_simplify5() { ret i1 %eq_hello } -define i1 @test_simplify6() { +define i1 @test_simplify6(i8* %str_p) { ; CHECK-LABEL: @test_simplify6( -; CHECK-NEXT: ret i1 true +; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i8, i8* [[STR_P:%.*]], align 1 +; CHECK-NEXT: [[EQ_NULL:%.*]] = icmp eq i8 [[STRLENFIRST]], 0 +; CHECK-NEXT: ret i1 [[EQ_NULL]] ; - %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0 - %null_l = call i32 @strlen(i8* %null_p) - %eq_null = icmp eq i32 %null_l, 0 + %str_l = call i32 @strlen(i8* %str_p) + %eq_null = icmp eq i32 %str_l, 0 ret i1 %eq_null } @@ -86,13 +87,14 @@ define i1 @test_simplify7() { ret i1 %ne_hello } -define i1 @test_simplify8() { +define i1 @test_simplify8(i8* %str_p) { ; CHECK-LABEL: @test_simplify8( -; CHECK-NEXT: ret i1 false +; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i8, i8* [[STR_P:%.*]], align 1 +; CHECK-NEXT: [[NE_NULL:%.*]] = icmp ne i8 [[STRLENFIRST]], 0 +; CHECK-NEXT: ret i1 [[NE_NULL]] ; - %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0 - %null_l = call i32 @strlen(i8* %null_p) - %ne_null = icmp ne i32 %null_l, 0 + %str_l = call i32 @strlen(i8* %str_p) + %ne_null = icmp ne i32 %str_l, 0 ret i1 %ne_null } diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll index f9aaa4fa0c6c6..e059d77f1fa88 100644 --- a/test/Transforms/InstSimplify/AndOrXor.ll +++ b/test/Transforms/InstSimplify/AndOrXor.ll @@ -468,6 +468,51 @@ define <2 x i3> @and_of_different_cast_icmps_vec(<2 x i8> %i, <2 x i16> %j) { ret <2 x i3> %and } +define i32 @or_of_zexted_icmps(i32 %i) { +; CHECK-LABEL: @or_of_zexted_icmps( +; CHECK-NEXT: ret i32 1 +; + %cmp0 = icmp ne i32 %i, 0 + %conv0 = zext i1 %cmp0 to i32 + %cmp1 = icmp uge i32 4, %i + %conv1 = zext i1 %cmp1 to i32 + %or = or i32 %conv0, %conv1 + ret i32 %or +} + +; Try a different cast and weird vector types. + +define i3 @or_of_bitcast_icmps_vec(<3 x i65> %i) { +; CHECK-LABEL: @or_of_bitcast_icmps_vec( +; CHECK-NEXT: ret i3 bitcast (<3 x i1> <i1 true, i1 true, i1 true> to i3) +; + %cmp0 = icmp sge <3 x i65> %i, zeroinitializer + %conv0 = bitcast <3 x i1> %cmp0 to i3 + %cmp1 = icmp slt <3 x i65> %i, zeroinitializer + %conv1 = bitcast <3 x i1> %cmp1 to i3 + %or = or i3 %conv0, %conv1 + ret i3 %or +} + +; We can't simplify if the casts are different. 
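+;
+; To see why the all-ones fold in @or_of_zexted_icmps above is safe (a
+; sketch, not part of the test): either %i != 0 holds, or %i == 0 and then
+; 4 uge %i holds, so the OR of the two zext'd compares is always 1.
+; Exhaustively, in C, for 8-bit values:
+;
+;   for (unsigned i = 0; i < 256; ++i)
+;     assert(((i != 0) | (4u >= i)) == 1);
+;
+; With a zext and a sext mixed, as below, true becomes 1 in one operand and
+; -1 in the other, so no such fold applies.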
+ +define i16 @or_of_different_cast_icmps(i8 %i) { +; CHECK-LABEL: @or_of_different_cast_icmps( +; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i8 %i, 0 +; CHECK-NEXT: [[CONV0:%.*]] = zext i1 [[CMP0]] to i16 +; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i8 %i, 1 +; CHECK-NEXT: [[CONV1:%.*]] = sext i1 [[CMP1]] to i16 +; CHECK-NEXT: [[OR:%.*]] = or i16 [[CONV0]], [[CONV1]] +; CHECK-NEXT: ret i16 [[OR]] +; + %cmp0 = icmp ne i8 %i, 0 + %conv0 = zext i1 %cmp0 to i16 + %cmp1 = icmp ne i8 %i, 1 + %conv1 = sext i1 %cmp1 to i16 + %or = or i16 %conv0, %conv1 + ret i16 %or +} + ; (A & ~B) | (A ^ B) -> A ^ B define i32 @test43(i32 %a, i32 %b) { diff --git a/test/Transforms/InstSimplify/compare.ll b/test/Transforms/InstSimplify/compare.ll index cd2fa880294ad..883bf31ff77a9 100644 --- a/test/Transforms/InstSimplify/compare.ll +++ b/test/Transforms/InstSimplify/compare.ll @@ -576,13 +576,38 @@ define i1 @srem3(i16 %X, i32 %Y) { ret i1 %E } -define i1 @udiv2(i32 %X, i32 %Y, i32 %Z) { +define i1 @udiv2(i32 %Z) { ; CHECK-LABEL: @udiv2( +; CHECK-NEXT: ret i1 true +; %A = udiv exact i32 10, %Z %B = udiv exact i32 20, %Z %C = icmp ult i32 %A, %B ret i1 %C -; CHECK: ret i1 true +} + +; Exact sdiv and equality preds can simplify. + +define i1 @sdiv_exact_equality(i32 %Z) { +; CHECK-LABEL: @sdiv_exact_equality( +; CHECK-NEXT: ret i1 false +; + %A = sdiv exact i32 10, %Z + %B = sdiv exact i32 20, %Z + %C = icmp eq i32 %A, %B + ret i1 %C +} + +; FIXME: But not other preds: PR32949 - https://bugs.llvm.org/show_bug.cgi?id=32949 + +define i1 @sdiv_exact_not_equality(i32 %Z) { +; CHECK-LABEL: @sdiv_exact_not_equality( +; CHECK-NEXT: ret i1 true +; + %A = sdiv exact i32 10, %Z + %B = sdiv exact i32 20, %Z + %C = icmp ult i32 %A, %B + ret i1 %C } define i1 @udiv3(i32 %X, i32 %Y) { diff --git a/test/Transforms/InstSimplify/icmp-ranges.ll b/test/Transforms/InstSimplify/icmp-ranges.ll index 292be6a8a559b..45194f2df4f14 100644 --- a/test/Transforms/InstSimplify/icmp-ranges.ll +++ b/test/Transforms/InstSimplify/icmp-ranges.ll @@ -2729,6 +2729,2732 @@ define i1 @and_ult_ult_swap(i8 %x) { ret i1 %c } +; eq +; x == 13 || x == 17 + +define i1 @or_eq_eq(i8 %x) { +; CHECK-LABEL: @or_eq_eq( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x != 17 + +define i1 @or_eq_ne(i8 %x) { +; CHECK-LABEL: @or_eq_ne( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x >=s 17 + +define i1 @or_eq_sge(i8 %x) { +; CHECK-LABEL: @or_eq_sge( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x >s 17 + +define i1 @or_eq_sgt(i8 %x) { +; CHECK-LABEL: @or_eq_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x <=s 17 + +define i1 @or_eq_sle(i8 %x) { +; CHECK-LABEL: @or_eq_sle( +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 13 + %b = icmp sle i8 %x, 
17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x <s 17 + +define i1 @or_eq_slt(i8 %x) { +; CHECK-LABEL: @or_eq_slt( +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x >=u 17 + +define i1 @or_eq_uge(i8 %x) { +; CHECK-LABEL: @or_eq_uge( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x >u 17 + +define i1 @or_eq_ugt(i8 %x) { +; CHECK-LABEL: @or_eq_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x <=u 17 + +define i1 @or_eq_ule(i8 %x) { +; CHECK-LABEL: @or_eq_ule( +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 13 || x <u 17 + +define i1 @or_eq_ult(i8 %x) { +; CHECK-LABEL: @or_eq_ult( +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ne +; x != 13 || x == 17 + +define i1 @or_ne_eq(i8 %x) { +; CHECK-LABEL: @or_ne_eq( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x != 17 + +define i1 @or_ne_ne(i8 %x) { +; CHECK-LABEL: @or_ne_ne( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x >=s 17 + +define i1 @or_ne_sge(i8 %x) { +; CHECK-LABEL: @or_ne_sge( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x >s 17 + +define i1 @or_ne_sgt(i8 %x) { +; CHECK-LABEL: @or_ne_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x <=s 17 + +define i1 @or_ne_sle(i8 %x) { +; CHECK-LABEL: @or_ne_sle( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x <s 17 + +define i1 @or_ne_slt(i8 %x) { +; CHECK-LABEL: @or_ne_slt( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x >=u 17 + +define i1 @or_ne_uge(i8 %x) { +; CHECK-LABEL: @or_ne_uge( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x >u 17 + +define i1 @or_ne_ugt(i8 %x) { +; CHECK-LABEL: @or_ne_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x <=u 17 + +define i1 @or_ne_ule(i8 %x) { +; CHECK-LABEL: @or_ne_ule( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 13 || x <u 17 + +define i1 
@or_ne_ult(i8 %x) { +; CHECK-LABEL: @or_ne_ult( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; sge +; x >=s 13 || x == 17 + +define i1 @or_sge_eq(i8 %x) { +; CHECK-LABEL: @or_sge_eq( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sge i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x != 17 + +define i1 @or_sge_ne(i8 %x) { +; CHECK-LABEL: @or_sge_ne( +; CHECK-NEXT: ret i1 true +; + %a = icmp sge i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x >=s 17 + +define i1 @or_sge_sge(i8 %x) { +; CHECK-LABEL: @or_sge_sge( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sge i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x >s 17 + +define i1 @or_sge_sgt(i8 %x) { +; CHECK-LABEL: @or_sge_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sge i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x <=s 17 + +define i1 @or_sge_sle(i8 %x) { +; CHECK-LABEL: @or_sge_sle( +; CHECK-NEXT: ret i1 true +; + %a = icmp sge i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x <s 17 + +define i1 @or_sge_slt(i8 %x) { +; CHECK-LABEL: @or_sge_slt( +; CHECK-NEXT: ret i1 true +; + %a = icmp sge i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x >=u 17 + +define i1 @or_sge_uge(i8 %x) { +; CHECK-LABEL: @or_sge_uge( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x >u 17 + +define i1 @or_sge_ugt(i8 %x) { +; CHECK-LABEL: @or_sge_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x <=u 17 + +define i1 @or_sge_ule(i8 %x) { +; CHECK-LABEL: @or_sge_ule( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 13 || x <u 17 + +define i1 @or_sge_ult(i8 %x) { +; CHECK-LABEL: @or_sge_ult( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; sgt +; x >s 13 || x == 17 + +define i1 @or_sgt_eq(i8 %x) { +; CHECK-LABEL: @or_sgt_eq( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x != 17 + +define i1 @or_sgt_ne(i8 %x) { +; CHECK-LABEL: @or_sgt_ne( +; CHECK-NEXT: ret i1 true +; + %a = icmp sgt i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x >=s 17 + +define i1 @or_sgt_sge(i8 %x) { +; CHECK-LABEL: @or_sgt_sge( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sgt 
i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x >s 17 + +define i1 @or_sgt_sgt(i8 %x) { +; CHECK-LABEL: @or_sgt_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x <=s 17 + +define i1 @or_sgt_sle(i8 %x) { +; CHECK-LABEL: @or_sgt_sle( +; CHECK-NEXT: ret i1 true +; + %a = icmp sgt i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x <s 17 + +define i1 @or_sgt_slt(i8 %x) { +; CHECK-LABEL: @or_sgt_slt( +; CHECK-NEXT: ret i1 true +; + %a = icmp sgt i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x >=u 17 + +define i1 @or_sgt_uge(i8 %x) { +; CHECK-LABEL: @or_sgt_uge( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x >u 17 + +define i1 @or_sgt_ugt(i8 %x) { +; CHECK-LABEL: @or_sgt_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x <=u 17 + +define i1 @or_sgt_ule(i8 %x) { +; CHECK-LABEL: @or_sgt_ule( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 13 || x <u 17 + +define i1 @or_sgt_ult(i8 %x) { +; CHECK-LABEL: @or_sgt_ult( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; sle +; x <=s 13 || x == 17 + +define i1 @or_sle_eq(i8 %x) { +; CHECK-LABEL: @or_sle_eq( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x != 17 + +define i1 @or_sle_ne(i8 %x) { +; CHECK-LABEL: @or_sle_ne( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x >=s 17 + +define i1 @or_sle_sge(i8 %x) { +; CHECK-LABEL: @or_sle_sge( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x >s 17 + +define i1 @or_sle_sgt(i8 %x) { +; CHECK-LABEL: @or_sle_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x <=s 17 + +define i1 @or_sle_sle(i8 %x) { +; CHECK-LABEL: @or_sle_sle( +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; 
+ %a = icmp sle i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x <s 17 + +define i1 @or_sle_slt(i8 %x) { +; CHECK-LABEL: @or_sle_slt( +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sle i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x >=u 17 + +define i1 @or_sle_uge(i8 %x) { +; CHECK-LABEL: @or_sle_uge( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x >u 17 + +define i1 @or_sle_ugt(i8 %x) { +; CHECK-LABEL: @or_sle_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x <=u 17 + +define i1 @or_sle_ule(i8 %x) { +; CHECK-LABEL: @or_sle_ule( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 13 || x <u 17 + +define i1 @or_sle_ult(i8 %x) { +; CHECK-LABEL: @or_sle_ult( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sle i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; slt +; x <s 13 || x == 17 + +define i1 @or_slt_eq(i8 %x) { +; CHECK-LABEL: @or_slt_eq( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x != 17 + +define i1 @or_slt_ne(i8 %x) { +; CHECK-LABEL: @or_slt_ne( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x >=s 17 + +define i1 @or_slt_sge(i8 %x) { +; CHECK-LABEL: @or_slt_sge( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x >s 17 + +define i1 @or_slt_sgt(i8 %x) { +; CHECK-LABEL: @or_slt_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x <=s 17 + +define i1 @or_slt_sle(i8 %x) { +; CHECK-LABEL: @or_slt_sle( +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp slt i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x <s 17 + +define i1 @or_slt_slt(i8 %x) { +; CHECK-LABEL: @or_slt_slt( +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp slt i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x >=u 17 + +define i1 @or_slt_uge(i8 
%x) { +; CHECK-LABEL: @or_slt_uge( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x >u 17 + +define i1 @or_slt_ugt(i8 %x) { +; CHECK-LABEL: @or_slt_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x <=u 17 + +define i1 @or_slt_ule(i8 %x) { +; CHECK-LABEL: @or_slt_ule( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 13 || x <u 17 + +define i1 @or_slt_ult(i8 %x) { +; CHECK-LABEL: @or_slt_ult( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp slt i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; uge +; x >=u 13 || x == 17 + +define i1 @or_uge_eq(i8 %x) { +; CHECK-LABEL: @or_uge_eq( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp uge i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x != 17 + +define i1 @or_uge_ne(i8 %x) { +; CHECK-LABEL: @or_uge_ne( +; CHECK-NEXT: ret i1 true +; + %a = icmp uge i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x >=s 17 + +define i1 @or_uge_sge(i8 %x) { +; CHECK-LABEL: @or_uge_sge( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp uge i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x >s 17 + +define i1 @or_uge_sgt(i8 %x) { +; CHECK-LABEL: @or_uge_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp uge i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x <=s 17 + +define i1 @or_uge_sle(i8 %x) { +; CHECK-LABEL: @or_uge_sle( +; CHECK-NEXT: ret i1 true +; + %a = icmp uge i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x <s 17 + +define i1 @or_uge_slt(i8 %x) { +; CHECK-LABEL: @or_uge_slt( +; CHECK-NEXT: ret i1 true +; + %a = icmp uge i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x >=u 17 + +define i1 @or_uge_uge(i8 %x) { +; CHECK-LABEL: @or_uge_uge( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp uge i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x >u 17 + +define i1 @or_uge_ugt(i8 %x) { +; CHECK-LABEL: @or_uge_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp uge i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x <=u 17 + +define i1 @or_uge_ule(i8 %x) { +; CHECK-LABEL: @or_uge_ule( +; CHECK-NEXT: ret i1 true +; + %a = icmp uge i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 13 || x <u 17 + +define i1 @or_uge_ult(i8 %x) { +; CHECK-LABEL: @or_uge_ult( +; CHECK-NEXT: ret i1 true +; + %a = icmp uge i8 %x, 13 + %b 
= icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ugt +; x >u 13 || x == 17 + +define i1 @or_ugt_eq(i8 %x) { +; CHECK-LABEL: @or_ugt_eq( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x != 17 + +define i1 @or_ugt_ne(i8 %x) { +; CHECK-LABEL: @or_ugt_ne( +; CHECK-NEXT: ret i1 true +; + %a = icmp ugt i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x >=s 17 + +define i1 @or_ugt_sge(i8 %x) { +; CHECK-LABEL: @or_ugt_sge( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x >s 17 + +define i1 @or_ugt_sgt(i8 %x) { +; CHECK-LABEL: @or_ugt_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x <=s 17 + +define i1 @or_ugt_sle(i8 %x) { +; CHECK-LABEL: @or_ugt_sle( +; CHECK-NEXT: ret i1 true +; + %a = icmp ugt i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x <s 17 + +define i1 @or_ugt_slt(i8 %x) { +; CHECK-LABEL: @or_ugt_slt( +; CHECK-NEXT: ret i1 true +; + %a = icmp ugt i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x >=u 17 + +define i1 @or_ugt_uge(i8 %x) { +; CHECK-LABEL: @or_ugt_uge( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x >u 17 + +define i1 @or_ugt_ugt(i8 %x) { +; CHECK-LABEL: @or_ugt_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ugt i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x <=u 17 + +define i1 @or_ugt_ule(i8 %x) { +; CHECK-LABEL: @or_ugt_ule( +; CHECK-NEXT: ret i1 true +; + %a = icmp ugt i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 13 || x <u 17 + +define i1 @or_ugt_ult(i8 %x) { +; CHECK-LABEL: @or_ugt_ult( +; CHECK-NEXT: ret i1 true +; + %a = icmp ugt i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ule +; x <=u 13 || x == 17 + +define i1 @or_ule_eq(i8 %x) { +; CHECK-LABEL: @or_ule_eq( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x != 17 + +define i1 @or_ule_ne(i8 %x) { +; CHECK-LABEL: @or_ule_ne( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x >=s 17 + +define i1 @or_ule_sge(i8 %x) { +; CHECK-LABEL: @or_ule_sge( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x >s 17 + +define i1 @or_ule_sgt(i8 %x) { +; CHECK-LABEL: @or_ule_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 
%x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x <=s 17 + +define i1 @or_ule_sle(i8 %x) { +; CHECK-LABEL: @or_ule_sle( +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ule i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x <s 17 + +define i1 @or_ule_slt(i8 %x) { +; CHECK-LABEL: @or_ule_slt( +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ule i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x >=u 17 + +define i1 @or_ule_uge(i8 %x) { +; CHECK-LABEL: @or_ule_uge( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x >u 17 + +define i1 @or_ule_ugt(i8 %x) { +; CHECK-LABEL: @or_ule_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x <=u 17 + +define i1 @or_ule_ule(i8 %x) { +; CHECK-LABEL: @or_ule_ule( +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 13 || x <u 17 + +define i1 @or_ule_ult(i8 %x) { +; CHECK-LABEL: @or_ule_ult( +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ule i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ult +; x <u 13 || x == 17 + +define i1 @or_ult_eq(i8 %x) { +; CHECK-LABEL: @or_ult_eq( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x != 17 + +define i1 @or_ult_ne(i8 %x) { +; CHECK-LABEL: @or_ult_ne( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x >=s 17 + +define i1 @or_ult_sge(i8 %x) { +; CHECK-LABEL: @or_ult_sge( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x >s 17 + +define i1 @or_ult_sgt(i8 %x) { +; CHECK-LABEL: @or_ult_sgt( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x <=s 17 + +define i1 @or_ult_sle(i8 %x) { +; CHECK-LABEL: @or_ult_sle( +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ult i8 %x, 13 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x <s 17 + +define i1 @or_ult_slt(i8 %x) { +; CHECK-LABEL: @or_ult_slt( +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ult i8 %x, 13 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; 
x <u 13 || x >=u 17 + +define i1 @or_ult_uge(i8 %x) { +; CHECK-LABEL: @or_ult_uge( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x >u 17 + +define i1 @or_ult_ugt(i8 %x) { +; CHECK-LABEL: @or_ult_ugt( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13 +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x <=u 17 + +define i1 @or_ult_ule(i8 %x) { +; CHECK-LABEL: @or_ult_ule( +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 13 || x <u 17 + +define i1 @or_ult_ult(i8 %x) { +; CHECK-LABEL: @or_ult_ult( +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ult i8 %x, 13 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; eq +; x == 23 || x == 17 + +define i1 @or_eq_eq_swap(i8 %x) { +; CHECK-LABEL: @or_eq_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x != 17 + +define i1 @or_eq_ne_swap(i8 %x) { +; CHECK-LABEL: @or_eq_ne_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x >=s 17 + +define i1 @or_eq_sge_swap(i8 %x) { +; CHECK-LABEL: @or_eq_sge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x >s 17 + +define i1 @or_eq_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_eq_sgt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x <=s 17 + +define i1 @or_eq_sle_swap(i8 %x) { +; CHECK-LABEL: @or_eq_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x <s 17 + +define i1 @or_eq_slt_swap(i8 %x) { +; CHECK-LABEL: @or_eq_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x >=u 17 + +define i1 @or_eq_uge_swap(i8 %x) { +; CHECK-LABEL: @or_eq_uge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x >u 17 + +define i1 @or_eq_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_eq_ugt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp eq i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x <=u 17 + 
+define i1 @or_eq_ule_swap(i8 %x) { +; CHECK-LABEL: @or_eq_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x == 23 || x <u 17 + +define i1 @or_eq_ult_swap(i8 %x) { +; CHECK-LABEL: @or_eq_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp eq i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ne +; x != 23 || x == 17 + +define i1 @or_ne_eq_swap(i8 %x) { +; CHECK-LABEL: @or_ne_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x != 17 + +define i1 @or_ne_ne_swap(i8 %x) { +; CHECK-LABEL: @or_ne_ne_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x >=s 17 + +define i1 @or_ne_sge_swap(i8 %x) { +; CHECK-LABEL: @or_ne_sge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x >s 17 + +define i1 @or_ne_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_ne_sgt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x <=s 17 + +define i1 @or_ne_sle_swap(i8 %x) { +; CHECK-LABEL: @or_ne_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x <s 17 + +define i1 @or_ne_slt_swap(i8 %x) { +; CHECK-LABEL: @or_ne_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x >=u 17 + +define i1 @or_ne_uge_swap(i8 %x) { +; CHECK-LABEL: @or_ne_uge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x >u 17 + +define i1 @or_ne_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_ne_ugt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ne i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x <=u 17 + +define i1 @or_ne_ule_swap(i8 %x) { +; CHECK-LABEL: @or_ne_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x != 23 || x <u 17 + +define i1 @or_ne_ult_swap(i8 %x) { +; CHECK-LABEL: @or_ne_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ne i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; sge +; x >=s 23 || x == 17 + +define i1 @or_sge_eq_swap(i8 %x) { +; CHECK-LABEL: @or_sge_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x != 17 + +define i1 @or_sge_ne_swap(i8 %x) { +; CHECK-LABEL: @or_sge_ne_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = 
icmp sge i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x >=s 17 + +define i1 @or_sge_sge_swap(i8 %x) { +; CHECK-LABEL: @or_sge_sge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sge i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x >s 17 + +define i1 @or_sge_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_sge_sgt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sge i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x <=s 17 + +define i1 @or_sge_sle_swap(i8 %x) { +; CHECK-LABEL: @or_sge_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x <s 17 + +define i1 @or_sge_slt_swap(i8 %x) { +; CHECK-LABEL: @or_sge_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x >=u 17 + +define i1 @or_sge_uge_swap(i8 %x) { +; CHECK-LABEL: @or_sge_uge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sge i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x >u 17 + +define i1 @or_sge_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_sge_ugt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sge i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x <=u 17 + +define i1 @or_sge_ule_swap(i8 %x) { +; CHECK-LABEL: @or_sge_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=s 23 || x <u 17 + +define i1 @or_sge_ult_swap(i8 %x) { +; CHECK-LABEL: @or_sge_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sge i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; sgt +; x >s 23 || x == 17 + +define i1 @or_sgt_eq_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x != 17 + +define i1 @or_sgt_ne_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_ne_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x >=s 17 + +define i1 @or_sgt_sge_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_sge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x >s 17 + +define i1 @or_sgt_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_sgt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp sgt 
i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x <=s 17 + +define i1 @or_sgt_sle_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x <s 17 + +define i1 @or_sgt_slt_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x >=u 17 + +define i1 @or_sgt_uge_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_uge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x >u 17 + +define i1 @or_sgt_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_ugt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x <=u 17 + +define i1 @or_sgt_ule_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >s 23 || x <u 17 + +define i1 @or_sgt_ult_swap(i8 %x) { +; CHECK-LABEL: @or_sgt_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp sgt i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; sle +; x <=s 23 || x == 17 + +define i1 @or_sle_eq_swap(i8 %x) { +; CHECK-LABEL: @or_sle_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sle i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x != 17 + +define i1 @or_sle_ne_swap(i8 %x) { +; CHECK-LABEL: @or_sle_ne_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp sle i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x >=s 17 + +define i1 @or_sle_sge_swap(i8 %x) { +; CHECK-LABEL: @or_sle_sge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp sle i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x >s 17 + +define i1 @or_sle_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_sle_sgt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp sle i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x <=s 17 + +define i1 @or_sle_sle_swap(i8 %x) { +; CHECK-LABEL: @or_sle_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sle i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x <s 17 + +define i1 @or_sle_slt_swap(i8 %x) { +; CHECK-LABEL: @or_sle_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sle i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x >=u 17 + +define i1 
@or_sle_uge_swap(i8 %x) { +; CHECK-LABEL: @or_sle_uge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp sle i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x >u 17 + +define i1 @or_sle_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_sle_ugt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp sle i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x <=u 17 + +define i1 @or_sle_ule_swap(i8 %x) { +; CHECK-LABEL: @or_sle_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sle i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=s 23 || x <u 17 + +define i1 @or_sle_ult_swap(i8 %x) { +; CHECK-LABEL: @or_sle_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp sle i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; slt +; x <s 23 || x == 17 + +define i1 @or_slt_eq_swap(i8 %x) { +; CHECK-LABEL: @or_slt_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp slt i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x != 17 + +define i1 @or_slt_ne_swap(i8 %x) { +; CHECK-LABEL: @or_slt_ne_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp slt i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x >=s 17 + +define i1 @or_slt_sge_swap(i8 %x) { +; CHECK-LABEL: @or_slt_sge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp slt i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x >s 17 + +define i1 @or_slt_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_slt_sgt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp slt i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x <=s 17 + +define i1 @or_slt_sle_swap(i8 %x) { +; CHECK-LABEL: @or_slt_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp slt i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x <s 17 + +define i1 @or_slt_slt_swap(i8 %x) { +; CHECK-LABEL: @or_slt_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp slt i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x >=u 17 + +define i1 @or_slt_uge_swap(i8 %x) { +; CHECK-LABEL: @or_slt_uge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp slt i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x >u 17 + +define i1 @or_slt_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_slt_ugt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp slt i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x <=u 17 + +define i1 @or_slt_ule_swap(i8 %x) { +; CHECK-LABEL: @or_slt_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp slt i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <s 23 || x <u 17 + +define i1 @or_slt_ult_swap(i8 %x) { +; CHECK-LABEL: @or_slt_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp slt i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; uge +; x >=u 23 || x == 17 + +define i1 @or_uge_eq_swap(i8 %x) { +; CHECK-LABEL: @or_uge_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: 
ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x != 17 + +define i1 @or_uge_ne_swap(i8 %x) { +; CHECK-LABEL: @or_uge_ne_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp uge i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x >=s 17 + +define i1 @or_uge_sge_swap(i8 %x) { +; CHECK-LABEL: @or_uge_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x >s 17 + +define i1 @or_uge_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_uge_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x <=s 17 + +define i1 @or_uge_sle_swap(i8 %x) { +; CHECK-LABEL: @or_uge_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x <s 17 + +define i1 @or_uge_slt_swap(i8 %x) { +; CHECK-LABEL: @or_uge_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x >=u 17 + +define i1 @or_uge_uge_swap(i8 %x) { +; CHECK-LABEL: @or_uge_uge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp uge i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x >u 17 + +define i1 @or_uge_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_uge_ugt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp uge i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x <=u 17 + +define i1 @or_uge_ule_swap(i8 %x) { +; CHECK-LABEL: @or_uge_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >=u 23 || x <u 17 + +define i1 @or_uge_ult_swap(i8 %x) { +; CHECK-LABEL: @or_uge_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp uge i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ugt +; x >u 23 || x == 17 + +define i1 @or_ugt_eq_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x != 17 + +define i1 @or_ugt_ne_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_ne_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ugt 
i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x >=s 17 + +define i1 @or_ugt_sge_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x >s 17 + +define i1 @or_ugt_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x <=s 17 + +define i1 @or_ugt_sle_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x <s 17 + +define i1 @or_ugt_slt_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x >=u 17 + +define i1 @or_ugt_uge_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_uge_swap( +; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x >u 17 + +define i1 @or_ugt_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_ugt_swap( +; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17 +; CHECK-NEXT: ret i1 [[B]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x <=u 17 + +define i1 @or_ugt_ule_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x >u 23 || x <u 17 + +define i1 @or_ugt_ult_swap(i8 %x) { +; CHECK-LABEL: @or_ugt_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ugt i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ule +; x <=u 23 || x == 17 + +define i1 @or_ule_eq_swap(i8 %x) { +; CHECK-LABEL: @or_ule_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ule i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x != 17 + +define i1 @or_ule_ne_swap(i8 %x) { +; CHECK-LABEL: @or_ule_ne_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ule i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x >=s 17 + +define i1 @or_ule_sge_swap(i8 %x) { +; CHECK-LABEL: @or_ule_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x 
<=u 23 || x >s 17 + +define i1 @or_ule_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_ule_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x <=s 17 + +define i1 @or_ule_sle_swap(i8 %x) { +; CHECK-LABEL: @or_ule_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x <s 17 + +define i1 @or_ule_slt_swap(i8 %x) { +; CHECK-LABEL: @or_ule_slt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ule i8 %x, 23 + %b = icmp slt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x >=u 17 + +define i1 @or_ule_uge_swap(i8 %x) { +; CHECK-LABEL: @or_ule_uge_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ule i8 %x, 23 + %b = icmp uge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x >u 17 + +define i1 @or_ule_ugt_swap(i8 %x) { +; CHECK-LABEL: @or_ule_ugt_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ule i8 %x, 23 + %b = icmp ugt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x <=u 17 + +define i1 @or_ule_ule_swap(i8 %x) { +; CHECK-LABEL: @or_ule_ule_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ule i8 %x, 23 + %b = icmp ule i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <=u 23 || x <u 17 + +define i1 @or_ule_ult_swap(i8 %x) { +; CHECK-LABEL: @or_ule_ult_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ule i8 %x, 23 + %b = icmp ult i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; ult +; x <u 23 || x == 17 + +define i1 @or_ult_eq_swap(i8 %x) { +; CHECK-LABEL: @or_ult_eq_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: ret i1 [[A]] +; + %a = icmp ult i8 %x, 23 + %b = icmp eq i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 23 || x != 17 + +define i1 @or_ult_ne_swap(i8 %x) { +; CHECK-LABEL: @or_ult_ne_swap( +; CHECK-NEXT: ret i1 true +; + %a = icmp ult i8 %x, 23 + %b = icmp ne i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 23 || x >=s 17 + +define i1 @or_ult_sge_swap(i8 %x) { +; CHECK-LABEL: @or_ult_sge_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp sge i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 23 || x >s 17 + +define i1 @or_ult_sgt_swap(i8 %x) { +; CHECK-LABEL: @or_ult_sgt_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp sgt i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; x <u 23 || x <=s 17 + +define i1 @or_ult_sle_swap(i8 %x) { +; CHECK-LABEL: @or_ult_sle_swap( +; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23 +; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17 +; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]] +; CHECK-NEXT: ret i1 [[C]] +; + %a = icmp ult i8 %x, 23 + %b = icmp sle i8 %x, 17 + %c = or i1 %a, %b + ret i1 %c +} + +; 
x <u 23 || x <s 17
+
+define i1 @or_ult_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+  %a = icmp ult i8 %x, 23
+  %b = icmp slt i8 %x, 17
+  %c = or i1 %a, %b
+  ret i1 %c
+}
+
+; x <u 23 || x >=u 17
+
+define i1 @or_ult_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_uge_swap(
+; CHECK-NEXT: ret i1 true
+;
+  %a = icmp ult i8 %x, 23
+  %b = icmp uge i8 %x, 17
+  %c = or i1 %a, %b
+  ret i1 %c
+}
+
+; x <u 23 || x >u 17
+
+define i1 @or_ult_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ugt_swap(
+; CHECK-NEXT: ret i1 true
+;
+  %a = icmp ult i8 %x, 23
+  %b = icmp ugt i8 %x, 17
+  %c = or i1 %a, %b
+  ret i1 %c
+}
+
+; x <u 23 || x <=u 17
+
+define i1 @or_ult_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+  %a = icmp ult i8 %x, 23
+  %b = icmp ule i8 %x, 17
+  %c = or i1 %a, %b
+  ret i1 %c
+}
+
+; x <u 23 || x <u 17
+
+define i1 @or_ult_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+  %a = icmp ult i8 %x, 23
+  %b = icmp ult i8 %x, 17
+  %c = or i1 %a, %b
+  ret i1 %c
+}
+
 ; Special case - slt is uge
 
 ; x <u 31 && x <s 0
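The exhaustive or-of-icmps tests above all reduce to range arithmetic over the 256 values of an i8. As a quick cross-check of the expected FileCheck results (an illustrative, standalone C sketch, not part of the patch; the names ok/xs/xu are mine), the loop below verifies three representative identities: @or_eq_sle folds to the second compare, @or_ne_uge folds to the first compare, and @or_ugt_ule folds to true.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    int ok = 1;
    for (int i = 0; i < 256; i++) {
        int8_t  xs = (int8_t)i;   /* signed view: slt/sle/sgt/sge   */
        uint8_t xu = (uint8_t)i;  /* unsigned view: ult/ule/ugt/uge */
        /* @or_eq_sle: (x == 13 || x <=s 17) should equal (x <=s 17) */
        ok &= ((xs == 13 || xs <= 17) == (xs <= 17));
        /* @or_ne_uge: (x != 13 || x >=u 17) should equal (x != 13) */
        ok &= ((xu != 13 || xu >= 17) == (xu != 13));
        /* @or_ugt_ule: (x >u 13 || x <=u 17) covers every value */
        ok &= (xu > 13 || xu <= 17);
    }
    printf("%s\n", ok ? "all identities hold" : "mismatch found");
    return !ok;
}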
diff --git a/test/Transforms/LoopIdiom/unsafe.ll b/test/Transforms/LoopIdiom/unsafe.ll
new file mode 100644
index 0000000000000..8eff8996adfa3
--- /dev/null
+++ b/test/Transforms/LoopIdiom/unsafe.ll
@@ -0,0 +1,55 @@
+; RUN: opt -S < %s -loop-idiom | FileCheck %s
+; CHECK-NOT: memset
+; Check that memset is not generated (for the stores), because that would
+; result in the udiv being hoisted out of the loop by the SCEV expander.
+; TODO: Ideally we should be able to generate memset
+; if the SCEV expander is taught to generate the dependencies
+; at the right point.
+
+@a = global i32 0, align 4
+@b = global i32 0, align 4
+@c = external local_unnamed_addr global [1 x i8], align 1
+
+define void @e() local_unnamed_addr {
+entry:
+  %d0 = load i32, i32* @a, align 4
+  %d1 = load i32, i32* @b, align 4
+  br label %for.cond1thread-pre-split
+
+for.cond1thread-pre-split:                        ; preds = %for.body5, %entry
+  %div = udiv i32 %d0, %d1
+  br label %for.body5
+
+for.body5:                                        ; preds = %for.body5, %for.cond1thread-pre-split
+  %indvars.iv = phi i64 [ 0, %for.cond1thread-pre-split ], [ %indvars.iv.next, %for.body5 ]
+  %divx = sext i32 %div to i64
+  %0 = add nsw i64 %divx, %indvars.iv
+  %arrayidx = getelementptr inbounds [1 x i8], [1 x i8]* @c, i64 0, i64 %0
+  store i8 0, i8* %arrayidx, align 1
+  %indvars.iv.next = add nsw i64 %indvars.iv, 1
+  %1 = trunc i64 %indvars.iv.next to i32
+  %tobool4 = icmp eq i32 %1, 0
+  br i1 %tobool4, label %for.cond1thread-pre-split, label %for.body5
+}
+
+; The loop's trip count depends on an unsafe operation (udiv). The SCEV
+; expander hoists it out of the loop, so loop-idiom should check that the
+; memset is not generated in this case.
+define void @f(i32 %a, i32 %b, i8* nocapture %x) local_unnamed_addr {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body6, %entry
+  %div = udiv i32 %a, %b
+  %conv = zext i32 %div to i64
+  br label %for.body6
+
+for.body6:                                        ; preds = %for.body6, %for.body
+  %i.09 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %x, i64 %i.09
+  store i8 0, i8* %arrayidx, align 1
+  %inc = add nuw nsw i64 %i.09, 1
+  %cmp3 = icmp slt i64 %inc, %conv
+  br i1 %cmp3, label %for.body6, label %for.body
+}
+
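The unsafety this new test pins down is the usual speculation hazard: a udiv in a loop body executes only when the loop actually runs, so materializing the idiom (and with it the SCEV-expanded trip count) ahead of the loop can introduce a trap, such as a division by zero, that the original program never reaches. The same effect in executable form, as a hedged Python sketch (function names are illustrative):

    def store_loop(a, b, n):
        # Division stays inside the loop: if n == 0, it is never evaluated.
        out = []
        for _ in range(n):
            out.append(a // b)
        return out

    def hoisted(a, b, n):
        # What an unsafe transform would do: evaluate a // b before the loop.
        q = a // b        # raises ZeroDivisionError even when n == 0
        return [q] * n

    assert store_loop(1, 0, 0) == []   # fine: a zero-trip loop never divides
    try:
        hoisted(1, 0, 0)
    except ZeroDivisionError:
        pass                           # the hoisted division trapped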
diff --git a/test/Transforms/LoopRotate/dbgvalue.ll b/test/Transforms/LoopRotate/dbgvalue.ll
index 9ff8bda4bc08d..90105047f86ae 100644
--- a/test/Transforms/LoopRotate/dbgvalue.ll
+++ b/test/Transforms/LoopRotate/dbgvalue.ll
@@ -38,7 +38,7 @@ return:                                           ; preds = %if.end
   ret i32 %z.tr, !dbg !17
 }
 
-define i32 @tak2(i32 %x, i32 %y, i32 %z) nounwind ssp !dbg !0 {
+define i32 @tak2(i32 %x, i32 %y, i32 %z) nounwind ssp !dbg !21 {
 ; CHECK-LABEL: define i32 @tak2(
 ; CHECK: entry
 ; CHECK: tail call void @llvm.dbg.value(metadata i32 %x.tr
@@ -51,29 +51,29 @@ tailrecurse:                                      ; preds = %if.then, %entry
   %x.tr = phi i32 [ %x, %entry ], [ %call, %if.then ]
   %y.tr = phi i32 [ %y, %entry ], [ %call9, %if.then ]
   %z.tr = phi i32 [ %z, %entry ], [ %call14, %if.then ]
-  %cmp = icmp slt i32 %y.tr, %x.tr, !dbg !12
-  br i1 %cmp, label %if.then, label %if.end, !dbg !12
+  %cmp = icmp slt i32 %y.tr, %x.tr, !dbg !22
+  br i1 %cmp, label %if.then, label %if.end, !dbg !22
 
 if.then:                                          ; preds = %tailrecurse
-  tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !6, metadata !DIExpression()), !dbg !7
-  tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !8, metadata !DIExpression()), !dbg !9
-  tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !10, metadata !DIExpression()), !dbg !11
-  %sub = sub nsw i32 %x.tr, 1, !dbg !14
-  %call = tail call i32 @tak(i32 %sub, i32 %y.tr, i32 %z.tr), !dbg !14
-  %sub6 = sub nsw i32 %y.tr, 1, !dbg !14
-  %call9 = tail call i32 @tak(i32 %sub6, i32 %z.tr, i32 %x.tr), !dbg !14
-  %sub11 = sub nsw i32 %z.tr, 1, !dbg !14
-  %call14 = tail call i32 @tak(i32 %sub11, i32 %x.tr, i32 %y.tr), !dbg !14
+  tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !36, metadata !DIExpression()), !dbg !37
+  tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !38, metadata !DIExpression()), !dbg !39
+  tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !40, metadata !DIExpression()), !dbg !41
+  %sub = sub nsw i32 %x.tr, 1, !dbg !24
+  %call = tail call i32 @tak(i32 %sub, i32 %y.tr, i32 %z.tr), !dbg !24
+  %sub6 = sub nsw i32 %y.tr, 1, !dbg !24
+  %call9 = tail call i32 @tak(i32 %sub6, i32 %z.tr, i32 %x.tr), !dbg !24
+  %sub11 = sub nsw i32 %z.tr, 1, !dbg !24
+  %call14 = tail call i32 @tak(i32 %sub11, i32 %x.tr, i32 %y.tr), !dbg !24
   br label %tailrecurse
 
 if.end:                                           ; preds = %tailrecurse
-  tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !6, metadata !DIExpression()), !dbg !7
-  tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !8, metadata !DIExpression()), !dbg !9
-  tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !10, metadata !DIExpression()), !dbg !11
-  br label %return, !dbg !16
+  tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !36, metadata !DIExpression()), !dbg !37
+  tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !38, metadata !DIExpression()), !dbg !39
+  tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !40, metadata !DIExpression()), !dbg !41
+  br label %return, !dbg !26
 
 return:                                           ; preds = %if.end
-  ret i32 %z.tr, !dbg !17
+  ret i32 %z.tr, !dbg !27
 }
 
 @channelColumns = external global i64
@@ -143,3 +143,16 @@ for.end:
 !17 = !DILocation(line: 37, column: 1, scope: !13)
 !18 = !DIFile(filename: "/Volumes/Lalgate/cj/llvm/projects/llvm-test/SingleSource/Benchmarks/BenchmarkGame/recursive.c", directory: "/Volumes/Lalgate/cj/D/projects/llvm-test/SingleSource/Benchmarks/BenchmarkGame")
 !20 = !{i32 1, !"Debug Info Version", i32 3}
+!21 = distinct !DISubprogram(name: "tak", line: 32, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !2, file: !18, scope: !1, type: !3)
+!22 = !DILocation(line: 33, column: 3, scope: !23)
+!23 = distinct !DILexicalBlock(line: 32, column: 30, file: !18, scope: !21)
+!24 = !DILocation(line: 34, column: 5, scope: !25)
+!25 = distinct !DILexicalBlock(line: 33, column: 14, file: !18, scope: !23)
+!26 = !DILocation(line: 36, column: 3, scope: !23)
+!27 = !DILocation(line: 37, column: 1, scope: !23)
+!36 = !DILocalVariable(name: "x", line: 32, arg: 1, scope: !21, file: !1, type: !5)
+!37 = !DILocation(line: 32, column: 13, scope: !21)
+!38 = !DILocalVariable(name: "y", line: 32, arg: 2, scope: !21, file: !1, type: !5)
+!39 = !DILocation(line: 32, column: 20, scope: !21)
+!40 = !DILocalVariable(name: "z", line: 32, arg: 3, scope: !21, file: !1, type: !5)
+!41 = !DILocation(line: 32, column: 27, scope: !21)
diff --git a/test/Transforms/SampleProfile/Inputs/indirect-call.prof b/test/Transforms/SampleProfile/Inputs/indirect-call.prof
index aaf9ec15d02e3..ff7be5df977a6 100644
--- a/test/Transforms/SampleProfile/Inputs/indirect-call.prof
+++ b/test/Transforms/SampleProfile/Inputs/indirect-call.prof
@@ -1,19 +1,19 @@
 test:63067:0
- 4: 3345 _Z3barv:1398 _Z3foov:2059
+ 1: 3345 _Z3barv:1398 _Z3foov:2059
 test_inline:3000:0
- 5: foo_inline1:3000
-  1: 3000
- 5: foo_inline2:4000
-  1: 4000
+ 1: foo_inline1:3000
+  11: 3000
+ 1: foo_inline2:4000
+  19: 4000
 test_noinline:3000:0
- 5: foo_noinline:3000
-  1: 3000
+ 1: foo_noinline:3000
+  20: 3000
 test_direct:3000:0
- 5: foo_direct:3000
-  1: 3000
+ 1: foo_direct:3000
+  21: 3000
 test_inline_strip:3000:0
- 5: foo_inline_strip:3000
+ 1: foo_inline_strip:3000
   1: 3000
-test_inline_strip_confilict:3000:0
- 5: foo_inline_strip_conflict:3000
+test_inline_strip_conflict:3000:0
+ 1: foo_inline_strip_conflict:3000
   1: 3000
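For readers unfamiliar with the text sample-profile format above: a top-level line is function:total_samples:head_samples; an indented "offset: count [callee:count ...]" line attaches samples (and optional indirect-call targets) to a source-line offset within the function, and an indented "offset: callee:total" line opens a nested profile for a callsite inlined at that offset. The renumbering in this diff keeps those offsets consistent with the new !dbg locations in indirect-call.ll below. A minimal line classifier, sketched in Python (illustrative only, not LLVM's reader):

    import re

    # Classify one line of the text sample-profile format (simplified sketch).
    HEADER = re.compile(r"^(?P<name>[\w.$]+):(?P<total>\d+):(?P<head>\d+)$")
    BODY = re.compile(r"^(?P<offset>\d+(\.\d+)?): (?P<count>\d+)(?P<targets>( [\w.$]+:\d+)*)$")
    INLINED = re.compile(r"^\d+(\.\d+)?: [\w.$]+:\d+$")

    def classify(line):
        stripped = line.strip()
        if not line.startswith(" ") and HEADER.match(stripped):
            return "function header"
        if line.startswith(" ") and BODY.match(stripped):
            return "body samples"
        if line.startswith(" ") and INLINED.match(stripped):
            return "inlined callsite"
        return "unknown"

    assert classify("test:63067:0") == "function header"
    assert classify(" 1: 3345 _Z3barv:1398 _Z3foov:2059") == "body samples"
    assert classify(" 1: foo_inline1:3000") == "inlined callsite"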
diff --git a/test/Transforms/SampleProfile/indirect-call.ll b/test/Transforms/SampleProfile/indirect-call.ll
index 4647dd4212998..4101f6f492e53 100644
--- a/test/Transforms/SampleProfile/indirect-call.ll
+++ b/test/Transforms/SampleProfile/indirect-call.ll
@@ -12,7 +12,7 @@ define void @test(void ()*) !dbg !3 {
 
 ; CHECK-LABEL: @test_inline
 ; If the indirect call is promoted and inlined in the profile, we should promote and inline it.
-define void @test_inline(i64* (i32*)*, i32* %x) !dbg !3 {
+define void @test_inline(i64* (i32*)*, i32* %x) !dbg !6 {
   %2 = alloca i64* (i32*)*
   store i64* (i32*)* %0, i64* (i32*)** %2
   %3 = load i64* (i32*)*, i64* (i32*)** %2
@@ -25,14 +25,14 @@ define void @test_inline(i64* (i32*)*, i32* %x) !dbg !3 {
 ; CHECK-NOT: call
 ; CHECK: if.false.orig_indirect2:
 ; CHECK: call
-  call i64* %3(i32* %x), !dbg !5
+  call i64* %3(i32* %x), !dbg !7
   ret void
 }
 
 ; CHECK-LABEL: @test_inline_strip
 ; If the indirect call is promoted and inlined in the profile, and the callee name
 ; is stripped, we should promote and inline it.
-define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !3 {
+define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !8 {
   %2 = alloca i64* (i32*)*
   store i64* (i32*)* %0, i64* (i32*)** %2
   %3 = load i64* (i32*)*, i64* (i32*)** %2
@@ -41,74 +41,74 @@ define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !3 {
 ; CHECK-NOT: call
 ; CHECK: if.false.orig_indirect:
 ; CHECK: call
-  call i64* %3(i32* %x), !dbg !5
+  call i64* %3(i32* %x), !dbg !9
   ret void
 }
 
 ; CHECK-LABEL: @test_inline_strip_conflict
 ; If the indirect call is promoted and inlined in the profile, and the callee name
 ; is stripped but has more than one potential match, we should not promote.
-define void @test_inline_strip_conflict(i64* (i32*)*, i32* %x) !dbg !3 {
+define void @test_inline_strip_conflict(i64* (i32*)*, i32* %x) !dbg !10 {
   %2 = alloca i64* (i32*)*
   store i64* (i32*)* %0, i64* (i32*)** %2
   %3 = load i64* (i32*)*, i64* (i32*)** %2
 ; CHECK-NOT: if.true.direct_targ:
-  call i64* %3(i32* %x), !dbg !5
+  call i64* %3(i32* %x), !dbg !11
   ret void
 }
 
 ; CHECK-LABEL: @test_noinline
 ; If the indirect call target is not available, we should not promote it.
-define void @test_noinline(void ()*) !dbg !3 {
+define void @test_noinline(void ()*) !dbg !12 {
   %2 = alloca void ()*
   store void ()* %0, void ()** %2
   %3 = load void ()*, void ()** %2
 ; CHECK-NOT: icmp
 ; CHECK: call
-  call void %3(), !dbg !5
+  call void %3(), !dbg !13
   ret void
 }
 
 @x = global i32 0, align 4
 
-define i32* @foo_inline1(i32* %x) !dbg !3 {
+define i32* @foo_inline1(i32* %x) !dbg !14 {
   ret i32* %x
 }
 
-define i32* @foo_inline_strip.suffix(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip.suffix(i32* %x) !dbg !15 {
   ret i32* %x
 }
 
-define i32* @foo_inline_strip_conflict.suffix1(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip_conflict.suffix1(i32* %x) !dbg !16 {
   ret i32* %x
 }
 
-define i32* @foo_inline_strip_conflict.suffix2(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip_conflict.suffix2(i32* %x) !dbg !17 {
   ret i32* %x
 }
 
-define i32* @foo_inline_strip_conflict.suffix3(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip_conflict.suffix3(i32* %x) !dbg !18 {
   ret i32* %x
 }
 
-define i32* @foo_inline2(i32* %x) !dbg !3 {
+define i32* @foo_inline2(i32* %x) !dbg !19 {
  ret i32* %x
 }
 
-define i32 @foo_noinline(i32 %x) !dbg !3 {
+define i32 @foo_noinline(i32 %x) !dbg !20 {
   ret i32 %x
 }
 
-define void @foo_direct() !dbg !3 {
+define void @foo_direct() !dbg !21 {
   ret void
 }
 
 ; CHECK-LABEL: @test_direct
 ; We should not promote a direct call.
-define void @test_direct() !dbg !3 {
+define void @test_direct() !dbg !22 {
 ; CHECK-NOT: icmp
 ; CHECK: call
-  call void @foo_alias(), !dbg !5
+  call void @foo_alias(), !dbg !23
   ret void
 }
 
@@ -120,7 +120,25 @@ define void @test_direct() !dbg !3 {
 !0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1)
 !1 = !DIFile(filename: "test.cc", directory: "/")
 !2 = !{i32 2, !"Debug Info Version", i32 3}
-!3 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, unit: !0)
-!4 = !DILocation(line: 5, scope: !3)
+!3 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 3, unit: !0)
+!4 = !DILocation(line: 4, scope: !3)
 !5 = !DILocation(line: 6, scope: !3)
 ; CHECK: ![[PROF]] = !{!"VP", i32 0, i64 3457, i64 9191153033785521275, i64 2059, i64 -1069303473483922844, i64 1398}
+!6 = distinct !DISubprogram(name: "test_inline", scope: !1, file: !1, line: 6, unit: !0)
+!7 = !DILocation(line: 7, scope: !6)
+!8 = distinct !DISubprogram(name: "test_inline_strip", scope: !1, file: !1, line: 8, unit: !0)
+!9 = !DILocation(line: 9, scope: !8)
+!10 = distinct !DISubprogram(name: "test_inline_strip_conflict", scope: !1, file: !1, line: 10, unit: !0)
+!11 = !DILocation(line: 11, scope: !10)
+!12 = distinct !DISubprogram(name: "test_noinline", scope: !1, file: !1, line: 12, unit: !0)
+!13 = !DILocation(line: 13, scope: !12)
+!14 = distinct !DISubprogram(name: "foo_inline1", scope: !1, file: !1, line: 11, unit: !0)
+!15 = distinct !DISubprogram(name: "foo_inline_strip.suffix", scope: !1, file: !1, line: 1, unit: !0)
+!16 = distinct !DISubprogram(name: "foo_inline_strip_conflict.suffix1", scope: !1, file: !1, line: 1, unit: !0)
+!17 = distinct !DISubprogram(name: "foo_inline_strip_conflict.suffix2", scope: !1, file: !1, line: 1, unit: !0)
+!18 = distinct !DISubprogram(name: "foo_inline_strip_conflict.suffix3", scope: !1, file: !1, line: 1, unit: !0)
+!19 = distinct !DISubprogram(name: "foo_inline2", scope: !1, file: !1, line: 19, unit: !0)
+!20 = distinct !DISubprogram(name: "foo_noinline", scope: !1, file: !1, line: 20, unit: !0)
+!21 = distinct !DISubprogram(name: "foo_direct", scope: !1, file: !1, line: 21, unit: !0)
+!22 = distinct !DISubprogram(name: "test_direct", scope: !1, file: !1, line: 22, unit: !0)
+!23 = !DILocation(line: 23, scope: !22)
diff --git a/test/Unit/lit.cfg b/test/Unit/lit.cfg
index 30a5d3fab826d..dac0bf829ba6f 100644
--- a/test/Unit/lit.cfg
+++ b/test/Unit/lit.cfg
@@ -43,6 +43,10 @@ if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
     config.environment['PATH'] = os.path.pathsep.join((
             config.shlibdir, config.environment['PATH']))
 
+# Win32 may use %SYSTEMDRIVE% during file system shell operations, so propagate.
+if sys.platform == 'win32' and 'SYSTEMDRIVE' in os.environ:
+    config.environment['SYSTEMDRIVE'] = os.environ['SYSTEMDRIVE']
+
 ###
 # Check that the object root is known.
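The lit.cfg change above is an instance of a common pattern: lit builds a scrubbed environment for tests, so any host variable the tests implicitly rely on has to be forwarded explicitly. A generalized sketch of the same idea, meant to live inside a lit.cfg where lit supplies the config object (the TEMP/TMP entries are illustrative extras, not part of this commit):

    import os
    import sys

    # Forward selected host environment variables into the lit test environment.
    # SYSTEMDRIVE is the one the change above needs; which others matter is
    # platform- and suite-specific.
    def propagate_env(config, names):
        for name in names:
            if name in os.environ:
                config.environment[name] = os.environ[name]

    if sys.platform == 'win32':
        propagate_env(config, ['SYSTEMDRIVE', 'TEMP', 'TMP'])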
diff --git a/test/tools/llvm-objdump/WebAssembly/symbol-table.test b/test/tools/llvm-objdump/WebAssembly/symbol-table.test
new file mode 100644
index 0000000000000..8936c7a12e4c6
--- /dev/null
+++ b/test/tools/llvm-objdump/WebAssembly/symbol-table.test
@@ -0,0 +1,8 @@
+RUN: llvm-objdump -t %p/../Inputs/test.wasm | FileCheck %s
+
+CHECK: SYMBOL TABLE:
+CHECK: 00000000 l F IMPORT bar
+CHECK: 00000000 g F EXPORT baz
+CHECK: 00000001 g F EXPORT quux
+CHECK: 00000000 l F name $import
+CHECK: 00000001 l F name $func0
diff --git a/test/tools/llvm-readobj/Inputs/resources/cursor_small.bmp b/test/tools/llvm-readobj/Inputs/resources/cursor_small.bmp
new file mode 100644
index 0000000000000..ce513261bc2c2
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/cursor_small.bmp
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/resources/okay_small.bmp b/test/tools/llvm-readobj/Inputs/resources/okay_small.bmp
new file mode 100644
index 0000000000000..e4005bf5ef97c
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/okay_small.bmp
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coff b/test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coff
new file mode 100644
index 0000000000000..b9a7908b1c5c9
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coff
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/resources/test_resource.rc b/test/tools/llvm-readobj/Inputs/resources/test_resource.rc
new file mode 100644
index 0000000000000..fd616520dbe1b
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/test_resource.rc
@@ -0,0 +1,44 @@
+#include "windows.h"
+
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+
+myaccelerators ACCELERATORS
+{
+ "^C", 999, VIRTKEY, ALT
+ "D", 1100, VIRTKEY, CONTROL, SHIFT
+ "^R", 444, ASCII, NOINVERT
+}
+
+cursor BITMAP "cursor_small.bmp"
+okay BITMAP "okay_small.bmp"
+
+14432 MENU
+LANGUAGE LANG_CHINESE, SUBLANG_CHINESE_SIMPLIFIED
+{
+ MENUITEM "yu", 100
+ MENUITEM "shala", 101
+ MENUITEM "kaoya", 102
+}
+
+testdialog DIALOG 10, 10, 200, 300
+STYLE WS_POPUP | WS_BORDER
+CAPTION "Test"
+{
+ CTEXT "Continue:", 1, 10, 10, 230, 14
+ PUSHBUTTON "&OK", 2, 66, 134, 161, 13
+}
+
+12 ACCELERATORS
+{
+ "X", 164, VIRTKEY, ALT
+ "H", 5678, VIRTKEY, CONTROL, SHIFT
+ "^R", 444, ASCII, NOINVERT
+}
+
+"eat" MENU
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_AUS
+{
+ MENUITEM "fish", 100
+ MENUITEM "salad", 101
+ MENUITEM "duck", 102
+}
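Once the script above is compiled with rc and cvtres (the exact commands are recorded in the comment at the top of resources.test below), the resulting object's .rsrc section can be dumped with the flag the test exercises. A trivial driver sketch in Python (the object path is illustrative, and llvm-readobj is assumed to be on PATH):

    import subprocess

    # Dump the COFF resource tree the way resources.test does.
    result = subprocess.run(
        ["llvm-readobj", "-coff-resources", "test_resource.obj.coff"],
        capture_output=True, text=True, check=True)
    print(result.stdout)   # begins with "Resources [" as in the TEST_RES checks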
diff --git a/test/tools/llvm-readobj/Inputs/resources/test_resource.res b/test/tools/llvm-readobj/Inputs/resources/test_resource.res
new file mode 100644
index 0000000000000..c577ecc3d6333
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/test_resource.res
Binary files differ
diff --git a/test/tools/llvm-readobj/resources.test b/test/tools/llvm-readobj/resources.test
index 46ee8b99a65de..855ce5393b84b 100644
--- a/test/tools/llvm-readobj/resources.test
+++ b/test/tools/llvm-readobj/resources.test
@@ -1,19 +1,111 @@
-RUN: llvm-readobj -coff-resources %p/Inputs/zero-string-table.obj.coff-i386 \
-RUN:   | FileCheck %s -check-prefix RESOURCE
+// Check dumping of the .rsrc section(s)
+// The input was generated with the following commands, using the original Windows
+// rc.exe and cvtres.exe:
+// > rc /fo test_resource.res /nologo test_resource.rc
+// > cvtres /machine:X86 /readonly /nologo /out:test_resource.o test_resource.res
 
-RESOURCE: Resources [
-RESOURCE-NEXT:   Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
-RESOURCE-NEXT:   .rsrc$01 Data (
-RESOURCE-NEXT:     0000: 00000000 00000000 00000000 00000100  |................|
-RESOURCE-NEXT:     0010: 06000000 18000080 00000000 00000000  |................|
-RESOURCE-NEXT:     0020: 00000000 00000100 01000000 30000080  |............0...|
-RESOURCE-NEXT:     0030: 00000000 00000000 00000000 00000100  |................|
-RESOURCE-NEXT:     0040: 09040000 48000000 00000000 2A000000  |....H.......*...|
-RESOURCE-NEXT:     0050: 00000000 00000000                    |........|
-RESOURCE-NEXT:   )
-RESOURCE-NEXT:   .rsrc$02 Data (
-RESOURCE-NEXT:     0000: 00000500 48006500 6C006C00 6F000000  |....H.e.l.l.o...|
-RESOURCE-NEXT:     0010: 00000000 00000000 00000000 00000000  |................|
-RESOURCE-NEXT:     0020: 00000000 00000000 00000000 00000000  |................|
-RESOURCE-NEXT:   )
-RESOURCE-NEXT: ]
+RUN: llvm-readobj -coff-resources -section-data %p/Inputs/zero-string-table.obj.coff-i386 \
+RUN:   | FileCheck %s -check-prefix ZERO
+RUN: llvm-readobj -coff-resources %p/Inputs/resources/test_resource.obj.coff \
+RUN:   | FileCheck %s -check-prefix TEST_RES
+
+ZERO: Resources [
+ZERO-NEXT:   String Name Entries: 0
+ZERO-NEXT:   ID Entries: 1
+ZERO-NEXT:   Type: kRT_STRING (ID 6) [
+ZERO-NEXT:     String Name Entries: 0
+ZERO-NEXT:     ID Entries: 1
+ZERO-NEXT:     Name: (ID 1) [
+ZERO-NEXT:       String Name Entries: 0
+ZERO-NEXT:       ID Entries: 1
+ZERO-NEXT:       Language: (ID 1033) [
+ZERO-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+ZERO-NEXT:         Major Version: 0
+ZERO-NEXT:         Minor Version: 0
+ZERO-NEXT:       ]
+ZERO-NEXT:     ]
+ZERO-NEXT:   ]
+
+
+TEST_RES: Resources [
+TEST_RES-NEXT:   String Name Entries: 0
+TEST_RES-NEXT:   ID Entries: 4
+TEST_RES-NEXT:   Type: kRT_BITMAP (ID 2) [
+TEST_RES-NEXT:     String Name Entries: 2
+TEST_RES-NEXT:     ID Entries: 0
+TEST_RES-NEXT:     Name: CURSOR [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 1033) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:     Name: OKAY [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 1033) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:   ]
+TEST_RES-NEXT:   Type: kRT_MENU (ID 4) [
+TEST_RES-NEXT:     String Name Entries: 1
+TEST_RES-NEXT:     ID Entries: 1
+TEST_RES-NEXT:     Name: "EAT" [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 3081) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:     Name: (ID 14432) [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 2052) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:   ]
+TEST_RES-NEXT:   Type: kRT_DIALOG (ID 5) [
+TEST_RES-NEXT:     String Name Entries: 1
+TEST_RES-NEXT:     ID Entries: 0
+TEST_RES-NEXT:     Name: TESTDIALOG [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 1033) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:   ]
+TEST_RES-NEXT:   Type: kRT_ACCELERATOR (ID 9) [
+TEST_RES-NEXT:     String Name Entries: 1
+TEST_RES-NEXT:     ID Entries: 1
+TEST_RES-NEXT:     Name: MYACCELERATORS [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 1033) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:     Name: (ID 12) [
+TEST_RES-NEXT:       String Name Entries: 0
+TEST_RES-NEXT:       ID Entries: 1
+TEST_RES-NEXT:       Language: (ID 1033) [
+TEST_RES-NEXT:         Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT:         Major Version: 0
+TEST_RES-NEXT:         Minor Version: 0
+TEST_RES-NEXT:       ]
+TEST_RES-NEXT:     ]
+TEST_RES-NEXT:   ]