Diffstat (limited to 'test/CodeGen/AMDGPU/shl.ll')
-rw-r--r--  test/CodeGen/AMDGPU/shl.ll  171
1 file changed, 170 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/AMDGPU/shl.ll b/test/CodeGen/AMDGPU/shl.ll
index 55db80731c900..5a2b03bff9909 100644
--- a/test/CodeGen/AMDGPU/shl.ll
+++ b/test/CodeGen/AMDGPU/shl.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=GCN -check-prefix=SI %s
; XUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
declare i32 @llvm.r600.read.tidig.x() #0
@@ -208,4 +208,173 @@ define void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
ret void
}
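+; 281474976710655 is 0xffffffffffff, which is too wide to be encoded as an
+; inline immediate, so it has to be materialized in an SGPR pair first.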
+; FUNC-LABEL: {{^}}s_shl_constant_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+define void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+ %shl = shl i64 281474976710655, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
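+; 1231231234567 is 0x11eab19b207; the low and high halves of the constant are
+; built with separate s_mov_b32/s_movk_i32 instructions before the shift.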
+; FUNC-LABEL: {{^}}v_shl_constant_i64:
+; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0xab19b207
+; SI-DAG: s_movk_i32 s[[KHI:[0-9]+]], 0x11e{{$}}
+; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
+; SI: buffer_store_dwordx2
+define void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+ %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %shl = shl i64 1231231234567, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
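+; 1234567 is 0x12d687, so only the low half of the source pair needs a
+; nonzero value; the high half is simply zero.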
+; FUNC-LABEL: {{^}}v_shl_i64_32_bit_constant:
+; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
+; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x12d687{{$}}
+; SI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0{{$}}
+; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
+define void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+ %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %shl = shl i64 1234567, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
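+; 64 is the largest positive integer inline immediate, so it can be used
+; directly as the shifted source operand without a mov.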
+; FUNC-LABEL: {{^}}v_shl_inline_imm_64_i64:
+; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, 64, {{v[0-9]+}}
+define void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+ %a = load i64, i64 addrspace(1)* %aptr, align 8
+ %shl = shl i64 64, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_64_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 64, s{{[0-9]+}}
+define void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 64, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_1_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1, s{{[0-9]+}}
+define void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 1, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
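+; The constants in the next group of tests are the i64 bit patterns of the
+; doubles 1.0, -1.0, 0.5, -0.5, 2.0, -2.0, 4.0 and -4.0 (e.g.
+; 4607182418800017408 is 0x3ff0000000000000, the bit pattern of double 1.0),
+; all of which fold as floating-point inline immediates.
+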
+; FUNC-LABEL: {{^}}s_shl_inline_imm_1.0_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{[0-9]+}}
+define void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 4607182418800017408, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_1.0_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{[0-9]+}}
+define void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 13830554455654793216, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_0.5_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 0.5, s{{[0-9]+}}
+define void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 4602678819172646912, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_0.5_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -0.5, s{{[0-9]+}}
+define void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 13826050856027422720, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_2.0_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 2.0, s{{[0-9]+}}
+define void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 4611686018427387904, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_2.0_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -2.0, s{{[0-9]+}}
+define void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 13835058055282163712, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_4.0_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 4.0, s{{[0-9]+}}
+define void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 4616189618054758400, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_4.0_i64:
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -4.0, s{{[0-9]+}}
+define void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 13839561654909534208, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; Test with the 64-bit integer bit pattern for a 32-bit float in the
+; low 32 bits, which is not a valid 64-bit inline immediate.
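+; 1082130432 is 0x40800000, the bit pattern of float 4.0. As a 64-bit value
+; it is not an inline immediate, but the low half can still be materialized
+; with the 32-bit inline constant 4.0 in s_mov_b32.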
+
+; FUNC-LABEL: {{^}}s_shl_inline_imm_f32_4.0_i64:
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
+define void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 1082130432, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
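+; -1065353216 is 0xffffffffc0800000: float -4.0 in the low half with the
+; high half all ones, matching the -4.0 and -1 moves checked below.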
+; FIXME: Copy of -1 register
+; FUNC-LABEL: {{^}}s_shl_inline_imm_f32_neg_4.0_i64:
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], -4.0
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
+; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}, s{{[0-9]+}}
+define void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 -1065353216, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; Shift into upper 32-bits
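+; Here the float 4.0 / -4.0 bit patterns sit in the high 32 bits
+; (4647714815446351872 is 0x4080000000000000), so the low half of the
+; materialized source pair is zero.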
+; FUNC-LABEL: {{^}}s_shl_inline_high_imm_f32_4.0_i64:
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
+define void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 4647714815446351872, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_shl_inline_high_imm_f32_neg_4.0_i64:
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
+; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
+define void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %shl = shl i64 13871086852301127680, %a
+ store i64 %shl, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
attributes #0 = { nounwind readnone }