author     Dimitry Andric <dim@FreeBSD.org>  2019-06-11 18:16:27 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-06-11 18:16:27 +0000
commit     687a64222b4c87c825258d4dfeb1f0794e8cb300 (patch)
tree       f15e528223c9e06e4ed874e21ad41c2eb169030b /test/CodeGen/AMDGPU
parent     24eadf6f46cd3637ffe867648ce8eca7314115c6 (diff)
Diffstat (limited to 'test/CodeGen/AMDGPU')
-rw-r--r--  test/CodeGen/AMDGPU/add.ll                                        83
-rw-r--r--  test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll     6
-rw-r--r--  test/CodeGen/AMDGPU/fence-barrier.ll                               3
-rw-r--r--  test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir                   230
-rw-r--r--  test/CodeGen/AMDGPU/fold-immediate-operand-shrink.mir             72
-rw-r--r--  test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll                      3
-rw-r--r--  test/CodeGen/AMDGPU/r600.add.ll                                  167
-rw-r--r--  test/CodeGen/AMDGPU/r600.sub.ll                                  152
-rw-r--r--  test/CodeGen/AMDGPU/salu-to-valu.ll                                2
-rw-r--r--  test/CodeGen/AMDGPU/sub.ll                                        90
10 files changed, 686 insertions, 122 deletions
diff --git a/test/CodeGen/AMDGPU/add.ll b/test/CodeGen/AMDGPU/add.ll
index 6a108db879cc7..bce924ec4a081 100644
--- a/test/CodeGen/AMDGPU/add.ll
+++ b/test/CodeGen/AMDGPU/add.ll
@@ -1,11 +1,8 @@
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SIVI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SIVI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}s_add_i32:
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
; GCN: v_mov_b32_e32 v[[V_REG:[0-9]+]], s[[REG]]
; GCN: buffer_store_dword v[[V_REG]],
@@ -19,9 +16,6 @@ define amdgpu_kernel void @s_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %
}
; FUNC-LABEL: {{^}}s_add_v2i32:
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @s_add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
@@ -34,11 +28,6 @@ define amdgpu_kernel void @s_add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> a
}
; FUNC-LABEL: {{^}}s_add_v4i32:
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
@@ -53,15 +42,6 @@ define amdgpu_kernel void @s_add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> a
}
; FUNC-LABEL: {{^}}s_add_v8i32:
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
@@ -78,23 +58,6 @@ entry:
}
; FUNC-LABEL: {{^}}s_add_v16i32:
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-; EG: ADD_INT
-
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
@@ -124,7 +87,7 @@ entry:
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc, [[A]], [[B]]
; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @v_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i32, i32 addrspace(1)* %gep, i32 1
%a = load volatile i32, i32 addrspace(1)* %gep
@@ -139,7 +102,7 @@ define amdgpu_kernel void @v_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc, 0x7b, [[A]]
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 0x7b, [[A]]
define amdgpu_kernel void @v_add_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i32, i32 addrspace(1)* %gep, i32 1
%a = load volatile i32, i32 addrspace(1)* %gep
@@ -151,13 +114,6 @@ define amdgpu_kernel void @v_add_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1
; FUNC-LABEL: {{^}}add64:
; GCN: s_add_u32
; GCN: s_addc_u32
-
-; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
-; EG-DAG: ADD_INT {{[* ]*}}
-; EG-DAG: ADDC_UINT
-; EG-DAG: ADD_INT
-; EG-DAG: ADD_INT {{[* ]*}}
-; EG-NOT: SUB
define amdgpu_kernel void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%add = add i64 %a, %b
@@ -172,13 +128,6 @@ entry:
; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
; GCN-NOT: v_addc_u32_e32 s
-
-; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
-; EG-DAG: ADD_INT {{[* ]*}}
-; EG-DAG: ADDC_UINT
-; EG-DAG: ADD_INT
-; EG-DAG: ADD_INT {{[* ]*}}
-; EG-NOT: SUB
define amdgpu_kernel void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
entry:
%0 = load i64, i64 addrspace(1)* %in
@@ -191,13 +140,6 @@ entry:
; FUNC-LABEL: {{^}}add64_in_branch:
; GCN: s_add_u32
; GCN: s_addc_u32
-
-; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
-; EG-DAG: ADD_INT {{[* ]*}}
-; EG-DAG: ADDC_UINT
-; EG-DAG: ADD_INT
-; EG-DAG: ADD_INT {{[* ]*}}
-; EG-NOT: SUB
define amdgpu_kernel void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
%0 = icmp eq i64 %a, 0
@@ -217,7 +159,26 @@ endif:
ret void
}
-declare i32 @llvm.r600.read.tidig.x() #1
+; Make sure the VOP3 form of add is initially selected. Otherwise a pair
+; of copies from/to VCC would be necessary
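+;
+; Illustrative sketch (not checked output; SGPR numbers invented) of the
+; copies the e32 form would force here, since it clobbers the VCC that the
+; inline asm below defines and uses:
+;   s_mov_b64 s[2:3], vcc            ; copy the asm-defined vcc away
+;   v_add_i32_e32 v0, vcc, s0, v0    ; e32 add implicitly defines vcc
+;   s_mov_b64 vcc, s[2:3]            ; copy back for the vcc use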
+
+; GCN-LABEL: {{^}}add_select_vop3:
+; SI: v_add_i32_e64 v0, s[0:1], s0, v0
+; VI: v_add_u32_e64 v0, s[0:1], s0, v0
+; GFX9: v_add_u32_e32 v0, s0, v0
+
+; GCN: ; def vcc
+; GCN: ds_write_b32
+; GCN: ; use vcc
+define amdgpu_ps void @add_select_vop3(i32 inreg %s, i32 %v) {
+ %vcc = call i64 asm sideeffect "; def vcc", "={vcc}"()
+ %sub = add i32 %v, %s
+ store i32 %sub, i32 addrspace(3)* undef
+ call void asm sideeffect "; use vcc", "{vcc}"(i64 %vcc)
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll b/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
index 5997e27fd815e..e2c7f1c47cf9f 100644
--- a/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
+++ b/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
@@ -7,6 +7,8 @@ declare void @llvm.amdgcn.s.barrier() #1
; Function Attrs: nounwind
; CHECK-LABEL: {{^}}signed_ds_offset_addressing_loop:
+; SI: s_movk_i32 [[K_0X88:s[0-9]+]], 0x88
+; SI: s_movk_i32 [[K_0X100:s[0-9]+]], 0x100
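+; (The 0x88 and 0x100 constants are now materialized into SGPRs ahead of
+; the loop and reused by the adds inside it, per the checks below.)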
; CHECK: BB0_1:
; CHECK: v_add_i32_e32 [[VADDR:v[0-9]+]],
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR]]
@@ -14,9 +16,9 @@ declare void @llvm.amdgcn.s.barrier() #1
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR8]]
; SI-DAG: v_add_i32_e32 [[VADDR0x80:v[0-9]+]], vcc, 0x80, [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x80]]
-; SI-DAG: v_add_i32_e32 [[VADDR0x88:v[0-9]+]], vcc, 0x88, [[VADDR]]
+; SI-DAG: v_add_i32_e32 [[VADDR0x88:v[0-9]+]], vcc, [[K_0X88]], [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x88]]
-; SI-DAG: v_add_i32_e32 [[VADDR0x100:v[0-9]+]], vcc, 0x100, [[VADDR]]
+; SI-DAG: v_add_i32_e32 [[VADDR0x100:v[0-9]+]], vcc, [[K_0X100]], [[VADDR]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[VADDR0x100]]
; CI-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]] offset1:2
diff --git a/test/CodeGen/AMDGPU/fence-barrier.ll b/test/CodeGen/AMDGPU/fence-barrier.ll
index 8f5a06d01fa22..7de4f1796b08a 100644
--- a/test/CodeGen/AMDGPU/fence-barrier.ll
+++ b/test/CodeGen/AMDGPU/fence-barrier.ll
@@ -54,7 +54,8 @@ define amdgpu_kernel void @test_local(i32 addrspace(1)*) {
}
; GCN-LABEL: {{^}}test_global
-; GCN: v_add_u32_e32 v{{[0-9]+}}, vcc, 0x888, v{{[0-9]+}}
+; GCN: s_movk_i32 [[K:s[0-9]+]], 0x888
+; GCN: v_add_u32_e32 v{{[0-9]+}}, vcc, [[K]], v{{[0-9]+}}
; GCN: flat_store_dword
; GCN: s_waitcnt vmcnt(0) lgkmcnt(0){{$}}
; GCN-NEXT: s_barrier
diff --git a/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir b/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir
new file mode 100644
index 0000000000000..ab544665efb41
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir
@@ -0,0 +1,230 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination %s -o - | FileCheck -check-prefix=GCN %s
+
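+# These tests cover shrinking a VOP3 V_ADD_I32_e64 whose carry-out result
+# is unused down to the VOP2 V_ADD_I32_e32 form (which implicitly defines
+# $vcc) when one operand is a frame index (FI), e.g.:
+#
+#   %2, %3 = V_ADD_I32_e64 %0, %1  -->  %2 = V_ADD_I32_e32 %0, %1
+#
+# Combinations whose operands cannot be placed legally in the e32 encoding
+# are expected to stay in the e64 form.
+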
+---
+
+# First operand is an FI in a VGPR, other operand is a VGPR
+name: shrink_vgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[V_MOV_B32_e32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:vgpr_32 = COPY $vgpr0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is a VGPR, other operand is an FI in a VGPR
+name: shrink_vgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[COPY]], [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in a VGPR, other operand is an SGPR
+name: shrink_vgpr_fi_sgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $sgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_fi_sgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $sgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_I32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e64_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:sreg_32_xm0 = COPY $sgpr0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an SGPR, other operand is an FI in a VGPR
+name: shrink_sgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $sgpr0
+
+ ; GCN-LABEL: name: shrink_sgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_I32_e64 [[V_MOV_B32_e32_]], [[COPY]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e64_]]
+ %0:sreg_32_xm0 = COPY $sgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in an SGPR, other operand is a VGPR
+name: shrink_sgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_sgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 %stack.0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[S_MOV_B32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:sreg_32_xm0 = S_MOV_B32 %stack.0
+ %1:vgpr_32 = COPY $vgpr0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is a VGPR, other operand is an FI in an SGPR
+name: shrink_vgpr_sgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16}
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_sgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 %stack.0
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[S_MOV_B32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:sreg_32_xm0 = S_MOV_B32 %stack.0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in a VGPR, other operand is an inline imm in a VGPR
+name: shrink_vgpr_imm_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_imm_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 16, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 16, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an inline imm in a VGPR, other operand is an FI in a VGPR
+name: shrink_vgpr_imm_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_imm_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_I32_e64 16, [[V_MOV_B32_e32_]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e64_]]
+ %0:vgpr_32 = V_MOV_B32_e32 16, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in a VGPR, other operand is a literal constant in a VGPR
+name: shrink_vgpr_k_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_k_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 1234, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 1234, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is a literal constant in a VGPR, other operand is an FI in a VGPR
+name: shrink_vgpr_k_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_k_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1234, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 %stack.0, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 1234, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
diff --git a/test/CodeGen/AMDGPU/fold-immediate-operand-shrink.mir b/test/CodeGen/AMDGPU/fold-immediate-operand-shrink.mir
index 847c2b720cd4e..15c453f36f631 100644
--- a/test/CodeGen/AMDGPU/fold-immediate-operand-shrink.mir
+++ b/test/CodeGen/AMDGPU/fold-immediate-operand-shrink.mir
@@ -250,8 +250,8 @@ body: |
; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_sub_i32_e64_no_carry_out_use
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN: [[V_SUBREV_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUBREV_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
- ; GCN: S_ENDPGM implicit [[V_SUBREV_I32_e32_]]
+ ; GCN: [[V_SUB_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_SUB_I32_e32_]]
%0:sreg_32_xm0 = S_MOV_B32 12345
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32, %3:sreg_64 = V_SUB_I32_e64 %0, %1, implicit $exec
@@ -269,8 +269,8 @@ body: |
; GCN-LABEL: name: shrink_vgpr_scalar_imm_v_sub_i32_e64_no_carry_out_use
; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
- ; GCN: [[V_SUB_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
- ; GCN: S_ENDPGM implicit [[V_SUB_I32_e32_]]
+ ; GCN: [[V_SUBREV_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUBREV_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_SUBREV_I32_e32_]]
%0:vgpr_32 = IMPLICIT_DEF
%1:sreg_32_xm0 = S_MOV_B32 12345
%2:vgpr_32, %3:sreg_64 = V_SUB_I32_e64 %0, %1, implicit $exec
@@ -288,8 +288,8 @@ body: |
; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_subrev_i32_e64_no_carry_out_use
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; GCN: [[V_SUB_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
- ; GCN: S_ENDPGM implicit [[V_SUB_I32_e32_]]
+ ; GCN: [[V_SUBREV_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUBREV_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_SUBREV_I32_e32_]]
%0:sreg_32_xm0 = S_MOV_B32 12345
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32, %3:sreg_64 = V_SUBREV_I32_e64 %0, %1, implicit $exec
@@ -307,8 +307,8 @@ body: |
; GCN-LABEL: name: shrink_vgpr_scalar_imm_v_subrev_i32_e64_no_carry_out_use
; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
- ; GCN: [[V_SUBREV_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUBREV_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
- ; GCN: S_ENDPGM implicit [[V_SUBREV_I32_e32_]]
+ ; GCN: [[V_SUB_I32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_I32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_SUB_I32_e32_]]
%0:vgpr_32 = IMPLICIT_DEF
%1:sreg_32_xm0 = S_MOV_B32 12345
%2:vgpr_32, %3:sreg_64 = V_SUBREV_I32_e64 %0, %1, implicit $exec
@@ -590,3 +590,59 @@ body: |
S_ENDPGM implicit %2
...
+
+---
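+# The tests below check that kill flags on the sources survive the
+# e64 -> e32 shrink, and that an ADDC whose carry-in is not already in
+# $vcc is left in the e64 form.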
+name: shrink_add_kill_flags_src0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; GCN-LABEL: name: shrink_add_kill_flags_src0
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 518144, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 killed [[V_MOV_B32_e32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 518144, implicit $exec
+ %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 killed %1, %0, implicit $exec
+ S_ENDPGM implicit %2
+...
+
+---
+name: shrink_add_kill_flags_src1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; GCN-LABEL: name: shrink_add_kill_flags_src1
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 518144, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[V_MOV_B32_e32_]], killed [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 518144, implicit $exec
+ %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %1, killed %0, implicit $exec
+ S_ENDPGM implicit %2
+...
+
+---
+name: shrink_addc_kill_flags_src2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vcc
+ ; GCN-LABEL: name: shrink_addc_kill_flags_src2
+ ; GCN: liveins: $vgpr0, $vcc
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 518144, implicit $exec
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $vcc
+ ; GCN: [[V_ADDC_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADDC_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[V_MOV_B32_e32_]], [[COPY]], [[COPY1]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADDC_U32_e64_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 518144, implicit $exec
+ %2:sreg_64_xexec = COPY $vcc
+ %3:vgpr_32, %4:sreg_64_xexec = V_ADDC_U32_e64 %1, %0, %2, implicit $exec
+ S_ENDPGM implicit %3
+...
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
index 18abf607aea5a..77d518f503ab7 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
@@ -15,7 +15,8 @@ define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in1, i32 %in2)
}
; VI-LABEL: {{^}}dpp_test1:
-; VI: v_add_u32_e32 [[REG:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-OPT: v_add_u32_e32 [[REG:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOOPT: v_add_u32_e64 [[REG:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{[0-9]+}}
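+; (Without optimizations the initially selected e64 add survives, with its
+; carry-out in an arbitrary SGPR pair rather than VCC.)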
; VI-NOOPT: v_mov_b32_e32 v{{[0-9]+}}, 0
; VI-NEXT: s_nop 0
; VI-NEXT: s_nop 0
diff --git a/test/CodeGen/AMDGPU/r600.add.ll b/test/CodeGen/AMDGPU/r600.add.ll
new file mode 100644
index 0000000000000..73eea3ef21774
--- /dev/null
+++ b/test/CodeGen/AMDGPU/r600.add.ll
@@ -0,0 +1,167 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}s_add_i32:
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @s_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
+ %a = load i32, i32 addrspace(1)* %in
+ %b = load i32, i32 addrspace(1)* %b_ptr
+ %result = add i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_add_v2i32:
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @s_add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
+ %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
+ %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
+ %result = add <2 x i32> %a, %b
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_add_v4i32:
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @s_add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
+ %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
+ %result = add <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_add_v8i32:
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+define amdgpu_kernel void @s_add_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
+entry:
+ %0 = add <8 x i32> %a, %b
+ store <8 x i32> %0, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_add_v16i32:
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+define amdgpu_kernel void @s_add_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
+entry:
+ %0 = add <16 x i32> %a, %b
+ store <16 x i32> %0, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_add_i32:
+define amdgpu_kernel void @v_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %gep, i32 1
+ %a = load volatile i32, i32 addrspace(1)* %gep
+ %b = load volatile i32, i32 addrspace(1)* %b_ptr
+ %result = add i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_add_imm_i32:
+define amdgpu_kernel void @v_add_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %gep, i32 1
+ %a = load volatile i32, i32 addrspace(1)* %gep
+ %result = add i32 %a, 123
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}add64:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
+; EG-DAG: ADD_INT {{[* ]*}}
+; EG-DAG: ADDC_UINT
+; EG-DAG: ADD_INT
+; EG-DAG: ADD_INT {{[* ]*}}
+; EG-NOT: SUB
+define amdgpu_kernel void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+entry:
+ %add = add i64 %a, %b
+ store i64 %add, i64 addrspace(1)* %out
+ ret void
+}
+
+; The v_addc_u32 and v_add_i32 instructions can't read SGPRs, because they
+; use VCC. The test is designed so that %a will be stored in an SGPR and
+; %0 will be stored in a VGPR, so the compiler will be forced to copy %a
+; to a VGPR before doing the add.
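+;
+; Illustrative GCN-style shape of that sequence (not checked in this r600
+; test; register numbers invented):
+;   v_mov_b32_e32 v2, s1                   ; copy the high half of %a to a VGPR
+;   v_add_i32_e32 v0, vcc, s0, v0          ; add the low halves
+;   v_addc_u32_e32 v1, vcc, v2, v1, vcc    ; addc already reads vcc, so no SGPR source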
+
+; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
+; EG-DAG: ADD_INT {{[* ]*}}
+; EG-DAG: ADDC_UINT
+; EG-DAG: ADD_INT
+; EG-DAG: ADD_INT {{[* ]*}}
+; EG-NOT: SUB
+define amdgpu_kernel void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
+entry:
+ %0 = load i64, i64 addrspace(1)* %in
+ %1 = add i64 %a, %0
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
+
+; Test i64 add inside a branch.
+; FUNC-LABEL: {{^}}add64_in_branch:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
+; EG-DAG: ADD_INT {{[* ]*}}
+; EG-DAG: ADDC_UINT
+; EG-DAG: ADD_INT
+; EG-DAG: ADD_INT {{[* ]*}}
+; EG-NOT: SUB
+define amdgpu_kernel void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
+entry:
+ %0 = icmp eq i64 %a, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i64, i64 addrspace(1)* %in
+ br label %endif
+
+else:
+ %2 = add i64 %a, %b
+ br label %endif
+
+endif:
+ %3 = phi i64 [%1, %if], [%2, %else]
+ store i64 %3, i64 addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/r600.sub.ll b/test/CodeGen/AMDGPU/r600.sub.ll
new file mode 100644
index 0000000000000..2ded4f64328d4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/r600.sub.ll
@@ -0,0 +1,152 @@
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=EG,FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() readnone
+
+; FUNC-LABEL: {{^}}s_sub_i32:
+define amdgpu_kernel void @s_sub_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %result = sub i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_sub_imm_i32:
+define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
+ %result = sub i32 1234, %a
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
+ %a = load i32, i32 addrspace(1)* %in
+ %b = load i32, i32 addrspace(1)* %b_ptr
+ %result = sub i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_imm_i32:
+; EG: SUB_INT
+define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %a = load i32, i32 addrspace(1)* %in
+ %result = sub i32 123, %a
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v2i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
+ %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+ %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
+ %result = sub <2 x i32> %a, %b
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v4i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
+ %result = sub <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_i16:
+define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i32 1
+ %a = load volatile i16, i16 addrspace(1)* %gep
+ %b = load volatile i16, i16 addrspace(1)* %b_ptr
+ %result = sub i16 %a, %b
+ store i16 %result, i16 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v2i16:
+define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
+ %result = sub <2 x i16> %a, %b
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v4i16:
+define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
+ %a = load <4 x i16>, <4 x i16> addrspace(1) * %gep
+ %b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
+ %result = sub <4 x i16> %a, %b
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_sub_i64:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG-DAG: SUB_INT {{[* ]*}}
+; EG-DAG: SUBB_UINT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT {{[* ]*}}
+define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_sub_i64:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG-DAG: SUB_INT {{[* ]*}}
+; EG-DAG: SUBB_UINT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT {{[* ]*}}
+define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
+ %a = load i64, i64 addrspace(1)* %a_ptr
+ %b = load i64, i64 addrspace(1)* %b_ptr
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_test_sub_v2i64:
+define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
+ %a = load <2 x i64>, <2 x i64> addrspace(1)* %a_ptr
+ %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
+ %result = sub <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_test_sub_v4i64:
+define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
+ %a = load <4 x i64>, <4 x i64> addrspace(1)* %a_ptr
+ %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
+ %result = sub <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/salu-to-valu.ll b/test/CodeGen/AMDGPU/salu-to-valu.ll
index 9b46962108c28..cf42ee9b39ba0 100644
--- a/test/CodeGen/AMDGPU/salu-to-valu.ll
+++ b/test/CodeGen/AMDGPU/salu-to-valu.ll
@@ -458,7 +458,7 @@ bb7: ; preds = %bb3
}
; GCN-LABEL: {{^}}phi_visit_order:
-; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, 1, v{{[0-9]+}}
+; GCN: v_add_i32_e64 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 1, v{{[0-9]+}}
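+; (The add is now initially selected in its e64 form, so the carry-out goes
+; to an SGPR pair instead of VCC.)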
define amdgpu_kernel void @phi_visit_order() {
bb:
br label %bb1
diff --git a/test/CodeGen/AMDGPU/sub.ll b/test/CodeGen/AMDGPU/sub.ll
index 4bd346dc586d1..485b374454de6 100644
--- a/test/CodeGen/AMDGPU/sub.ll
+++ b/test/CodeGen/AMDGPU/sub.ll
@@ -1,11 +1,10 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX89,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX89,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=EG,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX89 %s
-declare i32 @llvm.r600.read.tidig.x() readnone
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone speculatable
-; FUNC-LABEL: {{^}}s_sub_i32:
+; GCN-LABEL: {{^}}s_sub_i32:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2 s{{\[}}[[A:[0-9]+]]:[[B:[0-9]+]]{{\]}}
; GCN: s_sub_i32 s{{[0-9]+}}, s[[A]], s[[B]]
@@ -15,7 +14,7 @@ define amdgpu_kernel void @s_sub_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; FUNC-LABEL: {{^}}s_sub_imm_i32:
+; GCN-LABEL: {{^}}s_sub_imm_i32:
; GCN: s_load_dword [[A:s[0-9]+]]
; GCN: s_sub_i32 s{{[0-9]+}}, 0x4d2, [[A]]
define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
@@ -24,9 +23,7 @@ define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
ret void
}
-; FUNC-LABEL: {{^}}test_sub_i32:
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; GCN-LABEL: {{^}}test_sub_i32:
; SI: v_subrev_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -38,9 +35,7 @@ define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)
ret void
}
-; FUNC-LABEL: {{^}}test_sub_imm_i32:
-; EG: SUB_INT
-
+; GCN-LABEL: {{^}}test_sub_imm_i32:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, 0x7b, v{{[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+}}, 0x7b, v{{[0-9]+}}
define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -50,10 +45,7 @@ define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspac
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v2i32:
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; GCN-LABEL: {{^}}test_sub_v2i32:
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
@@ -68,12 +60,7 @@ define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v4i32:
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; GCN-LABEL: {{^}}test_sub_v4i32:
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
@@ -92,11 +79,11 @@ define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32
ret void
}
-; FUNC-LABEL: {{^}}test_sub_i16:
+; GCN-LABEL: {{^}}test_sub_i16:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; GFX89: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i32 1
%a = load volatile i16, i16 addrspace(1)* %gep
@@ -106,13 +93,13 @@ define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v2i16:
+; GCN-LABEL: {{^}}test_sub_v2i16:
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_pk_sub_i16
define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
%a = load <2 x i16>, <2 x i16> addrspace(1)* %gep
@@ -122,7 +109,7 @@ define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v4i16:
+; GCN-LABEL: {{^}}test_sub_v4i16:
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -131,7 +118,7 @@ define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
; GFX9: v_pk_sub_i16
; GFX9: v_pk_sub_i16
define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
%a = load <4 x i16>, <4 x i16> addrspace(1) * %gep
@@ -141,22 +128,16 @@ define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16
ret void
}
-; FUNC-LABEL: {{^}}s_sub_i64:
+; GCN-LABEL: {{^}}s_sub_i64:
; GCN: s_sub_u32
; GCN: s_subb_u32
-
-; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-; EG-DAG: SUB_INT {{[* ]*}}
-; EG-DAG: SUBB_UINT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT {{[* ]*}}
define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
%result = sub i64 %a, %b
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
}
-; FUNC-LABEL: {{^}}v_sub_i64:
+; GCN-LABEL: {{^}}v_sub_i64:
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
@@ -165,14 +146,8 @@ define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64
; GFX9: v_sub_co_u32_e32
; GFX9: v_subb_co_u32_e32
-
-; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-; EG-DAG: SUB_INT {{[* ]*}}
-; EG-DAG: SUBB_UINT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT {{[* ]*}}
define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
- %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
%a = load i64, i64 addrspace(1)* %a_ptr
@@ -182,7 +157,7 @@ define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspa
ret void
}
-; FUNC-LABEL: {{^}}v_test_sub_v2i64:
+; GCN-LABEL: {{^}}v_test_sub_v2i64:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
@@ -198,7 +173,7 @@ define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspa
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
%a = load <2 x i64>, <2 x i64> addrspace(1)* %a_ptr
@@ -208,7 +183,7 @@ define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
ret void
}
-; FUNC-LABEL: {{^}}v_test_sub_v4i64:
+; GCN-LABEL: {{^}}v_test_sub_v4i64:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
@@ -236,7 +211,7 @@ define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
%a = load <4 x i64>, <4 x i64> addrspace(1)* %a_ptr
@@ -245,3 +220,22 @@ define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
store <4 x i64> %result, <4 x i64> addrspace(1)* %out
ret void
}
+
+; Make sure the VOP3 form of sub is initially selected. Otherwise a pair
+; of copies from/to VCC would be necessary
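+;
+; Illustrative sketch (not checked output; SGPR numbers invented) of the
+; copies the e32 form would force, since it clobbers the VCC that the
+; inline asm below defines and uses:
+;   s_mov_b64 s[2:3], vcc               ; copy the asm-defined vcc away
+;   v_subrev_i32_e32 v0, vcc, s0, v0    ; e32 sub implicitly defines vcc
+;   s_mov_b64 vcc, s[2:3]               ; copy back for the vcc use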
+
+; GCN-LABEL: {{^}}sub_select_vop3:
+; SI: v_subrev_i32_e64 v0, s[0:1], s0, v0
+; VI: v_subrev_u32_e64 v0, s[0:1], s0, v0
+; GFX9: v_subrev_u32_e32 v0, s0, v0
+
+; GCN: ; def vcc
+; GCN: ds_write_b32
+; GCN: ; use vcc
+define amdgpu_ps void @sub_select_vop3(i32 inreg %s, i32 %v) {
+ %vcc = call i64 asm sideeffect "; def vcc", "={vcc}"()
+ %sub = sub i32 %v, %s
+ store i32 %sub, i32 addrspace(3)* undef
+ call void asm sideeffect "; use vcc", "{vcc}"(i64 %vcc)
+ ret void
+}