summary refs log tree commit diff
path: root/test/CodeGen/AMDGPU/hsa-func.ll
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/AMDGPU/hsa-func.ll')
-rw-r--r-- test/CodeGen/AMDGPU/hsa-func.ll | 27
1 files changed, 20 insertions, 7 deletions
diff --git a/test/CodeGen/AMDGPU/hsa-func.ll b/test/CodeGen/AMDGPU/hsa-func.ll
index b4cdd4030d86..d96b796d4495 100644
--- a/test/CodeGen/AMDGPU/hsa-func.ll
+++ b/test/CodeGen/AMDGPU/hsa-func.ll
@@ -14,6 +14,7 @@
; ELF: Flags [ (0x6)
; ELF: SHF_ALLOC (0x2)
; ELF: SHF_EXECINSTR (0x4)
+; ELF: AddressAlignment: 4
; ELF: }
; ELF: SHT_NOTE
@@ -26,7 +27,7 @@
; ELF: Symbol {
; ELF: Name: simple
-; ELF: Size: 292
+; ELF: Size: 44
; ELF: Type: Function (0x2)
; ELF: }
@@ -36,12 +37,13 @@
; HSA-VI: .hsa_code_object_isa 8,0,1,"AMD","AMDGPU"
; HSA-NOT: .amdgpu_hsa_kernel simple
+; HSA: .globl simple
+; HSA: .p2align 2
; HSA: {{^}}simple:
-; HSA: .amd_kernel_code_t
-; HSA: enable_sgpr_private_segment_buffer = 1
-; HSA: enable_sgpr_kernarg_segment_ptr = 1
-; HSA: .end_amd_kernel_code_t
-; HSA: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
+; HSA-NOT: amd_kernel_code_t
+
+; FIXME: Check this isn't a kernarg load when calling convention implemented.
+; XHSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
; Make sure we are setting the ATC bit:
; HSA-CI: s_mov_b32 s[[HI:[0-9]]], 0x100f000
@@ -52,9 +54,20 @@
; HSA: .Lfunc_end0:
; HSA: .size simple, .Lfunc_end0-simple
-
+; HSA: ; Function info:
+; HSA-NOT: COMPUTE_PGM_RSRC2
define void @simple(i32 addrspace(1)* %out) {
entry:
store i32 0, i32 addrspace(1)* %out
ret void
}
+
+; Ignore explicit alignment that is too low.
+; HSA: .globl simple_align2
+; HSA: .p2align 2
+define void @simple_align2(i32 addrspace(1)* addrspace(2)* %ptr.out) align 2 {
+entry:
+ %out = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %ptr.out
+ store i32 0, i32 addrspace(1)* %out
+ ret void
+}