author    Dimitry Andric <dim@FreeBSD.org>  2017-04-16 16:01:22 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-04-16 16:01:22 +0000
commit    71d5a2540a98c81f5bcaeb48805e0e2881f530ef (patch)
tree      5343938942df402b49ec7300a1c25a2d4ccd5821 /test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
parent    31bbf64f3a4974a2d6c8b3b27ad2f519caf74057 (diff)
Diffstat (limited to 'test/Transforms/InferAddressSpaces/AMDGPU/basic.ll')
-rw-r--r--  test/Transforms/InferAddressSpaces/AMDGPU/basic.ll  173
1 file changed, 173 insertions, 0 deletions
diff --git a/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll b/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
new file mode 100644
index 000000000000..b566c147e9b8
--- /dev/null
+++ b/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
@@ -0,0 +1,173 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Trivial optimization of generic addressing
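+;
+; The -infer-address-spaces pass rewrites flat (generic) memory accesses to use
+; a specific address space when the pointer provably comes from an
+; addrspacecast of a non-flat pointer. Address spaces assumed by this test
+; (the AMDGPU numbering at the time): 0 = private, 1 = global, 2 = constant,
+; 3 = local/group, 4 = flat/generic.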
+
+; CHECK-LABEL: @load_global_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+ %tmp1 = load float, float addrspace(1)* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @load_constant_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
+; CHECK-NEXT: %tmp1 = load float, float addrspace(2)* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_constant_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(2)*
+ %tmp1 = load float, float addrspace(2)* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @load_group_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+; CHECK-NEXT: %tmp1 = load float, float addrspace(3)* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+ %tmp1 = load float, float addrspace(3)* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @load_private_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+; CHECK-NEXT: %tmp1 = load float, float* %tmp0
+; CHECK-NEXT: ret float %tmp1
+define float @load_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+ %tmp1 = load float, float* %tmp0
+ ret float %tmp1
+}
+
+; CHECK-LABEL: @store_global_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0
+define amdgpu_kernel void @store_global_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(1)*
+ store float 0.0, float addrspace(1)* %tmp0
+ ret void
+}
+
+; CHECK-LABEL: @store_group_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0
+define amdgpu_kernel void @store_group_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float addrspace(3)*
+ store float 0.0, float addrspace(3)* %tmp0
+ ret void
+}
+
+; CHECK-LABEL: @store_private_from_flat(
+; CHECK-NEXT: %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+; CHECK-NEXT: store float 0.000000e+00, float* %tmp0
+define amdgpu_kernel void @store_private_from_flat(float addrspace(4)* %generic_scalar) #0 {
+ %tmp0 = addrspacecast float addrspace(4)* %generic_scalar to float*
+ store float 0.0, float* %tmp0
+ ret void
+}
+
+; Optimized to global load/store.
+; CHECK-LABEL: @load_store_global(
+; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4
+; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(1)* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; Optimized to group load/store.
+; CHECK-LABEL: @load_store_group(
+; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4
+; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32 addrspace(3)* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32 addrspace(3)* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; Optimized to private load/store.
+; CHECK-LABEL: @load_store_private(
+; CHECK-NEXT: %val = load i32, i32* %input, align 4
+; CHECK-NEXT: store i32 %val, i32* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_private(i32* nocapture %input, i32* nocapture %output) #0 {
+ %tmp0 = addrspacecast i32* %input to i32 addrspace(4)*
+ %tmp1 = addrspacecast i32* %output to i32 addrspace(4)*
+ %val = load i32, i32 addrspace(4)* %tmp0, align 4
+ store i32 %val, i32 addrspace(4)* %tmp1, align 4
+ ret void
+}
+
+; No optimization: the pointers are already flat, so the flat load/store is kept.
+; CHECK-LABEL: @load_store_flat(
+; CHECK-NEXT: %val = load i32, i32 addrspace(4)* %input, align 4
+; CHECK-NEXT: store i32 %val, i32 addrspace(4)* %output, align 4
+; CHECK-NEXT: ret void
+define amdgpu_kernel void @load_store_flat(i32 addrspace(4)* nocapture %input, i32 addrspace(4)* nocapture %output) #0 {
+ %val = load i32, i32 addrspace(4)* %input, align 4
+ store i32 %val, i32 addrspace(4)* %output, align 4
+ ret void
+}
+
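+; The addrspacecast result is used as the stored value, not as the pointer
+; operand of the store, so it is left in place.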
+; CHECK-LABEL: @store_addrspacecast_ptr_value(
+; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+; CHECK-NEXT: store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
+define amdgpu_kernel void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32 addrspace(4)* addrspace(1)* nocapture %output) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %input to i32 addrspace(4)*
+ store i32 addrspace(4)* %cast, i32 addrspace(4)* addrspace(1)* %output, align 4
+ ret void
+}
+
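+; The pass also rewrites the pointer operand of atomic operations (atomicrmw,
+; cmpxchg) when it can be traced back to a specific address space.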
+; CHECK-LABEL: @atomicrmw_add_global_to_flat(
+; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(1)* %global.ptr, i32 %y seq_cst
+define i32 @atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @atomicrmw_add_group_to_flat(
+; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(3)* %group.ptr, i32 %y seq_cst
+define i32 @atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = atomicrmw add i32 addrspace(4)* %cast, i32 %y seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: @cmpxchg_global_to_flat(
+; CHECK: %ret = cmpxchg i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val seq_cst monotonic
+define { i32, i1 } @cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32 addrspace(4)*
+ %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
+ ret { i32, i1 } %ret
+}
+
+; CHECK-LABEL: @cmpxchg_group_to_flat(
+; CHECK: %ret = cmpxchg i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val seq_cst monotonic
+define { i32, i1 } @cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
+ %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32 addrspace(4)*
+ %ret = cmpxchg i32 addrspace(4)* %cast, i32 %cmp, i32 %val seq_cst monotonic
+ ret { i32, i1 } %ret
+}
+
+; The addrspacecast is the compare value operand, not the pointer operand,
+; so the cmpxchg is not rewritten.
+; CHECK-LABEL: @cmpxchg_group_to_flat_wrong_operand(
+; CHECK: %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
+; CHECK: %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
+define { i32 addrspace(4)*, i1 } @cmpxchg_group_to_flat_wrong_operand(i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(3)* %cmp.ptr, i32 addrspace(4)* %val) #0 {
+ %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32 addrspace(4)*
+ %ret = cmpxchg i32 addrspace(4)* addrspace(3)* %cas.ptr, i32 addrspace(4)* %cast.cmp, i32 addrspace(4)* %val seq_cst monotonic
+ ret { i32 addrspace(4)*, i1 } %ret
+}
+
+attributes #0 = { nounwind }