Diffstat (limited to 'test/CodeGen/AMDGPU/fsub.ll')
-rw-r--r-- | test/CodeGen/AMDGPU/fsub.ll | 81
1 file changed, 72 insertions(+), 9 deletions(-)
diff --git a/test/CodeGen/AMDGPU/fsub.ll b/test/CodeGen/AMDGPU/fsub.ll
index 3429df33c015f..e7a92d95d4859 100644
--- a/test/CodeGen/AMDGPU/fsub.ll
+++ b/test/CodeGen/AMDGPU/fsub.ll
@@ -4,7 +4,7 @@
 
 ; FUNC-LABEL: {{^}}v_fsub_f32:
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
   %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
   %a = load float, float addrspace(1)* %in, align 4
   %b = load float, float addrspace(1)* %b_ptr, align 4
@@ -17,23 +17,19 @@ define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
 ; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
 
 ; SI: v_sub_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
-define void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
+define amdgpu_kernel void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
   %sub = fsub float %a, %b
   store float %sub, float addrspace(1)* %out, align 4
   ret void
 }
 
-declare float @llvm.r600.load.input(i32) readnone
-
-declare void @llvm.AMDGPU.store.output(float, i32)
-
 ; FUNC-LABEL: {{^}}fsub_v2f32:
 ; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
 ; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
 
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
-define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
+define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
   %sub = fsub <2 x float> %a, %b
   store <2 x float> %sub, <2 x float> addrspace(1)* %out, align 8
   ret void
@@ -49,7 +45,7 @@ define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x flo
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define amdgpu_kernel void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
   %b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
@@ -64,8 +60,75 @@ define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
 ; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
 ; SI: s_endpgm
-define void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
+define amdgpu_kernel void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
   %result = fsub <4 x float> %a, %b
   store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
+
+; FUNC-LABEL: {{^}}v_fneg_fsub_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
+define amdgpu_kernel void @v_fneg_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+  %a = load float, float addrspace(1)* %in, align 4
+  %b = load float, float addrspace(1)* %b_ptr, align 4
+  %result = fsub float %a, %b
+  %neg.result = fsub float -0.0, %result
+  store float %neg.result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI-NOT: xor
+define amdgpu_kernel void @v_fneg_fsub_nsz_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+  %a = load float, float addrspace(1)* %in, align 4
+  %b = load float, float addrspace(1)* %b_ptr, align 4
+  %result = fsub nsz float %a, %b
+  %neg.result = fsub float -0.0, %result
+  store float %neg.result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_attribute_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI-NOT: xor
+define amdgpu_kernel void @v_fneg_fsub_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+  %a = load float, float addrspace(1)* %in, align 4
+  %b = load float, float addrspace(1)* %b_ptr, align 4
+  %result = fsub float %a, %b
+  %neg.result = fsub float -0.0, %result
+  store float %neg.result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; For some reason the attribute has a string "true" or "false", so
+; make sure it is disabled and the fneg is not folded if it is not
+; "true".
+; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_false_attribute_f32:
+; SI: v_subrev_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
+define amdgpu_kernel void @v_fneg_fsub_nsz_false_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
+  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
+  %a = load float, float addrspace(1)* %in, align 4
+  %b = load float, float addrspace(1)* %b_ptr, align 4
+  %result = fsub float %a, %b
+  %neg.result = fsub float -0.0, %result
+  store float %neg.result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v_fsub_0_nsz_attribute_f32:
+; SI-NOT: v_sub
+define amdgpu_kernel void @v_fsub_0_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+  %a = load float, float addrspace(1)* %in, align 4
+  %result = fsub float %a, 0.0
+  store float %result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }
+attributes #1 = { nounwind "no-signed-zeros-fp-math"="false" }
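The comment above the v_fneg_fsub_nsz_false_attribute_f32 test points out that "no-signed-zeros-fp-math" carries a string value ("true"/"false") rather than a boolean. As a minimal sketch (not part of this patch), this is roughly how a string-valued function attribute is read on the C++ side via LLVM's Function::getFnAttribute; the helper name hasNoSignedZeros is hypothetical:

    #include "llvm/IR/Function.h"

    // Sketch only: reads the string-valued "no-signed-zeros-fp-math"
    // function attribute. Anything other than "true" (including the
    // "false" set by attributes #1 above) leaves the fneg unfolded.
    static bool hasNoSignedZeros(const llvm::Function &F) {
      llvm::Attribute A = F.getFnAttribute("no-signed-zeros-fp-math");
      return A.getValueAsString() == "true";
    }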