diff options
Diffstat (limited to 'test/CodeGen/AMDGPU/mad-sub.ll')
| -rw-r--r-- | test/CodeGen/AMDGPU/mad-sub.ll | 72 |
1 file changed, 36 insertions, 36 deletions
diff --git a/test/CodeGen/AMDGPU/mad-sub.ll b/test/CodeGen/AMDGPU/mad-sub.ll index 24ff23a4cfc1c..7fcfe7f53f060 100644 --- a/test/CodeGen/AMDGPU/mad-sub.ll +++ b/test/CodeGen/AMDGPU/mad-sub.ll @@ -1,6 +1,6 @@ -; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s -declare i32 @llvm.r600.read.tidig.x() #0 +declare i32 @llvm.amdgcn.workitem.id.x() #0 declare float @llvm.fabs.f32(float) #0 ; FUNC-LABEL: {{^}}mad_sub_f32: @@ -10,7 +10,7 @@ declare float @llvm.fabs.f32(float) #0 ; SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -[[REGC]] ; SI: buffer_store_dword [[RESULT]] define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -18,9 +18,9 @@ define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrs %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext - %a = load float, float addrspace(1)* %gep0, align 4 - %b = load float, float addrspace(1)* %gep1, align 4 - %c = load float, float addrspace(1)* %gep2, align 4 + %a = load volatile float, float addrspace(1)* %gep0, align 4 + %b = load volatile float, float addrspace(1)* %gep1, align 4 + %c = load volatile float, float addrspace(1)* %gep2, align 4 %mul = fmul float %a, %b %sub = fsub float %mul, %c store float %sub, float addrspace(1)* %outgep, align 4 @@ -34,7 +34,7 @@ define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrs ; SI: v_mad_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], [[REGC]] ; SI: 
buffer_store_dword [[RESULT]] define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -42,9 +42,9 @@ define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float a %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext - %a = load float, float addrspace(1)* %gep0, align 4 - %b = load float, float addrspace(1)* %gep1, align 4 - %c = load float, float addrspace(1)* %gep2, align 4 + %a = load volatile float, float addrspace(1)* %gep0, align 4 + %b = load volatile float, float addrspace(1)* %gep1, align 4 + %c = load volatile float, float addrspace(1)* %gep2, align 4 %mul = fmul float %a, %b %sub = fsub float %c, %mul store float %sub, float addrspace(1)* %outgep, align 4 @@ -55,7 +55,7 @@ define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float a ; SI: v_mul_f64 ; SI: v_add_f64 define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr double, double addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -63,9 +63,9 @@ define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double add %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr double, double addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr double, double addrspace(1)* %out, i64 %tid.ext - %a = load double, double addrspace(1)* %gep0, align 8 - %b = load double, double addrspace(1)* %gep1, align 8 - %c = 
load double, double addrspace(1)* %gep2, align 8 + %a = load volatile double, double addrspace(1)* %gep0, align 8 + %b = load volatile double, double addrspace(1)* %gep1, align 8 + %c = load volatile double, double addrspace(1)* %gep2, align 8 %mul = fmul double %a, %b %sub = fsub double %mul, %c store double %sub, double addrspace(1)* %outgep, align 8 @@ -79,7 +79,7 @@ define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double add ; SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]| ; SI: buffer_store_dword [[RESULT]] define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -87,9 +87,9 @@ define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext - %a = load float, float addrspace(1)* %gep0, align 4 - %b = load float, float addrspace(1)* %gep1, align 4 - %c = load float, float addrspace(1)* %gep2, align 4 + %a = load volatile float, float addrspace(1)* %gep0, align 4 + %b = load volatile float, float addrspace(1)* %gep1, align 4 + %c = load volatile float, float addrspace(1)* %gep2, align 4 %c.abs = call float @llvm.fabs.f32(float %c) #0 %mul = fmul float %a, %b %sub = fsub float %mul, %c.abs @@ -104,7 +104,7 @@ define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float ; SI: v_mad_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]| ; SI: buffer_store_dword [[RESULT]] define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call 
i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -112,9 +112,9 @@ define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, fl %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext - %a = load float, float addrspace(1)* %gep0, align 4 - %b = load float, float addrspace(1)* %gep1, align 4 - %c = load float, float addrspace(1)* %gep2, align 4 + %a = load volatile float, float addrspace(1)* %gep0, align 4 + %b = load volatile float, float addrspace(1)* %gep1, align 4 + %c = load volatile float, float addrspace(1)* %gep2, align 4 %c.abs = call float @llvm.fabs.f32(float %c) #0 %mul = fmul float %a, %b %sub = fsub float %c.abs, %mul @@ -125,7 +125,7 @@ define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, fl ; FUNC-LABEL: {{^}}neg_neg_mad_f32: ; SI: v_mac_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -133,9 +133,9 @@ define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float a %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext - %a = load float, float addrspace(1)* %gep0, align 4 - %b = load float, float addrspace(1)* %gep1, align 4 - %c = load float, float addrspace(1)* %gep2, align 4 + %a = load volatile float, float addrspace(1)* %gep0, align 4 + %b = load 
volatile float, float addrspace(1)* %gep1, align 4 + %c = load volatile float, float addrspace(1)* %gep2, align 4 %nega = fsub float -0.000000e+00, %a %negb = fsub float -0.000000e+00, %b %mul = fmul float %nega, %negb @@ -151,7 +151,7 @@ define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float a ; SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]] ; SI: buffer_store_dword [[RESULT]] define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { - %tid = tail call i32 @llvm.r600.read.tidig.x() #0 + %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext %add1 = add i64 %tid.ext, 1 @@ -159,9 +159,9 @@ define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float %add2 = add i64 %tid.ext, 2 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2 %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext - %a = load float, float addrspace(1)* %gep0, align 4 - %b = load float, float addrspace(1)* %gep1, align 4 - %c = load float, float addrspace(1)* %gep2, align 4 + %a = load volatile float, float addrspace(1)* %gep0, align 4 + %b = load volatile float, float addrspace(1)* %gep1, align 4 + %c = load volatile float, float addrspace(1)* %gep2, align 4 %b.abs = call float @llvm.fabs.f32(float %b) #0 %mul = fmul float %a, %b.abs %sub = fsub float %mul, %c @@ -175,13 +175,13 @@ define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float ; SI: v_mac_f32_e32 [[R2]], -2.0, [[R1]] ; SI: buffer_store_dword [[R2]] define void @fsub_c_fadd_a_a(float addrspace(1)* %out, float addrspace(1)* %in) { - %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid %gep.1 = getelementptr float, float 
addrspace(1)* %gep.0, i32 1 %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid - %r1 = load float, float addrspace(1)* %gep.0 - %r2 = load float, float addrspace(1)* %gep.1 + %r1 = load volatile float, float addrspace(1)* %gep.0 + %r2 = load volatile float, float addrspace(1)* %gep.1 %add = fadd float %r1, %r1 %r3 = fsub float %r2, %add @@ -196,13 +196,13 @@ define void @fsub_c_fadd_a_a(float addrspace(1)* %out, float addrspace(1)* %in) ; SI: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]] ; SI: buffer_store_dword [[RESULT]] define void @fsub_fadd_a_a_c(float addrspace(1)* %out, float addrspace(1)* %in) { - %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1 %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid - %r1 = load float, float addrspace(1)* %gep.0 - %r2 = load float, float addrspace(1)* %gep.1 + %r1 = load volatile float, float addrspace(1)* %gep.0 + %r2 = load volatile float, float addrspace(1)* %gep.1 %add = fadd float %r1, %r1 %r3 = fsub float %add, %r2 |
