diff options
Diffstat (limited to 'test/CodeGen/R600/setcc64.ll')
-rw-r--r-- | test/CodeGen/R600/setcc64.ll | 129 |
1 file changed, 56 insertions, 73 deletions
diff --git a/test/CodeGen/R600/setcc64.ll b/test/CodeGen/R600/setcc64.ll index 54a33b30940a..c0632198efdc 100644 --- a/test/CodeGen/R600/setcc64.ll +++ b/test/CodeGen/R600/setcc64.ll @@ -1,4 +1,4 @@ -;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s +;RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s ; XXX: Merge this into setcc, once R600 supports 64-bit operations @@ -6,8 +6,8 @@ ;; Double comparisons ;;;==========================================================================;;; -; FUNC-LABEL: @f64_oeq -; SI: V_CMP_EQ_F64 +; FUNC-LABEL: {{^}}f64_oeq: +; SI: v_cmp_eq_f64 define void @f64_oeq(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp oeq double %a, %b @@ -16,8 +16,8 @@ entry: ret void } -; FUNC-LABEL: @f64_ogt -; SI: V_CMP_GT_F64 +; FUNC-LABEL: {{^}}f64_ogt: +; SI: v_cmp_gt_f64 define void @f64_ogt(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ogt double %a, %b @@ -26,8 +26,8 @@ entry: ret void } -; FUNC-LABEL: @f64_oge -; SI: V_CMP_GE_F64 +; FUNC-LABEL: {{^}}f64_oge: +; SI: v_cmp_ge_f64 define void @f64_oge(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp oge double %a, %b @@ -36,8 +36,8 @@ entry: ret void } -; FUNC-LABEL: @f64_olt -; SI: V_CMP_LT_F64 +; FUNC-LABEL: {{^}}f64_olt: +; SI: v_cmp_lt_f64 define void @f64_olt(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp olt double %a, %b @@ -46,8 +46,8 @@ entry: ret void } -; FUNC-LABEL: @f64_ole -; SI: V_CMP_LE_F64 +; FUNC-LABEL: {{^}}f64_ole: +; SI: v_cmp_le_f64 define void @f64_ole(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ole double %a, %b @@ -56,12 +56,9 @@ entry: ret void } -; FUNC-LABEL: @f64_one -; SI: V_CMP_O_F64 -; SI: V_CMP_NEQ_F64 -; SI: V_CNDMASK_B32_e64 -; SI: V_CNDMASK_B32_e64 -; SI: V_AND_B32_e32 +; FUNC-LABEL: {{^}}f64_one: +; SI: v_cmp_lg_f64_e32 vcc +; SI-NEXT: v_cndmask_b32_e64 
{{v[0-9]+}}, 0, -1, vcc define void @f64_one(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp one double %a, %b @@ -70,8 +67,8 @@ entry: ret void } -; FUNC-LABEL: @f64_ord -; SI: V_CMP_O_F64 +; FUNC-LABEL: {{^}}f64_ord: +; SI: v_cmp_o_f64 define void @f64_ord(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ord double %a, %b @@ -80,12 +77,9 @@ entry: ret void } -; FUNC-LABEL: @f64_ueq -; SI: V_CMP_U_F64 -; SI: V_CMP_EQ_F64 -; SI: V_CNDMASK_B32_e64 -; SI: V_CNDMASK_B32_e64 -; SI: V_OR_B32_e32 +; FUNC-LABEL: {{^}}f64_ueq: +; SI: v_cmp_nlg_f64_e32 vcc +; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc define void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ueq double %a, %b @@ -94,12 +88,10 @@ entry: ret void } -; FUNC-LABEL: @f64_ugt -; SI: V_CMP_U_F64 -; SI: V_CMP_GT_F64 -; SI: V_CNDMASK_B32_e64 -; SI: V_CNDMASK_B32_e64 -; SI: V_OR_B32_e32 +; FUNC-LABEL: {{^}}f64_ugt: + +; SI: v_cmp_nle_f64_e32 vcc +; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc define void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ugt double %a, %b @@ -108,12 +100,9 @@ entry: ret void } -; FUNC-LABEL: @f64_uge -; SI: V_CMP_U_F64 -; SI: V_CMP_GE_F64 -; SI: V_CNDMASK_B32_e64 -; SI: V_CNDMASK_B32_e64 -; SI: V_OR_B32_e32 +; FUNC-LABEL: {{^}}f64_uge: +; SI: v_cmp_nlt_f64_e32 vcc +; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc define void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp uge double %a, %b @@ -122,12 +111,9 @@ entry: ret void } -; FUNC-LABEL: @f64_ult -; SI: V_CMP_U_F64 -; SI: V_CMP_LT_F64 -; SI: V_CNDMASK_B32_e64 -; SI: V_CNDMASK_B32_e64 -; SI: V_OR_B32_e32 +; FUNC-LABEL: {{^}}f64_ult: +; SI: v_cmp_nge_f64_e32 vcc +; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc define void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ult double %a, %b @@ -136,12 +122,9 @@ entry: ret void } -; FUNC-LABEL: @f64_ule -; SI: V_CMP_U_F64 -; SI: 
V_CMP_LE_F64 -; SI: V_CNDMASK_B32_e64 -; SI: V_CNDMASK_B32_e64 -; SI: V_OR_B32_e32 +; FUNC-LABEL: {{^}}f64_ule: +; SI: v_cmp_ngt_f64_e32 vcc +; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc define void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp ule double %a, %b @@ -150,8 +133,8 @@ entry: ret void } -; FUNC-LABEL: @f64_une -; SI: V_CMP_NEQ_F64 +; FUNC-LABEL: {{^}}f64_une: +; SI: v_cmp_neq_f64 define void @f64_une(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp une double %a, %b @@ -160,8 +143,8 @@ entry: ret void } -; FUNC-LABEL: @f64_uno -; SI: V_CMP_U_F64 +; FUNC-LABEL: {{^}}f64_uno: +; SI: v_cmp_u_f64 define void @f64_uno(i32 addrspace(1)* %out, double %a, double %b) { entry: %0 = fcmp uno double %a, %b @@ -174,8 +157,8 @@ entry: ;; 64-bit integer comparisons ;;;==========================================================================;;; -; FUNC-LABEL: @i64_eq -; SI: V_CMP_EQ_I64 +; FUNC-LABEL: {{^}}i64_eq: +; SI: v_cmp_eq_i64 define void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp eq i64 %a, %b @@ -184,8 +167,8 @@ entry: ret void } -; FUNC-LABEL: @i64_ne -; SI: V_CMP_NE_I64 +; FUNC-LABEL: {{^}}i64_ne: +; SI: v_cmp_ne_i64 define void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp ne i64 %a, %b @@ -194,8 +177,8 @@ entry: ret void } -; FUNC-LABEL: @i64_ugt -; SI: V_CMP_GT_U64 +; FUNC-LABEL: {{^}}i64_ugt: +; SI: v_cmp_gt_u64 define void @i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp ugt i64 %a, %b @@ -204,8 +187,8 @@ entry: ret void } -; FUNC-LABEL: @i64_uge -; SI: V_CMP_GE_U64 +; FUNC-LABEL: {{^}}i64_uge: +; SI: v_cmp_ge_u64 define void @i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp uge i64 %a, %b @@ -214,8 +197,8 @@ entry: ret void } -; FUNC-LABEL: @i64_ult -; SI: V_CMP_LT_U64 +; FUNC-LABEL: {{^}}i64_ult: +; SI: v_cmp_lt_u64 define void @i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp ult i64 %a, %b @@ 
-224,8 +207,8 @@ entry: ret void } -; FUNC-LABEL: @i64_ule -; SI: V_CMP_LE_U64 +; FUNC-LABEL: {{^}}i64_ule: +; SI: v_cmp_le_u64 define void @i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp ule i64 %a, %b @@ -234,8 +217,8 @@ entry: ret void } -; FUNC-LABEL: @i64_sgt -; SI: V_CMP_GT_I64 +; FUNC-LABEL: {{^}}i64_sgt: +; SI: v_cmp_gt_i64 define void @i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp sgt i64 %a, %b @@ -244,8 +227,8 @@ entry: ret void } -; FUNC-LABEL: @i64_sge -; SI: V_CMP_GE_I64 +; FUNC-LABEL: {{^}}i64_sge: +; SI: v_cmp_ge_i64 define void @i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp sge i64 %a, %b @@ -254,8 +237,8 @@ entry: ret void } -; FUNC-LABEL: @i64_slt -; SI: V_CMP_LT_I64 +; FUNC-LABEL: {{^}}i64_slt: +; SI: v_cmp_lt_i64 define void @i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp slt i64 %a, %b @@ -264,8 +247,8 @@ entry: ret void } -; FUNC-LABEL: @i64_sle -; SI: V_CMP_LE_I64 +; FUNC-LABEL: {{^}}i64_sle: +; SI: v_cmp_le_i64 define void @i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) { entry: %0 = icmp sle i64 %a, %b |