summary refs log tree commit diff
path: root/test/CodeGen/Mips/mips64-f128.ll
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/Mips/mips64-f128.ll')
-rw-r--r--  test/CodeGen/Mips/mips64-f128.ll  |  80
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll
index f0cbbd08d79a8..d9c93810438f5 100644
--- a/test/CodeGen/Mips/mips64-f128.ll
+++ b/test/CodeGen/Mips/mips64-f128.ll
@@ -1,10 +1,10 @@
-; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips4 -soft-float -O1 \
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips4 -mattr=+soft-float -O1 \
; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
-; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64 -soft-float -O1 \
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64 -mattr=+soft-float -O1 \
; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
-; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -soft-float -O1 \
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -mattr=+soft-float -O1 \
; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
-; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -soft-float -O1 \
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -mattr=+soft-float -O1 \
; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=CMP_CC_FMT
@gld0 = external global fp128
@@ -18,8 +18,8 @@
define fp128 @addLD() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%add = fadd fp128 %0, %1
ret fp128 %add
}
@@ -29,8 +29,8 @@ entry:
define fp128 @subLD() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%sub = fsub fp128 %0, %1
ret fp128 %sub
}
@@ -40,8 +40,8 @@ entry:
define fp128 @mulLD() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%mul = fmul fp128 %0, %1
ret fp128 %mul
}
@@ -51,8 +51,8 @@ entry:
define fp128 @divLD() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%div = fdiv fp128 %0, %1
ret fp128 %div
}
@@ -247,7 +247,7 @@ entry:
define fp128 @libcall1_fabsl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @fabsl(fp128 %0) nounwind readnone
ret fp128 %call
}
@@ -259,7 +259,7 @@ declare fp128 @fabsl(fp128) #1
define fp128 @libcall1_ceill() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @ceill(fp128 %0) nounwind readnone
ret fp128 %call
}
@@ -271,7 +271,7 @@ declare fp128 @ceill(fp128) #1
define fp128 @libcall1_sinl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @sinl(fp128 %0) nounwind
ret fp128 %call
}
@@ -283,7 +283,7 @@ declare fp128 @sinl(fp128) #2
define fp128 @libcall1_cosl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @cosl(fp128 %0) nounwind
ret fp128 %call
}
@@ -295,7 +295,7 @@ declare fp128 @cosl(fp128) #2
define fp128 @libcall1_expl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @expl(fp128 %0) nounwind
ret fp128 %call
}
@@ -307,7 +307,7 @@ declare fp128 @expl(fp128) #2
define fp128 @libcall1_exp2l() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @exp2l(fp128 %0) nounwind
ret fp128 %call
}
@@ -319,7 +319,7 @@ declare fp128 @exp2l(fp128) #2
define fp128 @libcall1_logl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @logl(fp128 %0) nounwind
ret fp128 %call
}
@@ -331,7 +331,7 @@ declare fp128 @logl(fp128) #2
define fp128 @libcall1_log2l() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @log2l(fp128 %0) nounwind
ret fp128 %call
}
@@ -343,7 +343,7 @@ declare fp128 @log2l(fp128) #2
define fp128 @libcall1_log10l() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @log10l(fp128 %0) nounwind
ret fp128 %call
}
@@ -355,7 +355,7 @@ declare fp128 @log10l(fp128) #2
define fp128 @libcall1_nearbyintl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @nearbyintl(fp128 %0) nounwind readnone
ret fp128 %call
}
@@ -367,7 +367,7 @@ declare fp128 @nearbyintl(fp128) #1
define fp128 @libcall1_floorl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @floorl(fp128 %0) nounwind readnone
ret fp128 %call
}
@@ -379,7 +379,7 @@ declare fp128 @floorl(fp128) #1
define fp128 @libcall1_sqrtl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @sqrtl(fp128 %0) nounwind
ret fp128 %call
}
@@ -391,7 +391,7 @@ declare fp128 @sqrtl(fp128) #2
define fp128 @libcall1_rintl() {
entry:
- %0 = load fp128* @gld0, align 16
+ %0 = load fp128, fp128* @gld0, align 16
%call = tail call fp128 @rintl(fp128 %0) nounwind readnone
ret fp128 %call
}
@@ -424,8 +424,8 @@ declare fp128 @llvm.powi.f128(fp128, i32) #3
define fp128 @libcall2_copysignl() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%call = tail call fp128 @copysignl(fp128 %0, fp128 %1) nounwind readnone
ret fp128 %call
}
@@ -437,8 +437,8 @@ declare fp128 @copysignl(fp128, fp128) #1
define fp128 @libcall2_powl() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%call = tail call fp128 @powl(fp128 %0, fp128 %1) nounwind
ret fp128 %call
}
@@ -450,8 +450,8 @@ declare fp128 @powl(fp128, fp128) #2
define fp128 @libcall2_fmodl() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld1, align 16
%call = tail call fp128 @fmodl(fp128 %0, fp128 %1) nounwind
ret fp128 %call
}
@@ -463,9 +463,9 @@ declare fp128 @fmodl(fp128, fp128) #2
define fp128 @libcall3_fmal() {
entry:
- %0 = load fp128* @gld0, align 16
- %1 = load fp128* @gld2, align 16
- %2 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld0, align 16
+ %1 = load fp128, fp128* @gld2, align 16
+ %2 = load fp128, fp128* @gld1, align 16
%3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %2, fp128 %1)
ret fp128 %3
}
@@ -539,7 +539,7 @@ entry:
define fp128 @load_LD_LD() {
entry:
- %0 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld1, align 16
ret fp128 %0
}
@@ -551,7 +551,7 @@ entry:
define fp128 @load_LD_float() {
entry:
- %0 = load float* @gf1, align 4
+ %0 = load float, float* @gf1, align 4
%conv = fpext float %0 to fp128
ret fp128 %conv
}
@@ -564,7 +564,7 @@ entry:
define fp128 @load_LD_double() {
entry:
- %0 = load double* @gd1, align 8
+ %0 = load double, double* @gd1, align 8
%conv = fpext double %0 to fp128
ret fp128 %conv
}
@@ -579,7 +579,7 @@ entry:
define void @store_LD_LD() {
entry:
- %0 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld1, align 16
store fp128 %0, fp128* @gld0, align 16
ret void
}
@@ -595,7 +595,7 @@ entry:
define void @store_LD_float() {
entry:
- %0 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld1, align 16
%conv = fptrunc fp128 %0 to float
store float %conv, float* @gf1, align 4
ret void
@@ -612,7 +612,7 @@ entry:
define void @store_LD_double() {
entry:
- %0 = load fp128* @gld1, align 16
+ %0 = load fp128, fp128* @gld1, align 16
%conv = fptrunc fp128 %0 to double
store double %conv, double* @gd1, align 8
ret void