Diffstat (limited to 'test/CodeGen/Mips')
-rw-r--r--  test/CodeGen/Mips/2008-07-16-SignExtInReg.ll  1
-rw-r--r--  test/CodeGen/Mips/2008-08-01-AsmInline.ll  18
-rw-r--r--  test/CodeGen/Mips/2013-11-18-fp64-const0.ll  31
-rw-r--r--  test/CodeGen/Mips/align16.ll  8
-rw-r--r--  test/CodeGen/Mips/alloca16.ll  4
-rw-r--r--  test/CodeGen/Mips/atomic.ll  495
-rw-r--r--  test/CodeGen/Mips/atomicops.ll  4
-rw-r--r--  test/CodeGen/Mips/beqzc.ll  20
-rw-r--r--  test/CodeGen/Mips/beqzc1.ll  24
-rw-r--r--  test/CodeGen/Mips/biggot.ll  4
-rw-r--r--  test/CodeGen/Mips/blez_bgez.ll  36
-rw-r--r--  test/CodeGen/Mips/blockaddr.ll  10
-rw-r--r--  test/CodeGen/Mips/brdelayslot.ll  31
-rw-r--r--  test/CodeGen/Mips/brsize3.ll  33
-rw-r--r--  test/CodeGen/Mips/brsize3a.ll  26
-rw-r--r--  test/CodeGen/Mips/bswap.ll  7
-rw-r--r--  test/CodeGen/Mips/buildpairextractelementf64.ll  23
-rw-r--r--  test/CodeGen/Mips/check-noat.ll  2
-rwxr-xr-x  test/CodeGen/Mips/cmov.ll  44
-rw-r--r--  test/CodeGen/Mips/cmplarge.ll  38
-rw-r--r--  test/CodeGen/Mips/const-mult.ll  49
-rw-r--r--  test/CodeGen/Mips/const1.ll  35
-rw-r--r--  test/CodeGen/Mips/const4a.ll  180
-rw-r--r--  test/CodeGen/Mips/const6.ll  164
-rw-r--r--  test/CodeGen/Mips/const6a.ll  29
-rw-r--r--  test/CodeGen/Mips/ctlz.ll  27
-rw-r--r--  test/CodeGen/Mips/disable-tail-merge.ll  33
-rw-r--r--  test/CodeGen/Mips/divrem.ll  48
-rw-r--r--  test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll  80
-rw-r--r--  test/CodeGen/Mips/dsp-patterns.ll  46
-rw-r--r--  test/CodeGen/Mips/dsp-vec-load-store.ll  11
-rw-r--r--  test/CodeGen/Mips/eh-return64.ll  12
-rw-r--r--  test/CodeGen/Mips/emit-big-cst.ll  17
-rw-r--r--  test/CodeGen/Mips/ex2.ll  11
-rw-r--r--  test/CodeGen/Mips/extins.ll  9
-rw-r--r--  test/CodeGen/Mips/f16abs.ll  37
-rw-r--r--  test/CodeGen/Mips/fcopysign-f32-f64.ll  35
-rw-r--r--  test/CodeGen/Mips/fixdfsf.ll  18
-rw-r--r--  test/CodeGen/Mips/fp16instrinsmc.ll  391
-rw-r--r--  test/CodeGen/Mips/fp16mix.ll  92
-rw-r--r--  test/CodeGen/Mips/fpneeded.ll  2
-rw-r--r--  test/CodeGen/Mips/fpnotneeded.ll  2
-rw-r--r--  test/CodeGen/Mips/fptr2.ll  20
-rw-r--r--  test/CodeGen/Mips/frame-address.ll  7
-rw-r--r--  test/CodeGen/Mips/helloworld.ll  26
-rw-r--r--  test/CodeGen/Mips/hf16call32.ll  1030
-rw-r--r--  test/CodeGen/Mips/hf16call32_body.ll  294
-rw-r--r--  test/CodeGen/Mips/hf1_body.ll  21
-rw-r--r--  test/CodeGen/Mips/hfptrcall.ll  125
-rw-r--r--  test/CodeGen/Mips/i32k.ll  19
-rw-r--r--  test/CodeGen/Mips/i64arg.ll  32
-rw-r--r--  test/CodeGen/Mips/inlineasm-operand-code.ll  24
-rw-r--r--  test/CodeGen/Mips/int-to-float-conversion.ll  48
-rw-r--r--  test/CodeGen/Mips/largefr1.ll  41
-rw-r--r--  test/CodeGen/Mips/largeimmprinting.ll  4
-rw-r--r--  test/CodeGen/Mips/lazy-binding.ll  41
-rw-r--r--  test/CodeGen/Mips/lit.local.cfg  2
-rw-r--r--  test/CodeGen/Mips/longbranch.ll  8
-rw-r--r--  test/CodeGen/Mips/mips16_32_1.ll  2
-rw-r--r--  test/CodeGen/Mips/mips16_32_10.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_32_3.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_32_4.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_32_5.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_32_6.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_32_7.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_32_8.ll  8
-rw-r--r--  test/CodeGen/Mips/mips16_32_9.ll  6
-rw-r--r--  test/CodeGen/Mips/mips16_fpret.ll  76
-rw-r--r--  test/CodeGen/Mips/mips16fpe.ll  56
-rw-r--r--  test/CodeGen/Mips/mips64-f128.ll  146
-rw-r--r--  test/CodeGen/Mips/mips64-libcall.ll  4
-rw-r--r--  test/CodeGen/Mips/mips64instrs.ll  32
-rw-r--r--  test/CodeGen/Mips/misha.ll  4
-rw-r--r--  test/CodeGen/Mips/mno-ldc1-sdc1.ll  93
-rw-r--r--  test/CodeGen/Mips/msa/2r.ll  257
-rw-r--r--  test/CodeGen/Mips/msa/2r_vector_scalar.ll  87
-rw-r--r--  test/CodeGen/Mips/msa/2rf.ll  323
-rw-r--r--  test/CodeGen/Mips/msa/2rf_exup.ll  82
-rw-r--r--  test/CodeGen/Mips/msa/2rf_float_int.ll  90
-rw-r--r--  test/CodeGen/Mips/msa/2rf_fq.ll  82
-rw-r--r--  test/CodeGen/Mips/msa/2rf_int_float.ll  217
-rw-r--r--  test/CodeGen/Mips/msa/2rf_tq.ll  50
-rw-r--r--  test/CodeGen/Mips/msa/3r-a.ll  1191
-rw-r--r--  test/CodeGen/Mips/msa/3r-b.ll  494
-rw-r--r--  test/CodeGen/Mips/msa/3r-c.ll  446
-rw-r--r--  test/CodeGen/Mips/msa/3r-d.ll  478
-rw-r--r--  test/CodeGen/Mips/msa/3r-i.ll  358
-rw-r--r--  test/CodeGen/Mips/msa/3r-m.ll  862
-rw-r--r--  test/CodeGen/Mips/msa/3r-p.ll  182
-rw-r--r--  test/CodeGen/Mips/msa/3r-s.ll  1353
-rw-r--r--  test/CodeGen/Mips/msa/3r-v.ll  105
-rw-r--r--  test/CodeGen/Mips/msa/3r_4r.ll  206
-rw-r--r--  test/CodeGen/Mips/msa/3r_4r_widen.ll  307
-rw-r--r--  test/CodeGen/Mips/msa/3r_splat.ll  94
-rw-r--r--  test/CodeGen/Mips/msa/3rf.ll  485
-rw-r--r--  test/CodeGen/Mips/msa/3rf_4rf.ll  106
-rw-r--r--  test/CodeGen/Mips/msa/3rf_4rf_q.ll  206
-rw-r--r--  test/CodeGen/Mips/msa/3rf_exdo.ll  50
-rw-r--r--  test/CodeGen/Mips/msa/3rf_float_int.ll  50
-rw-r--r--  test/CodeGen/Mips/msa/3rf_int_float.ll  974
-rw-r--r--  test/CodeGen/Mips/msa/3rf_q.ll  94
-rw-r--r--  test/CodeGen/Mips/msa/arithmetic.ll  726
-rw-r--r--  test/CodeGen/Mips/msa/arithmetic_float.ll  456
-rw-r--r--  test/CodeGen/Mips/msa/basic_operations.ll  481
-rw-r--r--  test/CodeGen/Mips/msa/basic_operations_float.ll  207
-rw-r--r--  test/CodeGen/Mips/msa/bit.ll  537
-rw-r--r--  test/CodeGen/Mips/msa/bitcast.ll  1210
-rw-r--r--  test/CodeGen/Mips/msa/bitwise.ll  1639
-rw-r--r--  test/CodeGen/Mips/msa/compare.ll  2079
-rw-r--r--  test/CodeGen/Mips/msa/compare_float.ll  663
-rw-r--r--  test/CodeGen/Mips/msa/elm_copy.ll  162
-rw-r--r--  test/CodeGen/Mips/msa/elm_cxcmsa.ll  168
-rw-r--r--  test/CodeGen/Mips/msa/elm_insv.ll  192
-rw-r--r--  test/CodeGen/Mips/msa/elm_move.ll  25
-rw-r--r--  test/CodeGen/Mips/msa/elm_shift_slide.ll  158
-rw-r--r--  test/CodeGen/Mips/msa/endian.ll  107
-rw-r--r--  test/CodeGen/Mips/msa/frameindex.ll  85
-rw-r--r--  test/CodeGen/Mips/msa/i10.ll  89
-rw-r--r--  test/CodeGen/Mips/msa/i5-a.ll  82
-rw-r--r--  test/CodeGen/Mips/msa/i5-b.ll  439
-rw-r--r--  test/CodeGen/Mips/msa/i5-c.ll  386
-rw-r--r--  test/CodeGen/Mips/msa/i5-m.ll  310
-rw-r--r--  test/CodeGen/Mips/msa/i5-s.ll  82
-rw-r--r--  test/CodeGen/Mips/msa/i5_ld_st.ll  150
-rw-r--r--  test/CodeGen/Mips/msa/i8.ll  211
-rw-r--r--  test/CodeGen/Mips/msa/inline-asm.ll  34
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll  134
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll  138
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll  31
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll  27
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll  141
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll  149
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll  143
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll  152
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll  33
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s525530439.ll  139
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-s997348632.ll  143
-rw-r--r--  test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll  23
-rw-r--r--  test/CodeGen/Mips/msa/shift-dagcombine.ll  70
-rw-r--r--  test/CodeGen/Mips/msa/shuffle.ll  803
-rw-r--r--  test/CodeGen/Mips/msa/special.ll  26
-rw-r--r--  test/CodeGen/Mips/msa/spill.ll  601
-rw-r--r--  test/CodeGen/Mips/msa/vec.ll  946
-rw-r--r--  test/CodeGen/Mips/msa/vecs10.ll  47
-rw-r--r--  test/CodeGen/Mips/nomips16.ll  38
-rw-r--r--  test/CodeGen/Mips/o32_cc.ll  242
-rw-r--r--  test/CodeGen/Mips/o32_cc_byval.ll  62
-rw-r--r--  test/CodeGen/Mips/o32_cc_vararg.ll  20
-rw-r--r--  test/CodeGen/Mips/optimize-fp-math.ll  32
-rw-r--r--  test/CodeGen/Mips/powif64_16.ll  26
-rw-r--r--  test/CodeGen/Mips/private.ll  4
-rw-r--r--  test/CodeGen/Mips/ra-allocatable.ll  367
-rw-r--r--  test/CodeGen/Mips/return-vector.ll  42
-rw-r--r--  test/CodeGen/Mips/rotate.ll  5
-rw-r--r--  test/CodeGen/Mips/sel1c.ll  21
-rw-r--r--  test/CodeGen/Mips/sel2c.ll  21
-rw-r--r--  test/CodeGen/Mips/selectcc.ll  16
-rw-r--r--  test/CodeGen/Mips/selnek.ll  2
-rw-r--r--  test/CodeGen/Mips/setcc-se.ll  155
-rw-r--r--  test/CodeGen/Mips/simplebr.ll  37
-rw-r--r--  test/CodeGen/Mips/sint-fp-store_pattern.ll  52
-rw-r--r--  test/CodeGen/Mips/stack-alignment.ll  13
-rw-r--r--  test/CodeGen/Mips/stackcoloring.ll  39
-rw-r--r--  test/CodeGen/Mips/stchar.ll  4
-rw-r--r--  test/CodeGen/Mips/tailcall.ll  13
-rw-r--r--  test/CodeGen/Mips/tls-alias.ll  4
-rw-r--r--  test/CodeGen/Mips/tls-models.ll  32
-rw-r--r--  test/CodeGen/Mips/tls.ll  34
-rw-r--r--  test/CodeGen/Mips/tnaked.ll  8
-rw-r--r--  test/CodeGen/Mips/trap.ll  11
-rw-r--r--  test/CodeGen/Mips/trap1.ll  13
-rw-r--r--  test/CodeGen/Mips/unalignedload.ll  20
172 files changed, 28455 insertions, 923 deletions
diff --git a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
index 8479ad222d306..3381143c761d7 100644
--- a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
+++ b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float < %s | FileCheck %s
define signext i8 @A(i8 %e.0, i8 signext %sum) nounwind {
entry:
diff --git a/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index dbde742ad3fe6..e274bc0e14f0d 100644
--- a/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -51,3 +51,21 @@ entry:
ret void
}
+; Check that RA doesn't allocate registers in the clobber list.
+; CHECK-LABEL: foo4:
+; CHECK: #APP
+; CHECK-NOT: ulh $2
+; CHECK: #NO_APP
+; CHECK: #APP
+; CHECK-NOT: $f0
+; CHECK: #NO_APP
+
+define void @foo4() {
+entry:
+ %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
+ store i32 %0, i32* @gi2, align 4
+ %1 = load float* @gf0, align 4
+ %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
+ store double %2, double* @gd0, align 8
+ ret void
+}
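
The constraint strings carry the point of this test: "~{$2}" and "~{$f0}" name clobbered registers, so the allocator must keep the "=r" and "=f" outputs out of them. A minimal standalone sketch of the same mechanism (hypothetical function name, not part of the test):

define i32 @clobber_sketch() {
entry:
  ; "=r" output plus "~{$2}" clobber: RA must not assign the result to $2.
  %v = tail call i32 asm sideeffect "addiu $0, $$zero, 7", "=r,~{$2}"()
  ret i32 %v
}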
diff --git a/test/CodeGen/Mips/2013-11-18-fp64-const0.ll b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
new file mode 100644
index 0000000000000..f8390d9a1ca75
--- /dev/null
+++ b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=mips -mattr=-fp64 < %s | FileCheck -check-prefix=CHECK-FP32 %s
+; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck -check-prefix=CHECK-FP64 %s
+
+; This test case is a simplified version of an llvm-stress generated test with
+; seed=3718491962.
+; It originally failed on MIPS32 with FP64 with the following error:
+; LLVM ERROR: ran out of registers during register allocation
+; This was caused by impossible register class restrictions caused by the use
+; of BuildPairF64 instead of BuildPairF64_64.
+
+define void @autogen_SD3718491962() {
+BB:
+ ; CHECK-FP32: mtc1 $zero, $f{{[0-3]*[02468]}}
+ ; CHECK-FP32: mtc1 $zero, $f{{[0-3]*[13579]}}
+
+ ; CHECK-FP64: mtc1 $zero, $f{{[0-9]+}}
+ ; CHECK-FP64-NOT: mtc1 $zero,
+ ; FIXME: A redundant mthc1 is currently emitted. Add a -NOT when it is
+ ; eliminated
+
+ %Cmp = fcmp ule double 0.000000e+00, undef
+ %Cmp11 = fcmp ueq double 0xFDBD965CF1BB7FDA, undef
+ br label %CF88
+
+CF88: ; preds = %CF88, %BB
+ %Sl18 = select i1 %Cmp, i1 %Cmp11, i1 %Cmp
+ br i1 %Sl18, label %CF88, label %CF85
+
+CF85: ; preds = %CF88
+ ret void
+}
diff --git a/test/CodeGen/Mips/align16.ll b/test/CodeGen/Mips/align16.ll
index 99139abbe8481..267cff54291d0 100644
--- a/test/CodeGen/Mips/align16.ll
+++ b/test/CodeGen/Mips/align16.ll
@@ -25,7 +25,7 @@ entry:
call void @p(i32* %arrayidx1)
ret void
}
-; 16: save $ra, $s0, $s1, 2040
-; 16: addiu $sp, -48 # 16 bit inst
-; 16: addiu $sp, 48 # 16 bit inst
-; 16: restore $ra, $s0, $s1, 2040
\ No newline at end of file
+; 16: save $ra, $s0, $s1, $s2, 2040
+; 16: addiu $sp, -56 # 16 bit inst
+; 16: addiu $sp, 56 # 16 bit inst
+; 16: restore $ra, $s0, $s1, $s2, 2040
diff --git a/test/CodeGen/Mips/alloca16.ll b/test/CodeGen/Mips/alloca16.ll
index 5ae9a847917bd..017665f00bd46 100644
--- a/test/CodeGen/Mips/alloca16.ll
+++ b/test/CodeGen/Mips/alloca16.ll
@@ -19,8 +19,8 @@ entry:
define void @test() nounwind {
entry:
-; 16: .frame $16,24,$ra
-; 16: save $ra, $s0, $s1, 24
+; 16: .frame $sp,24,$ra
+; 16: save $ra, $s0, $s1, $s2, 24
; 16: move $16, $sp
; 16: move ${{[0-9]+}}, $sp
; 16: subu $[[REGISTER:[0-9]+]], ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index 819f258c2a404..0e60fe1fbfbc5 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=mipsel --disable-machine-licm < %s | FileCheck %s
+; RUN: llc -march=mipsel --disable-machine-licm < %s | FileCheck %s -check-prefix=CHECK-EL
+; RUN: llc -march=mips --disable-machine-licm < %s | FileCheck %s -check-prefix=CHECK-EB
@x = common global i32 0, align 4
@@ -7,13 +8,21 @@ entry:
%0 = atomicrmw add i32* @x, i32 %incr monotonic
ret i32 %0
-; CHECK: AtomicLoadAdd32:
-; CHECK: lw $[[R0:[0-9]+]], %got(x)
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
-; CHECK: sc $[[R2]], 0($[[R0]])
-; CHECK: beq $[[R2]], $zero, $[[BB0]]
+; CHECK-EL-LABEL: AtomicLoadAdd32:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R1:[0-9]+]], 0($[[R0]])
+; CHECK-EL: addu $[[R2:[0-9]+]], $[[R1]], $4
+; CHECK-EL: sc $[[R2]], 0($[[R0]])
+; CHECK-EL: beqz $[[R2]], $[[BB0]]
+
+; CHECK-EB-LABEL: AtomicLoadAdd32:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R1:[0-9]+]], 0($[[R0]])
+; CHECK-EB: addu $[[R2:[0-9]+]], $[[R1]], $4
+; CHECK-EB: sc $[[R2]], 0($[[R0]])
+; CHECK-EB: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicLoadNand32(i32 %incr) nounwind {
@@ -21,14 +30,23 @@ entry:
%0 = atomicrmw nand i32* @x, i32 %incr monotonic
ret i32 %0
-; CHECK: AtomicLoadNand32:
-; CHECK: lw $[[R0:[0-9]+]], %got(x)
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: and $[[R3:[0-9]+]], $[[R1]], $4
-; CHECK: nor $[[R2:[0-9]+]], $zero, $[[R3]]
-; CHECK: sc $[[R2]], 0($[[R0]])
-; CHECK: beq $[[R2]], $zero, $[[BB0]]
+; CHECK-EL-LABEL: AtomicLoadNand32:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R1:[0-9]+]], 0($[[R0]])
+; CHECK-EL: and $[[R3:[0-9]+]], $[[R1]], $4
+; CHECK-EL: nor $[[R2:[0-9]+]], $zero, $[[R3]]
+; CHECK-EL: sc $[[R2]], 0($[[R0]])
+; CHECK-EL: beqz $[[R2]], $[[BB0]]
+
+; CHECK-EB-LABEL: AtomicLoadNand32:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R1:[0-9]+]], 0($[[R0]])
+; CHECK-EB: and $[[R3:[0-9]+]], $[[R1]], $4
+; CHECK-EB: nor $[[R2:[0-9]+]], $zero, $[[R3]]
+; CHECK-EB: sc $[[R2]], 0($[[R0]])
+; CHECK-EB: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicSwap32(i32 %newval) nounwind {
@@ -39,12 +57,19 @@ entry:
%0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
ret i32 %0
-; CHECK: AtomicSwap32:
-; CHECK: lw $[[R0:[0-9]+]], %got(x)
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll ${{[0-9]+}}, 0($[[R0]])
-; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK: beq $[[R2]], $zero, $[[BB0]]
+; CHECK-EL-LABEL: AtomicSwap32:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll ${{[0-9]+}}, 0($[[R0]])
+; CHECK-EL: sc $[[R2:[0-9]+]], 0($[[R0]])
+; CHECK-EL: beqz $[[R2]], $[[BB0]]
+
+; CHECK-EB-LABEL: AtomicSwap32:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll ${{[0-9]+}}, 0($[[R0]])
+; CHECK-EB: sc $[[R2:[0-9]+]], 0($[[R0]])
+; CHECK-EB: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
@@ -55,14 +80,23 @@ entry:
%0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
ret i32 %0
-; CHECK: AtomicCmpSwap32:
-; CHECK: lw $[[R0:[0-9]+]], %got(x)
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $2, 0($[[R0]])
-; CHECK: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
-; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK: beq $[[R2]], $zero, $[[BB0]]
-; CHECK: $[[BB1]]:
+; CHECK-EL-LABEL: AtomicCmpSwap32:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $2, 0($[[R0]])
+; CHECK-EL: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
+; CHECK-EL: sc $[[R2:[0-9]+]], 0($[[R0]])
+; CHECK-EL: beqz $[[R2]], $[[BB0]]
+; CHECK-EL: $[[BB1]]:
+
+; CHECK-EB-LABEL: AtomicCmpSwap32:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $2, 0($[[R0]])
+; CHECK-EB: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
+; CHECK-EB: sc $[[R2:[0-9]+]], 0($[[R0]])
+; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; CHECK-EB: $[[BB1]]:
}
@@ -74,30 +108,56 @@ entry:
%0 = atomicrmw add i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK: AtomicLoadAdd8:
-; CHECK: lw $[[R0:[0-9]+]], %got(y)
-; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK: sc $[[R14]], 0($[[R2]])
-; CHECK: beq $[[R14]], $zero, $[[BB0]]
-
-; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK: sra $2, $[[R17]], 24
+; CHECK-EL-LABEL: AtomicLoadAdd8:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
+; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
+; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
+
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
+; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK-EL: sc $[[R14]], 0($[[R2]])
+; CHECK-EL: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
+; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EL: sra $2, $[[R17]], 24
+
+; CHECK-EB-LABEL: AtomicLoadAdd8:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
+; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EB: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK-EB: sc $[[R14]], 0($[[R2]])
+; CHECK-EB: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EB: sra $2, $[[R17]], 24
}
define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
@@ -105,30 +165,56 @@ entry:
%0 = atomicrmw sub i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK: AtomicLoadSub8:
-; CHECK: lw $[[R0:[0-9]+]], %got(y)
-; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK: sc $[[R14]], 0($[[R2]])
-; CHECK: beq $[[R14]], $zero, $[[BB0]]
-
-; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK: sra $2, $[[R17]], 24
+; CHECK-EL-LABEL: AtomicLoadSub8:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
+; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
+; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
+
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EL: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
+; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK-EL: sc $[[R14]], 0($[[R2]])
+; CHECK-EL: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
+; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EL: sra $2, $[[R17]], 24
+
+; CHECK-EB-LABEL: AtomicLoadSub8:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
+; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EB: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK-EB: sc $[[R14]], 0($[[R2]])
+; CHECK-EB: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EB: sra $2, $[[R17]], 24
}
define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
@@ -136,31 +222,58 @@ entry:
%0 = atomicrmw nand i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK: AtomicLoadNand8:
-; CHECK: lw $[[R0:[0-9]+]], %got(y)
-; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK: nor $[[R11:[0-9]+]], $zero, $[[R18]]
-; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK: sc $[[R14]], 0($[[R2]])
-; CHECK: beq $[[R14]], $zero, $[[BB0]]
-
-; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK: sra $2, $[[R17]], 24
+; CHECK-EL-LABEL: AtomicLoadNand8:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
+; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
+; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
+
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EL: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK-EL: nor $[[R11:[0-9]+]], $zero, $[[R18]]
+; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
+; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK-EL: sc $[[R14]], 0($[[R2]])
+; CHECK-EL: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
+; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EL: sra $2, $[[R17]], 24
+
+; CHECK-EB-LABEL: AtomicLoadNand8:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
+; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EB: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
+; CHECK-EB: nor $[[R11:[0-9]+]], $zero, $[[R18]]
+; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; CHECK-EB: sc $[[R14]], 0($[[R2]])
+; CHECK-EB: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EB: sra $2, $[[R17]], 24
}
define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
@@ -168,29 +281,54 @@ entry:
%0 = atomicrmw xchg i8* @y, i8 %newval monotonic
ret i8 %0
-; CHECK: AtomicSwap8:
-; CHECK: lw $[[R0:[0-9]+]], %got(y)
-; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK: and $[[R18:[0-9]+]], $[[R9]], $[[R6]]
-; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
-; CHECK: sc $[[R14]], 0($[[R2]])
-; CHECK: beq $[[R14]], $zero, $[[BB0]]
-
-; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK: sra $2, $[[R17]], 24
+; CHECK-EL-LABEL: AtomicSwap8:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
+; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
+; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
+
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EL: and $[[R18:[0-9]+]], $[[R9]], $[[R6]]
+; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
+; CHECK-EL: sc $[[R14]], 0($[[R2]])
+; CHECK-EL: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
+; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
+; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EL: sra $2, $[[R17]], 24
+
+; CHECK-EB-LABEL: AtomicSwap8:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
+; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
+; CHECK-EB: and $[[R18:[0-9]+]], $[[R9]], $[[R7]]
+; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
+; CHECK-EB: sc $[[R14]], 0($[[R2]])
+; CHECK-EB: beqz $[[R14]], $[[BB0]]
+
+; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EB: sra $2, $[[R17]], 24
}
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
@@ -198,34 +336,64 @@ entry:
%0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic
ret i8 %0
-; CHECK: AtomicCmpSwap8:
-; CHECK: lw $[[R0:[0-9]+]], %got(y)
-; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK: andi $[[R8:[0-9]+]], $4, 255
-; CHECK: sllv $[[R9:[0-9]+]], $[[R8]], $[[R4]]
-; CHECK: andi $[[R10:[0-9]+]], $5, 255
-; CHECK: sllv $[[R11:[0-9]+]], $[[R10]], $[[R4]]
-
-; CHECK: $[[BB0:[A-Z_0-9]+]]:
-; CHECK: ll $[[R12:[0-9]+]], 0($[[R2]])
-; CHECK: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
-; CHECK: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
-
-; CHECK: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
-; CHECK: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
-; CHECK: sc $[[R15]], 0($[[R2]])
-; CHECK: beq $[[R15]], $zero, $[[BB0]]
-
-; CHECK: $[[BB1]]:
-; CHECK: srlv $[[R16:[0-9]+]], $[[R13]], $[[R4]]
-; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK: sra $2, $[[R17]], 24
+; CHECK-EL-LABEL: AtomicCmpSwap8:
+; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
+; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
+; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
+; CHECK-EL: andi $[[R8:[0-9]+]], $4, 255
+; CHECK-EL: sllv $[[R9:[0-9]+]], $[[R8]], $[[R4]]
+; CHECK-EL: andi $[[R10:[0-9]+]], $5, 255
+; CHECK-EL: sllv $[[R11:[0-9]+]], $[[R10]], $[[R4]]
+
+; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EL: ll $[[R12:[0-9]+]], 0($[[R2]])
+; CHECK-EL: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
+; CHECK-EL: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
+
+; CHECK-EL: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
+; CHECK-EL: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
+; CHECK-EL: sc $[[R15]], 0($[[R2]])
+; CHECK-EL: beqz $[[R15]], $[[BB0]]
+
+; CHECK-EL: $[[BB1]]:
+; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R13]], $[[R4]]
+; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
+; CHECK-EL: sra $2, $[[R17]], 24
+
+; CHECK-EB-LABEL: AtomicCmpSwap8:
+; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
+; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
+; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
+; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; CHECK-EB: andi $[[R9:[0-9]+]], $4, 255
+; CHECK-EB: sllv $[[R10:[0-9]+]], $[[R9]], $[[R5]]
+; CHECK-EB: andi $[[R11:[0-9]+]], $5, 255
+; CHECK-EB: sllv $[[R12:[0-9]+]], $[[R11]], $[[R5]]
+
+; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
+; CHECK-EB: ll $[[R13:[0-9]+]], 0($[[R2]])
+; CHECK-EB: and $[[R14:[0-9]+]], $[[R13]], $[[R7]]
+; CHECK-EB: bne $[[R14]], $[[R10]], $[[BB1:[A-Z_0-9]+]]
+
+; CHECK-EB: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
+; CHECK-EB: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
+; CHECK-EB: sc $[[R16]], 0($[[R2]])
+; CHECK-EB: beqz $[[R16]], $[[BB0]]
+
+; CHECK-EB: $[[BB1]]:
+; CHECK-EB: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
+; CHECK-EB: sll $[[R18:[0-9]+]], $[[R17]], 24
+; CHECK-EB: sra $2, $[[R18]], 24
}
@countsint = common global i32 0, align 4
@@ -235,12 +403,19 @@ entry:
%0 = atomicrmw add i32* @countsint, i32 %v seq_cst
ret i32 %0
-; CHECK: CheckSync:
-; CHECK: sync 0
-; CHECK: ll
-; CHECK: sc
-; CHECK: beq
-; CHECK: sync 0
+; CHECK-EL-LABEL: CheckSync:
+; CHECK-EL: sync 0
+; CHECK-EL: ll
+; CHECK-EL: sc
+; CHECK-EL: beq
+; CHECK-EL: sync 0
+
+; CHECK-EB-LABEL: CheckSync:
+; CHECK-EB: sync 0
+; CHECK-EB: ll
+; CHECK-EB: sc
+; CHECK-EB: beq
+; CHECK-EB: sync 0
}
; make sure that this assertion in
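
The only difference between the EL and EB subword sequences above is the extra xori when computing the byte lane: on big-endian targets byte 0 is the most significant byte of the aligned word. The two shift-amount computations, written out as IR (illustration only, hypothetical function names):

define i32 @lane_shift_le(i32 %addr) {
entry:
  %byte = and i32 %addr, 3      ; byte offset inside the aligned word
  %shamt = shl i32 %byte, 3     ; little-endian: shift = offset * 8
  ret i32 %shamt
}

define i32 @lane_shift_be(i32 %addr) {
entry:
  %byte = and i32 %addr, 3
  %flip = xor i32 %byte, 3      ; big-endian: mirror the byte position (the xori in the EB checks)
  %shamt = shl i32 %flip, 3
  ret i32 %shamt
}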
diff --git a/test/CodeGen/Mips/atomicops.ll b/test/CodeGen/Mips/atomicops.ll
index b9c3804e0d720..0f0f01afc1420 100644
--- a/test/CodeGen/Mips/atomicops.ll
+++ b/test/CodeGen/Mips/atomicops.ll
@@ -7,7 +7,7 @@ entry:
%0 = atomicrmw add i32* %mem, i32 %val seq_cst
%add = add nsw i32 %0, %c
ret i32 %add
-; 16: foo:
+; 16-LABEL: foo:
; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})
; 16: lw ${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}})
}
@@ -26,7 +26,7 @@ entry:
%4 = atomicrmw xchg i32* %x, i32 1 seq_cst
%5 = load volatile i32* %x, align 4
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
-; 16: main:
+; 16-LABEL: main:
; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})
; 16: lw ${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}})
; 16: lw ${{[0-9]+}}, %call16(__sync_val_compare_and_swap_4)(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/beqzc.ll b/test/CodeGen/Mips/beqzc.ll
new file mode 100644
index 0000000000000..4a294c2d817e9
--- /dev/null
+++ b/test/CodeGen/Mips/beqzc.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @main() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %cmp = icmp eq i32 %0, 0
+ %. = select i1 %cmp, i32 10, i32 55
+ store i32 %., i32* @j, align 4
+; cond-b-short: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+ ret i32 0
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
+
diff --git a/test/CodeGen/Mips/beqzc1.ll b/test/CodeGen/Mips/beqzc1.ll
new file mode 100644
index 0000000000000..8f929a8e35416
--- /dev/null
+++ b/test/CodeGen/Mips/beqzc1.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @main() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+; cond-b-short: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+if.then: ; preds = %entry
+ store i32 10, i32* @j, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/biggot.ll b/test/CodeGen/Mips/biggot.ll
index c4ad851c82587..da287eea6fd15 100644
--- a/test/CodeGen/Mips/biggot.ll
+++ b/test/CodeGen/Mips/biggot.ll
@@ -31,12 +31,12 @@ declare void @foo0(i32)
define void @foo2(i32* nocapture %d, i32* nocapture %s, i32 %n) nounwind {
entry:
-; O32: foo2:
+; O32-LABEL: foo2:
; O32: lui $[[R2:[0-9]+]], %call_hi(memcpy)
; O32: addu $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
; O32: lw ${{[0-9]+}}, %call_lo(memcpy)($[[R3]])
-; N64: foo2:
+; N64-LABEL: foo2:
; N64: lui $[[R2:[0-9]+]], %call_hi(memcpy)
; N64: daddu $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
; N64: ld ${{[0-9]+}}, %call_lo(memcpy)($[[R3]])
diff --git a/test/CodeGen/Mips/blez_bgez.ll b/test/CodeGen/Mips/blez_bgez.ll
new file mode 100644
index 0000000000000..f6a5e4f47a5af
--- /dev/null
+++ b/test/CodeGen/Mips/blez_bgez.ll
@@ -0,0 +1,36 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+
+; CHECK-LABEL: test_blez:
+; CHECK: blez ${{[0-9]+}}, $BB
+
+define void @test_blez(i32 %a) {
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo1()
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+declare void @foo1()
+
+; CHECK-LABEL: test_bgez:
+; CHECK: bgez ${{[0-9]+}}, $BB
+
+define void @test_bgez(i32 %a) {
+entry:
+ %cmp = icmp slt i32 %a, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo1()
+ br label %if.end
+
+if.end:
+ ret void
+}
diff --git a/test/CodeGen/Mips/blockaddr.ll b/test/CodeGen/Mips/blockaddr.ll
index 7de7fa6f6bdb0..beab65f47196a 100644
--- a/test/CodeGen/Mips/blockaddr.ll
+++ b/test/CodeGen/Mips/blockaddr.ll
@@ -4,6 +4,8 @@
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-2
@reg = common global i8* null, align 4
@@ -36,6 +38,14 @@ entry:
; STATIC-N64: daddiu ${{[0-9]+}}, $[[R2]], %got_ofst($tmp[[T2]])
; STATIC-N64: ld $[[R3:[0-9]+]], %got_page($tmp[[T3:[0-9]+]])
; STATIC-N64: daddiu ${{[0-9]+}}, $[[R3]], %got_ofst($tmp[[T3]])
+; STATIC-MIPS16-1: .ent f
+; STATIC-MIPS16-2: .ent f
+; STATIC-MIPS16-1: li $[[R1_16:[0-9]+]], %hi($tmp[[TI_16:[0-9]+]])
+; STATIC-MIPS16-1: sll ${{[0-9]+}}, $[[R1_16]], 16
+; STATIC-MIPS16-2: li ${{[0-9]+}}, %lo($tmp{{[0-9]+}})
+; STATIC-MIPS16-1: jal dummy
+; STATIC-MIPS16-2: jal dummy
+
define void @f() nounwind {
entry:
%call = tail call i8* @dummy(i8* blockaddress(@f, %baz))
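
The MIPS16 static sequence the new prefixes match assembles each blockaddress from its two relocation halves: li loads %hi, sll by 16 positions it, and a second li supplies %lo. Roughly (ignoring the carry adjustment %hi makes when %lo is treated as signed):

; address($tmpN) = (%hi($tmpN) << 16) + %lo($tmpN)
;   li  $rA, %hi($tmpN)   ; high half as an immediate
;   sll $rA, $rA, 16      ; shift it into the upper 16 bits
;   li  $rB, %lo($tmpN)   ; low half as an immediate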
diff --git a/test/CodeGen/Mips/brdelayslot.ll b/test/CodeGen/Mips/brdelayslot.ll
index 2deb037c9c390..68341c1ba25b8 100644
--- a/test/CodeGen/Mips/brdelayslot.ll
+++ b/test/CodeGen/Mips/brdelayslot.ll
@@ -31,7 +31,7 @@ declare void @foo2(i32)
;
define void @foo3(i32 %a) nounwind {
entry:
-; Default: foo3:
+; Default-LABEL: foo3:
; Default: jalr
; Default: cvt.d.w
@@ -49,7 +49,7 @@ declare void @foo4(double)
; Check that branch delay slot can be filled with an instruction with operand
; $1.
;
-; Default: foo5:
+; Default-LABEL: foo5:
; Default-NOT: nop
define void @foo5(i32 %a) nounwind {
@@ -76,7 +76,7 @@ if.end:
; Check that delay slot filler can place mov.s or mov.d in delay slot.
;
-; Default: foo6:
+; Default-LABEL: foo6:
; Default-NOT: nop
; Default: .end foo6
@@ -90,7 +90,7 @@ declare void @foo7(double, float)
; Check that a store can move past other memory instructions.
;
-; STATICO1: foo8:
+; STATICO1-LABEL: foo8:
; STATICO1: jalr ${{[0-9]+}}
; STATICO1-NEXT: sw ${{[0-9]+}}, %lo(g1)
@@ -109,7 +109,7 @@ entry:
; Test searchForward. Check that the second jal's slot is filled with another
; instruction in the same block.
;
-; FORWARD: foo10:
+; FORWARD-LABEL: foo10:
; FORWARD: jal foo11
; FORWARD: jal foo11
; FORWARD-NOT: nop
@@ -130,10 +130,10 @@ declare void @foo11()
; Check that delay slots of branches in both the entry block and loop body are
; filled.
;
-; SUCCBB: succbbs_loop1:
-; SUCCBB: bne ${{[0-9]+}}, $zero, $BB
+; SUCCBB-LABEL: succbbs_loop1:
+; SUCCBB: blez $5, $BB
; SUCCBB-NEXT: addiu
-; SUCCBB: bne ${{[0-9]+}}, $zero, $BB
+; SUCCBB: bnez ${{[0-9]+}}, $BB
; SUCCBB-NEXT: addiu
define i32 @succbbs_loop1(i32* nocapture %a, i32 %n) {
@@ -158,9 +158,16 @@ for.end: ; preds = %for.body, %entry
; Check that the first branch has its slot filled.
;
-; SUCCBB: succbbs_br1:
-; SUCCBB: beq ${{[0-9]+}}, $zero, $BB
-; SUCCBB-NEXT: lw $25, %call16(foo100)
+; SUCCBB-LABEL: succbbs_br1:
+; SUCCBB: beqz ${{[0-9]+}}, $BB
+; SUCCBB-NEXT: lw ${{[0-9]+}}, %got(foo101)(${{[0-9]+}})
+
+define internal fastcc void @foo101() {
+entry:
+ tail call void @foo100()
+ tail call void @foo100()
+ ret void
+}
define void @succbbs_br1(i32 %a) {
entry:
@@ -168,7 +175,7 @@ entry:
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- tail call void @foo100() #1
+ tail call fastcc void @foo101()
br label %if.end
if.end: ; preds = %entry, %if.then
diff --git a/test/CodeGen/Mips/brsize3.ll b/test/CodeGen/Mips/brsize3.ll
new file mode 100644
index 0000000000000..7b1f44001a9a9
--- /dev/null
+++ b/test/CodeGen/Mips/brsize3.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=b-no-short
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=b-long
+
+; ModuleID = 'brsize3.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+; Function Attrs: noreturn nounwind optsize
+define void @foo() #0 {
+entry:
+ br label %x
+
+x: ; preds = %x, %entry
+ tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !1
+ br label %x
+; b-long: $BB0_1:
+; b-long: #APP
+; b-long: .space 60000
+; b-long: #NO_APP
+; b-long: b $BB0_1
+; b-no-short: $BB0_1:
+; b-no-short: #APP
+; b-no-short: .space 60000
+; b-no-short: #NO_APP
+; b-no-short-NOT: b $BB0_1 # 16 bit inst
+
+}
+
+attributes #0 = { noreturn nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 45}
diff --git a/test/CodeGen/Mips/brsize3a.ll b/test/CodeGen/Mips/brsize3a.ll
new file mode 100644
index 0000000000000..6382fa228e197
--- /dev/null
+++ b/test/CodeGen/Mips/brsize3a.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=b-short
+
+; ModuleID = 'brsize3.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+; Function Attrs: noreturn nounwind optsize
+define void @foo() #0 {
+entry:
+ br label %x
+
+x: ; preds = %x, %entry
+ tail call void asm sideeffect ".space 200", ""() #1, !srcloc !1
+ br label %x
+; b-short: $BB0_1:
+; b-short: #APP
+; b-short: .space 200
+; b-short: #NO_APP
+; b-short: b $BB0_1 # 16 bit inst
+
+}
+
+attributes #0 = { noreturn nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 45}
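
brsize3.ll and brsize3a.ll form a pair: the only difference is the size of the .space between the branch and its target. A rough range check (MIPS16e field widths stated here as an assumption about the encoding, not taken from the tests):

; short b (16-bit encoding):  11-bit offset << 1  ->  about +/-2 KB   ->  spans .space 200
; long  b (32-bit extended):  16-bit offset << 1  ->  about +/-64 KB  ->  spans .space 60000
; hence "# 16 bit inst" is expected after .space 200 and must be absent after .space 60000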
diff --git a/test/CodeGen/Mips/bswap.ll b/test/CodeGen/Mips/bswap.ll
index a8fc2cdc74310..f17b91aab8024 100644
--- a/test/CodeGen/Mips/bswap.ll
+++ b/test/CodeGen/Mips/bswap.ll
@@ -1,20 +1,23 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=MIPS32
; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=MIPS64
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float | FileCheck %s -check-prefix=mips16
define i32 @bswap32(i32 %x) nounwind readnone {
entry:
-; MIPS32: bswap32:
+; MIPS32-LABEL: bswap32:
; MIPS32: wsbh $[[R0:[0-9]+]]
; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+; mips16: .ent bswap32
%or.3 = call i32 @llvm.bswap.i32(i32 %x)
ret i32 %or.3
}
define i64 @bswap64(i64 %x) nounwind readnone {
entry:
-; MIPS64: bswap64:
+; MIPS64-LABEL: bswap64:
; MIPS64: dsbh $[[R0:[0-9]+]]
; MIPS64: dshd ${{[0-9]+}}, $[[R0]]
+; mips16: .ent bswap64
%or.7 = call i64 @llvm.bswap.i64(i64 %x)
ret i64 %or.7
}
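
The MIPS32r2 pair the checks match composes a full byte reverse from two halfword-local steps; a worked byte trace (b3 is the most significant byte):

; x                   = [b3 b2 b1 b0]
; wsbh x              = [b2 b3 b0 b1]   ; swap the bytes within each halfword
; rotr (wsbh x), 16   = [b0 b1 b2 b3]   ; swap the halfwords -> bswap32(x)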
diff --git a/test/CodeGen/Mips/buildpairextractelementf64.ll b/test/CodeGen/Mips/buildpairextractelementf64.ll
index 585bc250fb8c8..490d4273c5b61 100644
--- a/test/CodeGen/Mips/buildpairextractelementf64.ll
+++ b/test/CodeGen/Mips/buildpairextractelementf64.ll
@@ -1,20 +1,31 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s
-; RUN: llc < %s -march=mips | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=FP32
+; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=FP32
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64
+; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64
+
@a = external global i32
+; CHECK-LABEL: f:
+; FP32: mtc1
+; FP32: mtc1
+; FP64-DAG: mtc1
+; FP64-DAG: mthc1
+
define double @f(i32 %a1, double %d) nounwind {
entry:
-; CHECK: mtc1
-; CHECK: mtc1
store i32 %a1, i32* @a, align 4
%add = fadd double %d, 2.000000e+00
ret double %add
}
+; CHECK-LABEL: f3:
+; FP32: mfc1
+; FP32: mfc1
+; FP64-DAG: mfc1
+; FP64-DAG: mfhc1
+
define void @f3(double %d, i32 %a1) nounwind {
entry:
-; CHECK: mfc1
-; CHECK: mfc1
tail call void @f2(i32 %a1, double %d) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/check-noat.ll b/test/CodeGen/Mips/check-noat.ll
index bfeff677b34d0..cfcd367e87af2 100644
--- a/test/CodeGen/Mips/check-noat.ll
+++ b/test/CodeGen/Mips/check-noat.ll
@@ -2,7 +2,7 @@
define void @f() nounwind readnone {
entry:
-; CHECK: f:
+; CHECK-LABEL: f:
; CHECK: .set noat
; CHECK: .set at
diff --git a/test/CodeGen/Mips/cmov.ll b/test/CodeGen/Mips/cmov.ll
index 81925a4953ce4..c24c5ac26ae1b 100755
--- a/test/CodeGen/Mips/cmov.ll
+++ b/test/CodeGen/Mips/cmov.ll
@@ -5,12 +5,12 @@
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
@i3 = common global i32* null, align 4
-; O32: lw $[[R0:[0-9]+]], %got(i3)
-; O32: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
-; O32: movn $[[R0]], $[[R1]], ${{[0-9]+}}
-; N64: ldr $[[R0:[0-9]+]]
-; N64: ld $[[R1:[0-9]+]], %got_disp(i1)
-; N64: movn $[[R0]], $[[R1]], ${{[0-9]+}}
+; O32-DAG: lw $[[R0:[0-9]+]], %got(i3)
+; O32-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
+; O32: movn $[[R0]], $[[R1]], ${{[0-9]+}}
+; N64-DAG: ldr $[[R0:[0-9]+]]
+; N64-DAG: ld $[[R1:[0-9]+]], %got_disp(i1)
+; N64: movn $[[R0]], $[[R1]], ${{[0-9]+}}
define i32* @cmov1(i32 %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
@@ -22,11 +22,11 @@ entry:
@c = global i32 1, align 4
@d = global i32 0, align 4
-; O32: cmov2:
+; O32-LABEL: cmov2:
; O32: addiu $[[R1:[0-9]+]], ${{[a-z0-9]+}}, %got(d)
; O32: addiu $[[R0:[0-9]+]], ${{[a-z0-9]+}}, %got(c)
; O32: movn $[[R1]], $[[R0]], ${{[0-9]+}}
-; N64: cmov2:
+; N64-LABEL: cmov2:
; N64: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
; N64: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
; N64: movn $[[R1]], $[[R0]], ${{[0-9]+}}
@@ -39,7 +39,7 @@ entry:
ret i32 %cond
}
-; O32: cmov3:
+; O32-LABEL: cmov3:
; O32: xori $[[R0:[0-9]+]], ${{[0-9]+}}, 234
; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
define i32 @cmov3(i32 %a, i32 %b, i32 %c) nounwind readnone {
@@ -49,7 +49,7 @@ entry:
ret i32 %cond
}
-; N64: cmov4:
+; N64-LABEL: cmov4:
; N64: xori $[[R0:[0-9]+]], ${{[0-9]+}}, 234
; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
define i64 @cmov4(i32 %a, i64 %b, i64 %c) nounwind readnone {
@@ -67,7 +67,7 @@ entry:
; (movz t, (setlt a, N + 1), f)
; if N + 1 fits in 16-bit.
-; O32: slti0:
+; O32-LABEL: slti0:
; O32: slti $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
@@ -78,7 +78,7 @@ entry:
ret i32 %cond
}
-; O32: slti1:
+; O32-LABEL: slti1:
; O32: slt ${{[0-9]+}}
define i32 @slti1(i32 %a) {
@@ -88,7 +88,7 @@ entry:
ret i32 %cond
}
-; O32: slti2:
+; O32-LABEL: slti2:
; O32: slti $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
@@ -99,7 +99,7 @@ entry:
ret i32 %cond
}
-; O32: slti3:
+; O32-LABEL: slti3:
; O32: slt ${{[0-9]+}}
define i32 @slti3(i32 %a) {
@@ -111,7 +111,7 @@ entry:
; 64-bit patterns.
-; N64: slti64_0:
+; N64-LABEL: slti64_0:
; N64: slti $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
@@ -122,7 +122,7 @@ entry:
ret i64 %conv
}
-; N64: slti64_1:
+; N64-LABEL: slti64_1:
; N64: slt ${{[0-9]+}}
define i64 @slti64_1(i64 %a) {
@@ -132,7 +132,7 @@ entry:
ret i64 %conv
}
-; N64: slti64_2:
+; N64-LABEL: slti64_2:
; N64: slti $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
@@ -143,7 +143,7 @@ entry:
ret i64 %conv
}
-; N64: slti64_3:
+; N64-LABEL: slti64_3:
; N64: slt ${{[0-9]+}}
define i64 @slti64_3(i64 %a) {
@@ -155,7 +155,7 @@ entry:
; sltiu instructions.
-; O32: sltiu0:
+; O32-LABEL: sltiu0:
; O32: sltiu $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
@@ -166,7 +166,7 @@ entry:
ret i32 %cond
}
-; O32: sltiu1:
+; O32-LABEL: sltiu1:
; O32: sltu ${{[0-9]+}}
define i32 @sltiu1(i32 %a) {
@@ -176,7 +176,7 @@ entry:
ret i32 %cond
}
-; O32: sltiu2:
+; O32-LABEL: sltiu2:
; O32: sltiu $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
@@ -187,7 +187,7 @@ entry:
ret i32 %cond
}
-; O32: sltiu3:
+; O32-LABEL: sltiu3:
; O32: sltu ${{[0-9]+}}
define i32 @sltiu3(i32 %a) {
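
All of the slti/sltiu cases turn on whether the adjusted constant fits the instruction's signed 16-bit immediate (-32768..32767); a worked pair for the signed variants:

; a <= 32766  ==  a < 32767  ->  32767 fits          ->  slti $r, $a, 32767 plus movz
; a <= 32767  ==  a < 32768  ->  32768 does not fit  ->  materialize it and use slt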
diff --git a/test/CodeGen/Mips/cmplarge.ll b/test/CodeGen/Mips/cmplarge.ll
new file mode 100644
index 0000000000000..2a3d30a95492f
--- /dev/null
+++ b/test/CodeGen/Mips/cmplarge.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=cmp16
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mipsel--linux-gnu"
+
+%struct.StorablePicture = type { i32, i32, i32, i32 }
+
+
+
+define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 {
+entry:
+ %size_y = getelementptr inbounds %struct.StorablePicture* %s, i32 0, i32 1
+ %0 = load i32* %size_y, align 4
+ %sub = add nsw i32 %0, -1
+ %add5 = add nsw i32 %0, 20
+ %cmp6 = icmp sgt i32 %add5, -20
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ]
+ %call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2
+ %inc = add nsw i32 %j.07, 1
+ %1 = load i32* %size_y, align 4
+ %add = add nsw i32 %1, 20
+ %cmp = icmp slt i32 %inc, %add
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; cmp16: .ent getSubImagesLuma
+; cmp16: .end getSubImagesLuma
+declare i32 @iClip3(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/Mips/const-mult.ll b/test/CodeGen/Mips/const-mult.ll
new file mode 100644
index 0000000000000..8c0cbe3396b75
--- /dev/null
+++ b/test/CodeGen/Mips/const-mult.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=CHECK
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=CHECK
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=CHECK64
+
+; CHECK-LABEL: mul5_32:
+; CHECK: sll $[[R0:[0-9]+]], $4, 2
+; CHECK: addu ${{[0-9]+}}, $[[R0]], $4
+
+define i32 @mul5_32(i32 %a) {
+entry:
+ %mul = mul nsw i32 %a, 5
+ ret i32 %mul
+}
+
+; CHECK-LABEL: mul27_32:
+; CHECK-DAG: sll $[[R0:[0-9]+]], $4, 2
+; CHECK-DAG: addu $[[R1:[0-9]+]], $[[R0]], $4
+; CHECK-DAG: sll $[[R2:[0-9]+]], $4, 5
+; CHECK: subu ${{[0-9]+}}, $[[R2]], $[[R1]]
+
+define i32 @mul27_32(i32 %a) {
+entry:
+ %mul = mul nsw i32 %a, 27
+ ret i32 %mul
+}
+
+; CHECK-LABEL: muln2147483643_32:
+; CHECK-DAG: sll $[[R0:[0-9]+]], $4, 2
+; CHECK-DAG: addu $[[R1:[0-9]+]], $[[R0]], $4
+; CHECK-DAG: sll $[[R2:[0-9]+]], $4, 31
+; CHECK: addu ${{[0-9]+}}, $[[R2]], $[[R1]]
+
+define i32 @muln2147483643_32(i32 %a) {
+entry:
+ %mul = mul nsw i32 %a, -2147483643
+ ret i32 %mul
+}
+
+; CHECK64-LABEL: muln9223372036854775805_64:
+; CHECK64-DAG: dsll $[[R0:[0-9]+]], $4, 1
+; CHECK64-DAG: daddu $[[R1:[0-9]+]], $[[R0]], $4
+; CHECK64-DAG: dsll $[[R2:[0-9]+]], $4, 63
+; CHECK64: daddu ${{[0-9]+}}, $[[R2]], $[[R1]]
+
+define i64 @muln9223372036854775805_64(i64 %a) {
+entry:
+ %mul = mul nsw i64 %a, -9223372036854775805
+ ret i64 %mul
+}
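
Each CHECK sequence above is a shift-add decomposition of its multiplier: 5 = 4 + 1, 27 = 32 - 5, -2147483643 is congruent to 2^31 + 5 mod 2^32, and -9223372036854775805 to 2^63 + 3 mod 2^64. The first two cases written out as IR (hypothetical function names, illustration only):

define i32 @mul5_sketch(i32 %a) {
entry:
  %four_a = shl i32 %a, 2          ; 4*a
  %five_a = add i32 %four_a, %a    ; 5*a = 4*a + a
  ret i32 %five_a
}

define i32 @mul27_sketch(i32 %a) {
entry:
  %four_a = shl i32 %a, 2
  %five_a = add i32 %four_a, %a     ; 5*a
  %t32_a  = shl i32 %a, 5           ; 32*a
  %r      = sub i32 %t32_a, %five_a ; 27*a = 32*a - 5*a
  ret i32 %r
}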
diff --git a/test/CodeGen/Mips/const1.ll b/test/CodeGen/Mips/const1.ll
new file mode 100644
index 0000000000000..cb2bacaf17a59
--- /dev/null
+++ b/test/CodeGen/Mips/const1.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips16-constant-islands < %s | FileCheck %s
+
+; ModuleID = 'const1.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mipsel-unknown-linux"
+
+@i = common global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+@l = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+ store i32 -559023410, i32* @j, align 4
+ store i32 -87105875, i32* @k, align 4
+ store i32 262991277, i32* @l, align 4
+ ret void
+; CHECK: lw ${{[0-9]+}}, $CPI0_0
+; CHECK: lw ${{[0-9]+}}, $CPI0_1
+; CHECK: lw ${{[0-9]+}}, $CPI0_2
+; CHECK: $CPI0_0:
+; CHECK: .4byte 3735943886
+; CHECK: $CPI0_1:
+; CHECK: .4byte 4207861421
+; CHECK: $CPI0_2:
+; CHECK: .4byte 262991277
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b754974ec32ab712ea7d8b52cd8037b24e7d6ed3) (gitosis@dmz-portal.mips.com:llvm.git 8e211187b501bc73edb938fde0019c9a20bcffd5)"}
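
The .4byte pool values are the unsigned images of the stored constants, and i and j share one entry, which is why four stores need only three $CPI loads:

; 2^32 - 559023410 = 3735943886   ->  $CPI0_0 (shared by i and j)
; 2^32 -  87105875 = 4207861421   ->  $CPI0_1
;        262991277 =  262991277   ->  $CPI0_2 (already non-negative)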
diff --git a/test/CodeGen/Mips/const4a.ll b/test/CodeGen/Mips/const4a.ll
new file mode 100644
index 0000000000000..0332327cec69d
--- /dev/null
+++ b/test/CodeGen/Mips/const4a.ll
@@ -0,0 +1,180 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands -mips-constant-islands-no-load-relaxation < %s | FileCheck %s -check-prefix=no-load-relax
+
+; ModuleID = 'const4.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+@b = common global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+@l = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+ %0 = load i32* @b, align 4
+; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.else
+; no-load-relax: beqz ${{[0-9]+}}, $BB0_3
+; no-load-relax: lw ${{[0-9]+}}, %call16(foo)(${{[0-9]+}})
+; no-load-relax: b $BB0_4
+; no-load-relax: .align 2
+; no-load-relax: $CPI0_0:
+; no-load-relax: .4byte 3735943886
+; no-load-relax: $BB0_3:
+; no-load-relax: lw ${{[0-9]+}}, %call16(goo)(${{[0-9]+}})
+if.then: ; preds = %entry
+ call void bitcast (void (...)* @foo to void ()*)()
+ br label %if.end
+
+if.else: ; preds = %entry
+ call void bitcast (void (...)* @goo to void ()*)()
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ ret void
+}
+
+declare void @foo(...) #1
+
+declare void @goo(...) #1
+
+declare void @hoo(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
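
The long run of calls to @hoo is padding: it makes t() long enough that a pool at the end of the function would be out of reach of the load in the entry block. With -mips-constant-islands-no-load-relaxation the pass may not widen the 16-bit lw into the long-range 32-bit form, so it splits out a mid-function island instead, and the checks above pin that layout. The resulting shape, sketched from those checks (illustrative only, not part of the patch):

;   lw   ${{[0-9]+}}, $CPI0_0   # pc-relative load from a nearby island
;   ...
;   b    $BB0_4                 # unconditional branch over the island
;   .align 2
; $CPI0_0:
;   .4byte 3735943886           # data interleaved with the code
; $BB0_3:                       # execution resumes past the data
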
diff --git a/test/CodeGen/Mips/const6.ll b/test/CodeGen/Mips/const6.ll
new file mode 100644
index 0000000000000..20cdc09f7be18
--- /dev/null
+++ b/test/CodeGen/Mips/const6.ll
@@ -0,0 +1,164 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=load-relax
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands -mips-constant-islands-no-load-relaxation < %s | FileCheck %s -check-prefix=no-load-relax
+
+; ModuleID = 'const6.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+@l = common global i32 0, align 4
+@b = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+; load-relax: lw ${{[0-9]+}}, $CPI0_0
+; load-relax: jrc $ra
+; load-relax: .align 2
+; load-relax: $CPI0_0:
+; load-relax: .4byte 3735943886
+; load-relax: .end t
+
+; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
+; no-load-relax: jalrc ${{[0-9]+}}
+; no-load-relax: b $BB0_2
+; no-load-relax: .align 2
+; no-load-relax: $CPI0_0:
+; no-load-relax: .4byte 3735943886
+; no-load-relax: $BB0_2:
+
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ call void bitcast (void (...)* @hoo to void ()*)()
+ ret void
+}
+
+declare void @hoo(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
+
+
diff --git a/test/CodeGen/Mips/const6a.ll b/test/CodeGen/Mips/const6a.ll
new file mode 100644
index 0000000000000..8b402accc7de6
--- /dev/null
+++ b/test/CodeGen/Mips/const6a.ll
@@ -0,0 +1,29 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=load-relax1
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=load-relax
+
+; ModuleID = 'const6a.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @t() #0 {
+entry:
+ store i32 -559023410, i32* @i, align 4
+; load-relax-NOT: lw ${{[0-9]+}}, $CPI0_0 # 16 bit inst
+; load-relax1: lw ${{[0-9]+}}, $CPI0_0
+; load-relax: jrc $ra
+; load-relax: .align 2
+; load-relax: $CPI0_0:
+; load-relax: .4byte 3735943886
+; load-relax: .end t
+ call void asm sideeffect ".space 40000", ""() #1, !srcloc !1
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 121}
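
Here the distance comes from inline asm rather than call padding: `.space 40000` is opaque to the optimizer (sideeffect keeps it alive), so llc must assume 40000 bytes of instructions really sit between the load and the end of the function. That pushes the pool past the short pc-relative reach of the 16-bit lw, and load-relax-NOT asserts the `# 16 bit inst` form is gone while load-relax1/load-relax still see the relaxed load and the island. The padding idiom on its own, as a hypothetical snippet:

define void @pad() {
entry:
  ; Counted as 40000 bytes of text when branch and constant-pool
  ; ranges are measured; never deleted because of "sideeffect".
  call void asm sideeffect ".space 40000", ""()
  ret void
}
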
diff --git a/test/CodeGen/Mips/ctlz.ll b/test/CodeGen/Mips/ctlz.ll
new file mode 100644
index 0000000000000..2ddb72755ac8e
--- /dev/null
+++ b/test/CodeGen/Mips/ctlz.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static
+
+@x = global i32 28912, align 4
+@y = common global i32 0, align 4
+
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @x, align 4
+ %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true)
+ store i32 %1, i32* @y, align 4
+ ret i32 0
+}
+
+; static: .end main
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) #1
+
+
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readnone }
+
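
This one is essentially a completeness test: nothing in the body is checked, only that llc reaches `.end main`, i.e. that llvm.ctlz survives mips16 instruction selection (mips16 has no clz of its own, unlike mips32). The intrinsic's i1 operand is the part worth remembering: true, as used above, makes the result undefined for a zero input; the defined-at-zero form looks like this (hypothetical snippet):

declare i32 @llvm.ctlz.i32(i32, i1)

define i32 @count_leading(i32 %x) {
entry:
  ; i1 false: the result is defined as 32 when %x is 0, which may cost
  ; the target an extra compare/select around the raw count.
  %lz = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  ret i32 %lz
}
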
diff --git a/test/CodeGen/Mips/disable-tail-merge.ll b/test/CodeGen/Mips/disable-tail-merge.ll
new file mode 100644
index 0000000000000..b4c093aa8528a
--- /dev/null
+++ b/test/CodeGen/Mips/disable-tail-merge.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+@g0 = common global i32 0, align 4
+@g1 = common global i32 0, align 4
+
+; CHECK: addiu ${{[0-9]+}}, ${{[0-9]+}}, 23
+; CHECK: addiu ${{[0-9]+}}, ${{[0-9]+}}, 23
+
+define i32 @test1(i32 %a) {
+entry:
+ %tobool = icmp eq i32 %a, 0
+ %0 = load i32* @g0, align 4
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* @g0, align 4
+ %1 = load i32* @g1, align 4
+ %add1 = add nsw i32 %1, 23
+ br label %if.end
+
+if.else:
+ %add2 = add nsw i32 %0, 11
+ store i32 %add2, i32* @g0, align 4
+ %2 = load i32* @g1, align 4
+ %add3 = add nsw i32 %2, 23
+ br label %if.end
+
+if.end:
+ %storemerge = phi i32 [ %add3, %if.else ], [ %add1, %if.then ]
+ store i32 %storemerge, i32* @g1, align 4
+ ret i32 %storemerge
+}
diff --git a/test/CodeGen/Mips/divrem.ll b/test/CodeGen/Mips/divrem.ll
index c470d1ce2ce5b..b631c3b279f4d 100644
--- a/test/CodeGen/Mips/divrem.ll
+++ b/test/CodeGen/Mips/divrem.ll
@@ -1,34 +1,60 @@
-; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips -verify-machineinstrs < %s |\
+; RUN: FileCheck %s -check-prefix=TRAP
+; RUN: llc -march=mips -mno-check-zero-division < %s |\
+; RUN: FileCheck %s -check-prefix=NOCHECK
+
+; TRAP-LABEL: sdiv1:
+; TRAP: div $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; TRAP: teq $[[R0]], $zero, 7
+; TRAP: mflo
+
+; NOCHECK-LABEL: sdiv1:
+; NOCHECK-NOT: teq
+; NOCHECK: .end sdiv1
+
+@g0 = common global i32 0, align 4
+@g1 = common global i32 0, align 4
-; CHECK: div $zero,
define i32 @sdiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
%div = sdiv i32 %a0, %a1
ret i32 %div
}
-; CHECK: div $zero,
+; TRAP-LABEL: srem1:
+; TRAP: div $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; TRAP: teq $[[R0]], $zero, 7
+; TRAP: mfhi
+
define i32 @srem1(i32 %a0, i32 %a1) nounwind readnone {
entry:
%rem = srem i32 %a0, %a1
ret i32 %rem
}
-; CHECK: divu $zero,
+; TRAP-LABEL: udiv1:
+; TRAP: divu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; TRAP: teq $[[R0]], $zero, 7
+; TRAP: mflo
+
define i32 @udiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
%div = udiv i32 %a0, %a1
ret i32 %div
}
-; CHECK: divu $zero,
+; TRAP-LABEL: urem1:
+; TRAP: divu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; TRAP: teq $[[R0]], $zero, 7
+; TRAP: mfhi
+
define i32 @urem1(i32 %a0, i32 %a1) nounwind readnone {
entry:
%rem = urem i32 %a0, %a1
ret i32 %rem
}
-; CHECK: div $zero,
+; TRAP: div $zero,
define i32 @sdivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
%rem = srem i32 %a0, %a1
@@ -37,7 +63,7 @@ entry:
ret i32 %div
}
-; CHECK: divu $zero,
+; TRAP: divu $zero,
define i32 @udivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
%rem = urem i32 %a0, %a1
@@ -45,3 +71,11 @@ entry:
%div = udiv i32 %a0, %a1
ret i32 %div
}
+
+define i32 @killFlags() {
+entry:
+ %0 = load i32* @g0, align 4
+ %1 = load i32* @g1, align 4
+ %div = sdiv i32 %0, %1
+ ret i32 %div
+}
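
The rewritten checks document the zero-divide guard: MIPS div/divu leave HI/LO undefined for a zero divisor, so by default llc plants `teq $divisor, $zero, 7` after each division (trap-if-equal, with 7 the conventional divide-by-zero trap code), and -mno-check-zero-division in the NOCHECK run removes it. div writes the quotient to LO and the remainder to HI, hence mflo for sdiv/udiv and mfhi for srem/urem; when both results of the same operands are needed, one guarded division serves both, which is what sdivrem1 above exercises. That pattern restated as a sketch:

define i32 @quotrem(i32 %a, i32 %b, i32* %r) {
entry:
  %q = sdiv i32 %a, %b      ; one div: quotient read with mflo
  %m = srem i32 %a, %b      ; same div: remainder read with mfhi
  store i32 %m, i32* %r
  ret i32 %q
}
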
diff --git a/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll b/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll
index 9f2f0661f9979..a5fe34c1f684f 100644
--- a/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll
+++ b/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s
-; CHECK: select_v2q15_eq_:
+; CHECK-LABEL: select_v2q15_eq_:
; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.ph ${{[0-9]+}}, $6, $7
@@ -17,7 +17,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2q15_lt_:
+; CHECK-LABEL: select_v2q15_lt_:
; CHECK: cmp.lt.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, $6, $7
@@ -34,7 +34,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2q15_le_:
+; CHECK-LABEL: select_v2q15_le_:
; CHECK: cmp.le.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, $6, $7
@@ -51,7 +51,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2q15_ne_:
+; CHECK-LABEL: select_v2q15_ne_:
; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.ph ${{[0-9]+}}, $7, $6
@@ -68,7 +68,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2q15_gt_:
+; CHECK-LABEL: select_v2q15_gt_:
; CHECK: cmp.le.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, $7, $6
@@ -85,7 +85,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2q15_ge_:
+; CHECK-LABEL: select_v2q15_ge_:
; CHECK: cmp.lt.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, $7, $6
@@ -102,7 +102,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4ui8_eq_:
+; CHECK-LABEL: select_v4ui8_eq_:
; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.qb ${{[0-9]+}}, $6, $7
@@ -119,7 +119,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4ui8_lt_:
+; CHECK-LABEL: select_v4ui8_lt_:
; CHECK: cmpu.lt.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, $6, $7
@@ -136,7 +136,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4ui8_le_:
+; CHECK-LABEL: select_v4ui8_le_:
; CHECK: cmpu.le.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, $6, $7
@@ -153,7 +153,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4ui8_ne_:
+; CHECK-LABEL: select_v4ui8_ne_:
; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.qb ${{[0-9]+}}, $7, $6
@@ -170,7 +170,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4ui8_gt_:
+; CHECK-LABEL: select_v4ui8_gt_:
; CHECK: cmpu.le.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, $7, $6
@@ -187,7 +187,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4ui8_ge_:
+; CHECK-LABEL: select_v4ui8_ge_:
; CHECK: cmpu.lt.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, $7, $6
@@ -204,7 +204,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2ui16_lt_:
+; CHECK-LABEL: select_v2ui16_lt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -221,7 +221,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2ui16_le_:
+; CHECK-LABEL: select_v2ui16_le_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -238,7 +238,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2ui16_gt_:
+; CHECK-LABEL: select_v2ui16_gt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -255,7 +255,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v2ui16_ge_:
+; CHECK-LABEL: select_v2ui16_ge_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -272,7 +272,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4i8_lt_:
+; CHECK-LABEL: select_v4i8_lt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -289,7 +289,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4i8_le_:
+; CHECK-LABEL: select_v4i8_le_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -306,7 +306,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4i8_gt_:
+; CHECK-LABEL: select_v4i8_gt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -323,7 +323,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: select_v4i8_ge_:
+; CHECK-LABEL: select_v4i8_ge_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -340,7 +340,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2q15_eq_:
+; CHECK-LABEL: compare_v2q15_eq_:
; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -355,7 +355,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2q15_lt_:
+; CHECK-LABEL: compare_v2q15_lt_:
; CHECK: cmp.lt.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -370,7 +370,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2q15_le_:
+; CHECK-LABEL: compare_v2q15_le_:
; CHECK: cmp.le.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -385,7 +385,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2q15_ne_:
+; CHECK-LABEL: compare_v2q15_ne_:
; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -400,7 +400,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2q15_gt_:
+; CHECK-LABEL: compare_v2q15_gt_:
; CHECK: cmp.le.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -415,7 +415,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2q15_ge_:
+; CHECK-LABEL: compare_v2q15_ge_:
; CHECK: cmp.lt.ph $4, $5
; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -430,7 +430,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4ui8_eq_:
+; CHECK-LABEL: compare_v4ui8_eq_:
; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -445,7 +445,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4ui8_lt_:
+; CHECK-LABEL: compare_v4ui8_lt_:
; CHECK: cmpu.lt.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -460,7 +460,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4ui8_le_:
+; CHECK-LABEL: compare_v4ui8_le_:
; CHECK: cmpu.le.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -475,7 +475,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4ui8_ne_:
+; CHECK-LABEL: compare_v4ui8_ne_:
; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -490,7 +490,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4ui8_gt_:
+; CHECK-LABEL: compare_v4ui8_gt_:
; CHECK: cmpu.le.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -505,7 +505,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4ui8_ge_:
+; CHECK-LABEL: compare_v4ui8_ge_:
; CHECK: cmpu.lt.qb $4, $5
; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
@@ -520,7 +520,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2ui16_lt_:
+; CHECK-LABEL: compare_v2ui16_lt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -535,7 +535,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2ui16_le_:
+; CHECK-LABEL: compare_v2ui16_le_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -550,7 +550,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2ui16_gt_:
+; CHECK-LABEL: compare_v2ui16_gt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -565,7 +565,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v2ui16_ge_:
+; CHECK-LABEL: compare_v2ui16_ge_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -580,7 +580,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4i8_lt_:
+; CHECK-LABEL: compare_v4i8_lt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -595,7 +595,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4i8_le_:
+; CHECK-LABEL: compare_v4i8_le_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -610,7 +610,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4i8_gt_:
+; CHECK-LABEL: compare_v4i8_gt_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
@@ -625,7 +625,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; CHECK: compare_v4i8_ge_:
+; CHECK-LABEL: compare_v4i8_ge_:
; CHECK-NOT: cmp
; CHECK-NOT: pick
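
Every header in this file moves from a bare CHECK to CHECK-LABEL, which is more than cosmetic: FileCheck treats a -LABEL match as a barrier, so directives meant for one function can no longer be satisfied by identical output from a later one, and that matters in a file of thirty near-identical select/compare bodies. The failure mode it closes, sketched:

; With plain CHECK, if select_v2q15_lt_ lost its cmp.lt.ph, FileCheck
; could still match the directive against the cmp.lt.ph emitted for a
; later function and pass silently. -LABEL partitions the output:
; CHECK-LABEL: select_v2q15_lt_:
; CHECK: cmp.lt.ph
; CHECK-LABEL: select_v2q15_le_:
; CHECK: cmp.le.ph
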
diff --git a/test/CodeGen/Mips/dsp-patterns.ll b/test/CodeGen/Mips/dsp-patterns.ll
index eeb7140ca2cb6..f5bb3abed90e9 100644
--- a/test/CodeGen/Mips/dsp-patterns.ll
+++ b/test/CodeGen/Mips/dsp-patterns.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=R1
; RUN: llc -march=mips -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
-; R1: test_lbux:
+; R1-LABEL: test_lbux:
; R1: lbux ${{[0-9]+}}
define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) {
@@ -11,7 +11,7 @@ entry:
ret i8 %0
}
-; R1: test_lhx:
+; R1-LABEL: test_lhx:
; R1: lhx ${{[0-9]+}}
define signext i16 @test_lhx(i16* nocapture %b, i32 %i) {
@@ -21,7 +21,7 @@ entry:
ret i16 %0
}
-; R1: test_lwx:
+; R1-LABEL: test_lwx:
; R1: lwx ${{[0-9]+}}
define i32 @test_lwx(i32* nocapture %b, i32 %i) {
@@ -31,7 +31,7 @@ entry:
ret i32 %0
}
-; R1: test_add_v2q15_:
+; R1-LABEL: test_add_v2q15_:
; R1: addq.ph ${{[0-9]+}}
define { i32 } @test_add_v2q15_(i32 %a.coerce, i32 %b.coerce) {
@@ -44,7 +44,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: test_sub_v2q15_:
+; R1-LABEL: test_sub_v2q15_:
; R1: subq.ph ${{[0-9]+}}
define { i32 } @test_sub_v2q15_(i32 %a.coerce, i32 %b.coerce) {
@@ -57,11 +57,11 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R2: test_mul_v2q15_:
+; R2-LABEL: test_mul_v2q15_:
; R2: mul.ph ${{[0-9]+}}
; mul.ph is an R2 instruction. Check that multiply node gets expanded.
-; R1: test_mul_v2q15_:
+; R1-LABEL: test_mul_v2q15_:
; R1: mul ${{[0-9]+}}
; R1: mul ${{[0-9]+}}
@@ -75,7 +75,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: test_add_v4i8_:
+; R1-LABEL: test_add_v4i8_:
; R1: addu.qb ${{[0-9]+}}
define { i32 } @test_add_v4i8_(i32 %a.coerce, i32 %b.coerce) {
@@ -88,7 +88,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: test_sub_v4i8_:
+; R1-LABEL: test_sub_v4i8_:
; R1: subu.qb ${{[0-9]+}}
define { i32 } @test_sub_v4i8_(i32 %a.coerce, i32 %b.coerce) {
@@ -102,7 +102,7 @@ entry:
}
; DSP-ASE doesn't have a v4i8 multiply instruction. Check that multiply node gets expanded.
-; R2: test_mul_v4i8_:
+; R2-LABEL: test_mul_v4i8_:
; R2: mul ${{[0-9]+}}
; R2: mul ${{[0-9]+}}
; R2: mul ${{[0-9]+}}
@@ -118,7 +118,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: test_addsc:
+; R1-LABEL: test_addsc:
; R1: addsc ${{[0-9]+}}
; R1: addwc ${{[0-9]+}}
@@ -128,7 +128,7 @@ entry:
ret i64 %add
}
-; R1: shift1_v2i16_shl_:
+; R1-LABEL: shift1_v2i16_shl_:
; R1: shll.ph ${{[0-9]+}}, ${{[0-9]+}}, 15
define { i32 } @shift1_v2i16_shl_(i32 %a0.coerce) {
@@ -140,7 +140,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: shift1_v2i16_sra_:
+; R1-LABEL: shift1_v2i16_sra_:
; R1: shra.ph ${{[0-9]+}}, ${{[0-9]+}}, 15
define { i32 } @shift1_v2i16_sra_(i32 %a0.coerce) {
@@ -152,9 +152,9 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: shift1_v2ui16_srl_:
+; R1-LABEL: shift1_v2ui16_srl_:
; R1-NOT: shrl.ph
-; R2: shift1_v2ui16_srl_:
+; R2-LABEL: shift1_v2ui16_srl_:
; R2: shrl.ph ${{[0-9]+}}, ${{[0-9]+}}, 15
define { i32 } @shift1_v2ui16_srl_(i32 %a0.coerce) {
@@ -166,7 +166,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: shift1_v4i8_shl_:
+; R1-LABEL: shift1_v4i8_shl_:
; R1: shll.qb ${{[0-9]+}}, ${{[0-9]+}}, 7
define { i32 } @shift1_v4i8_shl_(i32 %a0.coerce) {
@@ -178,9 +178,9 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: shift1_v4i8_sra_:
+; R1-LABEL: shift1_v4i8_sra_:
; R1-NOT: shra.qb
-; R2: shift1_v4i8_sra_:
+; R2-LABEL: shift1_v4i8_sra_:
; R2: shra.qb ${{[0-9]+}}, ${{[0-9]+}}, 7
define { i32 } @shift1_v4i8_sra_(i32 %a0.coerce) {
@@ -192,7 +192,7 @@ entry:
ret { i32 } %.fca.0.insert
}
-; R1: shift1_v4ui8_srl_:
+; R1-LABEL: shift1_v4ui8_srl_:
; R1: shrl.qb ${{[0-9]+}}, ${{[0-9]+}}, 7
define { i32 } @shift1_v4ui8_srl_(i32 %a0.coerce) {
@@ -206,7 +206,7 @@ entry:
; Check that shift node is expanded if splat element size is not 16-bit.
;
-; R1: test_vector_splat_imm_v2q15:
+; R1-LABEL: test_vector_splat_imm_v2q15:
; R1-NOT: shll.ph
define { i32 } @test_vector_splat_imm_v2q15(i32 %a.coerce) {
@@ -220,7 +220,7 @@ entry:
; Check that shift node is expanded if splat element size is not 8-bit.
;
-; R1: test_vector_splat_imm_v4i8:
+; R1-LABEL: test_vector_splat_imm_v4i8:
; R1-NOT: shll.qb
define { i32 } @test_vector_splat_imm_v4i8(i32 %a.coerce) {
@@ -234,7 +234,7 @@ entry:
; Check that shift node is expanded if shift amount doesn't fit in 4-bit sa field.
;
-; R1: test_shift_amount_v2q15:
+; R1-LABEL: test_shift_amount_v2q15:
; R1-NOT: shll.ph
define { i32 } @test_shift_amount_v2q15(i32 %a.coerce) {
@@ -248,7 +248,7 @@ entry:
; Check that shift node is expanded if shift amount doesn't fit in 3-bit sa field.
;
-; R1: test_shift_amount_v4i8:
+; R1-LABEL: test_shift_amount_v4i8:
; R1-NOT: shll.qb
define { i32 } @test_shift_amount_v4i8(i32 %a.coerce) {
diff --git a/test/CodeGen/Mips/dsp-vec-load-store.ll b/test/CodeGen/Mips/dsp-vec-load-store.ll
new file mode 100644
index 0000000000000..7e4a8fedaa8c1
--- /dev/null
+++ b/test/CodeGen/Mips/dsp-vec-load-store.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=mipsel -mattr=+dsp < %s
+
+@g1 = common global <2 x i8> zeroinitializer, align 2
+@g0 = common global <2 x i8> zeroinitializer, align 2
+
+define void @extend_load_trunc_store_v2i8() {
+entry:
+ %0 = load <2 x i8>* @g1, align 2
+ store <2 x i8> %0, <2 x i8>* @g0, align 2
+ ret void
+}
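
Note the RUN line has no FileCheck pipe: this is a pure does-not-crash test, guarding legalization of the illegal <2 x i8> load/store under +dsp. If output checks were ever wanted, the minimal upgrade would be something like (hypothetical):

; RUN: llc -march=mipsel -mattr=+dsp < %s | FileCheck %s
; CHECK: extend_load_trunc_store_v2i8:
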
diff --git a/test/CodeGen/Mips/eh-return64.ll b/test/CodeGen/Mips/eh-return64.ll
index 373a9a1144536..32fc5e61899a7 100644
--- a/test/CodeGen/Mips/eh-return64.ll
+++ b/test/CodeGen/Mips/eh-return64.ll
@@ -52,7 +52,9 @@ entry:
unreachable
; CHECK: f2
+; CHECK: .cfi_startproc
; CHECK: daddiu $sp, $sp, -[[spoffset:[0-9]+]]
+; CHECK: .cfi_def_cfa_offset [[spoffset]]
; check that $a0-$a3 are saved on stack.
; CHECK: sd $4, [[offset0:[0-9]+]]($sp)
@@ -61,10 +63,10 @@ entry:
; CHECK: sd $7, [[offset3:[0-9]+]]($sp)
; check that .cfi_offset directives are emitted for $a0-$a3.
-; CHECK: .cfi_offset 4,
-; CHECK: .cfi_offset 5,
-; CHECK: .cfi_offset 6,
-; CHECK: .cfi_offset 7,
+; CHECK: .cfi_offset 4, -8
+; CHECK: .cfi_offset 5, -16
+; CHECK: .cfi_offset 6, -24
+; CHECK: .cfi_offset 7, -32
; check that stack adjustment and handler are put in $v1 and $v0.
; CHECK: move $3, $4
@@ -83,5 +85,5 @@ entry:
; CHECK: move $ra, $2
; CHECK: jr $ra
; CHECK: daddu $sp, $sp, $3
-
+; CHECK: .cfi_endproc
}
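
The tightened checks now pin the unwind arithmetic rather than just the directives' presence: .cfi_def_cfa_offset says the canonical frame address (CFA) sits spoffset bytes above the adjusted $sp, and each `.cfi_offset N, -K` places DWARF register N (4 through 7 are $a0-$a3) K bytes below the CFA, so the four 8-byte sd slots land at -8 through -32. A worked mapping, assuming spoffset = 32 purely for illustration:

;   CFA = $sp + 32                        ; .cfi_def_cfa_offset 32
;   $4 at CFA -  8  ->  sd $4, 24($sp)    ; .cfi_offset 4, -8
;   $5 at CFA - 16  ->  sd $5, 16($sp)    ; .cfi_offset 5, -16
;   $6 at CFA - 24  ->  sd $6,  8($sp)    ; .cfi_offset 6, -24
;   $7 at CFA - 32  ->  sd $7,  0($sp)    ; .cfi_offset 7, -32
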
diff --git a/test/CodeGen/Mips/emit-big-cst.ll b/test/CodeGen/Mips/emit-big-cst.ll
new file mode 100644
index 0000000000000..a168743859a35
--- /dev/null
+++ b/test/CodeGen/Mips/emit-big-cst.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=mips < %s | FileCheck %s
+; Check assembly printing of odd constants.
+
+; CHECK: bigCst:
+; CHECK-NEXT: .8byte 1845068520838224192
+; CHECK-NEXT: .8byte 11776
+; CHECK-NEXT: .size bigCst, 16
+
+@bigCst = internal constant i82 483673642326615442599424
+
+define void @accessBig(i64* %storage) {
+ %addr = bitcast i64* %storage to i82*
+ %bigLoadedCst = load volatile i82* @bigCst
+ %tmp = add i82 %bigLoadedCst, 1
+ store i82 %tmp, i82* %addr
+ ret void
+}
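
The two .8byte directives decompose the i82 value exactly: the first carries bits [81:18] and the second the low 18 bits right-aligned, consistent with big-endian emission of a type padded out to the 16 bytes that `.size bigCst, 16` declares. Checking the arithmetic:

;   1845068520838224192 * 2^18 = 483673642326615442587648
;   483673642326615442587648 + 11776 = 483673642326615442599424   (the i82 constant)
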
diff --git a/test/CodeGen/Mips/ex2.ll b/test/CodeGen/Mips/ex2.ll
index 67d19e4b84ca0..c5535e7661a73 100644
--- a/test/CodeGen/Mips/ex2.ll
+++ b/test/CodeGen/Mips/ex2.ll
@@ -4,12 +4,15 @@
@_ZTIPKc = external constant i8*
define i32 @main() {
-; 16: main:
+; 16-LABEL: main:
; 16: .cfi_startproc
-; 16: save $ra, $s0, $s1, 32
-; 16: .cfi_offset 17, -8
-; 16: .cfi_offset 16, -12
+; 16: save $ra, $s0, $s1, $s2, 40
+; 16: .cfi_def_cfa_offset 40
+; 16: .cfi_offset 18, -8
+; 16: .cfi_offset 17, -12
+; 16: .cfi_offset 16, -16
; 16: .cfi_offset 31, -4
+; 16: .cfi_endproc
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
diff --git a/test/CodeGen/Mips/extins.ll b/test/CodeGen/Mips/extins.ll
index a164f7047b5cc..efaeeea96a5e7 100644
--- a/test/CodeGen/Mips/extins.ll
+++ b/test/CodeGen/Mips/extins.ll
@@ -1,8 +1,10 @@
-; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2
+; RUN: llc < %s -march=mips -mcpu=mips16 | FileCheck %s -check-prefix=16
define i32 @ext0_5_9(i32 %s, i32 %pos, i32 %sz) nounwind readnone {
entry:
-; CHECK: ext ${{[0-9]+}}, $4, 5, 9
+; 32R2: ext ${{[0-9]+}}, $4, 5, 9
+; 16-NOT: ext ${{[0-9]+}}
%shr = lshr i32 %s, 5
%and = and i32 %shr, 511
ret i32 %and
@@ -10,7 +12,8 @@ entry:
define void @ins2_5_9(i32 %s, i32* nocapture %d) nounwind {
entry:
-; CHECK: ins ${{[0-9]+}}, $4, 5, 9
+; 32R2: ins ${{[0-9]+}}, $4, 5, 9
+; 16-NOT: ins ${{[0-9]+}}
%and = shl i32 %s, 5
%shl = and i32 %and, 16352
%tmp3 = load i32* %d, align 4
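
ext and ins are the MIPS32r2 bitfield instructions, and the split prefixes make the contrast explicit: `ext $d, $4, 5, 9` extracts the 9-bit field that starts at bit 5, exactly `(s >> 5) & 511` with 511 = 2^9 - 1, and `ins` is the inverse insertion (16352 = 511 << 5 describes the same field as a mask). mips16 has neither instruction, hence the 16-NOT lines. The extract pattern, annotated:

define i32 @extract_5_9_again(i32 %s) nounwind readnone {
entry:
  %shr = lshr i32 %s, 5      ; the field starts at bit 5
  %and = and i32 %shr, 511   ; 511 = (1 << 9) - 1: the field is 9 bits wide
  ret i32 %and               ; mips32r2 folds both into: ext ${{[0-9]+}}, $4, 5, 9
}
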
diff --git a/test/CodeGen/Mips/f16abs.ll b/test/CodeGen/Mips/f16abs.ll
new file mode 100644
index 0000000000000..928914f067dd6
--- /dev/null
+++ b/test/CodeGen/Mips/f16abs.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static
+
+@y = global double -1.450000e+00, align 8
+@x = common global double 0.000000e+00, align 8
+
+@y1 = common global float 0.000000e+00, align 4
+@x1 = common global float 0.000000e+00, align 4
+
+
+
+; Function Attrs: nounwind optsize
+define i32 @main() #0 {
+entry:
+ %0 = load double* @y, align 8
+ %call = tail call double @fabs(double %0) #2
+ store double %call, double* @x, align 8
+; static-NOT: .ent __call_stub_fp_fabs
+; static-NOT: jal fabs
+ %1 = load float* @y1, align 4
+ %call2 = tail call float @fabsf(float %1) #2
+ store float %call2, float* @x1, align 4
+; static-NOT: .ent __call_stub_fp_fabsf
+; static-NOT: jal fabsf
+ ret i32 0
+}
+
+; Function Attrs: nounwind optsize readnone
+declare double @fabs(double) #1
+
+declare float @fabsf(float) #1
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind optsize readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #2 = { nounwind optsize readnone }
+
+
+
diff --git a/test/CodeGen/Mips/fcopysign-f32-f64.ll b/test/CodeGen/Mips/fcopysign-f32-f64.ll
index b36473d6f57ac..9f88d0c956b19 100644
--- a/test/CodeGen/Mips/fcopysign-f32-f64.ll
+++ b/test/CodeGen/Mips/fcopysign-f32-f64.ll
@@ -7,14 +7,15 @@ declare float @copysignf(float, float) nounwind readnone
define float @func2(float %d, double %f) nounwind readnone {
entry:
-; 64: func2
-; 64: lui $[[T0:[0-9]+]], 32767
-; 64: ori $[[MSK0:[0-9]+]], $[[T0]], 65535
-; 64: and $[[AND0:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
-; 64: dsrl ${{[0-9]+}}, ${{[0-9]+}}, 63
-; 64: sll $[[SLL:[0-9]+]], ${{[0-9]+}}, 31
-; 64: or $[[OR:[0-9]+]], $[[AND0]], $[[SLL]]
-; 64: mtc1 $[[OR]], $f0
+; 64: func2
+; 64-DAG: lui $[[T0:[0-9]+]], 32767
+; 64-DAG: ori $[[MSK0:[0-9]+]], $[[T0]], 65535
+; 64-DAG: and $[[AND0:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
+; 64-DAG: dsrl $[[DSRL:[0-9]+]], ${{[0-9]+}}, 63
+; 64-DAG: sll $[[SLL0:[0-9]+]], $[[DSRL]], 0
+; 64-DAG: sll $[[SLL1:[0-9]+]], $[[SLL0]], 31
+; 64: or $[[OR:[0-9]+]], $[[AND0]], $[[SLL1]]
+; 64: mtc1 $[[OR]], $f0
; 64R2: dext ${{[0-9]+}}, ${{[0-9]+}}, 63, 1
; 64R2: ins $[[INS:[0-9]+]], ${{[0-9]+}}, 31, 1
@@ -29,14 +30,16 @@ entry:
define double @func3(double %d, float %f) nounwind readnone {
entry:
-; 64: daddiu $[[T0:[0-9]+]], $zero, 1
-; 64: dsll $[[T1:[0-9]+]], $[[T0]], 63
-; 64: daddiu $[[MSK0:[0-9]+]], $[[T1]], -1
-; 64: and $[[AND0:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
-; 64: srl ${{[0-9]+}}, ${{[0-9]+}}, 31
-; 64: dsll $[[DSLL:[0-9]+]], ${{[0-9]+}}, 63
-; 64: or $[[OR:[0-9]+]], $[[AND0]], $[[DSLL]]
-; 64: dmtc1 $[[OR]], $f0
+; 64: func3
+; 64-DAG: daddiu $[[T0:[0-9]+]], $zero, 1
+; 64-DAG: dsll $[[T1:[0-9]+]], $[[T0]], 63
+; 64-DAG: daddiu $[[MSK0:[0-9]+]], $[[T1]], -1
+; 64-DAG: and $[[AND0:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
+; 64-DAG: srl $[[SRL:[0-9]+]], ${{[0-9]+}}, 31
+; 64-DAG: sll $[[SLL:[0-9]+]], $[[SRL]], 0
+; 64-DAG: dsll $[[DSLL:[0-9]+]], $[[SLL]], 63
+; 64: or $[[OR:[0-9]+]], $[[AND0]], $[[DSLL]]
+; 64: dmtc1 $[[OR]], $f0
; 64R2: ext ${{[0-9]+}}, ${{[0-9]+}}, 31, 1
; 64R2: dins $[[INS:[0-9]+]], ${{[0-9]+}}, 63, 1
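
The rewritten func3 checks spell out soft copysign on mips64: build the magnitude mask 2^63 - 1 in three instructions (daddiu 1; dsll 63; daddiu -1), and it against the double, pull the float's sign down with `srl 31`, and then the newly required `sll ..., 0`, which on mips64 is the canonical sign-extension of a 32-bit value to 64 bits and must run before dsll can park the bit at position 63; or and dmtc1 finish the job. The -DAG suffixes let the two independent chains (mask and sign) match in either order. The IR being lowered is roughly this (a sketch of the body the hunk elides):

declare double @copysign(double, double) nounwind readnone

define double @magnitude_sign(double %d, float %f) nounwind readnone {
entry:
  %conv = fpext float %f to double
  %ret = tail call double @copysign(double %d, double %conv)
  ret double %ret
}
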
diff --git a/test/CodeGen/Mips/fixdfsf.ll b/test/CodeGen/Mips/fixdfsf.ll
new file mode 100644
index 0000000000000..b08eefd712351
--- /dev/null
+++ b/test/CodeGen/Mips/fixdfsf.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic2
+
+@x = common global double 0.000000e+00, align 8
+@y = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define void @foo() {
+entry:
+ %0 = load double* @x, align 8
+ %conv = fptoui double %0 to i32
+ store i32 %conv, i32* @y, align 4
+; pic1: lw ${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}})
+; pic2: lw ${{[0-9]+}}, %got(__mips16_call_stub_2)(${{[0-9]+}})
+ ret void
+}
+
+
diff --git a/test/CodeGen/Mips/fp16instrinsmc.ll b/test/CodeGen/Mips/fp16instrinsmc.ll
new file mode 100644
index 0000000000000..bb43d2711c268
--- /dev/null
+++ b/test/CodeGen/Mips/fp16instrinsmc.ll
@@ -0,0 +1,391 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=1010111 -mips-os16 < %s | FileCheck %s -check-prefix=fmask
+
+@x = global float 1.500000e+00, align 4
+@xn = global float -1.900000e+01, align 4
+@negone = global float -1.000000e+00, align 4
+@one = global float 1.000000e+00, align 4
+@xd = global double 0x40048B0A8EA4481E, align 8
+@xdn = global double 0xC0311F9ADD373963, align 8
+@negoned = global double -1.000000e+00, align 8
+@oned = global float 1.000000e+00, align 4
+@y = common global float 0.000000e+00, align 4
+@yd = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @foo1() #0 {
+; fmask: .ent foo1
+; fmask: .set noreorder
+; fmask: .set nomacro
+; fmask: .set noat
+; fmask: .set at
+; fmask: .set macro
+; fmask: .set reorder
+; fmask: .end foo1
+entry:
+ %0 = load float* @x, align 4
+ %1 = load float* @one, align 4
+ %call = call float @copysignf(float %0, float %1) #2
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @copysignf(float, float) #1
+
+; Function Attrs: nounwind
+define void @foo2() #0 {
+; fmask: .ent foo2
+; fmask: save {{.*}}
+; fmask: .end foo2
+entry:
+ %0 = load float* @x, align 4
+ %1 = load float* @negone, align 4
+ %call = call float @copysignf(float %0, float %1) #2
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @foo3() #0 {
+entry:
+; fmask: .ent foo3
+; fmask: .set noreorder
+; fmask: .set nomacro
+; fmask: .set noat
+; fmask: .set at
+; fmask: .set macro
+; fmask: .set reorder
+; fmask: .end foo3
+ %0 = load double* @xd, align 8
+ %1 = load float* @oned, align 4
+ %conv = fpext float %1 to double
+ %call = call double @copysign(double %0, double %conv) #2
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @copysign(double, double) #1
+
+; Function Attrs: nounwind
+define void @foo4() #0 {
+entry:
+; fmask: .ent foo4
+; fmask: save {{.*}}
+; fmask: .end foo4
+ %0 = load double* @xd, align 8
+ %1 = load double* @negoned, align 8
+ %call = call double @copysign(double %0, double %1) #2
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @foo5() #0 {
+entry:
+ %0 = load float* @xn, align 4
+ %call = call float @fabsf(float %0) #2
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @fabsf(float) #1
+
+; Function Attrs: nounwind
+define void @foo6() #0 {
+entry:
+ %0 = load double* @xdn, align 8
+ %call = call double @fabs(double %0) #2
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @fabs(double) #1
+
+; Function Attrs: nounwind
+define void @foo7() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @sinf(float %0) #3
+;pic: lw ${{[0-9]+}}, %call16(sinf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+declare float @sinf(float) #0
+
+; Function Attrs: nounwind
+define void @foo8() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @sin(double %0) #3
+;pic: lw ${{[0-9]+}}, %call16(sin)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @sin(double) #0
+
+; Function Attrs: nounwind
+define void @foo9() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @cosf(float %0) #3
+;pic: lw ${{[0-9]+}}, %call16(cosf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+declare float @cosf(float) #0
+
+; Function Attrs: nounwind
+define void @foo10() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @cos(double %0) #3
+;pic: lw ${{[0-9]+}}, %call16(cos)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @cos(double) #0
+
+; Function Attrs: nounwind
+define void @foo11() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @sqrtf(float %0) #3
+;pic: lw ${{[0-9]+}}, %call16(sqrtf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+declare float @sqrtf(float) #0
+
+; Function Attrs: nounwind
+define void @foo12() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @sqrt(double %0) #3
+;pic: lw ${{[0-9]+}}, %call16(sqrt)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @sqrt(double) #0
+
+; Function Attrs: nounwind
+define void @foo13() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @floorf(float %0) #2
+;pic: lw ${{[0-9]+}}, %call16(floorf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @floorf(float) #1
+
+; Function Attrs: nounwind
+define void @foo14() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @floor(double %0) #2
+;pic: lw ${{[0-9]+}}, %call16(floor)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @floor(double) #1
+
+; Function Attrs: nounwind
+define void @foo15() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @nearbyintf(float %0) #2
+;pic: lw ${{[0-9]+}}, %call16(nearbyintf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @nearbyintf(float) #1
+
+; Function Attrs: nounwind
+define void @foo16() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @nearbyint(double %0) #2
+;pic: lw ${{[0-9]+}}, %call16(nearbyint)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @nearbyint(double) #1
+
+; Function Attrs: nounwind
+define void @foo17() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @ceilf(float %0) #2
+;pic: lw ${{[0-9]+}}, %call16(ceilf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @ceilf(float) #1
+
+; Function Attrs: nounwind
+define void @foo18() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @ceil(double %0) #2
+;pic: lw ${{[0-9]+}}, %call16(ceil)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @ceil(double) #1
+
+; Function Attrs: nounwind
+define void @foo19() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @rintf(float %0) #2
+;pic: lw ${{[0-9]+}}, %call16(rintf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @rintf(float) #1
+
+; Function Attrs: nounwind
+define void @foo20() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @rint(double %0) #2
+;pic: lw ${{[0-9]+}}, %call16(rint)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @rint(double) #1
+
+; Function Attrs: nounwind
+define void @foo21() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @truncf(float %0) #2
+;pic: lw ${{[0-9]+}}, %call16(truncf)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @truncf(float) #1
+
+; Function Attrs: nounwind
+define void @foo22() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @trunc(double %0) #2
+;pic: lw ${{[0-9]+}}, %call16(trunc)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @trunc(double) #1
+
+; Function Attrs: nounwind
+define void @foo23() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @log2f(float %0) #3
+;pic: lw ${{[0-9]+}}, %call16(log2f)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+declare float @log2f(float) #0
+
+; Function Attrs: nounwind
+define void @foo24() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @log2(double %0) #3
+;pic: lw ${{[0-9]+}}, %call16(log2)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @log2(double) #0
+
+; Function Attrs: nounwind
+define void @foo25() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @exp2f(float %0) #3
+;pic: lw ${{[0-9]+}}, %call16(exp2f)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
+ store float %call, float* @y, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+declare float @exp2f(float) #0
+
+; Function Attrs: nounwind
+define void @foo26() #0 {
+entry:
+ %0 = load double* @xd, align 8
+ %call = call double @exp2(double %0) #3
+;pic: lw ${{[0-9]+}}, %call16(exp2)(${{[0-9]+}})
+;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
+ store double %call, double* @yd, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @exp2(double) #0
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
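
Each pic pair above shows the two loads a mips16 hard-float call site needs: a %call16 slot for the real libm routine and a %got slot for the shim, __mips16_call_stub_sf_1 or __mips16_call_stub_df_2, which moves values between integer and FP registers around the call. The sf/df part names the float/double flavor, and the trailing digit appears to encode the FP argument signature (1 pairing with single-float callees, 2 with single-double ones); that reading is inferred from the pairings above, not stated by the patch. A hypothetical extension following the same pattern:

define void @foo27() #0 {
entry:
  %0 = load float* @x, align 4
  %call = call float @expf(float %0) #3
;pic: lw ${{[0-9]+}}, %call16(expf)(${{[0-9]+}})
;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
  store float %call, float* @y, align 4
  ret void
}

declare float @expf(float) #0
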
diff --git a/test/CodeGen/Mips/fp16mix.ll b/test/CodeGen/Mips/fp16mix.ll
new file mode 100644
index 0000000000000..8d85099ba9f20
--- /dev/null
+++ b/test/CodeGen/Mips/fp16mix.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=10 -mips-os16 < %s | FileCheck %s -check-prefix=fmask1
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=01 -mips-os16 < %s | FileCheck %s -check-prefix=fmask2
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=10. -mips-os16 < %s | FileCheck %s -check-prefix=fmask1nr
+
+; Function Attrs: nounwind optsize readnone
+define void @foo1() {
+entry:
+ ret void
+; fmask1: .ent foo1
+; fmask1: .set noreorder
+; fmask1: .set nomacro
+; fmask1: .set noat
+; fmask1: .set at
+; fmask1: .set macro
+; fmask1: .set reorder
+; fmask1: .end foo1
+; fmask2: .ent foo1
+; fmask2: save {{.*}}
+; fmask2: .end foo1
+; fmask1nr: .ent foo1
+; fmask1nr: .set noreorder
+; fmask1nr: .set nomacro
+; fmask1nr: .set noat
+; fmask1nr: .set at
+; fmask1nr: .set macro
+; fmask1nr: .set reorder
+; fmask1nr: .end foo1
+}
+
+; Function Attrs: nounwind optsize readnone
+define void @foo2() {
+entry:
+ ret void
+; fmask2: .ent foo2
+; fmask2: .set noreorder
+; fmask2: .set nomacro
+; fmask2: .set noat
+; fmask2: .set at
+; fmask2: .set macro
+; fmask2: .set reorder
+; fmask2: .end foo2
+; fmask1: .ent foo2
+; fmask1: save {{.*}}
+; fmask1: .end foo2
+; fmask1nr: .ent foo2
+; fmask1nr: save {{.*}}
+; fmask1nr: .end foo2
+}
+
+; Function Attrs: nounwind optsize readnone
+define void @foo3() {
+entry:
+ ret void
+; fmask1: .ent foo3
+; fmask1: .set noreorder
+; fmask1: .set nomacro
+; fmask1: .set noat
+; fmask1: .set at
+; fmask1: .set macro
+; fmask1: .set reorder
+; fmask1: .end foo3
+; fmask2: .ent foo3
+; fmask2: save {{.*}}
+; fmask2: .end foo3
+; fmask1nr: .ent foo3
+; fmask1nr: save {{.*}}
+; fmask1nr: .end foo3
+}
+
+; Function Attrs: nounwind optsize readnone
+define void @foo4() {
+entry:
+ ret void
+; fmask2: .ent foo4
+; fmask2: .set noreorder
+; fmask2: .set nomacro
+; fmask2: .set noat
+; fmask2: .set at
+; fmask2: .set macro
+; fmask2: .set reorder
+; fmask2: .end foo4
+; fmask1: .ent foo4
+; fmask1: save {{.*}}
+; fmask1: .end foo4
+; fmask1nr: .ent foo4
+; fmask1nr: save {{.*}}
+; fmask1nr: .end foo4
+}
+
+
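
Reading the three RUN lines against the checks gives the apparent semantics of -mips32-function-mask: each character selects the ISA for the next function in source order ('1' produces the mips32 .set noreorder prologue, '0' the mips16 save/restore form), the mask repeats once exhausted, and a trailing '.' seems to end the repetition so later functions fall back to mips16. Summarized (an inference from this test, not documented semantics):

;   mask "10"  : foo1=mips32  foo2=mips16  foo3=mips32  foo4=mips16   (mask repeats)
;   mask "01"  : foo1=mips16  foo2=mips32  foo3=mips16  foo4=mips32
;   mask "10." : foo1=mips32  foo2=mips16  foo3=mips16  foo4=mips16   ('.' stops the repeat)
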
diff --git a/test/CodeGen/Mips/fpneeded.ll b/test/CodeGen/Mips/fpneeded.ll
index 623883a0d5c06..dcdebb92e40e2 100644
--- a/test/CodeGen/Mips/fpneeded.ll
+++ b/test/CodeGen/Mips/fpneeded.ll
@@ -131,7 +131,7 @@ entry:
; 32: .set reorder
; 32: .end foo3
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
define void @vv() #0 {
entry:
diff --git a/test/CodeGen/Mips/fpnotneeded.ll b/test/CodeGen/Mips/fpnotneeded.ll
index dc2ec10817f3e..b4fab6414223e 100644
--- a/test/CodeGen/Mips/fpnotneeded.ll
+++ b/test/CodeGen/Mips/fpnotneeded.ll
@@ -57,7 +57,7 @@ entry:
; 32: restore {{.+}}
; 32: .end foo
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
define float @fv() #0 {
diff --git a/test/CodeGen/Mips/fptr2.ll b/test/CodeGen/Mips/fptr2.ll
new file mode 100644
index 0000000000000..77028dbde9aaf
--- /dev/null
+++ b/test/CodeGen/Mips/fptr2.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static16
+
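+; With -mips16-hard-float under static relocation, a __fn_stub_<name> helper
+; is expected so that mips32 callers using the FP-register calling convention
+; can reach the mips16 body. The checks below only pin down the stub's
+; begin/end directives and reorder setting, not its full contents.
+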
+; Function Attrs: nounwind
+define double @my_mul(double %a, double %b) #0 {
+entry:
+ %a.addr = alloca double, align 8
+ %b.addr = alloca double, align 8
+ store double %a, double* %a.addr, align 8
+ store double %b, double* %b.addr, align 8
+ %0 = load double* %a.addr, align 8
+ %1 = load double* %b.addr, align 8
+ %mul = fmul double %0, %1
+ ret double %mul
+}
+
+; static16: .ent __fn_stub_my_mul
+; static16: .set reorder
+; static16-NEXT: #NO_APP
+; static16: .end __fn_stub_my_mul
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
diff --git a/test/CodeGen/Mips/frame-address.ll b/test/CodeGen/Mips/frame-address.ll
index 92946d9ffd683..0ab7da30e785f 100644
--- a/test/CodeGen/Mips/frame-address.ll
+++ b/test/CodeGen/Mips/frame-address.ll
@@ -2,11 +2,16 @@
declare i8* @llvm.frameaddress(i32) nounwind readnone
-define i8* @f() nounwind {
+define i8* @f() nounwind uwtable {
entry:
%0 = call i8* @llvm.frameaddress(i32 0)
ret i8* %0
+; CHECK: .cfi_startproc
+; CHECK: .cfi_def_cfa_offset 8
+; CHECK: .cfi_offset 30, -4
; CHECK: move $fp, $sp
+; CHECK: .cfi_def_cfa_register 30
; CHECK: move $2, $fp
+; CHECK: .cfi_endproc
}
diff --git a/test/CodeGen/Mips/helloworld.ll b/test/CodeGen/Mips/helloworld.ll
index 56ee60785f46b..058a041c16a92 100644
--- a/test/CodeGen/Mips/helloworld.ll
+++ b/test/CodeGen/Mips/helloworld.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C1
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C2
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=PE
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST1
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=C2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=PE
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s | FileCheck %s -check-prefix=ST2
;
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR
-; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR32
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=SR32
@.str = private unnamed_addr constant [13 x i8] c"hello world\0A\00", align 1
@@ -25,17 +25,19 @@ entry:
; SR32: .set noreorder
; SR32: .set nomacro
; SR32: .set noat
-; SR: save $ra, $s0, $s1, [[FS:[0-9]+]]
-; PE: li $[[T1:[0-9]+]], %hi(_gp_disp)
-; PE: addiu $[[T2:[0-9]+]], $pc, %lo(_gp_disp)
-; PE: sll $[[T3:[0-9]+]], $[[T1]], 16
+; SR: save $ra, $s0, $s1, $s2, [[FS:[0-9]+]]
+; PE: .ent main
+; PE: .align 2
+; PE-NEXT: li $[[T1:[0-9]+]], %hi(_gp_disp)
+; PE-NEXT: addiu $[[T2:[0-9]+]], $pc, %lo(_gp_disp)
+; PE: sll $[[T3:[0-9]+]], $[[T1]], 16
; C1: lw ${{[0-9]+}}, %got($.str)(${{[0-9]+}})
; C2: lw ${{[0-9]+}}, %call16(printf)(${{[0-9]+}})
; C1: addiu ${{[0-9]+}}, %lo($.str)
; C2: move $25, ${{[0-9]+}}
; C1: move $gp, ${{[0-9]+}}
; C1: jalrc ${{[0-9]+}}
-; SR: restore $ra, $s0, $s1, [[FS]]
+; SR: restore $ra, $s0, $s1, $s2, [[FS]]
; PE: li $2, 0
; PE: jrc $ra
diff --git a/test/CodeGen/Mips/hf16call32.ll b/test/CodeGen/Mips/hf16call32.ll
new file mode 100644
index 0000000000000..461438e8bec06
--- /dev/null
+++ b/test/CodeGen/Mips/hf16call32.ll
@@ -0,0 +1,1030 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=stel
+
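+; Each FP-signature callee declared in this mips16 translation unit should get
+; a __call_stub_fp_<name> in its own .mips16.call.fp.<name> section. The stub
+; moves GPR-passed arguments into $f12..$f15 with mtc1, and moves FP results
+; back to GPRs with mfc1, before transferring control to the mips32 callee.
+; The per-declaration checks at the bottom of this file verify exactly that.
+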
+@x = common global float 0.000000e+00, align 4
+@y = common global float 0.000000e+00, align 4
+@xd = common global double 0.000000e+00, align 8
+@yd = common global double 0.000000e+00, align 8
+@xy = common global { float, float } zeroinitializer, align 4
+@xyd = common global { double, double } zeroinitializer, align 8
+@ret_sf = common global float 0.000000e+00, align 4
+@ret_df = common global double 0.000000e+00, align 8
+@ret_sc = common global { float, float } zeroinitializer, align 4
+@ret_dc = common global { double, double } zeroinitializer, align 8
+@lx = common global float 0.000000e+00, align 4
+@ly = common global float 0.000000e+00, align 4
+@lxd = common global double 0.000000e+00, align 8
+@lyd = common global double 0.000000e+00, align 8
+@lxy = common global { float, float } zeroinitializer, align 4
+@lxyd = common global { double, double } zeroinitializer, align 8
+@lret_sf = common global float 0.000000e+00, align 4
+@lret_df = common global double 0.000000e+00, align 8
+@lret_sc = common global { float, float } zeroinitializer, align 4
+@lret_dc = common global { double, double } zeroinitializer, align 8
+@.str = private unnamed_addr constant [10 x i8] c"%f %f %i\0A\00", align 1
+@.str1 = private unnamed_addr constant [16 x i8] c"%f=%f %f=%f %i\0A\00", align 1
+@.str2 = private unnamed_addr constant [22 x i8] c"%f=%f %f=%f %f=%f %i\0A\00", align 1
+@.str3 = private unnamed_addr constant [18 x i8] c"%f+%fi=%f+%fi %i\0A\00", align 1
+@.str4 = private unnamed_addr constant [24 x i8] c"%f+%fi=%f+%fi %f=%f %i\0A\00", align 1
+
+; Function Attrs: nounwind
+define void @clear() #0 {
+entry:
+ store float 1.000000e+00, float* @x, align 4
+ store float 1.000000e+00, float* @y, align 4
+ store double 1.000000e+00, double* @xd, align 8
+ store double 1.000000e+00, double* @yd, align 8
+ store float 1.000000e+00, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
+ store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
+ store double 1.000000e+00, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
+ store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
+ store float 1.000000e+00, float* @ret_sf, align 4
+ store double 1.000000e+00, double* @ret_df, align 8
+ store float 1.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ store double 1.000000e+00, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ store float 0.000000e+00, float* @lx, align 4
+ store float 0.000000e+00, float* @ly, align 4
+ store double 0.000000e+00, double* @lxd, align 8
+ store double 0.000000e+00, double* @lyd, align 8
+ store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lxy, i32 0, i32 0)
+ store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lxy, i32 0, i32 1)
+ store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lxyd, i32 0, i32 0)
+ store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lxyd, i32 0, i32 1)
+ store float 0.000000e+00, float* @lret_sf, align 4
+ store double 0.000000e+00, double* @lret_df, align 8
+ store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ store float 0.000000e+00, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ store double 0.000000e+00, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ ret void
+}
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ call void @clear()
+ store float 1.500000e+00, float* @lx, align 4
+ %0 = load float* @lx, align 4
+ call void @v_sf(float %0)
+ %1 = load float* @x, align 4
+ %conv = fpext float %1 to double
+ %2 = load float* @lx, align 4
+ %conv1 = fpext float %2 to double
+ %3 = load float* @x, align 4
+ %4 = load float* @lx, align 4
+ %cmp = fcmp oeq float %3, %4
+ %conv2 = zext i1 %cmp to i32
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %conv, double %conv1, i32 %conv2)
+ call void @clear()
+ store double 0x41678C29C0000000, double* @lxd, align 8
+ %5 = load double* @lxd, align 8
+ call void @v_df(double %5)
+ %6 = load double* @xd, align 8
+ %7 = load double* @lxd, align 8
+ %8 = load double* @xd, align 8
+ %9 = load double* @lxd, align 8
+ %cmp3 = fcmp oeq double %8, %9
+ %conv4 = zext i1 %cmp3 to i32
+ %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %6, double %7, i32 %conv4)
+ call void @clear()
+ store float 9.000000e+00, float* @lx, align 4
+ store float 1.000000e+01, float* @ly, align 4
+ %10 = load float* @lx, align 4
+ %11 = load float* @ly, align 4
+ call void @v_sf_sf(float %10, float %11)
+ %12 = load float* @x, align 4
+ %conv6 = fpext float %12 to double
+ %13 = load float* @lx, align 4
+ %conv7 = fpext float %13 to double
+ %14 = load float* @y, align 4
+ %conv8 = fpext float %14 to double
+ %15 = load float* @ly, align 4
+ %conv9 = fpext float %15 to double
+ %16 = load float* @x, align 4
+ %17 = load float* @lx, align 4
+ %cmp10 = fcmp oeq float %16, %17
+ br i1 %cmp10, label %land.rhs, label %land.end
+
+land.rhs: ; preds = %entry
+ %18 = load float* @y, align 4
+ %19 = load float* @ly, align 4
+ %cmp12 = fcmp oeq float %18, %19
+ br label %land.end
+
+land.end: ; preds = %land.rhs, %entry
+ %20 = phi i1 [ false, %entry ], [ %cmp12, %land.rhs ]
+ %land.ext = zext i1 %20 to i32
+ %call14 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext)
+ call void @clear()
+ store float 0x3FFE666660000000, float* @lx, align 4
+ store double 0x4007E613249FF279, double* @lyd, align 8
+ %21 = load float* @lx, align 4
+ %22 = load double* @lyd, align 8
+ call void @v_sf_df(float %21, double %22)
+ %23 = load float* @x, align 4
+ %conv15 = fpext float %23 to double
+ %24 = load float* @lx, align 4
+ %conv16 = fpext float %24 to double
+ %25 = load double* @yd, align 8
+ %26 = load double* @lyd, align 8
+ %27 = load float* @x, align 4
+ %28 = load float* @lx, align 4
+ %cmp17 = fcmp oeq float %27, %28
+ %conv18 = zext i1 %cmp17 to i32
+ %29 = load double* @yd, align 8
+ %30 = load double* @lyd, align 8
+ %cmp19 = fcmp oeq double %29, %30
+ %conv20 = zext i1 %cmp19 to i32
+ %and = and i32 %conv18, %conv20
+ %call21 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv15, double %conv16, double %25, double %26, i32 %and)
+ call void @clear()
+ store double 0x4194E54F94000000, double* @lxd, align 8
+ store float 7.600000e+01, float* @ly, align 4
+ %31 = load double* @lxd, align 8
+ %32 = load float* @ly, align 4
+ call void @v_df_sf(double %31, float %32)
+ %33 = load double* @xd, align 8
+ %34 = load double* @lxd, align 8
+ %35 = load float* @y, align 4
+ %conv22 = fpext float %35 to double
+ %36 = load float* @ly, align 4
+ %conv23 = fpext float %36 to double
+ %37 = load double* @xd, align 8
+ %38 = load double* @lxd, align 8
+ %cmp24 = fcmp oeq double %37, %38
+ %conv25 = zext i1 %cmp24 to i32
+ %39 = load float* @y, align 4
+ %40 = load float* @ly, align 4
+ %cmp26 = fcmp oeq float %39, %40
+ %conv27 = zext i1 %cmp26 to i32
+ %and28 = and i32 %conv25, %conv27
+ %call29 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %33, double %34, double %conv22, double %conv23, i32 %and28)
+ call void @clear()
+ store double 7.365198e+07, double* @lxd, align 8
+ store double 0x416536CD80000000, double* @lyd, align 8
+ %41 = load double* @lxd, align 8
+ %42 = load double* @lyd, align 8
+ call void @v_df_df(double %41, double %42)
+ %43 = load double* @xd, align 8
+ %44 = load double* @lxd, align 8
+ %45 = load double* @yd, align 8
+ %46 = load double* @lyd, align 8
+ %47 = load double* @xd, align 8
+ %48 = load double* @lxd, align 8
+ %cmp30 = fcmp oeq double %47, %48
+ %conv31 = zext i1 %cmp30 to i32
+ %49 = load double* @yd, align 8
+ %50 = load double* @lyd, align 8
+ %cmp32 = fcmp oeq double %49, %50
+ %conv33 = zext i1 %cmp32 to i32
+ %and34 = and i32 %conv31, %conv33
+ %call35 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %43, double %44, double %45, double %46, i32 %and34)
+ call void @clear()
+ store float 0x4016666660000000, float* @ret_sf, align 4
+ %call36 = call float @sf_v()
+ store float %call36, float* @lret_sf, align 4
+ %51 = load float* @ret_sf, align 4
+ %conv37 = fpext float %51 to double
+ %52 = load float* @lret_sf, align 4
+ %conv38 = fpext float %52 to double
+ %53 = load float* @ret_sf, align 4
+ %54 = load float* @lret_sf, align 4
+ %cmp39 = fcmp oeq float %53, %54
+ %conv40 = zext i1 %cmp39 to i32
+ %call41 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %conv37, double %conv38, i32 %conv40)
+ call void @clear()
+ store float 4.587300e+06, float* @ret_sf, align 4
+ store float 3.420000e+02, float* @lx, align 4
+ %55 = load float* @lx, align 4
+ %call42 = call float @sf_sf(float %55)
+ store float %call42, float* @lret_sf, align 4
+ %56 = load float* @ret_sf, align 4
+ %conv43 = fpext float %56 to double
+ %57 = load float* @lret_sf, align 4
+ %conv44 = fpext float %57 to double
+ %58 = load float* @x, align 4
+ %conv45 = fpext float %58 to double
+ %59 = load float* @lx, align 4
+ %conv46 = fpext float %59 to double
+ %60 = load float* @ret_sf, align 4
+ %61 = load float* @lret_sf, align 4
+ %cmp47 = fcmp oeq float %60, %61
+ %conv48 = zext i1 %cmp47 to i32
+ %62 = load float* @x, align 4
+ %63 = load float* @lx, align 4
+ %cmp49 = fcmp oeq float %62, %63
+ %conv50 = zext i1 %cmp49 to i32
+ %and51 = and i32 %conv48, %conv50
+ %call52 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51)
+ call void @clear()
+ store float 4.445910e+06, float* @ret_sf, align 4
+ store double 0x419A7DB294000000, double* @lxd, align 8
+ %64 = load double* @lxd, align 8
+ %call53 = call float @sf_df(double %64)
+ store float %call53, float* @lret_sf, align 4
+ %65 = load float* @ret_sf, align 4
+ %conv54 = fpext float %65 to double
+ %66 = load float* @lret_sf, align 4
+ %conv55 = fpext float %66 to double
+ %67 = load double* @xd, align 8
+ %68 = load double* @lxd, align 8
+ %69 = load float* @ret_sf, align 4
+ %70 = load float* @lret_sf, align 4
+ %cmp56 = fcmp oeq float %69, %70
+ %conv57 = zext i1 %cmp56 to i32
+ %71 = load double* @xd, align 8
+ %72 = load double* @lxd, align 8
+ %cmp58 = fcmp oeq double %71, %72
+ %conv59 = zext i1 %cmp58 to i32
+ %and60 = and i32 %conv57, %conv59
+ %call61 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %conv54, double %conv55, double %67, double %68, i32 %and60)
+ call void @clear()
+ store float 0x3FFF4BC6A0000000, float* @ret_sf, align 4
+ store float 4.445500e+03, float* @lx, align 4
+ store float 0x4068ACCCC0000000, float* @ly, align 4
+ %73 = load float* @lx, align 4
+ %74 = load float* @ly, align 4
+ %call62 = call float @sf_sf_sf(float %73, float %74)
+ store float %call62, float* @lret_sf, align 4
+ %75 = load float* @ret_sf, align 4
+ %conv63 = fpext float %75 to double
+ %76 = load float* @lret_sf, align 4
+ %conv64 = fpext float %76 to double
+ %77 = load float* @x, align 4
+ %conv65 = fpext float %77 to double
+ %78 = load float* @lx, align 4
+ %conv66 = fpext float %78 to double
+ %79 = load float* @y, align 4
+ %conv67 = fpext float %79 to double
+ %80 = load float* @ly, align 4
+ %conv68 = fpext float %80 to double
+ %81 = load float* @ret_sf, align 4
+ %82 = load float* @lret_sf, align 4
+ %cmp69 = fcmp oeq float %81, %82
+ br i1 %cmp69, label %land.lhs.true, label %land.end76
+
+land.lhs.true: ; preds = %land.end
+ %83 = load float* @x, align 4
+ %84 = load float* @lx, align 4
+ %cmp71 = fcmp oeq float %83, %84
+ br i1 %cmp71, label %land.rhs73, label %land.end76
+
+land.rhs73: ; preds = %land.lhs.true
+ %85 = load float* @y, align 4
+ %86 = load float* @ly, align 4
+ %cmp74 = fcmp oeq float %85, %86
+ br label %land.end76
+
+land.end76: ; preds = %land.rhs73, %land.lhs.true, %land.end
+ %87 = phi i1 [ false, %land.lhs.true ], [ false, %land.end ], [ %cmp74, %land.rhs73 ]
+ %land.ext77 = zext i1 %87 to i32
+ %call78 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77)
+ call void @clear()
+ store float 9.991300e+04, float* @ret_sf, align 4
+ store float 1.114500e+04, float* @lx, align 4
+ store double 9.994445e+07, double* @lyd, align 8
+ %88 = load float* @lx, align 4
+ %89 = load double* @lyd, align 8
+ %call79 = call float @sf_sf_df(float %88, double %89)
+ store float %call79, float* @lret_sf, align 4
+ %90 = load float* @ret_sf, align 4
+ %conv80 = fpext float %90 to double
+ %91 = load float* @lret_sf, align 4
+ %conv81 = fpext float %91 to double
+ %92 = load float* @x, align 4
+ %conv82 = fpext float %92 to double
+ %93 = load float* @lx, align 4
+ %conv83 = fpext float %93 to double
+ %94 = load double* @yd, align 8
+ %95 = load double* @lyd, align 8
+ %96 = load float* @ret_sf, align 4
+ %97 = load float* @lret_sf, align 4
+ %cmp84 = fcmp oeq float %96, %97
+ br i1 %cmp84, label %land.lhs.true86, label %land.end92
+
+land.lhs.true86: ; preds = %land.end76
+ %98 = load float* @x, align 4
+ %99 = load float* @lx, align 4
+ %cmp87 = fcmp oeq float %98, %99
+ br i1 %cmp87, label %land.rhs89, label %land.end92
+
+land.rhs89: ; preds = %land.lhs.true86
+ %100 = load double* @yd, align 8
+ %101 = load double* @lyd, align 8
+ %cmp90 = fcmp oeq double %100, %101
+ br label %land.end92
+
+land.end92: ; preds = %land.rhs89, %land.lhs.true86, %land.end76
+ %102 = phi i1 [ false, %land.lhs.true86 ], [ false, %land.end76 ], [ %cmp90, %land.rhs89 ]
+ %land.ext93 = zext i1 %102 to i32
+ %call94 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93)
+ call void @clear()
+ store float 0x417CCC7A00000000, float* @ret_sf, align 4
+ store double 0x4172034530000000, double* @lxd, align 8
+ store float 4.456200e+04, float* @ly, align 4
+ %103 = load double* @lxd, align 8
+ %104 = load float* @ly, align 4
+ %call95 = call float @sf_df_sf(double %103, float %104)
+ store float %call95, float* @lret_sf, align 4
+ %105 = load float* @ret_sf, align 4
+ %conv96 = fpext float %105 to double
+ %106 = load float* @lret_sf, align 4
+ %conv97 = fpext float %106 to double
+ %107 = load double* @xd, align 8
+ %108 = load double* @lxd, align 8
+ %109 = load float* @y, align 4
+ %conv98 = fpext float %109 to double
+ %110 = load float* @ly, align 4
+ %conv99 = fpext float %110 to double
+ %111 = load float* @ret_sf, align 4
+ %112 = load float* @lret_sf, align 4
+ %cmp100 = fcmp oeq float %111, %112
+ br i1 %cmp100, label %land.lhs.true102, label %land.end108
+
+land.lhs.true102: ; preds = %land.end92
+ %113 = load double* @xd, align 8
+ %114 = load double* @lxd, align 8
+ %cmp103 = fcmp oeq double %113, %114
+ br i1 %cmp103, label %land.rhs105, label %land.end108
+
+land.rhs105: ; preds = %land.lhs.true102
+ %115 = load float* @y, align 4
+ %116 = load float* @ly, align 4
+ %cmp106 = fcmp oeq float %115, %116
+ br label %land.end108
+
+land.end108: ; preds = %land.rhs105, %land.lhs.true102, %land.end92
+ %117 = phi i1 [ false, %land.lhs.true102 ], [ false, %land.end92 ], [ %cmp106, %land.rhs105 ]
+ %land.ext109 = zext i1 %117 to i32
+ %call110 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109)
+ call void @clear()
+ store float 3.987721e+06, float* @ret_sf, align 4
+ store double 0x3FF1F49F6DDDC2D8, double* @lxd, align 8
+ store double 0x409129F306A2B170, double* @lyd, align 8
+ %118 = load double* @lxd, align 8
+ %119 = load double* @lyd, align 8
+ %call111 = call float @sf_df_df(double %118, double %119)
+ store float %call111, float* @lret_sf, align 4
+ %120 = load float* @ret_sf, align 4
+ %conv112 = fpext float %120 to double
+ %121 = load float* @lret_sf, align 4
+ %conv113 = fpext float %121 to double
+ %122 = load double* @xd, align 8
+ %123 = load double* @lxd, align 8
+ %124 = load double* @yd, align 8
+ %125 = load double* @lyd, align 8
+ %126 = load float* @ret_sf, align 4
+ %127 = load float* @lret_sf, align 4
+ %cmp114 = fcmp oeq float %126, %127
+ br i1 %cmp114, label %land.lhs.true116, label %land.end122
+
+land.lhs.true116: ; preds = %land.end108
+ %128 = load double* @xd, align 8
+ %129 = load double* @lxd, align 8
+ %cmp117 = fcmp oeq double %128, %129
+ br i1 %cmp117, label %land.rhs119, label %land.end122
+
+land.rhs119: ; preds = %land.lhs.true116
+ %130 = load double* @yd, align 8
+ %131 = load double* @lyd, align 8
+ %cmp120 = fcmp oeq double %130, %131
+ br label %land.end122
+
+land.end122: ; preds = %land.rhs119, %land.lhs.true116, %land.end108
+ %132 = phi i1 [ false, %land.lhs.true116 ], [ false, %land.end108 ], [ %cmp120, %land.rhs119 ]
+ %land.ext123 = zext i1 %132 to i32
+ %call124 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123)
+ call void @clear()
+ store double 1.561234e+01, double* @ret_df, align 8
+ %call125 = call double @df_v()
+ store double %call125, double* @lret_df, align 8
+ %133 = load double* @ret_df, align 8
+ %134 = load double* @lret_df, align 8
+ %135 = load double* @ret_df, align 8
+ %136 = load double* @lret_df, align 8
+ %cmp126 = fcmp oeq double %135, %136
+ %conv127 = zext i1 %cmp126 to i32
+ %call128 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %133, double %134, i32 %conv127)
+ call void @clear()
+ store double 1.345873e+01, double* @ret_df, align 8
+ store float 3.434520e+05, float* @lx, align 4
+ %137 = load float* @lx, align 4
+ %call129 = call double @df_sf(float %137)
+ store double %call129, double* @lret_df, align 8
+ %138 = load double* @ret_df, align 8
+ %139 = load double* @lret_df, align 8
+ %140 = load float* @x, align 4
+ %conv130 = fpext float %140 to double
+ %141 = load float* @lx, align 4
+ %conv131 = fpext float %141 to double
+ %142 = load double* @ret_df, align 8
+ %143 = load double* @lret_df, align 8
+ %cmp132 = fcmp oeq double %142, %143
+ %conv133 = zext i1 %cmp132 to i32
+ %144 = load float* @x, align 4
+ %145 = load float* @lx, align 4
+ %cmp134 = fcmp oeq float %144, %145
+ %conv135 = zext i1 %cmp134 to i32
+ %and136 = and i32 %conv133, %conv135
+ %call137 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %138, double %139, double %conv130, double %conv131, i32 %and136)
+ call void @clear()
+ store double 0x4084F3AB7AA25D8D, double* @ret_df, align 8
+ store double 0x4114F671D2F1A9FC, double* @lxd, align 8
+ %146 = load double* @lxd, align 8
+ %call138 = call double @df_df(double %146)
+ store double %call138, double* @lret_df, align 8
+ %147 = load double* @ret_df, align 8
+ %148 = load double* @lret_df, align 8
+ %149 = load double* @xd, align 8
+ %150 = load double* @lxd, align 8
+ %151 = load double* @ret_df, align 8
+ %152 = load double* @lret_df, align 8
+ %cmp139 = fcmp oeq double %151, %152
+ %conv140 = zext i1 %cmp139 to i32
+ %153 = load double* @xd, align 8
+ %154 = load double* @lxd, align 8
+ %cmp141 = fcmp oeq double %153, %154
+ %conv142 = zext i1 %cmp141 to i32
+ %and143 = and i32 %conv140, %conv142
+ %call144 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str1, i32 0, i32 0), double %147, double %148, double %149, double %150, i32 %and143)
+ call void @clear()
+ store double 6.781956e+03, double* @ret_df, align 8
+ store float 4.445500e+03, float* @lx, align 4
+ store float 0x4068ACCCC0000000, float* @ly, align 4
+ %155 = load float* @lx, align 4
+ %156 = load float* @ly, align 4
+ %call145 = call double @df_sf_sf(float %155, float %156)
+ store double %call145, double* @lret_df, align 8
+ %157 = load double* @ret_df, align 8
+ %158 = load double* @lret_df, align 8
+ %159 = load float* @x, align 4
+ %conv146 = fpext float %159 to double
+ %160 = load float* @lx, align 4
+ %conv147 = fpext float %160 to double
+ %161 = load float* @y, align 4
+ %conv148 = fpext float %161 to double
+ %162 = load float* @ly, align 4
+ %conv149 = fpext float %162 to double
+ %163 = load double* @ret_df, align 8
+ %164 = load double* @lret_df, align 8
+ %cmp150 = fcmp oeq double %163, %164
+ br i1 %cmp150, label %land.lhs.true152, label %land.end158
+
+land.lhs.true152: ; preds = %land.end122
+ %165 = load float* @x, align 4
+ %166 = load float* @lx, align 4
+ %cmp153 = fcmp oeq float %165, %166
+ br i1 %cmp153, label %land.rhs155, label %land.end158
+
+land.rhs155: ; preds = %land.lhs.true152
+ %167 = load float* @y, align 4
+ %168 = load float* @ly, align 4
+ %cmp156 = fcmp oeq float %167, %168
+ br label %land.end158
+
+land.end158: ; preds = %land.rhs155, %land.lhs.true152, %land.end122
+ %169 = phi i1 [ false, %land.lhs.true152 ], [ false, %land.end122 ], [ %cmp156, %land.rhs155 ]
+ %land.ext159 = zext i1 %169 to i32
+ %call160 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
+ call void @clear()
+ store double 1.889130e+05, double* @ret_df, align 8
+ store float 9.111450e+05, float* @lx, align 4
+ store double 0x4185320A58000000, double* @lyd, align 8
+ %170 = load float* @lx, align 4
+ %171 = load double* @lyd, align 8
+ %call161 = call double @df_sf_df(float %170, double %171)
+ store double %call161, double* @lret_df, align 8
+ %172 = load double* @ret_df, align 8
+ %173 = load double* @lret_df, align 8
+ %174 = load float* @x, align 4
+ %conv162 = fpext float %174 to double
+ %175 = load float* @lx, align 4
+ %conv163 = fpext float %175 to double
+ %176 = load double* @yd, align 8
+ %177 = load double* @lyd, align 8
+ %178 = load double* @ret_df, align 8
+ %179 = load double* @lret_df, align 8
+ %cmp164 = fcmp oeq double %178, %179
+ br i1 %cmp164, label %land.lhs.true166, label %land.end172
+
+land.lhs.true166: ; preds = %land.end158
+ %180 = load float* @x, align 4
+ %181 = load float* @lx, align 4
+ %cmp167 = fcmp oeq float %180, %181
+ br i1 %cmp167, label %land.rhs169, label %land.end172
+
+land.rhs169: ; preds = %land.lhs.true166
+ %182 = load double* @yd, align 8
+ %183 = load double* @lyd, align 8
+ %cmp170 = fcmp oeq double %182, %183
+ br label %land.end172
+
+land.end172: ; preds = %land.rhs169, %land.lhs.true166, %land.end158
+ %184 = phi i1 [ false, %land.lhs.true166 ], [ false, %land.end158 ], [ %cmp170, %land.rhs169 ]
+ %land.ext173 = zext i1 %184 to i32
+ %call174 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
+ call void @clear()
+ store double 0x418B2DB900000000, double* @ret_df, align 8
+ store double 0x41B1EF2ED3000000, double* @lxd, align 8
+ store float 1.244562e+06, float* @ly, align 4
+ %185 = load double* @lxd, align 8
+ %186 = load float* @ly, align 4
+ %call175 = call double @df_df_sf(double %185, float %186)
+ store double %call175, double* @lret_df, align 8
+ %187 = load double* @ret_df, align 8
+ %188 = load double* @lret_df, align 8
+ %189 = load double* @xd, align 8
+ %190 = load double* @lxd, align 8
+ %191 = load float* @y, align 4
+ %conv176 = fpext float %191 to double
+ %192 = load float* @ly, align 4
+ %conv177 = fpext float %192 to double
+ %193 = load double* @ret_df, align 8
+ %194 = load double* @lret_df, align 8
+ %cmp178 = fcmp oeq double %193, %194
+ br i1 %cmp178, label %land.lhs.true180, label %land.end186
+
+land.lhs.true180: ; preds = %land.end172
+ %195 = load double* @xd, align 8
+ %196 = load double* @lxd, align 8
+ %cmp181 = fcmp oeq double %195, %196
+ br i1 %cmp181, label %land.rhs183, label %land.end186
+
+land.rhs183: ; preds = %land.lhs.true180
+ %197 = load float* @y, align 4
+ %198 = load float* @ly, align 4
+ %cmp184 = fcmp oeq float %197, %198
+ br label %land.end186
+
+land.end186: ; preds = %land.rhs183, %land.lhs.true180, %land.end172
+ %199 = phi i1 [ false, %land.lhs.true180 ], [ false, %land.end172 ], [ %cmp184, %land.rhs183 ]
+ %land.ext187 = zext i1 %199 to i32
+ %call188 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
+ call void @clear()
+ store double 3.987721e+06, double* @ret_df, align 8
+ store double 5.223560e+00, double* @lxd, align 8
+ store double 0x40B7D37CC1A8AC5C, double* @lyd, align 8
+ %200 = load double* @lxd, align 8
+ %201 = load double* @lyd, align 8
+ %call189 = call double @df_df_df(double %200, double %201)
+ store double %call189, double* @lret_df, align 8
+ %202 = load double* @ret_df, align 8
+ %203 = load double* @lret_df, align 8
+ %204 = load double* @xd, align 8
+ %205 = load double* @lxd, align 8
+ %206 = load double* @yd, align 8
+ %207 = load double* @lyd, align 8
+ %208 = load double* @ret_df, align 8
+ %209 = load double* @lret_df, align 8
+ %cmp190 = fcmp oeq double %208, %209
+ br i1 %cmp190, label %land.lhs.true192, label %land.end198
+
+land.lhs.true192: ; preds = %land.end186
+ %210 = load double* @xd, align 8
+ %211 = load double* @lxd, align 8
+ %cmp193 = fcmp oeq double %210, %211
+ br i1 %cmp193, label %land.rhs195, label %land.end198
+
+land.rhs195: ; preds = %land.lhs.true192
+ %212 = load double* @yd, align 8
+ %213 = load double* @lyd, align 8
+ %cmp196 = fcmp oeq double %212, %213
+ br label %land.end198
+
+land.end198: ; preds = %land.rhs195, %land.lhs.true192, %land.end186
+ %214 = phi i1 [ false, %land.lhs.true192 ], [ false, %land.end186 ], [ %cmp196, %land.rhs195 ]
+ %land.ext199 = zext i1 %214 to i32
+ %call200 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str2, i32 0, i32 0), double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
+ call void @clear()
+ store float 4.500000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ store float 7.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %call201 = call { float, float } @sc_v()
+ %215 = extractvalue { float, float } %call201, 0
+ %216 = extractvalue { float, float } %call201, 1
+ store float %215, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ store float %216, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %conv202 = fpext float %ret_sc.real to double
+ %conv203 = fpext float %ret_sc.imag to double
+ %ret_sc.real204 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag205 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %conv206 = fpext float %ret_sc.real204 to double
+ %conv207 = fpext float %ret_sc.imag205 to double
+ %lret_sc.real = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %conv208 = fpext float %lret_sc.real to double
+ %conv209 = fpext float %lret_sc.imag to double
+ %lret_sc.real210 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag211 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %conv212 = fpext float %lret_sc.real210 to double
+ %conv213 = fpext float %lret_sc.imag211 to double
+ %ret_sc.real214 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag215 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %lret_sc.real216 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag217 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %cmp.r = fcmp oeq float %ret_sc.real214, %lret_sc.real216
+ %cmp.i = fcmp oeq float %ret_sc.imag215, %lret_sc.imag217
+ %and.ri = and i1 %cmp.r, %cmp.i
+ %conv218 = zext i1 %and.ri to i32
+ %call219 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8]* @.str3, i32 0, i32 0), double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
+ call void @clear()
+ store float 0x3FF7A99300000000, float* @lx, align 4
+ store float 4.500000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ store float 7.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %217 = load float* @lx, align 4
+ %call220 = call { float, float } @sc_sf(float %217)
+ %218 = extractvalue { float, float } %call220, 0
+ %219 = extractvalue { float, float } %call220, 1
+ store float %218, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ store float %219, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %ret_sc.real221 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag222 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %conv223 = fpext float %ret_sc.real221 to double
+ %conv224 = fpext float %ret_sc.imag222 to double
+ %ret_sc.real225 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag226 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %conv227 = fpext float %ret_sc.real225 to double
+ %conv228 = fpext float %ret_sc.imag226 to double
+ %lret_sc.real229 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag230 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %conv231 = fpext float %lret_sc.real229 to double
+ %conv232 = fpext float %lret_sc.imag230 to double
+ %lret_sc.real233 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag234 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %conv235 = fpext float %lret_sc.real233 to double
+ %conv236 = fpext float %lret_sc.imag234 to double
+ %220 = load float* @x, align 4
+ %conv237 = fpext float %220 to double
+ %221 = load float* @lx, align 4
+ %conv238 = fpext float %221 to double
+ %ret_sc.real239 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0)
+ %ret_sc.imag240 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1)
+ %lret_sc.real241 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0)
+ %lret_sc.imag242 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1)
+ %cmp.r243 = fcmp oeq float %ret_sc.real239, %lret_sc.real241
+ %cmp.i244 = fcmp oeq float %ret_sc.imag240, %lret_sc.imag242
+ %and.ri245 = and i1 %cmp.r243, %cmp.i244
+ br i1 %and.ri245, label %land.rhs247, label %land.end250
+
+land.rhs247: ; preds = %land.end198
+ %222 = load float* @x, align 4
+ %223 = load float* @lx, align 4
+ %cmp248 = fcmp oeq float %222, %223
+ br label %land.end250
+
+land.end250: ; preds = %land.rhs247, %land.end198
+ %224 = phi i1 [ false, %land.end198 ], [ %cmp248, %land.rhs247 ]
+ %land.ext251 = zext i1 %224 to i32
+ %call252 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str4, i32 0, i32 0), double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
+ call void @clear()
+ store double 1.234500e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ store double 7.677000e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %call253 = call { double, double } @dc_v()
+ %225 = extractvalue { double, double } %call253, 0
+ %226 = extractvalue { double, double } %call253, 1
+ store double %225, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ store double %226, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %ret_dc.real254 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag255 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %lret_dc.real256 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag257 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real258 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag259 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real260 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag261 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %cmp.r262 = fcmp oeq double %ret_dc.real258, %lret_dc.real260
+ %cmp.i263 = fcmp oeq double %ret_dc.imag259, %lret_dc.imag261
+ %and.ri264 = and i1 %cmp.r262, %cmp.i263
+ %conv265 = zext i1 %and.ri264 to i32
+ %call266 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([18 x i8]* @.str3, i32 0, i32 0), double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
+ call void @clear()
+ store double 0x40AAF6F532617C1C, double* @lxd, align 8
+ store double 4.444500e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ store double 7.888000e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %227 = load float* @lx, align 4
+ %call267 = call { double, double } @dc_sf(float %227)
+ %228 = extractvalue { double, double } %call267, 0
+ %229 = extractvalue { double, double } %call267, 1
+ store double %228, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ store double %229, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %ret_dc.real268 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag269 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %ret_dc.real270 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag271 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real272 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag273 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %lret_dc.real274 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag275 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %230 = load float* @x, align 4
+ %conv276 = fpext float %230 to double
+ %231 = load float* @lx, align 4
+ %conv277 = fpext float %231 to double
+ %ret_dc.real278 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0)
+ %ret_dc.imag279 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1)
+ %lret_dc.real280 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0)
+ %lret_dc.imag281 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1)
+ %cmp.r282 = fcmp oeq double %ret_dc.real278, %lret_dc.real280
+ %cmp.i283 = fcmp oeq double %ret_dc.imag279, %lret_dc.imag281
+ %and.ri284 = and i1 %cmp.r282, %cmp.i283
+ br i1 %and.ri284, label %land.rhs286, label %land.end289
+
+land.rhs286: ; preds = %land.end250
+ %232 = load float* @x, align 4
+ %233 = load float* @lx, align 4
+ %cmp287 = fcmp oeq float %232, %233
+ br label %land.end289
+
+land.end289: ; preds = %land.rhs286, %land.end250
+ %234 = phi i1 [ false, %land.end250 ], [ %cmp287, %land.rhs286 ]
+ %land.ext290 = zext i1 %234 to i32
+ %call291 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str4, i32 0, i32 0), double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
+ %235 = load i32* %retval
+ ret i32 %235
+}
+
+declare void @v_sf(float) #1
+; stel: .section .mips16.call.fp.v_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_v_sf
+; stel: mtc1 $4,$f12
+; stel: lui $25,%hi(v_sf)
+; stel: addiu $25,$25,%lo(v_sf)
+; stel: jr $25
+; stel: .end __call_stub_fp_v_sf
+
+declare i32 @printf(i8*, ...) #1
+
+declare void @v_df(double) #1
+; stel: .section .mips16.call.fp.v_df,"ax",@progbits
+; stel: .ent __call_stub_fp_v_df
+; stel: #APP
+; stel: .set reorder
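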
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: lui $25,%hi(v_df)
+; stel: addiu $25,$25,%lo(v_df)
+; stel: jr $25
+; stel: .end __call_stub_fp_v_df
+
+declare void @v_sf_sf(float, float) #1
+; stel: .section .mips16.call.fp.v_sf_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_v_sf_sf
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f14
+; stel: lui $25,%hi(v_sf_sf)
+; stel: addiu $25,$25,%lo(v_sf_sf)
+; stel: jr $25
+; stel: .end __call_stub_fp_v_sf_sf
+
+declare void @v_sf_df(float, double) #1
+; stel: .section .mips16.call.fp.v_sf_df,"ax",@progbits
+; stel: .ent __call_stub_fp_v_sf_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $6,$f14
+; stel: mtc1 $7,$f15
+; stel: lui $25,%hi(v_sf_df)
+; stel: addiu $25,$25,%lo(v_sf_df)
+; stel: jr $25
+; stel: .end __call_stub_fp_v_sf_df
+
+declare void @v_df_sf(double, float) #1
+; stel: .section .mips16.call.fp.v_df_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_v_df_sf
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: mtc1 $6,$f14
+; stel: lui $25,%hi(v_df_sf)
+; stel: addiu $25,$25,%lo(v_df_sf)
+; stel: jr $25
+; stel: .end __call_stub_fp_v_df_sf
+
+declare void @v_df_df(double, double) #1
+; stel: .section .mips16.call.fp.v_df_df,"ax",@progbits
+; stel: .ent __call_stub_fp_v_df_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: mtc1 $6,$f14
+; stel: mtc1 $7,$f15
+; stel: lui $25,%hi(v_df_df)
+; stel: addiu $25,$25,%lo(v_df_df)
+; stel: jr $25
+; stel: .end __call_stub_fp_v_df_df
+
+declare float @sf_v() #1
+; stel: .section .mips16.call.fp.sf_v,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_v
+; stel: move $18, $31
+; stel: jal sf_v
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_v
+
+declare float @sf_sf(float) #1
+; stel: .section .mips16.call.fp.sf_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_sf
+; stel: mtc1 $4,$f12
+; stel: move $18, $31
+; stel: jal sf_sf
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_sf
+
+declare float @sf_df(double) #1
+; stel: .section .mips16.call.fp.sf_df,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: move $18, $31
+; stel: jal sf_df
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_df
+
+declare float @sf_sf_sf(float, float) #1
+; stel: .section .mips16.call.fp.sf_sf_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_sf_sf
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f14
+; stel: move $18, $31
+; stel: jal sf_sf_sf
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_sf_sf
+
+declare float @sf_sf_df(float, double) #1
+; stel: .section .mips16.call.fp.sf_sf_df,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_sf_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $6,$f14
+; stel: mtc1 $7,$f15
+; stel: move $18, $31
+; stel: jal sf_sf_df
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_sf_df
+
+declare float @sf_df_sf(double, float) #1
+; stel: .section .mips16.call.fp.sf_df_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_df_sf
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: mtc1 $6,$f14
+; stel: move $18, $31
+; stel: jal sf_df_sf
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_df_sf
+
+declare float @sf_df_df(double, double) #1
+; stel: .section .mips16.call.fp.sf_df_df,"ax",@progbits
+; stel: .ent __call_stub_fp_sf_df_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: mtc1 $6,$f14
+; stel: mtc1 $7,$f15
+; stel: move $18, $31
+; stel: jal sf_df_df
+; stel: mfc1 $2,$f0
+; stel: jr $18
+; stel: .end __call_stub_fp_sf_df_df
+
+declare double @df_v() #1
+; stel: .section .mips16.call.fp.df_v,"ax",@progbits
+; stel: .ent __call_stub_fp_df_v
+; stel: move $18, $31
+; stel: jal df_v
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_v
+
+declare double @df_sf(float) #1
+; stel: .section .mips16.call.fp.df_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_df_sf
+; stel: mtc1 $4,$f12
+; stel: move $18, $31
+; stel: jal df_sf
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_sf
+
+declare double @df_df(double) #1
+; stel: .section .mips16.call.fp.df_df,"ax",@progbits
+; stel: .ent __call_stub_fp_df_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: move $18, $31
+; stel: jal df_df
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_df
+
+declare double @df_sf_sf(float, float) #1
+; stel: .section .mips16.call.fp.df_sf_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_df_sf_sf
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f14
+; stel: move $18, $31
+; stel: jal df_sf_sf
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_sf_sf
+
+declare double @df_sf_df(float, double) #1
+; stel: .section .mips16.call.fp.df_sf_df,"ax",@progbits
+; stel: .ent __call_stub_fp_df_sf_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $6,$f14
+; stel: mtc1 $7,$f15
+; stel: move $18, $31
+; stel: jal df_sf_df
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_sf_df
+
+declare double @df_df_sf(double, float) #1
+; stel: .section .mips16.call.fp.df_df_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_df_df_sf
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: mtc1 $6,$f14
+; stel: move $18, $31
+; stel: jal df_df_sf
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_df_sf
+
+declare double @df_df_df(double, double) #1
+; stel: .section .mips16.call.fp.df_df_df,"ax",@progbits
+; stel: .ent __call_stub_fp_df_df_df
+; stel: mtc1 $4,$f12
+; stel: mtc1 $5,$f13
+; stel: mtc1 $6,$f14
+; stel: mtc1 $7,$f15
+; stel: move $18, $31
+; stel: jal df_df_df
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_df_df_df
+
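+; For complex returns the stubs appear to follow the o32 convention of
+; splitting the value across FP register pairs: { float, float } comes back
+; in $f0/$f2, and { double, double } in $f0/$f1 plus $f2/$f3, as the mfc1
+; sequences in the stubs below show.
+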
+declare { float, float } @sc_v() #1
+; stel: .section .mips16.call.fp.sc_v,"ax",@progbits
+; stel: .ent __call_stub_fp_sc_v
+; stel: move $18, $31
+; stel: jal sc_v
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f2
+; stel: jr $18
+; stel: .end __call_stub_fp_sc_v
+
+declare { float, float } @sc_sf(float) #1
+; stel: .section .mips16.call.fp.sc_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_sc_sf
+; stel: mtc1 $4,$f12
+; stel: move $18, $31
+; stel: jal sc_sf
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f2
+; stel: jr $18
+; stel: .end __call_stub_fp_sc_sf
+
+declare { double, double } @dc_v() #1
+; stel: .section .mips16.call.fp.dc_v,"ax",@progbits
+; stel: .ent __call_stub_fp_dc_v
+; stel: move $18, $31
+; stel: jal dc_v
+; stel: mfc1 $4,$f2
+; stel: mfc1 $5,$f3
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_dc_v
+
+declare { double, double } @dc_sf(float) #1
+; stel: .section .mips16.call.fp.dc_sf,"ax",@progbits
+; stel: .ent __call_stub_fp_dc_sf
+; stel: mtc1 $4,$f12
+; stel: move $18, $31
+; stel: jal dc_sf
+; stel: mfc1 $4,$f2
+; stel: mfc1 $5,$f3
+; stel: mfc1 $2,$f0
+; stel: mfc1 $3,$f1
+; stel: jr $18
+; stel: .end __call_stub_fp_dc_sf
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/hf16call32_body.ll b/test/CodeGen/Mips/hf16call32_body.ll
new file mode 100644
index 0000000000000..34bae26f85f3a
--- /dev/null
+++ b/test/CodeGen/Mips/hf16call32_body.ll
@@ -0,0 +1,294 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=stel
+
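+; This is the callee-side counterpart of hf16call32.ll: each mips16 function
+; taking FP arguments should get a __fn_stub_<name> in a .mips16.fn.<name>
+; section that moves the FP-register arguments into GPRs with mfc1, defines
+; the __fn_local_<name> alias, and jumps to the real body through $25.
+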
+@x = external global float
+@xd = external global double
+@y = external global float
+@yd = external global double
+@ret_sf = external global float
+@ret_df = external global double
+@ret_sc = external global { float, float }
+@ret_dc = external global { double, double }
+
+; Function Attrs: nounwind
+define void @v_sf(float %p) #0 {
+entry:
+ %p.addr = alloca float, align 4
+ store float %p, float* %p.addr, align 4
+ %0 = load float* %p.addr, align 4
+ store float %0, float* @x, align 4
+ ret void
+}
+; stel: .section .mips16.fn.v_sf,"ax",@progbits
+; stel: .ent __fn_stub_v_sf
+; stel: la $25,v_sf
+; stel: mfc1 $4,$f12
+; stel: jr $25
+; stel: __fn_local_v_sf = v_sf
+; stel: .end __fn_stub_v_sf
+
+declare i32 @printf(i8*, ...) #1
+
+; Function Attrs: nounwind
+define void @v_df(double %p) #0 {
+entry:
+ %p.addr = alloca double, align 8
+ store double %p, double* %p.addr, align 8
+ %0 = load double* %p.addr, align 8
+ store double %0, double* @xd, align 8
+ ret void
+}
+
+; stel: .section .mips16.fn.v_df,"ax",@progbits
+; stel: .ent __fn_stub_v_df
+; stel: la $25,v_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: jr $25
+; stel: __fn_local_v_df = v_df
+; stel: .end __fn_stub_v_df
+
+; Function Attrs: nounwind
+define void @v_sf_sf(float %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca float, align 4
+ store float %p1, float* %p1.addr, align 4
+ store float %p2, float* %p2.addr, align 4
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ ret void
+}
+
+; stel: .section .mips16.fn.v_sf_sf,"ax",@progbits
+; stel: .ent __fn_stub_v_sf_sf
+; stel: la $25,v_sf_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f14
+; stel: jr $25
+; stel: __fn_local_v_sf_sf = v_sf_sf
+; stel: .end __fn_stub_v_sf_sf
+
+; Function Attrs: nounwind
+define void @v_sf_df(float %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca double, align 8
+ store float %p1, float* %p1.addr, align 4
+ store double %p2, double* %p2.addr, align 8
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ ret void
+}
+
+; stel: .section .mips16.fn.v_sf_df,"ax",@progbits
+; stel: .ent __fn_stub_v_sf_df
+; stel: la $25,v_sf_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_v_sf_df = v_sf_df
+; stel: .end __fn_stub_v_sf_df
+
+; Function Attrs: nounwind
+define void @v_df_sf(double %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca float, align 4
+ store double %p1, double* %p1.addr, align 8
+ store float %p2, float* %p2.addr, align 4
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ ret void
+}
+
+; stel: .section .mips16.fn.v_df_sf,"ax",@progbits
+; stel: .ent __fn_stub_v_df_sf
+; stel: la $25,v_df_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: jr $25
+; stel: __fn_local_v_df_sf = v_df_sf
+; stel: .end __fn_stub_v_df_sf
+
+; Function Attrs: nounwind
+define void @v_df_df(double %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca double, align 8
+ store double %p1, double* %p1.addr, align 8
+ store double %p2, double* %p2.addr, align 8
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ ret void
+}
+
+; stel: .section .mips16.fn.v_df_df,"ax",@progbits
+; stel: .ent __fn_stub_v_df_df
+; stel: la $25,v_df_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_v_df_df = v_df_df
+; stel: .end __fn_stub_v_df_df
+
+; Function Attrs: nounwind
+define float @sf_v() #0 {
+entry:
+ %0 = load float* @ret_sf, align 4
+ ret float %0
+}
+
+; Function Attrs: nounwind
+define float @sf_sf(float %p) #0 {
+entry:
+ %p.addr = alloca float, align 4
+ store float %p, float* %p.addr, align 4
+ %0 = load float* %p.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load float* @ret_sf, align 4
+ ret float %1
+}
+
+
+; stel: .section .mips16.fn.sf_sf,"ax",@progbits
+; stel: .ent __fn_stub_sf_sf
+; stel: la $25,sf_sf
+; stel: mfc1 $4,$f12
+; stel: jr $25
+; stel: __fn_local_sf_sf = sf_sf
+; stel: .end __fn_stub_sf_sf
+
+
+; Function Attrs: nounwind
+define float @sf_df(double %p) #0 {
+entry:
+ %p.addr = alloca double, align 8
+ store double %p, double* %p.addr, align 8
+ %0 = load double* %p.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load float* @ret_sf, align 4
+ ret float %1
+}
+
+; stel: .section .mips16.fn.sf_df,"ax",@progbits
+; stel: .ent __fn_stub_sf_df
+; stel: la $25,sf_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: jr $25
+; stel: __fn_local_sf_df = sf_df
+; stel: .end __fn_stub_sf_df
+
+; Function Attrs: nounwind
+define float @sf_sf_sf(float %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca float, align 4
+ store float %p1, float* %p1.addr, align 4
+ store float %p2, float* %p2.addr, align 4
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_sf_sf,"ax",@progbits
+; stel: .ent __fn_stub_sf_sf_sf
+; stel: la $25,sf_sf_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f14
+; stel: jr $25
+; stel: __fn_local_sf_sf_sf = sf_sf_sf
+; stel: .end __fn_stub_sf_sf_sf
+
+; Function Attrs: nounwind
+define float @sf_sf_df(float %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca float, align 4
+ %p2.addr = alloca double, align 8
+ store float %p1, float* %p1.addr, align 4
+ store double %p2, double* %p2.addr, align 8
+ %0 = load float* %p1.addr, align 4
+ store float %0, float* @x, align 4
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_sf_df,"ax",@progbits
+; stel: .ent __fn_stub_sf_sf_df
+; stel: la $25,sf_sf_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_sf_sf_df = sf_sf_df
+; stel: .end __fn_stub_sf_sf_df
+
+; Function Attrs: nounwind
+define float @sf_df_sf(double %p1, float %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca float, align 4
+ store double %p1, double* %p1.addr, align 8
+ store float %p2, float* %p2.addr, align 4
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load float* %p2.addr, align 4
+ store float %1, float* @y, align 4
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_df_sf,"ax",@progbits
+; stel: .ent __fn_stub_sf_df_sf
+; stel: la $25,sf_df_sf
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: jr $25
+; stel: __fn_local_sf_df_sf = sf_df_sf
+; stel: .end __fn_stub_sf_df_sf
+
+; Function Attrs: nounwind
+define float @sf_df_df(double %p1, double %p2) #0 {
+entry:
+ %p1.addr = alloca double, align 8
+ %p2.addr = alloca double, align 8
+ store double %p1, double* %p1.addr, align 8
+ store double %p2, double* %p2.addr, align 8
+ %0 = load double* %p1.addr, align 8
+ store double %0, double* @xd, align 8
+ %1 = load double* %p2.addr, align 8
+ store double %1, double* @yd, align 8
+ %2 = load float* @ret_sf, align 4
+ ret float %2
+}
+
+; stel: .section .mips16.fn.sf_df_df,"ax",@progbits
+; stel: .ent __fn_stub_sf_df_df
+; stel: la $25,sf_df_df
+; stel: mfc1 $4,$f12
+; stel: mfc1 $5,$f13
+; stel: mfc1 $6,$f14
+; stel: mfc1 $7,$f15
+; stel: jr $25
+; stel: __fn_local_sf_df_df = sf_df_df
+; stel: .end __fn_stub_sf_df_df
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/hf1_body.ll b/test/CodeGen/Mips/hf1_body.ll
new file mode 100644
index 0000000000000..b2cce92aa1a42
--- /dev/null
+++ b/test/CodeGen/Mips/hf1_body.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=picfp16
+
+@x = external global float
+
+; Function Attrs: nounwind
+define void @v_sf(float %p) #0 {
+entry:
+ %p.addr = alloca float, align 4
+ store float %p, float* %p.addr, align 4
+ %0 = load float* %p.addr, align 4
+ store float %0, float* @x, align 4
+ ret void
+}
+; picfp16: .ent __fn_stub_v_sf
+; picfp16: .cpload $25
+; picfp16: .set reorder
+; picfp16: .reloc 0,R_MIPS_NONE,v_sf
+; picfp16: la $25,$__fn_local_v_sf
+; picfp16: mfc1 $4,$f12
+; picfp16: jr $25
+; picfp16: .end __fn_stub_v_sf
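+
+; Note: in the PIC variant the stub first runs the standard .cpload $25
+; prologue to set up $gp, then jumps to the local alias $__fn_local_v_sf
+; rather than to v_sf itself. The R_MIPS_NONE relocation against v_sf is
+; presumably emitted only so the stub section keeps a visible reference to
+; the function it wraps; it patches nothing at link time.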
diff --git a/test/CodeGen/Mips/hfptrcall.ll b/test/CodeGen/Mips/hfptrcall.ll
new file mode 100644
index 0000000000000..25639dad63a82
--- /dev/null
+++ b/test/CodeGen/Mips/hfptrcall.ll
@@ -0,0 +1,125 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=picel
+
+@ptrsv = global float ()* @sv, align 4
+@ptrdv = global double ()* @dv, align 4
+@ptrscv = global { float, float } ()* @scv, align 4
+@ptrdcv = global { double, double } ()* @dcv, align 4
+@x = common global float 0.000000e+00, align 4
+@.str = private unnamed_addr constant [4 x i8] c"%f\0A\00", align 1
+@xd = common global double 0.000000e+00, align 8
+@xy = common global { float, float } zeroinitializer, align 4
+@.str1 = private unnamed_addr constant [10 x i8] c"%f + %fi\0A\00", align 1
+@xyd = common global { double, double } zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define float @sv() #0 {
+entry:
+ ret float 1.000000e+01
+}
+; picel: .ent sv
+; picel: lw ${{[0-9]+}}, %call16(__mips16_ret_sf)(${{[0-9]+}})
+; picel: .end sv
+
+; Function Attrs: nounwind
+define double @dv() #0 {
+entry:
+ ret double 1.500000e+01
+}
+
+; picel: .ent dv
+; picel: lw ${{[0-9]+}}, %call16(__mips16_ret_df)(${{[0-9]+}})
+; picel: .end dv
+
+; Function Attrs: nounwind
+define { float, float } @scv() #0 {
+entry:
+ %retval = alloca { float, float }, align 4
+ %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ store float 5.000000e+00, float* %real
+ store float 9.900000e+01, float* %imag
+ %0 = load { float, float }* %retval
+ ret { float, float } %0
+}
+
+; picel: .ent scv
+; picel: lw ${{[0-9]+}}, %call16(__mips16_ret_sc)(${{[0-9]+}})
+; picel: .end scv
+
+; Function Attrs: nounwind
+define { double, double } @dcv() #0 {
+entry:
+ %retval = alloca { double, double }, align 8
+ %real = getelementptr inbounds { double, double }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }* %retval, i32 0, i32 1
+ store double 0x416BC8B0A0000000, double* %real
+ store double 0x41CDCCB763800000, double* %imag
+ %0 = load { double, double }* %retval
+ ret { double, double } %0
+}
+
+; picel: .ent dcv
+; picel: lw ${{[0-9]+}}, %call16(__mips16_ret_dc)(${{[0-9]+}})
+; picel: .end dcv
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %0 = load float ()** @ptrsv, align 4
+ %call = call float %0()
+ store float %call, float* @x, align 4
+ %1 = load float* @x, align 4
+ %conv = fpext float %1 to double
+ %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), double %conv)
+ %2 = load double ()** @ptrdv, align 4
+ %call2 = call double %2()
+ store double %call2, double* @xd, align 8
+ %3 = load double* @xd, align 8
+ %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), double %3)
+ %4 = load { float, float } ()** @ptrscv, align 4
+ %call4 = call { float, float } %4()
+ %5 = extractvalue { float, float } %call4, 0
+ %6 = extractvalue { float, float } %call4, 1
+ store float %5, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
+ store float %6, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
+ %xy.real = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
+ %xy.imag = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
+ %conv5 = fpext float %xy.real to double
+ %conv6 = fpext float %xy.imag to double
+ %xy.real7 = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0)
+ %xy.imag8 = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1)
+ %conv9 = fpext float %xy.real7 to double
+ %conv10 = fpext float %xy.imag8 to double
+ %call11 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str1, i32 0, i32 0), double %conv5, double %conv10)
+ %7 = load { double, double } ()** @ptrdcv, align 4
+ %call12 = call { double, double } %7()
+ %8 = extractvalue { double, double } %call12, 0
+ %9 = extractvalue { double, double } %call12, 1
+ store double %8, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
+ store double %9, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
+ %xyd.real = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
+ %xyd.imag = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
+ %xyd.real13 = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0)
+ %xyd.imag14 = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1)
+ %call15 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str1, i32 0, i32 0), double %xyd.real, double %xyd.imag14)
+ ret i32 0
+}
+
+; picel: .ent main
+
+; picel: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_0)(${{[0-9]+}})
+
+; picel: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_0)(${{[0-9]+}})
+
+; picel: lw ${{[0-9]+}}, %got(__mips16_call_stub_sc_0)(${{[0-9]+}})
+
+; picel: lw ${{[0-9]+}}, %got(__mips16_call_stub_dc_0)(${{[0-9]+}})
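+
+; A note on the helper names: the middle token encodes the return type of
+; the called pointer (sf = float, df = double, sc = float complex, dc =
+; double complex) and the trailing digit encodes the FP argument signature
+; (0 here, i.e. no FP arguments). A call through a function pointer must go
+; via such a stub because the callee's FP register usage is unknown at the
+; call site.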
+
+declare i32 @printf(i8*, ...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="true" }
diff --git a/test/CodeGen/Mips/i32k.ll b/test/CodeGen/Mips/i32k.ll
index c6da8b1ac9a03..f4dd1eb78a1d1 100644
--- a/test/CodeGen/Mips/i32k.ll
+++ b/test/CodeGen/Mips/i32k.ll
@@ -1,16 +1,23 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16a
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16b
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1
define i32 @main() nounwind {
entry:
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind
-; 16a: li ${{[0-9]+}}, 29905
-; 16b: li ${{[0-9]+}}, 16408
+; 16: lw ${{[0-9]+}}, 1f
+; 16: b 2f
+; 16: .align 2
+; 16: 1: .word 1075344593
+; 16: 2:
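+; (MIPS16 has no lui, so 32-bit constants were formerly assembled from
+; halves -- 16408<<16 | 29905 == 1075344593 -- and are now loaded whole
+; from an inline literal pool that the `b 2f` skips over; the negative
+; constant below is handled the same way.)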
+
%call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind
-; 16a: li ${{[0-9]+}}, 49127
-; 16b: li ${{[0-9]+}}, 35631
+
+; 16: lw ${{[0-9]+}}, 1f
+; 16: b 2f
+; 16: .align 2
+; 16: 1: .word -1075344593
+; 16: 2:
ret i32 0
}
diff --git a/test/CodeGen/Mips/i64arg.ll b/test/CodeGen/Mips/i64arg.ll
index 704014cba0107..5b2d135180354 100644
--- a/test/CodeGen/Mips/i64arg.ll
+++ b/test/CodeGen/Mips/i64arg.ll
@@ -2,27 +2,27 @@
define void @f1(i64 %ll1, float %f, i64 %ll, i32 %i, float %f2) nounwind {
entry:
-; CHECK: move $[[R1:[0-9]+]], $5
-; CHECK: move $[[R0:[0-9]+]], $4
-; CHECK: ori $6, ${{[0-9]+}}, 3855
-; CHECK: ori $7, ${{[0-9]+}}, 22136
-; CHECK: lw $25, %call16(ff1)
+; CHECK-DAG: lw $[[R2:[0-9]+]], 80($sp)
+; CHECK-DAG: lw $[[R3:[0-9]+]], 84($sp)
+; CHECK-DAG: move $[[R1:[0-9]+]], $5
+; CHECK-DAG: move $[[R0:[0-9]+]], $4
+; CHECK-DAG: ori $6, ${{[0-9]+}}, 3855
+; CHECK-DAG: ori $7, ${{[0-9]+}}, 22136
+; CHECK-DAG: lw $25, %call16(ff1)
; CHECK: jalr
tail call void @ff1(i32 %i, i64 1085102592623924856) nounwind
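+; (The i64 constant is 0x0F0F0F0F12345678: $6 receives the high word
+; 0x0F0F0F0F via lui/ori 3855, and $7 the low word 0x12345678 via
+; lui/ori 22136.)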
-; CHECK: lw $25, %call16(ff2)
-; CHECK: lw $[[R2:[0-9]+]], 80($sp)
-; CHECK: lw $[[R3:[0-9]+]], 84($sp)
-; CHECK: move $4, $[[R2]]
-; CHECK: move $5, $[[R3]]
+; CHECK-DAG: lw $25, %call16(ff2)
+; CHECK-DAG: move $4, $[[R2]]
+; CHECK-DAG: move $5, $[[R3]]
; CHECK: jalr $25
tail call void @ff2(i64 %ll, double 3.000000e+00) nounwind
%sub = add nsw i32 %i, -1
-; CHECK: lw $25, %call16(ff3)
-; CHECK: sw $[[R1]], 28($sp)
-; CHECK: sw $[[R0]], 24($sp)
-; CHECK: move $6, $[[R2]]
-; CHECK: move $7, $[[R3]]
-; CHECK: jalr $25
+; CHECK-DAG: lw $25, %call16(ff3)
+; CHECK-DAG: sw $[[R1]], 28($sp)
+; CHECK-DAG: sw $[[R0]], 24($sp)
+; CHECK-DAG: move $6, $[[R2]]
+; CHECK-DAG: move $7, $[[R3]]
+; CHECK: jalr $25
tail call void @ff3(i32 %i, i64 %ll, i32 %sub, i64 %ll1) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/inlineasm-operand-code.ll b/test/CodeGen/Mips/inlineasm-operand-code.ll
index 01978994b2f56..7bb4adc31bd81 100644
--- a/test/CodeGen/Mips/inlineasm-operand-code.ll
+++ b/test/CodeGen/Mips/inlineasm-operand-code.ll
@@ -10,7 +10,7 @@
; X with -3
define i32 @constraint_X() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_X:
+;CHECK_LITTLE_32-LABEL: constraint_X:
;CHECK_LITTLE_32: #APP
;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},0xfffffffffffffffd
;CHECK_LITTLE_32: #NO_APP
@@ -21,7 +21,7 @@ entry:
; x with -3
define i32 @constraint_x() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_x:
+;CHECK_LITTLE_32-LABEL: constraint_x:
;CHECK_LITTLE_32: #APP
;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},0xfffd
;CHECK_LITTLE_32: #NO_APP
@@ -32,7 +32,7 @@ entry:
; d with -3
define i32 @constraint_d() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_d:
+;CHECK_LITTLE_32-LABEL: constraint_d:
;CHECK_LITTLE_32: #APP
;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},-3
;CHECK_LITTLE_32: #NO_APP
@@ -43,7 +43,7 @@ entry:
; m with -3
define i32 @constraint_m() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_m:
+;CHECK_LITTLE_32-LABEL: constraint_m:
;CHECK_LITTLE_32: #APP
;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},-4
;CHECK_LITTLE_32: #NO_APP
@@ -54,7 +54,7 @@ entry:
; z with -3
define i32 @constraint_z() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_z:
+;CHECK_LITTLE_32-LABEL: constraint_z:
;CHECK_LITTLE_32: #APP
;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},-3
;CHECK_LITTLE_32: #NO_APP
@@ -71,7 +71,7 @@ entry:
; a long long in 32-bit mode (used to assert)
define i32 @constraint_longlong() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_longlong:
+;CHECK_LITTLE_32-LABEL: constraint_longlong:
;CHECK_LITTLE_32: #APP
;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},3
;CHECK_LITTLE_32: #NO_APP
@@ -82,7 +82,7 @@ entry:
; D, in little endian the source reg will be 4 bytes into the long long
define i32 @constraint_D() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_D:
+;CHECK_LITTLE_32-LABEL: constraint_D:
;CHECK_LITTLE_32: lw ${{[0-9]+}}, %got(uval)(${{[0-9,a-z]+}})
;CHECK_LITTLE_32: lw $[[SECOND:[0-9]+]], 4(${{[0-9]+}})
;CHECK_LITTLE_32: lw $[[FIRST:[0-9]+]], 0(${{[0-9]+}})
@@ -91,7 +91,7 @@ entry:
;CHECK_LITTLE_32: #NO_APP
; D, in big endian the source reg will also be 4 bytes into the long long
-;CHECK_BIG_32: constraint_D:
+;CHECK_BIG_32-LABEL: constraint_D:
;CHECK_BIG_32: lw ${{[0-9]+}}, %got(uval)(${{[0-9,a-z]+}})
;CHECK_BIG_32: lw $[[SECOND:[0-9]+]], 4(${{[0-9]+}})
;CHECK_BIG_32: lw $[[FIRST:[0-9]+]], 0(${{[0-9]+}})
@@ -107,7 +107,7 @@ entry:
; L, in little endian the source reg will be 0 bytes into the long long
define i32 @constraint_L() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_L:
+;CHECK_LITTLE_32-LABEL: constraint_L:
;CHECK_LITTLE_32: lw ${{[0-9]+}}, %got(uval)(${{[0-9,a-z]+}})
;CHECK_LITTLE_32: lw $[[SECOND:[0-9]+]], 4(${{[0-9]+}})
;CHECK_LITTLE_32: lw $[[FIRST:[0-9]+]], 0(${{[0-9]+}})
@@ -115,7 +115,7 @@ entry:
;CHECK_LITTLE_32: or ${{[0-9]+}},$[[FIRST]],${{[0-9]+}}
;CHECK_LITTLE_32: #NO_APP
; L, in big endian the source reg will be 4 bytes into the long long
-;CHECK_BIG_32: constraint_L:
+;CHECK_BIG_32-LABEL: constraint_L:
;CHECK_BIG_32: lw ${{[0-9]+}}, %got(uval)(${{[0-9,a-z]+}})
;CHECK_BIG_32: lw $[[SECOND:[0-9]+]], 4(${{[0-9]+}})
;CHECK_BIG_32: lw $[[FIRST:[0-9]+]], 0(${{[0-9]+}})
@@ -131,7 +131,7 @@ entry:
; M, in little endian the source reg will be 4 bytes into the long long
define i32 @constraint_M() nounwind {
entry:
-;CHECK_LITTLE_32: constraint_M:
+;CHECK_LITTLE_32-LABEL: constraint_M:
;CHECK_LITTLE_32: lw ${{[0-9]+}}, %got(uval)(${{[0-9,a-z]+}})
;CHECK_LITTLE_32: lw $[[SECOND:[0-9]+]], 4(${{[0-9]+}})
;CHECK_LITTLE_32: lw $[[FIRST:[0-9]+]], 0(${{[0-9]+}})
@@ -139,7 +139,7 @@ entry:
;CHECK_LITTLE_32: or ${{[0-9]+}},$[[SECOND]],${{[0-9]+}}
;CHECK_LITTLE_32: #NO_APP
; M, in big endian the source reg will be 0 bytes into the long long
-;CHECK_BIG_32: constraint_M:
+;CHECK_BIG_32-LABEL: constraint_M:
;CHECK_BIG_32: lw ${{[0-9]+}}, %got(uval)(${{[0-9,a-z]+}})
;CHECK_BIG_32: lw $[[SECOND:[0-9]+]], 4(${{[0-9]+}})
;CHECK_BIG_32: lw $[[FIRST:[0-9]+]], 0(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/int-to-float-conversion.ll b/test/CodeGen/Mips/int-to-float-conversion.ll
new file mode 100644
index 0000000000000..c2baf442f4ae1
--- /dev/null
+++ b/test/CodeGen/Mips/int-to-float-conversion.ll
@@ -0,0 +1,48 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
+
+@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
+@i3 = common global i32* null, align 4
+
+; 32-LABEL: test_float_int_:
+; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]
+; 32: cvt.s.w $f{{[0-9]+}}, $f[[R0]]
+
+define float @test_float_int_(i32 %a) {
+entry:
+ %conv = sitofp i32 %a to float
+ ret float %conv
+}
+
+; 32-LABEL: test_double_int_:
+; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]
+; 32: cvt.d.w $f{{[0-9]+}}, $f[[R0]]
+; 64-LABEL: test_double_int_:
+; 64: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]
+; 64: cvt.d.w $f{{[0-9]+}}, $f[[R0]]
+
+define double @test_double_int_(i32 %a) {
+entry:
+ %conv = sitofp i32 %a to double
+ ret double %conv
+}
+
+; 64-LABEL: test_float_LL_:
+; 64: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]
+; 64: cvt.s.l $f{{[0-9]+}}, $f[[R0]]
+
+define float @test_float_LL_(i64 %a) {
+entry:
+ %conv = sitofp i64 %a to float
+ ret float %conv
+}
+
+; 64-LABEL: test_double_LL_:
+; 64: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]
+; 64: cvt.d.l $f{{[0-9]+}}, $f[[R0]]
+
+define double @test_double_LL_(i64 %a) {
+entry:
+ %conv = sitofp i64 %a to double
+ ret double %conv
+}
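+
+; sitofp lowers to a GPR-to-FPR move followed by an FP convert: mtc1
+; (32-bit) or dmtc1 (64-bit) transfers the integer, then cvt.{s|d}.{w|l}
+; converts a word (w) or doubleword (l) to single (s) or double (d)
+; precision, which is exactly what the four prefixes above pin down per
+; source/destination type.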
diff --git a/test/CodeGen/Mips/largefr1.ll b/test/CodeGen/Mips/largefr1.ll
index 0fe89f71d9f38..9a5fd08d17acc 100644
--- a/test/CodeGen/Mips/largefr1.ll
+++ b/test/CodeGen/Mips/largefr1.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s -check-prefix=1
+
@i = common global i32 0, align 4
@j = common global i32 0, align 4
@.str = private unnamed_addr constant [8 x i8] c"%i %i \0A\00", align 1
@@ -22,22 +23,34 @@ entry:
define i32 @main() nounwind {
entry:
-; 1: main:
-; 1: 1: .word -797992
-; 1: li ${{[0-9]+}}, 12
-; 1: sll ${{[0-9]+}}, ${{[0-9]+}}, 16
+; 1-LABEL: main:
+; 1: 1: .word -798000
+; 1: lw ${{[0-9]+}}, 1f
+; 1: b 2f
+; 1: .align 2
+; 1: .word 800020
+
+; 1: b 2f
+; 1: .align 2
+; 1: .word 400020
+
+; 1: move ${{[0-9]+}}, $sp
; 1: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 2: move $sp, ${{[0-9]+}}
-; 2: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 1: li ${{[0-9]+}}, 6
-; 1: sll ${{[0-9]+}}, ${{[0-9]+}}, 16
+; 1: addiu ${{[0-9]+}}, ${{[0-9]+}}, 0
+
+; 1: b 2f
+; 1: .align 2
+; 1: .word 400220
+
+; 1: move ${{[0-9]+}}, $sp
; 1: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 2: move $sp, ${{[0-9]+}}
-; 2: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 1: addiu ${{[0-9]+}}, ${{[0-9]+}}, 6800
-; 1: li ${{[0-9]+}}, 1
-; 1: sll ${{[0-9]+}}, ${{[0-9]+}}, 16
-; 2: li ${{[0-9]+}}, 34463
+; 1: lw ${{[0-9]+}}, 0(${{[0-9]+}})
+
%retval = alloca i32, align 4
%one = alloca [100000 x i32], align 4
%two = alloca [100000 x i32], align 4
diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll
index 1e96346d1dd73..09fee3d9063fb 100644
--- a/test/CodeGen/Mips/largeimmprinting.ll
+++ b/test/CodeGen/Mips/largeimmprinting.ll
@@ -18,11 +18,11 @@ entry:
; 64: dsll $[[R0]], $[[R0]], 48
; 64: daddiu $[[R0]], $[[R0]], -1
; 64: dsll $[[R0]], $[[R0]], 16
-; 64: daddiu $[[R0]], $[[R0]], -48
+; 64: daddiu $[[R0]], $[[R0]], -32
; 64: daddu $sp, $sp, $[[R0]]
; 64: lui $[[R1:[0-9]+]], 1
; 64: daddu $[[R1]], $sp, $[[R1]]
-; 64: sd $ra, 40($[[R1]])
+; 64: sd $ra, 24($[[R1]])
%agg.tmp = alloca %struct.S1, align 1
%tmp = getelementptr inbounds %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
diff --git a/test/CodeGen/Mips/lazy-binding.ll b/test/CodeGen/Mips/lazy-binding.ll
new file mode 100644
index 0000000000000..839155adad9a8
--- /dev/null
+++ b/test/CodeGen/Mips/lazy-binding.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+; CHECK-LABEL: foo6:
+; CHECK: %while.body
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+; CHECK: %while.end
+
+define void @foo6(i32 %n) {
+entry:
+ %tobool1 = icmp eq i32 %n, 0
+ br i1 %tobool1, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %n.addr.02 = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %dec = add nsw i32 %n.addr.02, -1
+ tail call void @foo2()
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare void @foo2()
+
+; CHECK-LABEL: foo1:
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+; CHECK: lw $25, %call16(foo2)(${{[0-9]+}})
+; CHECK: jalr $25
+
+define void @foo1() {
+entry:
+ tail call void @foo2()
+ tail call void @foo2()
+ tail call void @foo2()
+ ret void
+}
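+
+; The point of these checks: with lazy binding, the first call to foo2 may
+; dispatch through the dynamic linker's resolver stub, which expects $25 to
+; hold the address just loaded from the GOT. The %call16 load therefore has
+; to be repeated before every call (even inside the loop) rather than being
+; hoisted or CSE'd.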
diff --git a/test/CodeGen/Mips/lit.local.cfg b/test/CodeGen/Mips/lit.local.cfg
index e157c540b5383..1fa54b428cd97 100644
--- a/test/CodeGen/Mips/lit.local.cfg
+++ b/test/CodeGen/Mips/lit.local.cfg
@@ -1,5 +1,3 @@
-config.suffixes = ['.ll', '.c', '.cpp', '.test']
-
targets = set(config.root.targets_to_build.split())
if not 'Mips' in targets:
config.unsupported = True
diff --git a/test/CodeGen/Mips/longbranch.ll b/test/CodeGen/Mips/longbranch.ll
index 1a4f79c191e14..af192d0e92170 100644
--- a/test/CodeGen/Mips/longbranch.ll
+++ b/test/CodeGen/Mips/longbranch.ll
@@ -1,13 +1,17 @@
-; RUN: llc -march=mipsel -force-mips-long-branch < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mipsel -force-mips-long-branch -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=O32
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=N64
@g0 = external global i32
define void @foo1(i32 %s) nounwind {
entry:
+; O32: nop
+; O32: addiu $sp, $sp, -8
; O32: bal
; O32: lui $1, 0
; O32: addiu $1, $1, {{[0-9]+}}
+; N64: nop
+; N64: daddiu $sp, $sp, -16
; N64: lui $1, 0
; N64: daddiu $1, $1, 0
; N64: dsll $1, $1, 16
diff --git a/test/CodeGen/Mips/mips16_32_1.ll b/test/CodeGen/Mips/mips16_32_1.ll
index 6f4826ea96003..e156641d4e508 100644
--- a/test/CodeGen/Mips/mips16_32_1.ll
+++ b/test/CodeGen/Mips/mips16_32_1.ll
@@ -11,4 +11,4 @@ entry:
; CHECK: save {{.+}}
; CHECK: restore {{.+}}
; CHECK: .end foo
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_10.ll b/test/CodeGen/Mips/mips16_32_10.ll
index 330dbfec63b92..7c017b8e4b755 100644
--- a/test/CodeGen/Mips/mips16_32_10.ll
+++ b/test/CodeGen/Mips/mips16_32_10.ll
@@ -54,6 +54,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_3.ll b/test/CodeGen/Mips/mips16_32_3.ll
index 8874a88725344..dd94ec1ce80af 100644
--- a/test/CodeGen/Mips/mips16_32_3.ll
+++ b/test/CodeGen/Mips/mips16_32_3.ll
@@ -65,6 +65,6 @@ entry:
; 32: .set reorder
; 32: .end main
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_4.ll b/test/CodeGen/Mips/mips16_32_4.ll
index cdaed6c71be09..5e49071394458 100644
--- a/test/CodeGen/Mips/mips16_32_4.ll
+++ b/test/CodeGen/Mips/mips16_32_4.ll
@@ -60,6 +60,6 @@ entry:
; 32: .end main
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_5.ll b/test/CodeGen/Mips/mips16_32_5.ll
index 45e0bf49ddd2e..17900a2dc75fb 100644
--- a/test/CodeGen/Mips/mips16_32_5.ll
+++ b/test/CodeGen/Mips/mips16_32_5.ll
@@ -75,6 +75,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_6.ll b/test/CodeGen/Mips/mips16_32_6.ll
index f4b8e7a91adca..a77031af8be6d 100644
--- a/test/CodeGen/Mips/mips16_32_6.ll
+++ b/test/CodeGen/Mips/mips16_32_6.ll
@@ -81,6 +81,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_7.ll b/test/CodeGen/Mips/mips16_32_7.ll
index f8726eadc70cc..895b5d4346a81 100644
--- a/test/CodeGen/Mips/mips16_32_7.ll
+++ b/test/CodeGen/Mips/mips16_32_7.ll
@@ -71,6 +71,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_8.ll b/test/CodeGen/Mips/mips16_32_8.ll
index e51f296f9df3f..4152d687093e9 100644
--- a/test/CodeGen/Mips/mips16_32_8.ll
+++ b/test/CodeGen/Mips/mips16_32_8.ll
@@ -68,7 +68,7 @@ entry:
; 32: .set reorder
; 32: .end main
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_9.ll b/test/CodeGen/Mips/mips16_32_9.ll
index f5ff368490155..c9b494f2a8904 100644
--- a/test/CodeGen/Mips/mips16_32_9.ll
+++ b/test/CodeGen/Mips/mips16_32_9.ll
@@ -46,6 +46,6 @@ entry:
-attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_fpret.ll b/test/CodeGen/Mips/mips16_fpret.ll
new file mode 100644
index 0000000000000..c132f63cfb016
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_fpret.ll
@@ -0,0 +1,76 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=3
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=4
+
+@x = global float 0x41F487E980000000, align 4
+@dx = global double 0x41CDCC8BC4800000, align 8
+@cx = global { float, float } { float 1.000000e+00, float 9.900000e+01 }, align 4
+@dcx = global { double, double } { double 0x42CE5E14A412B480, double 0x423AA4C580DB0000 }, align 8
+
+define float @foox() {
+entry:
+ %0 = load float* @x, align 4
+ ret float %0
+; 1: .ent foox
+; 1: lw $2, %lo(x)(${{[0-9]+}})
+; 1: jal __mips16_ret_sf
+}
+
+define double @foodx() {
+entry:
+ %0 = load double* @dx, align 8
+ ret double %0
+; 1: .ent foodx
+; 1: lw $2, %lo(dx)(${{[0-9]+}})
+; 1: jal __mips16_ret_df
+; 2: .ent foodx
+; 2: lw $3, 4(${{[0-9]+}})
+; 2: jal __mips16_ret_df
+
+}
+
+define { float, float } @foocx() {
+entry:
+ %retval = alloca { float, float }, align 4
+ %cx.real = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 0)
+ %cx.imag = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 1)
+ %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ store float %cx.real, float* %real
+ store float %cx.imag, float* %imag
+ %0 = load { float, float }* %retval
+ ret { float, float } %0
+; 1: .ent foocx
+; 1: lw $2, %lo(cx)(${{[0-9]+}})
+; 1: jal __mips16_ret_sc
+; 2: .ent foocx
+; 2: lw $3, 4(${{[0-9]+}})
+; 2: jal __mips16_ret_sc
+}
+
+define { double, double } @foodcx() {
+entry:
+ %retval = alloca { double, double }, align 8
+ %dcx.real = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 0)
+ %dcx.imag = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 1)
+ %real = getelementptr inbounds { double, double }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }* %retval, i32 0, i32 1
+ store double %dcx.real, double* %real
+ store double %dcx.imag, double* %imag
+ %0 = load { double, double }* %retval
+ ret { double, double } %0
+; 1: .ent foodcx
+; 1: lw ${{[0-9]}}, %lo(dcx)(${{[0-9]+}})
+; 1: jal __mips16_ret_dc
+; 2: .ent foodcx
+; 2: lw ${{[0-9]}}, 4(${{[0-9]+}})
+; 2: jal __mips16_ret_dc
+; 3: .ent foodcx
+; 3: lw $4, 8(${{[0-9]+}})
+; 3: jal __mips16_ret_dc
+; 4: .ent foodcx
+; 4: lw $5, 12(${{[0-9]+}})
+; 4: jal __mips16_ret_dc
+}
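+
+; The four prefixes all run over the same llc output; each pins down a
+; different word of the returned value ($2, $3, $4, $5 in order) and the
+; __mips16_ret_{sf,df,sc,dc} helper that a MIPS16 function calls so the
+; result also lands in the FP registers a hard-float caller expects.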
diff --git a/test/CodeGen/Mips/mips16fpe.ll b/test/CodeGen/Mips/mips16fpe.ll
index 433543607967b..10c5163f7fd0a 100644
--- a/test/CodeGen/Mips/mips16fpe.ll
+++ b/test/CodeGen/Mips/mips16fpe.ll
@@ -41,7 +41,7 @@
define void @test_addsf3() nounwind {
entry:
-;16hf: test_addsf3:
+;16hf-LABEL: test_addsf3:
%0 = load float* @x, align 4
%1 = load float* @y, align 4
%add = fadd float %0, %1
@@ -52,7 +52,7 @@ entry:
define void @test_adddf3() nounwind {
entry:
-;16hf: test_adddf3:
+;16hf-LABEL: test_adddf3:
%0 = load double* @xd, align 8
%1 = load double* @yd, align 8
%add = fadd double %0, %1
@@ -63,7 +63,7 @@ entry:
define void @test_subsf3() nounwind {
entry:
-;16hf: test_subsf3:
+;16hf-LABEL: test_subsf3:
%0 = load float* @x, align 4
%1 = load float* @y, align 4
%sub = fsub float %0, %1
@@ -74,7 +74,7 @@ entry:
define void @test_subdf3() nounwind {
entry:
-;16hf: test_subdf3:
+;16hf-LABEL: test_subdf3:
%0 = load double* @xd, align 8
%1 = load double* @yd, align 8
%sub = fsub double %0, %1
@@ -85,7 +85,7 @@ entry:
define void @test_mulsf3() nounwind {
entry:
-;16hf: test_mulsf3:
+;16hf-LABEL: test_mulsf3:
%0 = load float* @x, align 4
%1 = load float* @y, align 4
%mul = fmul float %0, %1
@@ -96,7 +96,7 @@ entry:
define void @test_muldf3() nounwind {
entry:
-;16hf: test_muldf3:
+;16hf-LABEL: test_muldf3:
%0 = load double* @xd, align 8
%1 = load double* @yd, align 8
%mul = fmul double %0, %1
@@ -107,7 +107,7 @@ entry:
define void @test_divsf3() nounwind {
entry:
-;16hf: test_divsf3:
+;16hf-LABEL: test_divsf3:
%0 = load float* @y, align 4
%1 = load float* @x, align 4
%div = fdiv float %0, %1
@@ -118,7 +118,7 @@ entry:
define void @test_divdf3() nounwind {
entry:
-;16hf: test_divdf3:
+;16hf-LABEL: test_divdf3:
%0 = load double* @yd, align 8
%mul = fmul double %0, 2.000000e+00
%1 = load double* @xd, align 8
@@ -130,7 +130,7 @@ entry:
define void @test_extendsfdf2() nounwind {
entry:
-;16hf: test_extendsfdf2:
+;16hf-LABEL: test_extendsfdf2:
%0 = load float* @x, align 4
%conv = fpext float %0 to double
store double %conv, double* @extendsfdf2_result, align 8
@@ -140,7 +140,7 @@ entry:
define void @test_truncdfsf2() nounwind {
entry:
-;16hf: test_truncdfsf2:
+;16hf-LABEL: test_truncdfsf2:
%0 = load double* @xd2, align 8
%conv = fptrunc double %0 to float
store float %conv, float* @truncdfsf2_result, align 4
@@ -150,7 +150,7 @@ entry:
define void @test_fix_truncsfsi() nounwind {
entry:
-;16hf: test_fix_truncsfsi:
+;16hf-LABEL: test_fix_truncsfsi:
%0 = load float* @x, align 4
%conv = fptosi float %0 to i32
store i32 %conv, i32* @fix_truncsfsi_result, align 4
@@ -160,7 +160,7 @@ entry:
define void @test_fix_truncdfsi() nounwind {
entry:
-;16hf: test_fix_truncdfsi:
+;16hf-LABEL: test_fix_truncdfsi:
%0 = load double* @xd, align 8
%conv = fptosi double %0 to i32
store i32 %conv, i32* @fix_truncdfsi_result, align 4
@@ -170,7 +170,7 @@ entry:
define void @test_floatsisf() nounwind {
entry:
-;16hf: test_floatsisf:
+;16hf-LABEL: test_floatsisf:
%0 = load i32* @si, align 4
%conv = sitofp i32 %0 to float
store float %conv, float* @floatsisf_result, align 4
@@ -180,7 +180,7 @@ entry:
define void @test_floatsidf() nounwind {
entry:
-;16hf: test_floatsidf:
+;16hf-LABEL: test_floatsidf:
%0 = load i32* @si, align 4
%conv = sitofp i32 %0 to double
store double %conv, double* @floatsidf_result, align 8
@@ -190,7 +190,7 @@ entry:
define void @test_floatunsisf() nounwind {
entry:
-;16hf: test_floatunsisf:
+;16hf-LABEL: test_floatunsisf:
%0 = load i32* @ui, align 4
%conv = uitofp i32 %0 to float
store float %conv, float* @floatunsisf_result, align 4
@@ -200,7 +200,7 @@ entry:
define void @test_floatunsidf() nounwind {
entry:
-;16hf: test_floatunsidf:
+;16hf-LABEL: test_floatunsidf:
%0 = load i32* @ui, align 4
%conv = uitofp i32 %0 to double
store double %conv, double* @floatunsidf_result, align 8
@@ -210,7 +210,7 @@ entry:
define void @test_eqsf2() nounwind {
entry:
-;16hf: test_eqsf2:
+;16hf-LABEL: test_eqsf2:
%0 = load float* @x, align 4
%1 = load float* @xx, align 4
%cmp = fcmp oeq float %0, %1
@@ -222,7 +222,7 @@ entry:
define void @test_eqdf2() nounwind {
entry:
-;16hf: test_eqdf2:
+;16hf-LABEL: test_eqdf2:
%0 = load double* @xd, align 8
%1 = load double* @xxd, align 8
%cmp = fcmp oeq double %0, %1
@@ -234,7 +234,7 @@ entry:
define void @test_nesf2() nounwind {
entry:
-;16hf: test_nesf2:
+;16hf-LABEL: test_nesf2:
%0 = load float* @x, align 4
%1 = load float* @y, align 4
%cmp = fcmp une float %0, %1
@@ -246,7 +246,7 @@ entry:
define void @test_nedf2() nounwind {
entry:
-;16hf: test_nedf2:
+;16hf-LABEL: test_nedf2:
%0 = load double* @xd, align 8
%1 = load double* @yd, align 8
%cmp = fcmp une double %0, %1
@@ -258,7 +258,7 @@ entry:
define void @test_gesf2() nounwind {
entry:
-;16hf: test_gesf2:
+;16hf-LABEL: test_gesf2:
%0 = load float* @x, align 4
%1 = load float* @xx, align 4
%cmp = fcmp oge float %0, %1
@@ -273,7 +273,7 @@ entry:
define void @test_gedf2() nounwind {
entry:
-;16hf: test_gedf2:
+;16hf-LABEL: test_gedf2:
%0 = load double* @xd, align 8
%1 = load double* @xxd, align 8
%cmp = fcmp oge double %0, %1
@@ -288,7 +288,7 @@ entry:
define void @test_ltsf2() nounwind {
entry:
-;16hf: test_ltsf2:
+;16hf-LABEL: test_ltsf2:
%0 = load float* @x, align 4
%1 = load float* @xx, align 4
%lnot = fcmp uge float %0, %1
@@ -304,7 +304,7 @@ entry:
define void @test_ltdf2() nounwind {
entry:
-;16hf: test_ltdf2:
+;16hf-LABEL: test_ltdf2:
%0 = load double* @xd, align 8
%1 = load double* @xxd, align 8
%lnot = fcmp uge double %0, %1
@@ -320,7 +320,7 @@ entry:
define void @test_lesf2() nounwind {
entry:
-;16hf: test_lesf2:
+;16hf-LABEL: test_lesf2:
%0 = load float* @x, align 4
%1 = load float* @xx, align 4
%cmp = fcmp ole float %0, %1
@@ -335,7 +335,7 @@ entry:
define void @test_ledf2() nounwind {
entry:
-;16hf: test_ledf2:
+;16hf-LABEL: test_ledf2:
%0 = load double* @xd, align 8
%1 = load double* @xxd, align 8
%cmp = fcmp ole double %0, %1
@@ -350,7 +350,7 @@ entry:
define void @test_gtsf2() nounwind {
entry:
-;16hf: test_gtsf2:
+;16hf-LABEL: test_gtsf2:
%0 = load float* @x, align 4
%1 = load float* @xx, align 4
%lnot = fcmp ule float %0, %1
@@ -365,7 +365,7 @@ entry:
define void @test_gtdf2() nounwind {
entry:
-;16hf: test_gtdf2:
+;16hf-LABEL: test_gtdf2:
%0 = load double* @xd, align 8
%1 = load double* @xxd, align 8
%lnot = fcmp ule double %0, %1
diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll
index 5892cab4f8eaf..dc8bbfdd5bafd 100644
--- a/test/CodeGen/Mips/mips64-f128.ll
+++ b/test/CodeGen/Mips/mips64-f128.ll
@@ -7,7 +7,7 @@
@gf1 = external global float
@gd1 = external global double
-; CHECK: addLD:
+; CHECK-LABEL: addLD:
; CHECK: ld $25, %call16(__addtf3)
define fp128 @addLD() {
@@ -18,7 +18,7 @@ entry:
ret fp128 %add
}
-; CHECK: subLD:
+; CHECK-LABEL: subLD:
; CHECK: ld $25, %call16(__subtf3)
define fp128 @subLD() {
@@ -29,7 +29,7 @@ entry:
ret fp128 %sub
}
-; CHECK: mulLD:
+; CHECK-LABEL: mulLD:
; CHECK: ld $25, %call16(__multf3)
define fp128 @mulLD() {
@@ -40,7 +40,7 @@ entry:
ret fp128 %mul
}
-; CHECK: divLD:
+; CHECK-LABEL: divLD:
; CHECK: ld $25, %call16(__divtf3)
define fp128 @divLD() {
@@ -51,7 +51,7 @@ entry:
ret fp128 %div
}
-; CHECK: conv_LD_char:
+; CHECK-LABEL: conv_LD_char:
; CHECK: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_char(i8 signext %a) {
@@ -60,7 +60,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_short:
+; CHECK-LABEL: conv_LD_short:
; CHECK: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_short(i16 signext %a) {
@@ -69,7 +69,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_int:
+; CHECK-LABEL: conv_LD_int:
; CHECK: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_int(i32 %a) {
@@ -78,7 +78,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_LL:
+; CHECK-LABEL: conv_LD_LL:
; CHECK: ld $25, %call16(__floatditf)
define fp128 @conv_LD_LL(i64 %a) {
@@ -87,7 +87,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_UChar:
+; CHECK-LABEL: conv_LD_UChar:
; CHECK: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UChar(i8 zeroext %a) {
@@ -96,7 +96,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_UShort:
+; CHECK-LABEL: conv_LD_UShort:
; CHECK: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UShort(i16 zeroext %a) {
@@ -105,7 +105,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_UInt:
+; CHECK-LABEL: conv_LD_UInt:
; CHECK: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UInt(i32 %a) {
@@ -114,7 +114,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_ULL:
+; CHECK-LABEL: conv_LD_ULL:
; CHECK: ld $25, %call16(__floatunditf)
define fp128 @conv_LD_ULL(i64 %a) {
@@ -123,7 +123,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_char_LD:
+; CHECK-LABEL: conv_char_LD:
; CHECK: ld $25, %call16(__fixtfsi)
define signext i8 @conv_char_LD(fp128 %a) {
@@ -132,7 +132,7 @@ entry:
ret i8 %conv
}
-; CHECK: conv_short_LD:
+; CHECK-LABEL: conv_short_LD:
; CHECK: ld $25, %call16(__fixtfsi)
define signext i16 @conv_short_LD(fp128 %a) {
@@ -141,7 +141,7 @@ entry:
ret i16 %conv
}
-; CHECK: conv_int_LD:
+; CHECK-LABEL: conv_int_LD:
; CHECK: ld $25, %call16(__fixtfsi)
define i32 @conv_int_LD(fp128 %a) {
@@ -150,7 +150,7 @@ entry:
ret i32 %conv
}
-; CHECK: conv_LL_LD:
+; CHECK-LABEL: conv_LL_LD:
; CHECK: ld $25, %call16(__fixtfdi)
define i64 @conv_LL_LD(fp128 %a) {
@@ -159,7 +159,7 @@ entry:
ret i64 %conv
}
-; CHECK: conv_UChar_LD:
+; CHECK-LABEL: conv_UChar_LD:
; CHECK: ld $25, %call16(__fixtfsi)
define zeroext i8 @conv_UChar_LD(fp128 %a) {
@@ -168,7 +168,7 @@ entry:
ret i8 %conv
}
-; CHECK: conv_UShort_LD:
+; CHECK-LABEL: conv_UShort_LD:
; CHECK: ld $25, %call16(__fixtfsi)
define zeroext i16 @conv_UShort_LD(fp128 %a) {
@@ -177,7 +177,7 @@ entry:
ret i16 %conv
}
-; CHECK: conv_UInt_LD:
+; CHECK-LABEL: conv_UInt_LD:
; CHECK: ld $25, %call16(__fixunstfsi)
define i32 @conv_UInt_LD(fp128 %a) {
@@ -186,7 +186,7 @@ entry:
ret i32 %conv
}
-; CHECK: conv_ULL_LD:
+; CHECK-LABEL: conv_ULL_LD:
; CHECK: ld $25, %call16(__fixunstfdi)
define i64 @conv_ULL_LD(fp128 %a) {
@@ -195,7 +195,7 @@ entry:
ret i64 %conv
}
-; CHECK: conv_LD_float:
+; CHECK-LABEL: conv_LD_float:
; CHECK: ld $25, %call16(__extendsftf2)
define fp128 @conv_LD_float(float %a) {
@@ -204,7 +204,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_LD_double:
+; CHECK-LABEL: conv_LD_double:
; CHECK: ld $25, %call16(__extenddftf2)
define fp128 @conv_LD_double(double %a) {
@@ -213,7 +213,7 @@ entry:
ret fp128 %conv
}
-; CHECK: conv_float_LD:
+; CHECK-LABEL: conv_float_LD:
; CHECK: ld $25, %call16(__trunctfsf2)
define float @conv_float_LD(fp128 %a) {
@@ -222,7 +222,7 @@ entry:
ret float %conv
}
-; CHECK: conv_double_LD:
+; CHECK-LABEL: conv_double_LD:
; CHECK: ld $25, %call16(__trunctfdf2)
define double @conv_double_LD(fp128 %a) {
@@ -231,13 +231,13 @@ entry:
ret double %conv
}
-; CHECK: libcall1_fabsl:
-; CHECK: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
-; CHECK: daddiu $[[R1:[0-9]+]], $zero, 1
-; CHECK: dsll $[[R2:[0-9]+]], $[[R1]], 63
-; CHECK: daddiu $[[R3:[0-9]+]], $[[R2]], -1
-; CHECK: and $4, $[[R0]], $[[R3]]
-; CHECK: ld $2, 0($[[R4]])
+; CHECK-LABEL: libcall1_fabsl:
+; CHECK-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
+; CHECK-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
+; CHECK-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
+; CHECK-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
+; CHECK-DAG: and $4, $[[R0]], $[[R3]]
+; CHECK-DAG: ld $2, 0($[[R4]])
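+; (i.e. fabsl is expanded inline: 1<<63 minus 1 gives the 0x7fffffffffffffff
+; mask that clears the sign bit of the fp128's upper 64 bits; the DAG
+; variants let these independent instructions schedule in any order.)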
define fp128 @libcall1_fabsl() {
entry:
@@ -248,7 +248,7 @@ entry:
declare fp128 @fabsl(fp128) #1
-; CHECK: libcall1_ceill:
+; CHECK-LABEL: libcall1_ceill:
; CHECK: ld $25, %call16(ceill)
define fp128 @libcall1_ceill() {
@@ -260,7 +260,7 @@ entry:
declare fp128 @ceill(fp128) #1
-; CHECK: libcall1_sinl:
+; CHECK-LABEL: libcall1_sinl:
; CHECK: ld $25, %call16(sinl)
define fp128 @libcall1_sinl() {
@@ -272,7 +272,7 @@ entry:
declare fp128 @sinl(fp128) #2
-; CHECK: libcall1_cosl:
+; CHECK-LABEL: libcall1_cosl:
; CHECK: ld $25, %call16(cosl)
define fp128 @libcall1_cosl() {
@@ -284,7 +284,7 @@ entry:
declare fp128 @cosl(fp128) #2
-; CHECK: libcall1_expl:
+; CHECK-LABEL: libcall1_expl:
; CHECK: ld $25, %call16(expl)
define fp128 @libcall1_expl() {
@@ -296,7 +296,7 @@ entry:
declare fp128 @expl(fp128) #2
-; CHECK: libcall1_exp2l:
+; CHECK-LABEL: libcall1_exp2l:
; CHECK: ld $25, %call16(exp2l)
define fp128 @libcall1_exp2l() {
@@ -308,7 +308,7 @@ entry:
declare fp128 @exp2l(fp128) #2
-; CHECK: libcall1_logl:
+; CHECK-LABEL: libcall1_logl:
; CHECK: ld $25, %call16(logl)
define fp128 @libcall1_logl() {
@@ -320,7 +320,7 @@ entry:
declare fp128 @logl(fp128) #2
-; CHECK: libcall1_log2l:
+; CHECK-LABEL: libcall1_log2l:
; CHECK: ld $25, %call16(log2l)
define fp128 @libcall1_log2l() {
@@ -332,7 +332,7 @@ entry:
declare fp128 @log2l(fp128) #2
-; CHECK: libcall1_log10l:
+; CHECK-LABEL: libcall1_log10l:
; CHECK: ld $25, %call16(log10l)
define fp128 @libcall1_log10l() {
@@ -344,7 +344,7 @@ entry:
declare fp128 @log10l(fp128) #2
-; CHECK: libcall1_nearbyintl:
+; CHECK-LABEL: libcall1_nearbyintl:
; CHECK: ld $25, %call16(nearbyintl)
define fp128 @libcall1_nearbyintl() {
@@ -356,7 +356,7 @@ entry:
declare fp128 @nearbyintl(fp128) #1
-; CHECK: libcall1_floorl:
+; CHECK-LABEL: libcall1_floorl:
; CHECK: ld $25, %call16(floorl)
define fp128 @libcall1_floorl() {
@@ -368,7 +368,7 @@ entry:
declare fp128 @floorl(fp128) #1
-; CHECK: libcall1_sqrtl:
+; CHECK-LABEL: libcall1_sqrtl:
; CHECK: ld $25, %call16(sqrtl)
define fp128 @libcall1_sqrtl() {
@@ -380,7 +380,7 @@ entry:
declare fp128 @sqrtl(fp128) #2
-; CHECK: libcall1_rintl:
+; CHECK-LABEL: libcall1_rintl:
; CHECK: ld $25, %call16(rintl)
define fp128 @libcall1_rintl() {
@@ -392,7 +392,7 @@ entry:
declare fp128 @rintl(fp128) #1
-; CHECK: libcall_powil:
+; CHECK-LABEL: libcall_powil:
; CHECK: ld $25, %call16(__powitf2)
define fp128 @libcall_powil(fp128 %a, i32 %b) {
@@ -403,18 +403,18 @@ entry:
declare fp128 @llvm.powi.f128(fp128, i32) #3
-; CHECK: libcall2_copysignl:
-; CHECK: daddiu $[[R2:[0-9]+]], $zero, 1
-; CHECK: dsll $[[R3:[0-9]+]], $[[R2]], 63
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $[[R1:[0-9]+]], 8($[[R0]])
-; CHECK: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
-; CHECK: ld $[[R5:[0-9]+]], %got_disp(gld0)
-; CHECK: ld $[[R6:[0-9]+]], 8($[[R5]])
-; CHECK: daddiu $[[R7:[0-9]+]], $[[R3]], -1
-; CHECK: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
-; CHECK: or $4, $[[R8]], $[[R4]]
-; CHECK: ld $2, 0($[[R5]])
+; CHECK-LABEL: libcall2_copysignl:
+; CHECK-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
+; CHECK-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
+; CHECK-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; CHECK-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
+; CHECK-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
+; CHECK-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
+; CHECK-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
+; CHECK-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
+; CHECK-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
+; CHECK-DAG: or $4, $[[R8]], $[[R4]]
+; CHECK-DAG: ld $2, 0($[[R5]])
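+; (copysignl is likewise inlined: the sign bit of gld1's upper doubleword,
+; isolated with the 1<<63 mask, is OR'd into gld0's upper doubleword after
+; its own sign bit has been cleared with the complementary mask.)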
define fp128 @libcall2_copysignl() {
entry:
@@ -426,7 +426,7 @@ entry:
declare fp128 @copysignl(fp128, fp128) #1
-; CHECK: libcall2_powl:
+; CHECK-LABEL: libcall2_powl:
; CHECK: ld $25, %call16(powl)
define fp128 @libcall2_powl() {
@@ -439,7 +439,7 @@ entry:
declare fp128 @powl(fp128, fp128) #2
-; CHECK: libcall2_fmodl:
+; CHECK-LABEL: libcall2_fmodl:
; CHECK: ld $25, %call16(fmodl)
define fp128 @libcall2_fmodl() {
@@ -452,7 +452,7 @@ entry:
declare fp128 @fmodl(fp128, fp128) #2
-; CHECK: libcall3_fmal:
+; CHECK-LABEL: libcall3_fmal:
; CHECK: ld $25, %call16(fmal)
define fp128 @libcall3_fmal() {
@@ -466,7 +466,7 @@ entry:
declare fp128 @llvm.fma.f128(fp128, fp128, fp128) #4
-; CHECK: cmp_lt:
+; CHECK-LABEL: cmp_lt:
; CHECK: ld $25, %call16(__lttf2)
define i32 @cmp_lt(fp128 %a, fp128 %b) {
@@ -476,7 +476,7 @@ entry:
ret i32 %conv
}
-; CHECK: cmp_le:
+; CHECK-LABEL: cmp_le:
; CHECK: ld $25, %call16(__letf2)
define i32 @cmp_le(fp128 %a, fp128 %b) {
@@ -486,7 +486,7 @@ entry:
ret i32 %conv
}
-; CHECK: cmp_gt:
+; CHECK-LABEL: cmp_gt:
; CHECK: ld $25, %call16(__gttf2)
define i32 @cmp_gt(fp128 %a, fp128 %b) {
@@ -496,7 +496,7 @@ entry:
ret i32 %conv
}
-; CHECK: cmp_ge:
+; CHECK-LABEL: cmp_ge:
; CHECK: ld $25, %call16(__getf2)
define i32 @cmp_ge(fp128 %a, fp128 %b) {
@@ -506,7 +506,7 @@ entry:
ret i32 %conv
}
-; CHECK: cmp_eq:
+; CHECK-LABEL: cmp_eq:
; CHECK: ld $25, %call16(__eqtf2)
define i32 @cmp_eq(fp128 %a, fp128 %b) {
@@ -516,7 +516,7 @@ entry:
ret i32 %conv
}
-; CHECK: cmp_ne:
+; CHECK-LABEL: cmp_ne:
; CHECK: ld $25, %call16(__netf2)
define i32 @cmp_ne(fp128 %a, fp128 %b) {
@@ -526,7 +526,7 @@ entry:
ret i32 %conv
}
-; CHECK: load_LD_LD:
+; CHECK-LABEL: load_LD_LD:
; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
; CHECK: ld $2, 0($[[R0]])
; CHECK: ld $4, 8($[[R0]])
@@ -537,7 +537,7 @@ entry:
ret fp128 %0
}
-; CHECK: load_LD_float:
+; CHECK-LABEL: load_LD_float:
; CHECK: ld $[[R0:[0-9]+]], %got_disp(gf1)
; CHECK: lw $4, 0($[[R0]])
; CHECK: ld $25, %call16(__extendsftf2)
@@ -550,7 +550,7 @@ entry:
ret fp128 %conv
}
-; CHECK: load_LD_double:
+; CHECK-LABEL: load_LD_double:
; CHECK: ld $[[R0:[0-9]+]], %got_disp(gd1)
; CHECK: ld $4, 0($[[R0]])
; CHECK: ld $25, %call16(__extenddftf2)
@@ -563,7 +563,7 @@ entry:
ret fp128 %conv
}
-; CHECK: store_LD_LD:
+; CHECK-LABEL: store_LD_LD:
; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
; CHECK: ld $[[R1:[0-9]+]], 0($[[R0]])
; CHECK: ld $[[R2:[0-9]+]], 8($[[R0]])
@@ -578,7 +578,7 @@ entry:
ret void
}
-; CHECK: store_LD_float:
+; CHECK-LABEL: store_LD_float:
; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
; CHECK: ld $4, 0($[[R0]])
; CHECK: ld $5, 8($[[R0]])
@@ -595,7 +595,7 @@ entry:
ret void
}
-; CHECK: store_LD_double:
+; CHECK-LABEL: store_LD_double:
; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
; CHECK: ld $4, 0($[[R0]])
; CHECK: ld $5, 8($[[R0]])
@@ -612,7 +612,7 @@ entry:
ret void
}
-; CHECK: select_LD:
+; CHECK-LABEL: select_LD:
; CHECK: movn $8, $6, $4
; CHECK: movn $9, $7, $4
; CHECK: move $2, $8
@@ -625,7 +625,7 @@ entry:
ret fp128 %cond
}
-; CHECK: selectCC_LD:
+; CHECK-LABEL: selectCC_LD:
; CHECK: move $[[R0:[0-9]+]], $11
; CHECK: move $[[R1:[0-9]+]], $10
; CHECK: move $[[R2:[0-9]+]], $9
diff --git a/test/CodeGen/Mips/mips64-libcall.ll b/test/CodeGen/Mips/mips64-libcall.ll
index d54598be70d80..290baafd18b9c 100644
--- a/test/CodeGen/Mips/mips64-libcall.ll
+++ b/test/CodeGen/Mips/mips64-libcall.ll
@@ -5,7 +5,7 @@
; Check that %add is not passed in an integer register.
;
-; HARD: callfloor:
+; HARD-LABEL: callfloor:
; HARD-NOT: dmfc1 $4
define double @callfloor(double %d) nounwind readnone {
@@ -19,7 +19,7 @@ declare double @floor(double) nounwind readnone
; Check call16.
;
-; SOFT: f64add:
+; SOFT-LABEL: f64add:
; SOFT: ld $25, %call16(__adddf3)
define double @f64add(double %a, double %b) {
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
index 041831149057e..2894d698adcc9 100644
--- a/test/CodeGen/Mips/mips64instrs.ll
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -1,4 +1,7 @@
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64 -verify-machineinstrs < %s | FileCheck %s
+
+@gll0 = common global i64 0, align 8
+@gll1 = common global i64 0, align 8
define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
entry:
@@ -86,23 +89,33 @@ entry:
define i64 @f14(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: ddiv $zero
+; CHECK-LABEL: f14:
+; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; CHECK: teq $[[R0]], $zero, 7
; CHECK: mflo
- %div = sdiv i64 %a, %b
+ %0 = load i64* @gll0, align 8
+ %1 = load i64* @gll1, align 8
+ %div = sdiv i64 %0, %1
ret i64 %div
}
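+
+; f14-f17 check that each 64-bit divide emits a trap guard: ddiv/ddivu leave
+; the quotient in LO and the remainder in HI, and the following teq traps
+; (break code 7) when the divisor register is zero.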
-define i64 @f15(i64 %a, i64 %b) nounwind readnone {
+define i64 @f15() nounwind readnone {
entry:
-; CHECK: ddivu $zero
+; CHECK-LABEL: f15:
+; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; CHECK: teq $[[R0]], $zero, 7
; CHECK: mflo
- %div = udiv i64 %a, %b
+ %0 = load i64* @gll0, align 8
+ %1 = load i64* @gll1, align 8
+ %div = udiv i64 %0, %1
ret i64 %div
}
define i64 @f16(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: ddiv $zero
+; CHECK-LABEL: f16:
+; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; CHECK: teq $[[R0]], $zero, 7
; CHECK: mfhi
%rem = srem i64 %a, %b
ret i64 %rem
@@ -110,7 +123,9 @@ entry:
define i64 @f17(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: ddivu $zero
+; CHECK-LABEL: f17:
+; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
+; CHECK: teq $[[R0]], $zero, 7
; CHECK: mfhi
%rem = urem i64 %a, %b
ret i64 %rem
@@ -140,4 +155,3 @@ entry:
%neg = xor i64 %or, -1
ret i64 %neg
}
-
diff --git a/test/CodeGen/Mips/misha.ll b/test/CodeGen/Mips/misha.ll
index 80637edb16746..65d3b7b5d874a 100644
--- a/test/CodeGen/Mips/misha.ll
+++ b/test/CodeGen/Mips/misha.ll
@@ -25,10 +25,10 @@ for.body: ; preds = %for.body.lr.ph, %fo
%inc = add nsw i32 %i.010, 1
%cmp = icmp eq i32 %inc, %conv
br i1 %cmp, label %for.end, label %for.body
-; 16: sumc:
+; 16-LABEL: sumc:
; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}})
; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}})
-; 16: sum:
+; 16-LABEL: sum:
; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}})
; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
new file mode 100644
index 0000000000000..f4854f880542d
--- /dev/null
+++ b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -0,0 +1,93 @@
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s -check-prefix=LE-PIC
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 < %s | \
+; RUN: FileCheck %s -check-prefix=LE-STATIC
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 < %s | \
+; RUN: FileCheck %s -check-prefix=BE-PIC
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=CHECK-LDC1-SDC1
+
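+; With -mno-ldc1-sdc1, 64-bit FP loads and stores are split into two 32-bit
+; word accesses plus mtc1/mfc1 transfers. Which word reaches the even FP
+; register depends on endianness, hence the separate LE and BE prefixes.
+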
+@g0 = common global double 0.000000e+00, align 8
+
+; LE-PIC-LABEL: test_ldc1:
+; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; LE-PIC-DAG: mtc1 $[[R0]], $f0
+; LE-PIC-DAG: mtc1 $[[R1]], $f1
+; LE-STATIC-LABEL: test_ldc1:
+; LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; LE-STATIC-DAG: mtc1 $[[R3]], $f1
+; BE-PIC-LABEL: test_ldc1:
+; BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; BE-PIC-DAG: mtc1 $[[R1]], $f0
+; BE-PIC-DAG: mtc1 $[[R0]], $f1
+; CHECK-LDC1-SDC1-LABEL: test_ldc1:
+; CHECK-LDC1-SDC1: ldc1 $f{{[0-9]+}}
+
+define double @test_ldc1() {
+entry:
+ %0 = load double* @g0, align 8
+ ret double %0
+}
+
+; LE-PIC-LABEL: test_sdc1:
+; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+; LE-STATIC-LABEL: test_sdc1:
+; LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+; BE-PIC-LABEL: test_sdc1:
+; BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+; CHECK-LDC1-SDC1-LABEL: test_sdc1:
+; CHECK-LDC1-SDC1: sdc1 $f{{[0-9]+}}
+
+define void @test_sdc1(double %a) {
+entry:
+ store double %a, double* @g0, align 8
+ ret void
+}
+
+; LE-PIC-LABEL: test_ldxc1:
+; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; LE-PIC-DAG: mtc1 $[[R0]], $f0
+; LE-PIC-DAG: mtc1 $[[R1]], $f1
+; CHECK-LDC1-SDC1-LABEL: test_ldxc1:
+; CHECK-LDC1-SDC1: ldxc1 $f{{[0-9]+}}
+
+define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
+entry:
+ %arrayidx = getelementptr inbounds double* %a, i32 %i
+ %0 = load double* %arrayidx, align 8
+ ret double %0
+}
+
+; LE-PIC-LABEL: test_sdxc1:
+; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+; CHECK-LDC1-SDC1-LABEL: test_sdxc1:
+; CHECK-LDC1-SDC1: sdxc1 $f{{[0-9]+}}
+
+define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
+entry:
+ %arrayidx = getelementptr inbounds double* %a, i32 %i
+ store double %b, double* %arrayidx, align 8
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/2r.ll b/test/CodeGen/Mips/msa/2r.ll
new file mode 100644
index 0000000000000..da35ad82cad1b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2r.ll
@@ -0,0 +1,257 @@
+; Test the MSA intrinsics that are encoded with the 2R instruction format.
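+; 2R instructions take one source vector and one destination vector; this file
+; covers nloc (leading-ones count), nlzc (leading-zeros count), and pcnt
+; (per-element population count).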
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_nloc_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nloc_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nloc_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nloc_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_nloc_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_b_ARG1)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.b [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_b_test
+;
+@llvm_mips_nloc_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nloc_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nloc_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_nloc_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_nloc_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_h_ARG1)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.h [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_h_test
+;
+@llvm_mips_nloc_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nloc_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nloc_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_nloc_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_nloc_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_w_test
+;
+@llvm_mips_nloc_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nloc_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nloc_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_nloc_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_nloc_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nloc_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nloc.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nloc_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nloc_d_test
+;
+@llvm_mips_nlzc_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nlzc_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nlzc_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nlzc_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_nlzc_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_b_ARG1)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.b [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_b_test
+;
+@llvm_mips_nlzc_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nlzc_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nlzc_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_nlzc_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_nlzc_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_h_ARG1)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.h [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_h_test
+;
+@llvm_mips_nlzc_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nlzc_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nlzc_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_nlzc_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_nlzc_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_w_test
+;
+@llvm_mips_nlzc_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nlzc_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nlzc_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_nlzc_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_nlzc_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_nlzc_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: nlzc.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_nlzc_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_nlzc_d_test
+;
+@llvm_mips_pcnt_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pcnt_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pcnt_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_pcnt_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_pcnt_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_b_ARG1)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.b [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_b_test
+;
+@llvm_mips_pcnt_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pcnt_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pcnt_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_pcnt_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_pcnt_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_h_ARG1)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.h [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_h_test
+;
+@llvm_mips_pcnt_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pcnt_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pcnt_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_pcnt_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_pcnt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_w_test
+;
+@llvm_mips_pcnt_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pcnt_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pcnt_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_pcnt_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.pcnt.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_pcnt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_pcnt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: pcnt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_pcnt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_pcnt_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
new file mode 100644
index 0000000000000..6f6e1b9ce2f8f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
@@ -0,0 +1,87 @@
+; Test the MSA intrinsics that are encoded with the 2R instruction format and
+; convert scalars to vectors.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_fill_b_ARG1 = global i32 23, align 16
+@llvm_mips_fill_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_fill_b_test() nounwind {
+entry:
+ %0 = load i32* @llvm_mips_fill_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.fill.b(i32) nounwind
+
+; CHECK: llvm_mips_fill_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]],
+; CHECK-DAG: fill.b [[R2:\$w[0-9]+]], [[R1]]
+; CHECK-DAG: st.b [[R2]],
+; CHECK: .size llvm_mips_fill_b_test
+;
+@llvm_mips_fill_h_ARG1 = global i32 23, align 16
+@llvm_mips_fill_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_fill_h_test() nounwind {
+entry:
+ %0 = load i32* @llvm_mips_fill_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.fill.h(i32) nounwind
+
+; CHECK: llvm_mips_fill_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]],
+; CHECK-DAG: fill.h [[R2:\$w[0-9]+]], [[R1]]
+; CHECK-DAG: st.h [[R2]],
+; CHECK: .size llvm_mips_fill_h_test
+;
+@llvm_mips_fill_w_ARG1 = global i32 23, align 16
+@llvm_mips_fill_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fill_w_test() nounwind {
+entry:
+ %0 = load i32* @llvm_mips_fill_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fill.w(i32) nounwind
+
+; CHECK: llvm_mips_fill_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]],
+; CHECK-DAG: fill.w [[R2:\$w[0-9]+]], [[R1]]
+; CHECK-DAG: st.w [[R2]],
+; CHECK: .size llvm_mips_fill_w_test
+;
+@llvm_mips_fill_d_ARG1 = global i64 23, align 16
+@llvm_mips_fill_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fill_d_test() nounwind {
+entry:
+ %0 = load i64* @llvm_mips_fill_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fill.d(i64) nounwind
+
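+; MIPS32 has no 64-bit GPR to fill from, so fill.d cannot be selected
+; directly; the i64 is loaded as two words and replicated into both
+; doubleword lanes with insert.w, as the checks below expect.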
+; CHECK: llvm_mips_fill_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], 4(
+; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 0
+; CHECK-DAG: insert.w [[R3]][0], [[R1]]
+; CHECK-DAG: insert.w [[R3]][1], [[R2]]
+; CHECK-DAG: insert.w [[R3]][2], [[R1]]
+; CHECK-DAG: insert.w [[R3]][3], [[R2]]
+; CHECK-DAG: st.w [[R3]],
+; CHECK: .size llvm_mips_fill_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf.ll b/test/CodeGen/Mips/msa/2rf.ll
new file mode 100644
index 0000000000000..b361ef5eae219
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf.ll
@@ -0,0 +1,323 @@
+; Test the MSA intrinsics that are encoded with the 2RF instruction format.
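+; These are element-wise floating-point operations with one source and one
+; destination vector. The generic llvm.log2, llvm.rint, and llvm.sqrt
+; intrinsics should select to the same flog2, frint, and fsqrt instructions,
+; which the *_test functions without the llvm_mips_ prefix verify.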
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_flog2_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_flog2_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_flog2_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.flog2.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_flog2_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_flog2_w_test
+;
+@llvm_mips_flog2_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_flog2_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_flog2_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.flog2.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_flog2_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_flog2_d_test
+
+define void @flog2_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+ %1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.log2.v4f32(<4 x float> %val)
+
+; CHECK: flog2_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size flog2_w_test
+
+define void @flog2_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+ %1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.log2.v2f64(<2 x double> %val)
+
+; CHECK: flog2_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_flog2_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: flog2.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_flog2_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size flog2_d_test
+;
+@llvm_mips_frint_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_frint_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_frint_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.frint.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_frint_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frint_w_test
+;
+@llvm_mips_frint_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_frint_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_frint_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.frint.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_frint_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frint_d_test
+
+define void @frint_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+ %1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind
+
+; CHECK: frint_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size frint_w_test
+
+define void @frint_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+ %1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind
+
+; CHECK: frint_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frint_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frint.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frint_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size frint_d_test
+;
+@llvm_mips_frcp_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_frcp_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_frcp_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frcp_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.frcp.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_frcp_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frcp_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frcp.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frcp_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frcp_w_test
+;
+@llvm_mips_frcp_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_frcp_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_frcp_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frcp_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.frcp.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_frcp_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frcp_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frcp.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frcp_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frcp_d_test
+;
+@llvm_mips_frsqrt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_frsqrt_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_frsqrt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_frsqrt_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.frsqrt.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_frsqrt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frsqrt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frsqrt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frsqrt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frsqrt_w_test
+;
+@llvm_mips_frsqrt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_frsqrt_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_frsqrt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_frsqrt_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.frsqrt.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_frsqrt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_frsqrt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: frsqrt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_frsqrt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_frsqrt_d_test
+;
+@llvm_mips_fsqrt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsqrt_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fsqrt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fsqrt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fsqrt_w_test
+;
+@llvm_mips_fsqrt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsqrt_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fsqrt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_fsqrt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fsqrt_d_test
+
+define void @fsqrt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+ %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind
+
+; CHECK: fsqrt_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size fsqrt_w_test
+
+define void @fsqrt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+ %1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) nounwind
+
+; CHECK: fsqrt_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fsqrt_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fsqrt.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fsqrt_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size fsqrt_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_exup.ll b/test/CodeGen/Mips/msa/2rf_exup.ll
new file mode 100644
index 0000000000000..8d7cc367040a3
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_exup.ll
@@ -0,0 +1,82 @@
+; Test the MSA floating point conversion intrinsics (e.g. float->double) that
+; are encoded with the 2RF instruction format.
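+; fexupl and fexupr widen the elements in the left and right half of the
+; source vector, respectively, to the next wider floating-point format
+; (f16->f32 or f32->f64).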
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_fexupl_w_ARG1 = global <8 x half> <half 0.000000e+00, half 1.000000e+00, half 2.000000e+00, half 3.000000e+00, half 4.000000e+00, half 5.000000e+00, half 6.000000e+00, half 7.000000e+00>, align 16
+@llvm_mips_fexupl_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupl_w_test() nounwind {
+entry:
+ %0 = load <8 x half>* @llvm_mips_fexupl_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexupl.w(<8 x half>) nounwind
+
+; CHECK: llvm_mips_fexupl_w_test:
+; CHECK: ld.h
+; CHECK: fexupl.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexupl_w_test
+;
+@llvm_mips_fexupl_d_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexupl_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupl_d_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexupl_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fexupl.d(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fexupl_d_test:
+; CHECK: ld.w
+; CHECK: fexupl.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fexupl_d_test
+;
+@llvm_mips_fexupr_w_ARG1 = global <8 x half> <half 0.000000e+00, half 1.000000e+00, half 2.000000e+00, half 3.000000e+00, half 4.000000e+00, half 5.000000e+00, half 6.000000e+00, half 7.000000e+00>, align 16
+@llvm_mips_fexupr_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupr_w_test() nounwind {
+entry:
+ %0 = load <8 x half>* @llvm_mips_fexupr_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexupr.w(<8 x half>) nounwind
+
+; CHECK: llvm_mips_fexupr_w_test:
+; CHECK: ld.h
+; CHECK: fexupr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexupr_w_test
+;
+@llvm_mips_fexupr_d_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexupr_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fexupr_d_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexupr_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fexupr.d(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fexupr_d_test:
+; CHECK: ld.w
+; CHECK: fexupr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fexupr_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_float_int.ll b/test/CodeGen/Mips/msa/2rf_float_int.ll
new file mode 100644
index 0000000000000..3b5dfda2d1e60
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_float_int.ll
@@ -0,0 +1,90 @@
+; Test the MSA integer to floating point conversion intrinsics that are encoded
+; with the 2RF instruction format.
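+; ffint_s treats the source elements as signed integers and ffint_u as
+; unsigned; both produce a floating-point vector with the same element count.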
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_ffint_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffint_s_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffint_s_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffint.s.w(<4 x i32> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffint.s.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffint_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_s_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_s.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_s_w_test
+;
+@llvm_mips_ffint_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ffint_s_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ffint_s_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffint.s.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_ffint_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_s_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_s.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_s_d_test
+;
+@llvm_mips_ffint_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffint_u_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffint_u_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffint.u.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffint_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_u_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_u.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_u_w_test
+;
+@llvm_mips_ffint_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ffint_u_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffint_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ffint_u_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffint.u.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_ffint_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ffint_u_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ffint_u.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ffint_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ffint_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_fq.ll b/test/CodeGen/Mips/msa/2rf_fq.ll
new file mode 100644
index 0000000000000..021dd937fad3f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_fq.ll
@@ -0,0 +1,82 @@
+; Test the MSA fixed-point to floating point conversion intrinsics that are
+; encoded with the 2RF instruction format.
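+; ffql and ffqr convert the left and right half, respectively, of a vector of
+; Q15 or Q31 fixed-point values to the corresponding floating-point format.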
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_ffql_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ffql_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffql_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ffql_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffql.w(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_ffql_w_test:
+; CHECK: ld.h
+; CHECK: ffql.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ffql_w_test
+;
+@llvm_mips_ffql_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffql_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffql_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffql_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffql.d(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffql_d_test:
+; CHECK: ld.w
+; CHECK: ffql.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ffql_d_test
+;
+@llvm_mips_ffqr_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ffqr_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_ffqr_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ffqr_w_ARG1
+ %1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0)
+ store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.ffqr.w(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_ffqr_w_test:
+; CHECK: ld.h
+; CHECK: ffqr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ffqr_w_test
+;
+@llvm_mips_ffqr_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ffqr_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_ffqr_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ffqr_d_ARG1
+ %1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0)
+ store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.ffqr.d(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_ffqr_d_test:
+; CHECK: ld.w
+; CHECK: ffqr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ffqr_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_int_float.ll b/test/CodeGen/Mips/msa/2rf_int_float.ll
new file mode 100644
index 0000000000000..4665ae066a4f7
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_int_float.ll
@@ -0,0 +1,217 @@
+; Test the MSA floating point to integer intrinsics that are encoded with the
+; 2RF instruction format. Most of these are conversions, but other
+; instructions, such as fclass, are also tested here.
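+; ftrunc_s/ftrunc_u round toward zero, ftint_s/ftint_u round according to the
+; current rounding mode, and fclass produces a per-element bitmask describing
+; the value class (zero, subnormal, infinity, NaN) and sign.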
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_fclass_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fclass_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fclass_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fclass_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_fclass_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fclass_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fclass.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fclass_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fclass_w_test
+;
+@llvm_mips_fclass_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fclass_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fclass_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fclass_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_fclass_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_fclass_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: fclass.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_fclass_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_fclass_d_test
+;
+@llvm_mips_ftrunc_s_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftrunc_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftrunc_s_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftrunc_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_s_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_s.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_s_w_test
+;
+@llvm_mips_ftrunc_s_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftrunc_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftrunc_s_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftrunc_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_s_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_s.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_s_d_test
+;
+@llvm_mips_ftrunc_u_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftrunc_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftrunc_u_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftrunc_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_u_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_u.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_u_w_test
+;
+@llvm_mips_ftrunc_u_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftrunc_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftrunc_u_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftrunc_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftrunc_u_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftrunc_u.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftrunc_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftrunc_u_d_test
+;
+@llvm_mips_ftint_s_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftint_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftint_s_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftint_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftint_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_s_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_s.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_s_w_test
+;
+@llvm_mips_ftint_s_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftint_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftint_s_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftint_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftint_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_s_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_s.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_s_d_test
+;
+@llvm_mips_ftint_u_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftint_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftint_u_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftint_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind
+
+; CHECK: llvm_mips_ftint_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_u_w_ARG1)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_u.w [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_u_w_test
+;
+@llvm_mips_ftint_u_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftint_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ftint_u_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftint_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ftint.u.d(<2 x double>) nounwind
+
+; CHECK: llvm_mips_ftint_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ftint_u_d_ARG1)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ftint_u.d [[WD:\$w[0-9]+]], [[WS]]
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ftint_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R2]])
+; CHECK: .size llvm_mips_ftint_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/2rf_tq.ll b/test/CodeGen/Mips/msa/2rf_tq.ll
new file mode 100644
index 0000000000000..6f3c508f5b8c0
--- /dev/null
+++ b/test/CodeGen/Mips/msa/2rf_tq.ll
@@ -0,0 +1,50 @@
+; Test the MSA floating-point to fixed-point conversion intrinsics that are
+; encoded with the 2RF instruction format.
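+; ftq.h packs two f32 vectors into a single Q15 vector and ftq.w packs two
+; f64 vectors into a single Q31 vector, so each test loads two operands.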
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_ftq_h_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_ftq_h_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_ftq_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ftq_h_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_ftq_h_ARG1
+ %1 = load <4 x float>* @llvm_mips_ftq_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ftq.h(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_ftq_h_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ftq.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ftq_h_test
+;
+@llvm_mips_ftq_w_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_ftq_w_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_ftq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ftq_w_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_ftq_w_ARG1
+ %1 = load <2 x double>* @llvm_mips_ftq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ftq.w(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_ftq_w_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ftq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ftq_w_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-a.ll b/test/CodeGen/Mips/msa/3r-a.ll
new file mode 100644
index 0000000000000..dab15b66b7cec
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-a.ll
@@ -0,0 +1,1191 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are many of these, so this file covers the ones beginning with 'a'.
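+; 3R instructions take two source vectors and one destination vector. add_a
+; computes the sum of the absolute values of corresponding elements.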
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+; It should fail to compile without fp64.
+; RUN: not llc -march=mips -mattr=+msa < %s 2>&1 | \
+; RUN: FileCheck -check-prefix=FP32ERROR %s
+; FP32ERROR: LLVM ERROR: MSA requires a 64-bit FPU register file (FR=1 mode).
+
+@llvm_mips_add_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_add_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_add_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_add_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_add_a_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_b_test
+;
+@llvm_mips_add_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_add_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_add_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_add_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_add_a_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_h_test
+;
+@llvm_mips_add_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_add_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_add_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_add_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_add_a_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_w_test
+;
+@llvm_mips_add_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_add_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_add_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_add_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_add_a_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_add_a_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_add_a_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: add_a.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_add_a_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_add_a_d_test
+;
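+; adds_a.df is the saturating form of add_a.df: the sum of absolute values
+; is clamped to the maximum signed value for the element width.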
+@llvm_mips_adds_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_adds_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_adds_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_adds_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_adds_a_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_b_test
+;
+@llvm_mips_adds_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_adds_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_adds_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_adds_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_adds_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_adds_a_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_h_test
+;
+@llvm_mips_adds_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_adds_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_adds_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_adds_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_adds_a_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_w_test
+;
+@llvm_mips_adds_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_adds_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_adds_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_adds_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_adds_a_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_a_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_a_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_a.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_a_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_a_d_test
+;
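+; adds_s.df performs a saturating signed addition of the elements.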
+@llvm_mips_adds_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_adds_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_adds_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_adds_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_adds_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_b_test
+;
+@llvm_mips_adds_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_adds_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_adds_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_adds_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_adds_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_h_test
+;
+@llvm_mips_adds_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_adds_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_adds_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_adds_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_adds_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_w_test
+;
+@llvm_mips_adds_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_adds_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_adds_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_adds_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_adds_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_s_d_test
+;
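+; adds_u.df performs a saturating unsigned addition of the elements.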
+@llvm_mips_adds_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_adds_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_adds_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_adds_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_adds_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_b_test
+;
+@llvm_mips_adds_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_adds_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_adds_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_adds_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_adds_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_h_test
+;
+@llvm_mips_adds_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_adds_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_adds_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_adds_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_adds_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_w_test
+;
+@llvm_mips_adds_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_adds_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_adds_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_adds_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_adds_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_adds_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_adds_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: adds_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_adds_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_adds_u_d_test
+;
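+; addv.df is the plain modulo vector addition.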
+@llvm_mips_addv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_addv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_addv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_addv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_addv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_b_test
+;
+@llvm_mips_addv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_addv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_addv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_addv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_addv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_h_test
+;
+@llvm_mips_addv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_addv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_addv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_addv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_addv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_w_test
+;
+@llvm_mips_addv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_addv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_addv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_addv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_addv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_addv_d_test
+;
+
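+; The tests below use the generic IR 'add' rather than the intrinsic to
+; check that ordinary vector addition also selects addv.df.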
+define void @addv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2
+ %2 = add <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+ ret void
+}
+
+; CHECK: addv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size addv_b_test
+;
+
+define void @addv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2
+ %2 = add <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+ ret void
+}
+
+; CHECK: addv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size addv_h_test
+;
+
+define void @addv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2
+ %2 = add <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+ ret void
+}
+
+; CHECK: addv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size addv_w_test
+;
+
+define void @addv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2
+ %2 = add <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+ ret void
+}
+
+; CHECK: addv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_addv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_addv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: addv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_addv_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size addv_d_test
+;
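+; asub_s.df computes the absolute difference of signed elements:
+; wd[i] = |ws[i] - wt[i]|.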
+@llvm_mips_asub_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_asub_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_asub_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_asub_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_asub_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_b_test
+;
+@llvm_mips_asub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_asub_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_asub_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_asub_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_asub_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_h_test
+;
+@llvm_mips_asub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_asub_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_asub_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_asub_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_asub_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_w_test
+;
+@llvm_mips_asub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_asub_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_asub_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_asub_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_asub_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_s_d_test
+;
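+; asub_u.df computes the absolute difference of unsigned elements.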
+@llvm_mips_asub_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_asub_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_asub_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_asub_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_asub_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_b_test
+;
+@llvm_mips_asub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_asub_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_asub_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_asub_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_asub_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_h_test
+;
+@llvm_mips_asub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_asub_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_asub_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_asub_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_asub_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_w_test
+;
+@llvm_mips_asub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_asub_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_asub_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_asub_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_asub_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_asub_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_asub_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: asub_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_asub_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_asub_u_d_test
+;
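+; ave_s.df computes the truncating signed average: (ws[i] + wt[i]) >> 1.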
+@llvm_mips_ave_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ave_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ave_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ave_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ave_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_b_test
+;
+@llvm_mips_ave_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ave_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ave_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ave_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ave_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_h_test
+;
+@llvm_mips_ave_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ave_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ave_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ave_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ave_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_w_test
+;
+@llvm_mips_ave_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ave_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ave_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ave_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ave_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_s_d_test
+;
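+; ave_u.df computes the truncating unsigned average.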
+@llvm_mips_ave_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ave_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ave_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ave_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ave_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_b_test
+;
+@llvm_mips_ave_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ave_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ave_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ave_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ave_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_h_test
+;
+@llvm_mips_ave_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ave_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ave_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ave_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ave_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_w_test
+;
+@llvm_mips_ave_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ave_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ave_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ave_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ave_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_ave_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_ave_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ave_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_ave_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_ave_u_d_test
+;
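+; aver_s.df computes the rounding signed average: (ws[i] + wt[i] + 1) >> 1.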
+@llvm_mips_aver_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_aver_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_aver_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_aver_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_aver_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_b_test
+;
+@llvm_mips_aver_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_aver_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_aver_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_aver_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_aver_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_h_test
+;
+@llvm_mips_aver_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_aver_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_aver_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_aver_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_aver_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_aver_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_w_test
+;
+@llvm_mips_aver_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_aver_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_aver_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_aver_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_aver_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_s_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_s_d_test
+;
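+; aver_u.df computes the rounding unsigned average.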
+@llvm_mips_aver_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_aver_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_aver_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_aver_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_aver_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_b_RES)
+; CHECK-DAG: st.b [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_b_test
+;
+@llvm_mips_aver_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_aver_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_aver_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_aver_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_aver_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_h_RES)
+; CHECK-DAG: st.h [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_h_test
+;
+@llvm_mips_aver_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_aver_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_aver_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_aver_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_aver_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_w_RES)
+; CHECK-DAG: st.w [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_w_test
+;
+@llvm_mips_aver_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_aver_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_aver_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_aver_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.aver.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_aver_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_aver_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_aver_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: aver_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_aver_u_d_RES)
+; CHECK-DAG: st.d [[WD]], 0([[R3]])
+; CHECK: .size llvm_mips_aver_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-b.ll b/test/CodeGen/Mips/msa/3r-b.ll
new file mode 100644
index 0000000000000..a05d19b4d490c
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-b.ll
@@ -0,0 +1,498 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'b'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
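+; Per the MSA spec, bclr.df clears, in each element of ws, the bit whose
+; position is given by the corresponding element of wt (modulo element width).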
+@llvm_mips_bclr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bclr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bclr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bclr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bclr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bclr_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_bclr_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: bclr.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bclr_b_test
+;
+@llvm_mips_bclr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bclr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bclr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bclr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bclr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bclr_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_bclr_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: bclr.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bclr_h_test
+;
+@llvm_mips_bclr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bclr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bclr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bclr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bclr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bclr_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_bclr_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: bclr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bclr_w_test
+;
+@llvm_mips_bclr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bclr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bclr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bclr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bclr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bclr_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_bclr_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: bclr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bclr_d_test
+
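+; binsl.df inserts the leftmost bits of each ws element into wd, so wd is
+; both a source and the destination; the CHECKs verify the register is reused.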
+@llvm_mips_binsl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsl_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsl_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_binsl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_binsl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsl_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_binsl_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsl.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_binsl_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_b_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_b_ARG3)(
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.b [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.b [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_b_test
+
+@llvm_mips_binsl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsl_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsl_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_binsl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_binsl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsl_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_binsl_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsl.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_binsl_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_h_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_h_ARG3)(
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.h [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.h [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.h [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_h_test
+
+@llvm_mips_binsl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsl_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsl_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_binsl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_binsl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsl_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_binsl_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsl.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_binsl_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_w_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_w_ARG3)(
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.w [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.w [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.w [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_w_test
+
+@llvm_mips_binsl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsl_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsl_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_binsl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_binsl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsl_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_binsl_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsl.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_binsl_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsl_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsl_d_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsl_d_ARG3)(
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.d [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsl.d [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.d [[R4]], 0(
+; CHECK: .size llvm_mips_binsl_d_test
+
+@llvm_mips_binsr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsr_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsr_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_binsr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_binsr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsr_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_binsr_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsr.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_binsr_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_b_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_b_ARG3)(
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.b [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.b [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_b_test
+
+@llvm_mips_binsr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsr_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsr_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_binsr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_binsr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsr_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_binsr_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsr.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_binsr_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_h_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_h_ARG3)(
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.h [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.h [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.h [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_h_test
+
+@llvm_mips_binsr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsr_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsr_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_binsr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_binsr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsr_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_binsr_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsr.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_binsr_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_w_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_w_ARG3)(
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.w [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.w [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.w [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_w_test
+
+@llvm_mips_binsr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsr_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsr_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_binsr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_binsr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsr_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_binsr_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsr.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_binsr_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsr_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsr_d_ARG2)(
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_binsr_d_ARG3)(
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R5:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: ld.d [[R6:\$w[0-9]+]], 0([[R3]])
+; CHECK-DAG: binsr.d [[R4]], [[R5]], [[R6]]
+; CHECK-DAG: st.d [[R4]], 0(
+; CHECK: .size llvm_mips_binsr_d_test
+
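+; bneg.df flips the selected bit in each element instead of clearing it; the
+; bit position is chosen the same way as for bclr.df.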
+@llvm_mips_bneg_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bneg_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bneg_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bneg_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bneg_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bneg_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_bneg_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: bneg.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bneg_b_test
+;
+@llvm_mips_bneg_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bneg_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bneg_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bneg_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bneg_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bneg_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_bneg_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: bneg.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bneg_h_test
+;
+@llvm_mips_bneg_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bneg_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bneg_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bneg_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bneg_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bneg_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_bneg_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: bneg.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bneg_w_test
+;
+@llvm_mips_bneg_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bneg_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bneg_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bneg_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bneg_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bneg_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_bneg_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: bneg.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bneg_d_test
+;
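+; bset.df sets the selected bit in each element to 1, completing the
+; bclr/bneg/bset single-bit family.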
+@llvm_mips_bset_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bset_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bset_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bset_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bset_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bset_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_bset_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: bset.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bset_b_test
+;
+@llvm_mips_bset_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bset_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bset_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bset_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bset_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bset_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_bset_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: bset.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bset_h_test
+;
+@llvm_mips_bset_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bset_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bset_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bset_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bset_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bset_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_bset_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: bset.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bset_w_test
+;
+@llvm_mips_bset_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bset_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bset_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bset_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bset_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bset_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bset.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_bset_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: bset.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bset_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-c.ll b/test/CodeGen/Mips/msa/3r-c.ll
new file mode 100644
index 0000000000000..6ec92c284fec3
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-c.ll
@@ -0,0 +1,446 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'c'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
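+; These compare intrinsics produce element-wise masks: a destination element
+; is all ones when the comparison holds for the corresponding source
+; elements and all zeros otherwise. ceq tests equality, cle_s/cle_u test
+; signed/unsigned less-than-or-equal, and clt_s/clt_u test signed/unsigned
+; less-than.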
+@llvm_mips_ceq_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ceq_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ceq_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ceq_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ceq_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ceq_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ceq.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ceq_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ceq.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ceq_b_test
+;
+@llvm_mips_ceq_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ceq_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ceq_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ceq_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ceq_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ceq_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ceq.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ceq_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ceq.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ceq_h_test
+;
+@llvm_mips_ceq_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ceq_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ceq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ceq_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ceq_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ceq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ceq.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ceq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ceq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ceq_w_test
+;
+@llvm_mips_ceq_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ceq_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ceq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ceq_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ceq_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ceq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ceq.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ceq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ceq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ceq_d_test
+;
+@llvm_mips_cle_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_cle_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_cle_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_cle_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_cle_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_cle_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.cle.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_cle_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: cle_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_cle_s_b_test
+;
+@llvm_mips_cle_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_cle_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_cle_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_cle_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_cle_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_cle_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.cle.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_cle_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: cle_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_cle_s_h_test
+;
+@llvm_mips_cle_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_cle_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_cle_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_cle_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_cle_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_cle_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.cle.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_cle_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: cle_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_cle_s_w_test
+;
+@llvm_mips_cle_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_cle_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_cle_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_cle_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_cle_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_cle_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.cle.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_cle_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: cle_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_cle_s_d_test
+;
+@llvm_mips_cle_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_cle_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_cle_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_cle_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_cle_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_cle_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.cle.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_cle_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: cle_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_cle_u_b_test
+;
+@llvm_mips_cle_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_cle_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_cle_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_cle_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_cle_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_cle_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.cle.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_cle_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: cle_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_cle_u_h_test
+;
+@llvm_mips_cle_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_cle_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_cle_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_cle_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_cle_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_cle_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.cle.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_cle_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: cle_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_cle_u_w_test
+;
+@llvm_mips_cle_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_cle_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_cle_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_cle_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_cle_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_cle_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.cle.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_cle_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: cle_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_cle_u_d_test
+;
+@llvm_mips_clt_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clt_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_clt_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clt_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clt_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_clt_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clt.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_clt_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: clt_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clt_s_b_test
+;
+@llvm_mips_clt_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clt_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_clt_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clt_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clt_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_clt_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clt.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_clt_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: clt_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clt_s_h_test
+;
+@llvm_mips_clt_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clt_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_clt_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clt_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clt_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_clt_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clt.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_clt_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: clt_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clt_s_w_test
+;
+@llvm_mips_clt_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clt_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_clt_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clt_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clt_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_clt_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clt.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_clt_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: clt_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clt_s_d_test
+;
+@llvm_mips_clt_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clt_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_clt_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clt_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clt_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_clt_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clt.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_clt_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: clt_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clt_u_b_test
+;
+@llvm_mips_clt_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clt_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_clt_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clt_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clt_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_clt_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clt.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_clt_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: clt_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clt_u_h_test
+;
+@llvm_mips_clt_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clt_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_clt_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clt_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clt_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_clt_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clt.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_clt_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: clt_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clt_u_w_test
+;
+@llvm_mips_clt_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clt_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_clt_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clt_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clt_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_clt_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clt.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_clt_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: clt_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clt_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-d.ll b/test/CodeGen/Mips/msa/3r-d.ll
new file mode 100644
index 0000000000000..0099554a8eeab
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-d.ll
@@ -0,0 +1,478 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'd'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
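+; div_s.df and div_u.df divide each element of the first operand by the
+; corresponding element of the second, as signed and unsigned integers
+; respectively.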
+@llvm_mips_div_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_div_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_div_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_div_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_div_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_div_s_b_test
+;
+@llvm_mips_div_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_div_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_div_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_div_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_div_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_div_s_h_test
+;
+@llvm_mips_div_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_div_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_div_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_div_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_div_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_div_s_w_test
+;
+@llvm_mips_div_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_div_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_div_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_div_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_div_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_div_s_d_test
+;
+
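+; The tests below use the generic IR sdiv instruction rather than the
+; intrinsic, checking that ordinary signed vector division is also selected
+; to div_s.df.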
+define void @div_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+ %2 = sdiv <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+ ret void
+}
+
+; CHECK: div_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_s.b
+; CHECK: st.b
+; CHECK: .size div_s_b_test
+
+define void @div_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+ %2 = sdiv <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+ ret void
+}
+
+; CHECK: div_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_s.h
+; CHECK: st.h
+; CHECK: .size div_s_h_test
+
+define void @div_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+ %2 = sdiv <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+ ret void
+}
+
+; CHECK: div_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_s.w
+; CHECK: st.w
+; CHECK: .size div_s_w_test
+
+define void @div_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+ %2 = sdiv <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+ ret void
+}
+
+; CHECK: div_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_s.d
+; CHECK: st.d
+; CHECK: .size div_s_d_test
+;
+@llvm_mips_div_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_div_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_div_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_div_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_div_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_div_u_b_test
+;
+@llvm_mips_div_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_div_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_div_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_div_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_div_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_div_u_h_test
+;
+@llvm_mips_div_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_div_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_div_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_div_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_div_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_div_u_w_test
+;
+@llvm_mips_div_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_div_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_div_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_div_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_div_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_div_u_d_test
+;
+
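+; As with sdiv above, the tests below check that the generic IR udiv
+; instruction is selected to div_u.df.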
+define void @div_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+ %2 = udiv <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+ ret void
+}
+
+; CHECK: div_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: div_u.b
+; CHECK: st.b
+; CHECK: .size div_u_b_test
+
+define void @div_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+ %2 = udiv <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+ ret void
+}
+
+; CHECK: div_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: div_u.h
+; CHECK: st.h
+; CHECK: .size div_u_h_test
+
+define void @div_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+ %2 = udiv <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+ ret void
+}
+
+; CHECK: div_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: div_u.w
+; CHECK: st.w
+; CHECK: .size div_u_w_test
+
+define void @div_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+ %2 = udiv <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+ ret void
+}
+
+; CHECK: div_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: div_u.d
+; CHECK: st.d
+; CHECK: .size div_u_d_test
+;
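+; dotp_s.df and dotp_u.df compute a widening dot product: each destination
+; element is the sum of the products of a pair of adjacent source elements,
+; so the result elements are twice the width of the sources (here <16 x i8>
+; inputs produce an <8 x i16> result, and so on).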
+@llvm_mips_dotp_s_h_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3,
+ i8 4, i8 5, i8 6, i8 7,
+ i8 8, i8 9, i8 10, i8 11,
+ i8 12, i8 13, i8 14, i8 15>,
+ align 16
+@llvm_mips_dotp_s_h_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19,
+ i8 20, i8 21, i8 22, i8 23,
+ i8 24, i8 25, i8 26, i8 27,
+ i8 28, i8 29, i8 30, i8 31>,
+ align 16
+@llvm_mips_dotp_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0,
+ i16 0, i16 0, i16 0, i16 0>,
+ align 16
+
+define void @llvm_mips_dotp_s_h_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dotp_s_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: dotp_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dotp_s_h_test
+;
+@llvm_mips_dotp_s_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3,
+ i16 4, i16 5, i16 6, i16 7>,
+ align 16
+@llvm_mips_dotp_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7,
+ i16 8, i16 9, i16 10, i16 11>,
+ align 16
+@llvm_mips_dotp_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>,
+ align 16
+
+define void @llvm_mips_dotp_s_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dotp_s_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: dotp_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dotp_s_w_test
+;
+@llvm_mips_dotp_s_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 0, i32 1>,
+ align 16
+@llvm_mips_dotp_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 2, i32 3>,
+ align 16
+@llvm_mips_dotp_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dotp_s_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dotp_s_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: dotp_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dotp_s_d_test
+;
+@llvm_mips_dotp_u_h_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3,
+ i8 4, i8 5, i8 6, i8 7,
+ i8 8, i8 9, i8 10, i8 11,
+ i8 12, i8 13, i8 14, i8 15>,
+ align 16
+@llvm_mips_dotp_u_h_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19,
+ i8 20, i8 21, i8 22, i8 23,
+ i8 24, i8 25, i8 26, i8 27,
+ i8 28, i8 29, i8 30, i8 31>,
+ align 16
+@llvm_mips_dotp_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0,
+ i16 0, i16 0, i16 0, i16 0>,
+ align 16
+
+define void @llvm_mips_dotp_u_h_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dotp_u_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: dotp_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dotp_u_h_test
+;
+@llvm_mips_dotp_u_w_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3,
+ i16 4, i16 5, i16 6, i16 7>,
+ align 16
+@llvm_mips_dotp_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7,
+ i16 8, i16 9, i16 10, i16 11>,
+ align 16
+@llvm_mips_dotp_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>,
+ align 16
+
+define void @llvm_mips_dotp_u_w_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dotp_u_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: dotp_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dotp_u_w_test
+;
+@llvm_mips_dotp_u_d_ARG1 = global <4 x i32> <i32 0, i32 1, i32 0, i32 1>,
+ align 16
+@llvm_mips_dotp_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 2, i32 3>,
+ align 16
+@llvm_mips_dotp_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dotp_u_d_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dotp.u.d(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dotp_u_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: dotp_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dotp_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-i.ll b/test/CodeGen/Mips/msa/3r-i.ll
new file mode 100644
index 0000000000000..2ef30471b026e
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-i.ll
@@ -0,0 +1,358 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers those beginning with 'i'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
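+; The interleave intrinsics combine elements of the two source vectors:
+; ilvev interleaves the even-indexed elements, ilvod the odd-indexed
+; elements, ilvl the elements of the left (most-significant) halves, and
+; ilvr the elements of the right (least-significant) halves.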
+@llvm_mips_ilvev_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvev_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvev_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvev_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvev_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvev_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvev.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvev_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvev.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvev_b_test
+;
+@llvm_mips_ilvev_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvev_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvev_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvev_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvev_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvev_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvev.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvev_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvev.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvev_h_test
+;
+@llvm_mips_ilvev_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvev_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvev_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvev_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvev_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvev_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvev.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvev_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvev.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvev_w_test
+;
+@llvm_mips_ilvev_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvev_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvev_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvev_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvev_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvev_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvev.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvev_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvev.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvev_d_test
+;
+@llvm_mips_ilvl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvl_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvl_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvl.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvl_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvl.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvl_b_test
+;
+@llvm_mips_ilvl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvl_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvl_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvl.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvl_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvl.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvl_h_test
+;
+@llvm_mips_ilvl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvl_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvl_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvl.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvl_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvl.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvl_w_test
+;
+@llvm_mips_ilvl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvl_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvl_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvl.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvl_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvl.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvl_d_test
+;
+@llvm_mips_ilvod_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvod_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvod_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvod_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvod_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvod_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvod.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvod_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvod.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvod_b_test
+;
+@llvm_mips_ilvod_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvod_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvod_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvod_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvod_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvod_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvod.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvod_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvod.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvod_h_test
+;
+@llvm_mips_ilvod_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvod_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvod_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvod_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvod_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvod_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvod.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvod_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvod.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvod_w_test
+;
+@llvm_mips_ilvod_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvod_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvod_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvod_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvod_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvod_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvod.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvod_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvod.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvod_d_test
+;
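+; The ilvr tests exercise the interleave-right intrinsics, the counterpart of
+; the ilvl (interleave-left) tests above.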
+@llvm_mips_ilvr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ilvr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_ilvr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ilvr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ilvr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_ilvr_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ilvr.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_ilvr_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ilvr.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ilvr_b_test
+;
+@llvm_mips_ilvr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ilvr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_ilvr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ilvr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ilvr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_ilvr_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ilvr.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_ilvr_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ilvr.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ilvr_h_test
+;
+@llvm_mips_ilvr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ilvr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_ilvr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ilvr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ilvr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_ilvr_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ilvr.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_ilvr_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ilvr.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ilvr_w_test
+;
+@llvm_mips_ilvr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ilvr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_ilvr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ilvr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ilvr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_ilvr_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ilvr.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_ilvr_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ilvr.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ilvr_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-m.ll b/test/CodeGen/Mips/msa/3r-m.ll
new file mode 100644
index 0000000000000..ddfd720a2f843
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-m.ll
@@ -0,0 +1,862 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers only those beginning with 'm'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
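+; Each test follows the same shape: load both vector operands from globals,
+; apply the intrinsic, and store the result, then check that llc selects the
+; matching MSA instruction (e.g. max_a.b) between the ld.* and st.* pair.
+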
+@llvm_mips_max_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_max_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_max_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_a_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_a.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_a_b_test
+;
+@llvm_mips_max_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_max_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_max_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_a_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_a.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_a_h_test
+;
+@llvm_mips_max_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_max_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_max_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_a_w_test
+;
+@llvm_mips_max_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_max_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_max_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_a_d_test
+;
+@llvm_mips_max_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_max_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_max_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_s_b_test
+;
+@llvm_mips_max_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_max_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_max_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_s_h_test
+;
+@llvm_mips_max_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_max_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_max_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_s_w_test
+;
+@llvm_mips_max_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_max_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_max_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_s_d_test
+;
+@llvm_mips_max_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_max_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_max_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_u_b_test
+;
+@llvm_mips_max_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_max_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_max_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_u_h_test
+;
+@llvm_mips_max_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_max_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_max_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_u_w_test
+;
+@llvm_mips_max_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_max_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_max_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_u_d_test
+;
+@llvm_mips_min_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_a_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_a_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_min_a_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_min_a_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_a_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_a.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_a_b_test
+;
+@llvm_mips_min_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_a_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_a_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_min_a_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_min_a_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_a_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_a.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_a_h_test
+;
+@llvm_mips_min_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_a_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_a_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_min_a_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_min_a_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_a_w_test
+;
+@llvm_mips_min_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_a_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_a_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_min_a_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_min_a_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_a_d_test
+;
+@llvm_mips_min_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_min_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_min_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_s_b_test
+;
+@llvm_mips_min_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_min_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_min_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_s_h_test
+;
+@llvm_mips_min_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_min_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_min_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_s_w_test
+;
+@llvm_mips_min_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_min_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_min_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_s_d_test
+;
+@llvm_mips_min_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_min_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_min_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_u_b_test
+;
+@llvm_mips_min_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_min_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_min_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_u_h_test
+;
+@llvm_mips_min_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_min_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_min_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_u_w_test
+;
+@llvm_mips_min_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_min_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_min_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_u_d_test
+;
+@llvm_mips_mod_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mod_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mod_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mod_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mod_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mod_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mod_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mod_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mod_s_b_test
+;
+@llvm_mips_mod_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mod_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mod_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mod_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mod_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mod_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mod_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mod_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mod_s_h_test
+;
+@llvm_mips_mod_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mod_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mod_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mod_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mod_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mod_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mod_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mod_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mod_s_w_test
+;
+@llvm_mips_mod_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mod_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mod_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mod_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mod_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mod_s_d_test
+;
+@llvm_mips_mod_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mod_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mod_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mod_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mod_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mod_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mod_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mod_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mod_u_b_test
+;
+@llvm_mips_mod_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mod_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mod_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mod_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mod_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mod_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mod_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mod_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mod_u_h_test
+;
+@llvm_mips_mod_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mod_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mod_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mod_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mod_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mod_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mod_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mod_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mod_u_w_test
+;
+@llvm_mips_mod_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mod_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mod_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mod_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mod_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mod_u_d_test
+;
+@llvm_mips_mulv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mulv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mulv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mulv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mulv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mulv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mulv_b_test
+;
+@llvm_mips_mulv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mulv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mulv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mulv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mulv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mulv_h_test
+;
+@llvm_mips_mulv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mulv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mulv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mulv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mulv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mulv_w_test
+;
+@llvm_mips_mulv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mulv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mulv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mulv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mulv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mulv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mulv_d_test
+
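+; The tests below use the generic IR mul operator rather than the intrinsics
+; to check that ordinary vector multiplication is also selected to the mulv
+; instructions.
+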
+define void @mulv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+ %2 = mul <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+ ret void
+}
+
+; CHECK: mulv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mulv.b
+; CHECK: st.b
+; CHECK: .size mulv_b_test
+
+define void @mulv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+ %2 = mul <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+ ret void
+}
+
+; CHECK: mulv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulv.h
+; CHECK: st.h
+; CHECK: .size mulv_h_test
+
+define void @mulv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+ %2 = mul <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+ ret void
+}
+
+; CHECK: mulv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulv.w
+; CHECK: st.w
+; CHECK: .size mulv_w_test
+
+define void @mulv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+ %2 = mul <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+ ret void
+}
+
+; CHECK: mulv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mulv.d
+; CHECK: st.d
+; CHECK: .size mulv_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-p.ll b/test/CodeGen/Mips/msa/3r-p.ll
new file mode 100644
index 0000000000000..852023b0824a2
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-p.ll
@@ -0,0 +1,182 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers only those beginning with 'p'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
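+; The pckev (pack even) and pckod (pack odd) intrinsics pick the even- and
+; odd-indexed elements of the two operands respectively; the tests only verify
+; instruction selection.
+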
+@llvm_mips_pckev_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pckev_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_pckev_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pckev_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_pckev_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_pckev_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_pckev_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: pckev.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pckev_b_test
+;
+@llvm_mips_pckev_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pckev_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_pckev_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pckev_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_pckev_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_pckev_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_pckev_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: pckev.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pckev_h_test
+;
+@llvm_mips_pckev_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pckev_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_pckev_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pckev_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_pckev_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_pckev_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_pckev_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: pckev.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pckev_w_test
+;
+@llvm_mips_pckev_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pckev_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_pckev_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pckev_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_pckev_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_pckev_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_pckev_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: pckev.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pckev_d_test
+;
+@llvm_mips_pckod_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pckod_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_pckod_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pckod_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_pckod_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_pckod_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_pckod_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: pckod.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pckod_b_test
+;
+@llvm_mips_pckod_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pckod_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_pckod_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pckod_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_pckod_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_pckod_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_pckod_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: pckod.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pckod_h_test
+;
+@llvm_mips_pckod_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pckod_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_pckod_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pckod_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_pckod_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_pckod_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_pckod_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: pckod.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pckod_w_test
+;
+@llvm_mips_pckod_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pckod_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_pckod_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pckod_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_pckod_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_pckod_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.pckod.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_pckod_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: pckod.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pckod_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-s.ll b/test/CodeGen/Mips/msa/3r-s.ll
new file mode 100644
index 0000000000000..30cf265233e51
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-s.ll
@@ -0,0 +1,1353 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are lots of these, so this file covers only those beginning with 's'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
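+; Unlike the files above, these tests use FileCheck variables to capture the
+; GOT pointers and MSA registers (e.g. [[WS:\$w[0-9]+]]) so that operands can
+; be matched regardless of register allocation; CHECK-DAG allows the
+; independent loads to appear in any order.
+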
+@llvm_mips_sld_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sld_b_ARG2 = global i32 10, align 16
+@llvm_mips_sld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sld_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
+ %1 = load i32* @llvm_mips_sld_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, i32 %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sld_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sld_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.b [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_sld_b_test
+;
+@llvm_mips_sld_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sld_h_ARG2 = global i32 10, align 16
+@llvm_mips_sld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sld_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
+ %1 = load i32* @llvm_mips_sld_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, i32 %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sld_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sld_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.h [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_sld_h_test
+;
+@llvm_mips_sld_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sld_w_ARG2 = global i32 10, align 16
+@llvm_mips_sld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sld_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
+ %1 = load i32* @llvm_mips_sld_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, i32 %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sld_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sld_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.w [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_sld_w_test
+;
+@llvm_mips_sld_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sld_d_ARG2 = global i32 10, align 16
+@llvm_mips_sld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sld_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
+ %1 = load i32* @llvm_mips_sld_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, i32 %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sld_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sld_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
+; CHECK-DAG: sld.d [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_sld_d_test
+;
+@llvm_mips_sll_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sll_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sll_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sll_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sll_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_sll_b_test
+;
+@llvm_mips_sll_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sll_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sll_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sll_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sll_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_sll_h_test
+;
+@llvm_mips_sll_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sll_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sll_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sll_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sll_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_sll_w_test
+;
+@llvm_mips_sll_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sll_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sll_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sll_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sll_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_sll_d_test
+
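+; The shift tests below use the generic IR shl operator to check that plain
+; vector shifts are also selected to the sll instructions.
+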
+define void @sll_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+ %2 = shl <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+ ret void
+}
+
+; CHECK: sll_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size sll_b_test
+
+define void @sll_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+ %2 = shl <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+ ret void
+}
+
+; CHECK: sll_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size sll_h_test
+
+define void @sll_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+ %2 = shl <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+ ret void
+}
+
+; CHECK: sll_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size sll_w_test
+
+define void @sll_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+ %2 = shl <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+ ret void
+}
+
+; CHECK: sll_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sll_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sll_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sll.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size sll_d_test
+;
+@llvm_mips_sra_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sra_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sra_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sra_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sra_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_sra_b_test
+;
+@llvm_mips_sra_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sra_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sra_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sra_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sra_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_sra_h_test
+;
+@llvm_mips_sra_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sra_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sra_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sra_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sra_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_sra_w_test
+;
+@llvm_mips_sra_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sra_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sra_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sra_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sra_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_sra_d_test
+;
+
+define void @sra_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+ %2 = ashr <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+ ret void
+}
+
+; CHECK: sra_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size sra_b_test
+
+define void @sra_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+ %2 = ashr <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+ ret void
+}
+
+; CHECK: sra_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size sra_h_test
+
+define void @sra_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+ %2 = ashr <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+ ret void
+}
+
+; CHECK: sra_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size sra_w_test
+
+define void @sra_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+ %2 = ashr <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+ ret void
+}
+
+; CHECK: sra_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sra_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sra_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: sra.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size sra_d_test
+
+@llvm_mips_srar_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srar_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srar_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srar_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srar_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srar_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srar.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srar_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_srar_b_test
+;
+@llvm_mips_srar_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srar_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srar_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srar_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srar_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srar_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srar.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srar_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_srar_h_test
+;
+@llvm_mips_srar_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srar_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srar_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srar_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srar_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srar_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srar.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srar_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_srar_w_test
+;
+@llvm_mips_srar_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srar_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srar_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srar_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srar_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srar_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srar.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srar_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srar_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srar_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srar.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_srar_d_test
+;
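+; Note: srar.df is an arithmetic shift right with rounding; per the MSA
+; specification the most significant discarded bit is added back to the
+; shifted value, e.g. srar of 7 by 1 yields (7 >> 1) + 1 = 4 rather than 3.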
+@llvm_mips_srl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srl_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srl_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srl_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_srl_b_test
+;
+@llvm_mips_srl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srl_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srl_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srl_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_srl_h_test
+;
+@llvm_mips_srl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srl_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srl_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srl_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_srl_w_test
+;
+@llvm_mips_srl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srl_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srl_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srl_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_srl_d_test
+;
+@llvm_mips_srlr_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srlr_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srlr_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srlr_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srlr_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srlr_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srlr.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srlr_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_srlr_b_test
+;
+@llvm_mips_srlr_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srlr_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srlr_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srlr_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srlr_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srlr_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srlr.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srlr_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_srlr_h_test
+;
+@llvm_mips_srlr_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srlr_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srlr_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srlr_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srlr_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srlr_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srlr.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srlr_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_srlr_w_test
+;
+@llvm_mips_srlr_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srlr_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srlr_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srlr_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srlr_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srlr_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srlr.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srlr_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srlr_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srlr_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srlr.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_srlr_d_test
+;
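+; Note: srlr.df applies the same rounding as srar.df but shifts in zeroes,
+; so srlr of 7 by 1 likewise yields (7 >> 1) + 1 = 4.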
+
+define void @srl_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+ %2 = lshr <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+ ret void
+}
+
+; CHECK: srl_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size srl_b_test
+
+define void @srl_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+ %2 = lshr <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+ ret void
+}
+
+; CHECK: srl_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size srl_h_test
+
+define void @srl_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+ %2 = lshr <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+ ret void
+}
+
+; CHECK: srl_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size srl_w_test
+
+define void @srl_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+ %2 = lshr <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+ ret void
+}
+
+; CHECK: srl_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_srl_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_srl_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: srl.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size srl_d_test
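+; Note: the sll/sra/srl tests above use generic IR shifts, which select the
+; plain MSA shifts directly; the rounded variants (srar.df, srlr.df) have no
+; generic IR equivalent and are only reachable through the intrinsics.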
+
+@llvm_mips_subs_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subs_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subs_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subs_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subs_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subs_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subs_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subs_s_b_test
+;
+@llvm_mips_subs_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subs_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subs_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subs_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subs_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subs_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subs_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subs_s_h_test
+;
+@llvm_mips_subs_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subs_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subs_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subs_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subs_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subs_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subs_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subs_s_w_test
+;
+@llvm_mips_subs_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subs_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subs_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subs_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subs_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subs_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subs_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subs_s_d_test
+;
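+; Note: subs_s.df is a saturating signed subtract, e.g. for subs_s.b,
+; 100 - (-100) saturates to 127 instead of wrapping to -56.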
+@llvm_mips_subs_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subs_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subs_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subs_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subs_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subs_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subs_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subs_u_b_test
+;
+@llvm_mips_subs_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subs_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subs_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subs_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subs_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subs_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subs_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subs_u_h_test
+;
+@llvm_mips_subs_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subs_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subs_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subs_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subs_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subs_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subs_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subs_u_w_test
+;
+@llvm_mips_subs_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subs_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subs_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subs_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subs_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subs_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subs_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subs_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subs_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subs_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subs_u_d_test
+;
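+; Note: subs_u.df saturates an unsigned subtract at zero, e.g. for
+; subs_u.b, 10 - 20 yields 0 rather than wrapping to 246.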
+@llvm_mips_subsus_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subsus_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subsus_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subsus_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subsus_u_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subsus_u_b_test
+;
+@llvm_mips_subsus_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subsus_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subsus_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subsus_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subsus_u_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subsus_u_h_test
+;
+@llvm_mips_subsus_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subsus_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subsus_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subsus_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subsus_u_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subsus_u_w_test
+;
+@llvm_mips_subsus_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subsus_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subsus_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subsus_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subsus_u_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsus_u_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsus_u_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsus_u.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subsus_u_d_test
+;
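+; Note: subsus_u.df subtracts signed elements (wt) from unsigned elements
+; (ws) and saturates to the unsigned range, e.g. for subsus_u.b,
+; 250 - (-20) saturates to 255 and 10 - 20 saturates to 0.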
+@llvm_mips_subsuu_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subsuu_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subsuu_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subsuu_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_b_test
+;
+@llvm_mips_subsuu_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subsuu_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subsuu_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subsuu_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_h_test
+;
+@llvm_mips_subsuu_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subsuu_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subsuu_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subsuu_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_w_test
+;
+@llvm_mips_subsuu_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subsuu_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subsuu_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subsuu_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subsuu_s_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subsuu_s_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subsuu_s.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subsuu_s_d_test
+;
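+; Note: subsuu_s.df subtracts two unsigned vectors and saturates to the
+; signed range, e.g. for subsuu_s.b, 10 - 200 saturates to -128 and
+; 200 - 10 saturates to 127.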
+@llvm_mips_subv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size llvm_mips_subv_b_test
+;
+@llvm_mips_subv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size llvm_mips_subv_h_test
+;
+@llvm_mips_subv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size llvm_mips_subv_w_test
+;
+@llvm_mips_subv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size llvm_mips_subv_d_test
+;
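+; Note: subv.df is an ordinary modulo subtract, so the generic IR sub in
+; the tests below maps onto it directly.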
+
+define void @subv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+ %2 = sub <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+ ret void
+}
+
+; CHECK: subv_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_b_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_b_ARG2)
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.b [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.b [[WD]]
+; CHECK: .size subv_b_test
+
+define void @subv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+ %2 = sub <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+ ret void
+}
+
+; CHECK: subv_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_h_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_h_ARG2)
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.h [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.h [[WD]]
+; CHECK: .size subv_h_test
+
+define void @subv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+ %2 = sub <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+ ret void
+}
+
+; CHECK: subv_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_w_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_w_ARG2)
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.w [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.w [[WD]]
+; CHECK: .size subv_w_test
+
+define void @subv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+ %2 = sub <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+ ret void
+}
+
+; CHECK: subv_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_subv_d_ARG1)
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_subv_d_ARG2)
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WT:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: subv.d [[WD:\$w[0-9]+]], [[WS]], [[WT]]
+; CHECK-DAG: st.d [[WD]]
+; CHECK: .size subv_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r-v.ll b/test/CodeGen/Mips/msa/3r-v.ll
new file mode 100644
index 0000000000000..c9693f90d556f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r-v.ll
@@ -0,0 +1,105 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format.
+; There are many of these, so this file covers the ones beginning with 'v'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
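+; Note: per the MSA specification, vshf.df builds each result element from
+; the concatenation of the two data operands, indexed by the low bits of the
+; corresponding control element; control elements with bit 6 or bit 7 set
+; produce a zero element instead.
+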
+@llvm_mips_vshf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_vshf_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_vshf_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_vshf_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_vshf_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_vshf_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: vshf.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_vshf_b_test
+;
+@llvm_mips_vshf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_vshf_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_vshf_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_vshf_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_vshf_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_vshf_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: vshf.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_vshf_h_test
+;
+@llvm_mips_vshf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_vshf_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_vshf_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_vshf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_vshf_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_vshf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: vshf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_vshf_w_test
+;
+@llvm_mips_vshf_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_vshf_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_vshf_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_vshf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_vshf_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_vshf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: vshf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_vshf_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r_4r.ll b/test/CodeGen/Mips/msa/3r_4r.ll
new file mode 100644
index 0000000000000..b7fd7283788c6
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r_4r.ll
@@ -0,0 +1,206 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format and
+; also use the destination register as a third source (accumulator) operand.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
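+; Note: the first intrinsic argument is the accumulator (the instruction's
+; destination register read as a source): maddv.df computes
+; wd = wd + ws * wt and msubv.df computes wd = wd - ws * wt, element-wise
+; with modulo arithmetic.
+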
+@llvm_mips_maddv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maddv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_maddv_b_ARG3 = global <16 x i8> <i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47>, align 16
+@llvm_mips_maddv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maddv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_maddv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_maddv_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_maddv_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_maddv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: maddv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maddv_b_test
+;
+@llvm_mips_maddv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maddv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_maddv_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_maddv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maddv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maddv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_maddv_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_maddv_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_maddv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: maddv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maddv_h_test
+;
+@llvm_mips_maddv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maddv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_maddv_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_maddv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maddv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maddv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_maddv_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_maddv_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_maddv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: maddv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maddv_w_test
+;
+@llvm_mips_maddv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maddv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_maddv_d_ARG3 = global <2 x i64> <i64 4, i64 5>, align 16
+@llvm_mips_maddv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maddv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_maddv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_maddv_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_maddv_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_maddv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: maddv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maddv_d_test
+;
+@llvm_mips_msubv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_msubv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_msubv_b_ARG3 = global <16 x i8> <i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47>, align 16
+@llvm_mips_msubv_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_msubv_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_msubv_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_msubv_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_msubv_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_msubv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: msubv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_msubv_b_test
+;
+@llvm_mips_msubv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msubv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msubv_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msubv_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msubv_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_msubv_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_msubv_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_msubv_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msubv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msubv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msubv_h_test
+;
+@llvm_mips_msubv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msubv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msubv_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msubv_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msubv_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_msubv_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_msubv_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_msubv_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msubv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msubv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msubv_w_test
+;
+@llvm_mips_msubv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_msubv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_msubv_d_ARG3 = global <2 x i64> <i64 4, i64 5>, align 16
+@llvm_mips_msubv_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_msubv_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_msubv_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_msubv_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_msubv_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.msubv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_msubv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: msubv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_msubv_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r_4r_widen.ll b/test/CodeGen/Mips/msa/3r_4r_widen.ll
new file mode 100644
index 0000000000000..7063e4566a786
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -0,0 +1,307 @@
+; Test the MSA intrinsics that are encoded with the 3R instruction format,
+; also use the destination register as a third source (accumulator) operand,
+; and produce results with wider elements than the operands.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
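+; Note: dpadd widens as it accumulates; for dpadd_s.h each halfword result
+; is the accumulator element plus the dot product of the corresponding pair
+; of signed bytes, i.e. wd[i] = wd[i] + ws[2i]*wt[2i] + ws[2i+1]*wt[2i+1].
+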
+@llvm_mips_dpadd_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpadd_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpadd_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpadd_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpadd_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpadd_s_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpadd_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpadd_s_h_test
+;
+@llvm_mips_dpadd_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpadd_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpadd_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpadd_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpadd_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpadd_s_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpadd_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpadd_s_w_test
+;
+@llvm_mips_dpadd_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpadd_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpadd_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpadd_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpadd_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpadd_s_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpadd_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpadd_s_d_test
+;
+@llvm_mips_dpadd_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpadd_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpadd_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpadd_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpadd_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpadd_u_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpadd_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpadd_u_h_test
+;
+@llvm_mips_dpadd_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpadd_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpadd_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpadd_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpadd_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpadd_u_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpadd_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpadd_u_w_test
+;
+@llvm_mips_dpadd_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpadd_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpadd_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpadd_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpadd_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpadd_u_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpadd_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpadd_u_d_test
+;
+@llvm_mips_dpsub_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpsub_s_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpsub_s_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpsub_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpsub_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpsub_s_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpsub_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpsub_s_h_test
+;
+@llvm_mips_dpsub_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpsub_s_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpsub_s_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpsub_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpsub_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpsub_s_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpsub_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpsub_s_w_test
+;
+@llvm_mips_dpsub_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpsub_s_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpsub_s_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpsub_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpsub_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpsub_s_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpsub_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpsub_s_d_test
+;
+@llvm_mips_dpsub_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_dpsub_u_h_ARG2 = global <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, align 16
+@llvm_mips_dpsub_u_h_ARG3 = global <16 x i8> <i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39>, align 16
+@llvm_mips_dpsub_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_dpsub_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
+ %1 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
+ %2 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_dpsub_u_h_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.h
+; CHECK: dpsub_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_dpsub_u_h_test
+;
+@llvm_mips_dpsub_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_dpsub_u_w_ARG2 = global <8 x i16> <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>, align 16
+@llvm_mips_dpsub_u_w_ARG3 = global <8 x i16> <i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>, align 16
+@llvm_mips_dpsub_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_dpsub_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
+ %1 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
+ %2 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_dpsub_u_w_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.w
+; CHECK: dpsub_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_dpsub_u_w_test
+;
+@llvm_mips_dpsub_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_dpsub_u_d_ARG2 = global <4 x i32> <i32 2, i32 3, i32 4, i32 5>, align 16
+@llvm_mips_dpsub_u_d_ARG3 = global <4 x i32> <i32 6, i32 7, i32 8, i32 9>, align 16
+@llvm_mips_dpsub_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_dpsub_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
+ %1 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
+ %2 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_dpsub_u_d_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.d
+; CHECK: dpsub_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_dpsub_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3r_splat.ll b/test/CodeGen/Mips/msa/3r_splat.ll
new file mode 100644
index 0000000000000..6b0cb26f8c818
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3r_splat.ll
@@ -0,0 +1,94 @@
+; Test the MSA splat intrinsics that are encoded with the 3R instruction
+; format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck -check-prefix=MIPS32 %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck -check-prefix=MIPS32 %s
+
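+; Semantics sketch (per the MIPS MSA manual; comments only): splat.b
+; wd, ws[rt] copies the ws element selected by the GPR rt into every
+; element of wd; here $4 carries %a. If %a were 3, every byte of
+; llvm_mips_splat_b_RES would become ARG1[3] = 3.
+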
+@llvm_mips_splat_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_splat_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_splat_b_test(i32 %a) nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_splat_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_b_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_b_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_b_RES)(
+; MIPS32-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: splat.b [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS32-DAG: st.b [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_b_test
+
+@llvm_mips_splat_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_splat_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_splat_h_test(i32 %a) nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_splat_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_h_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_h_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_h_RES)(
+; MIPS32-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: splat.h [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS32-DAG: st.h [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_h_test
+
+@llvm_mips_splat_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_splat_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_splat_w_test(i32 %a) nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_splat_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_w_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_w_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_w_RES)(
+; MIPS32-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: splat.w [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS32-DAG: st.w [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_w_test
+
+@llvm_mips_splat_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_splat_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_splat_d_test(i32 %a) nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_splat_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.splat.d(<2 x i64>, i32) nounwind
+
+; MIPS32: llvm_mips_splat_d_test:
+; FIXME: This test is currently disabled for MIPS32 because the indices are
+; difficult to match: 64-bit values cannot be stored in a 32-bit GPR
+; (GPR32).
+; MIPS64-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_splat_d_ARG1)(
+; MIPS64-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_splat_d_RES)(
+; MIPS64-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS64-DAG: splat.d [[R4:\$w[0-9]+]], [[R3]][$4]
+; MIPS64-DAG: st.d [[R4]], 0([[R2]])
+; MIPS32: .size llvm_mips_splat_d_test
diff --git a/test/CodeGen/Mips/msa/3rf.ll b/test/CodeGen/Mips/msa/3rf.ll
new file mode 100644
index 0000000000000..ae665afcc9509
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf.ll
@@ -0,0 +1,485 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
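+; Semantics sketch (per the MIPS MSA manual; comments only): these map to
+; element-wise IEEE arithmetic, e.g. fadd.w computes wd[i] <- ws[i] + wt[i]
+; (element 0 of the first test: 0.0 + 4.0 = 4.0), and fmin_a/fmax_a select
+; by magnitude (smallest/largest absolute value). The fadd_w_test,
+; fdiv_w_test, fmul_w_test, and fsub_w_test functions below repeat the
+; operations with generic IR (fadd, fdiv, fmul, fsub) to check that
+; ordinary vector code selects the same MSA instructions.
+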
+@llvm_mips_fadd_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fadd_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fadd_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fadd_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fadd_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fadd.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fadd_w_test
+;
+@llvm_mips_fadd_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fadd_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fadd_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fadd_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fadd_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fadd.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fadd_d_test
+
+define void @fadd_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+ %2 = fadd <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+ ret void
+}
+
+; CHECK: fadd_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fadd.w
+; CHECK: st.w
+; CHECK: .size fadd_w_test
+
+define void @fadd_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+ %2 = fadd <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+ ret void
+}
+
+; CHECK: fadd_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fadd.d
+; CHECK: st.d
+; CHECK: .size fadd_d_test
+;
+@llvm_mips_fdiv_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fdiv_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fdiv_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fdiv_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fdiv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fdiv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fdiv_w_test
+;
+@llvm_mips_fdiv_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fdiv_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fdiv_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fdiv_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fdiv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fdiv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fdiv_d_test
+
+define void @fdiv_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+ %2 = fdiv <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+ ret void
+}
+
+; CHECK: fdiv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fdiv.w
+; CHECK: st.w
+; CHECK: .size fdiv_w_test
+
+define void @fdiv_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+ %2 = fdiv <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+ ret void
+}
+
+; CHECK: fdiv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fdiv.d
+; CHECK: st.d
+; CHECK: .size fdiv_d_test
+;
+@llvm_mips_fmin_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmin_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmin_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmin_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmin_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmin_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmin_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmin.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmin_w_test
+;
+@llvm_mips_fmin_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmin_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmin_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmin_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmin_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmin_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmin_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmin.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmin_d_test
+;
+@llvm_mips_fmin_a_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmin_a_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmin_a_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_a_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmin_a_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmin_a_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmin_a_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmin.a.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmin_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmin_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmin_a_w_test
+;
+@llvm_mips_fmin_a_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmin_a_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmin_a_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmin_a_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmin_a_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmin_a_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmin_a_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmin.a.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmin_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmin_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmin_a_d_test
+;
+@llvm_mips_fmax_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmax_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmax_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmax_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmax_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmax_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmax_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmax.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmax_w_test
+;
+@llvm_mips_fmax_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmax_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmax_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmax_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmax_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmax_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmax_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmax.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmax_d_test
+;
+@llvm_mips_fmax_a_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmax_a_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmax_a_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_a_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmax_a_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmax_a_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmax_a_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmax_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmax_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmax_a_w_test
+;
+@llvm_mips_fmax_a_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmax_a_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmax_a_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmax_a_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmax_a_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmax_a_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmax_a_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmax_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmax_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmax_a_d_test
+;
+@llvm_mips_fmul_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmul_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmul_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmul_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmul_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmul.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmul_w_test
+;
+@llvm_mips_fmul_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmul_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmul_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmul_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmul_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmul.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmul_d_test
+
+define void @fmul_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2
+ %2 = fmul <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
+ ret void
+}
+
+; CHECK: fmul_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmul.w
+; CHECK: st.w
+; CHECK: .size fmul_w_test
+
+define void @fmul_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2
+ %2 = fmul <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
+ ret void
+}
+
+; CHECK: fmul_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmul.d
+; CHECK: st.d
+; CHECK: .size fmul_d_test
+;
+@llvm_mips_fsub_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsub_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsub_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fsub_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsub_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsub.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsub_w_test
+;
+@llvm_mips_fsub_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsub_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsub_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fsub_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsub_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsub.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsub_d_test
+;
+
+define void @fsub_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2
+ %2 = fsub <4 x float> %0, %1
+ store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
+ ret void
+}
+
+; CHECK: fsub_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsub.w
+; CHECK: st.w
+; CHECK: .size fsub_w_test
+
+define void @fsub_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2
+ %2 = fsub <2 x double> %0, %1
+ store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
+ ret void
+}
+
+; CHECK: fsub_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsub.d
+; CHECK: st.d
+; CHECK: .size fsub_d_test
diff --git a/test/CodeGen/Mips/msa/3rf_4rf.ll b/test/CodeGen/Mips/msa/3rf_4rf.ll
new file mode 100644
index 0000000000000..67ef7fd2bae15
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_4rf.ll
@@ -0,0 +1,106 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format and
+; use the result as a third operand.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
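+; Semantics sketch (per the MIPS MSA manual; comments only): fmadd.w
+; computes wd[i] <- wd[i] + ws[i]*wt[i] and fmsub.w computes
+; wd[i] <- wd[i] - ws[i]*wt[i]; the destination doubles as the accumulator
+; input. Element 0 of the first test: 0.0 + 4.0*8.0 = 32.0.
+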
+@llvm_mips_fmadd_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmadd_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmadd_w_ARG3 = global <4 x float> <float 8.000000e+00, float 9.000000e+00, float 1.000000e+01, float 1.100000e+01>, align 16
+@llvm_mips_fmadd_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmadd_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmadd_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmadd_w_ARG2
+ %2 = load <4 x float>* @llvm_mips_fmadd_w_ARG3
+ %3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* @llvm_mips_fmadd_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmadd.w(<4 x float>, <4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmadd_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmadd.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmadd_w_test
+;
+@llvm_mips_fmadd_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmadd_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmadd_d_ARG3 = global <2 x double> <double 4.000000e+00, double 5.000000e+00>, align 16
+@llvm_mips_fmadd_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmadd_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmadd_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmadd_d_ARG2
+ %2 = load <2 x double>* @llvm_mips_fmadd_d_ARG3
+ %3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* @llvm_mips_fmadd_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmadd.d(<2 x double>, <2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmadd_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmadd.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmadd_d_test
+;
+@llvm_mips_fmsub_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fmsub_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fmsub_w_ARG3 = global <4 x float> <float 8.000000e+00, float 9.000000e+00, float 1.000000e+01, float 1.100000e+01>, align 16
+@llvm_mips_fmsub_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fmsub_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fmsub_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fmsub_w_ARG2
+ %2 = load <4 x float>* @llvm_mips_fmsub_w_ARG3
+ %3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* @llvm_mips_fmsub_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fmsub.w(<4 x float>, <4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fmsub_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fmsub.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fmsub_w_test
+;
+@llvm_mips_fmsub_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fmsub_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fmsub_d_ARG3 = global <2 x double> <double 4.000000e+00, double 5.000000e+00>, align 16
+@llvm_mips_fmsub_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fmsub_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fmsub_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fmsub_d_ARG2
+ %2 = load <2 x double>* @llvm_mips_fmsub_d_ARG3
+ %3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* @llvm_mips_fmsub_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fmsub.d(<2 x double>, <2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fmsub_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fmsub.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fmsub_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_4rf_q.ll b/test/CodeGen/Mips/msa/3rf_4rf_q.ll
new file mode 100644
index 0000000000000..de28be0b1c224
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_4rf_q.ll
@@ -0,0 +1,206 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format and
+; use the result as a third operand and perform fixed-point operations.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
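+; Semantics sketch (per the MIPS MSA manual; comments only): the _q
+; intrinsics treat elements as Q15 (.h) or Q31 (.w) fixed-point fractions,
+; e.g. madd_q.h computes wd[i] <- sat(wd[i] + (ws[i]*wt[i] >> 15)) with a
+; truncated product, while the maddr_q/msubr_q variants round to nearest.
+; The small bit patterns used here make every product underflow to zero,
+; so each result would simply equal the accumulator argument.
+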
+@llvm_mips_madd_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_madd_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_madd_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_madd_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_madd_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_madd_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_madd_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_madd_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_madd_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: madd_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_madd_q_h_test
+;
+@llvm_mips_madd_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_madd_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_madd_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_madd_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_madd_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_madd_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_madd_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_madd_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_madd_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: madd_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_madd_q_w_test
+;
+@llvm_mips_maddr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maddr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_maddr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_maddr_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maddr_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_maddr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: maddr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maddr_q_h_test
+;
+@llvm_mips_maddr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maddr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_maddr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_maddr_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maddr_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_maddr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: maddr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maddr_q_w_test
+;
+@llvm_mips_msub_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msub_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msub_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msub_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msub_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_msub_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_msub_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_msub_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msub_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msub_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msub_q_h_test
+;
+@llvm_mips_msub_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msub_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msub_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msub_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msub_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_msub_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_msub_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_msub_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msub_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msub_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msub_q_w_test
+;
+@llvm_mips_msubr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msubr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msubr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msubr_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msubr_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msubr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msubr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msubr_q_h_test
+;
+@llvm_mips_msubr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msubr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msubr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msubr_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msubr_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.msubr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msubr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msubr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msubr_q_w_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_exdo.ll b/test/CodeGen/Mips/msa/3rf_exdo.ll
new file mode 100644
index 0000000000000..8a7f268a5069c
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_exdo.ll
@@ -0,0 +1,50 @@
+; Test the MSA floating-point conversion intrinsics that are encoded with the
+; 3RF instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
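+; Semantics sketch (per the MIPS MSA manual; comments only): fexdo.h
+; down-converts each f32 element to f16 and packs the two source vectors
+; into one 8 x half result; fexdo.w likewise narrows f64 to f32. Hence the
+; loads below use the source width (ld.w / ld.d) while the stores use the
+; narrowed width (st.h / st.w).
+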
+@llvm_mips_fexdo_h_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexdo_h_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fexdo_h_RES = global <8 x half> <half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00, half 0.000000e+00>, align 16
+
+define void @llvm_mips_fexdo_h_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexdo_h_ARG1
+ %1 = load <4 x float>* @llvm_mips_fexdo_h_ARG2
+ %2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1)
+ store <8 x half> %2, <8 x half>* @llvm_mips_fexdo_h_RES
+ ret void
+}
+
+declare <8 x half> @llvm.mips.fexdo.h(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fexdo_h_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fexdo.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_fexdo_h_test
+;
+@llvm_mips_fexdo_w_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fexdo_w_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fexdo_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexdo_w_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fexdo_w_ARG1
+ %1 = load <2 x double>* @llvm_mips_fexdo_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fexdo_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexdo.w(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fexdo_w_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fexdo.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexdo_w_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_float_int.ll b/test/CodeGen/Mips/msa/3rf_float_int.ll
new file mode 100644
index 0000000000000..7b01e1721db90
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_float_int.ll
@@ -0,0 +1,50 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format and
+; take an integer as an operand.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
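+; Semantics sketch (per the MIPS MSA manual; comments only): fexp2 scales
+; each element by a power of two taken from the integer operand:
+;   wd[i] <- ws[i] * 2^wt[i]
+; e.g. element 1 of the first test is 1.0 * 2^5 = 32.0.
+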
+@llvm_mips_fexp2_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fexp2_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_fexp2_w_RES = global <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, align 16
+
+define void @llvm_mips_fexp2_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fexp2_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_fexp2_w_ARG2
+ %2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1)
+ store <4 x float> %2, <4 x float>* @llvm_mips_fexp2_w_RES
+ ret void
+}
+
+declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_fexp2_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fexp2.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fexp2_w_test
+;
+@llvm_mips_fexp2_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fexp2_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_fexp2_d_RES = global <2 x double> <double 0.000000e+00, double 0.000000e+00>, align 16
+
+define void @llvm_mips_fexp2_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fexp2_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_fexp2_d_ARG2
+ %2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1)
+ store <2 x double> %2, <2 x double>* @llvm_mips_fexp2_d_RES
+ ret void
+}
+
+declare <2 x double> @llvm.mips.fexp2.d(<2 x double>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_fexp2_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fexp2.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fexp2_d_test
+;
diff --git a/test/CodeGen/Mips/msa/3rf_int_float.ll b/test/CodeGen/Mips/msa/3rf_int_float.ll
new file mode 100644
index 0000000000000..5624771b8357f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_int_float.ll
@@ -0,0 +1,974 @@
+; Test the MSA intrinsics that are encoded with the 3RF instruction format and
+; produce an integer as a result.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
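+; Semantics sketch (per the MIPS MSA manual; comments only): the fc*
+; compares write a per-element integer mask, all ones where the predicate
+; holds and all zeros where it does not. fcaf is "always false" (constant
+; zero mask), fceq/fcle/fclt test equal/less-or-equal/less-than, and fcor
+; is "ordered" (neither element is NaN). With the NaN-free inputs here,
+; fcor would yield an all-ones mask and fceq all zeros.
+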
+@llvm_mips_fcaf_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcaf_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcaf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcaf_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcaf_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcaf_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcaf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcaf.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcaf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcaf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcaf_w_test
+;
+@llvm_mips_fcaf_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcaf_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcaf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcaf_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcaf_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcaf_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcaf_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcaf.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcaf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcaf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcaf_d_test
+;
+@llvm_mips_fceq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fceq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fceq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fceq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fceq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fceq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fceq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fceq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fceq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fceq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fceq_w_test
+;
+@llvm_mips_fceq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fceq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fceq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fceq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fceq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fceq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fceq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fceq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fceq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fceq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fceq_d_test
+;
+@llvm_mips_fcle_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcle_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcle_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcle_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcle_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcle_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcle_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcle.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcle_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcle.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcle_w_test
+;
+@llvm_mips_fcle_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcle_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcle_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcle_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcle_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcle_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcle_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcle.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcle_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcle.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcle_d_test
+;
+@llvm_mips_fclt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fclt_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fclt_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fclt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fclt_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fclt_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fclt_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fclt.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fclt_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fclt.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fclt_w_test
+;
+@llvm_mips_fclt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fclt_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fclt_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fclt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fclt_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fclt_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fclt_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fclt.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fclt_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fclt.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fclt_d_test
+;
+@llvm_mips_fcor_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcor_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcor_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcor_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcor_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcor_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcor_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcor.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcor_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcor.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcor_w_test
+;
+@llvm_mips_fcor_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcor_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcor_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcor_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcor_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcor_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcor_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcor.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcor_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcor.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcor_d_test
+;
+@llvm_mips_fcne_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcne_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcne_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcne_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcne_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcne_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcne_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcne.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcne_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcne.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcne_w_test
+;
+@llvm_mips_fcne_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcne_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcne_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcne_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcne_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcne_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcne_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcne.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcne_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcne.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcne_d_test
+;
+@llvm_mips_fcueq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcueq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcueq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcueq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcueq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcueq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcueq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcueq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcueq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcueq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcueq_w_test
+;
+@llvm_mips_fcueq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcueq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcueq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcueq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcueq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcueq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcueq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcueq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcueq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcueq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcueq_d_test
+;
+@llvm_mips_fcult_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcult_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcult_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcult_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcult_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcult_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcult_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcult.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcult_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcult.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcult_w_test
+;
+@llvm_mips_fcult_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcult_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcult_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcult_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcult_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcult_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcult_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcult.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcult_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcult.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcult_d_test
+;
+@llvm_mips_fcule_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcule_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcule_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcule_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcule_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcule_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcule_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcule.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcule_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcule.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcule_w_test
+;
+@llvm_mips_fcule_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcule_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcule_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcule_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcule_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcule_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcule_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcule.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcule_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcule.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcule_d_test
+;
+@llvm_mips_fcun_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcun_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcun_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcun_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcun_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcun_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcun_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcun.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcun_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcun.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcun_w_test
+;
+@llvm_mips_fcun_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcun_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcun_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcun_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcun_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcun_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcun_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcun.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcun_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcun.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcun_d_test
+;
+@llvm_mips_fcune_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fcune_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fcune_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fcune_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fcune_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fcune_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fcune_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fcune.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fcune_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fcune.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fcune_w_test
+;
+@llvm_mips_fcune_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fcune_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fcune_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fcune_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fcune_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fcune_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fcune_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fcune.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fcune_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fcune.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fcune_d_test
+;
+@llvm_mips_fsaf_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsaf_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsaf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsaf_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsaf_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsaf_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsaf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsaf.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsaf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsaf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsaf_w_test
+;
+@llvm_mips_fsaf_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsaf_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsaf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsaf_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsaf_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsaf_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsaf_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsaf.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsaf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsaf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsaf_d_test
+;
+@llvm_mips_fseq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fseq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fseq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fseq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fseq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fseq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fseq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fseq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fseq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fseq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fseq_w_test
+;
+@llvm_mips_fseq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fseq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fseq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fseq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fseq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fseq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fseq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fseq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fseq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fseq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fseq_d_test
+;
+@llvm_mips_fsle_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsle_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsle_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsle_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsle_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsle_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsle_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsle.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsle_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsle.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsle_w_test
+;
+@llvm_mips_fsle_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsle_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsle_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsle_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsle_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsle_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsle_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsle.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsle_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsle.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsle_d_test
+;
+@llvm_mips_fslt_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fslt_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fslt_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fslt_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fslt_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fslt_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fslt_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fslt.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fslt_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fslt.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fslt_w_test
+;
+@llvm_mips_fslt_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fslt_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fslt_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fslt_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fslt_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fslt_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fslt_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fslt.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fslt_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fslt.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fslt_d_test
+;
+@llvm_mips_fsor_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsor_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsor_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsor_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsor_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsor_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsor_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsor.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsor_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsor.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsor_w_test
+;
+@llvm_mips_fsor_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsor_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsor_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsor_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsor_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsor_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsor_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsor.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsor_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsor.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsor_d_test
+;
+@llvm_mips_fsne_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsne_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsne_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsne_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsne_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsne_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsne_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsne.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsne_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsne.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsne_w_test
+;
+@llvm_mips_fsne_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsne_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsne_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsne_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsne_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsne_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsne_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsne.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsne_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsne.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsne_d_test
+;
+@llvm_mips_fsueq_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsueq_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsueq_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsueq_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsueq_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsueq_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsueq_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsueq.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsueq_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsueq.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsueq_w_test
+;
+@llvm_mips_fsueq_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsueq_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsueq_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsueq_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsueq_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsueq_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsueq_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsueq.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsueq_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsueq.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsueq_d_test
+;
+@llvm_mips_fsult_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsult_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsult_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsult_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsult_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsult_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsult_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsult.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsult_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsult.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsult_w_test
+;
+@llvm_mips_fsult_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsult_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsult_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsult_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsult_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsult_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsult.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsult_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsult.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsult_d_test
+;
+@llvm_mips_fsule_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsule_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsule_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsule_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsule_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsule_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsule.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsule_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsule.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsule_w_test
+;
+@llvm_mips_fsule_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsule_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsule_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsule_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsule_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsule_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsule.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsule_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsule.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsule_d_test
+;
+@llvm_mips_fsun_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsun_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsun_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsun_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsun_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsun_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsun.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsun_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsun.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsun_w_test
+;
+@llvm_mips_fsun_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsun_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsun_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsun_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsun_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsun_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsun.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsun_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsun.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsun_d_test
+;
+@llvm_mips_fsune_w_ARG1 = global <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>, align 16
+@llvm_mips_fsune_w_ARG2 = global <4 x float> <float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00>, align 16
+@llvm_mips_fsune_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_fsune_w_test() nounwind {
+entry:
+ %0 = load <4 x float>* @llvm_mips_fsune_w_ARG1
+ %1 = load <4 x float>* @llvm_mips_fsune_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.fsune.w(<4 x float>, <4 x float>) nounwind
+
+; CHECK: llvm_mips_fsune_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: fsune.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_fsune_w_test
+;
+@llvm_mips_fsune_d_ARG1 = global <2 x double> <double 0.000000e+00, double 1.000000e+00>, align 16
+@llvm_mips_fsune_d_ARG2 = global <2 x double> <double 2.000000e+00, double 3.000000e+00>, align 16
+@llvm_mips_fsune_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_fsune_d_test() nounwind {
+entry:
+ %0 = load <2 x double>* @llvm_mips_fsune_d_ARG1
+ %1 = load <2 x double>* @llvm_mips_fsune_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.fsune.d(<2 x double>, <2 x double>) nounwind
+
+; CHECK: llvm_mips_fsune_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: fsune.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_fsune_d_test
+;
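For reference, assumed from the MIPS MSA specification rather than taken from this patch: each floating-point compare above writes an all-ones element where the predicate holds and an all-zeros element where it does not, with the fc* forms quiet (trapping only on signaling NaN) and the fs* forms signaling. A minimal generic-IR sketch of the mask that @llvm.mips.fceq.w produces:

define <4 x i32> @fceq_w_reference(<4 x float> %a, <4 x float> %b) nounwind {
entry:
  ; reference sketch only, not part of the patch: per-lane ordered equality,
  ; with true lanes sign-extended to 0xffffffff
  %cmp = fcmp oeq <4 x float> %a, %b
  %res = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %res
}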
diff --git a/test/CodeGen/Mips/msa/3rf_q.ll b/test/CodeGen/Mips/msa/3rf_q.ll
new file mode 100644
index 0000000000000..f7000ee913a66
--- /dev/null
+++ b/test/CodeGen/Mips/msa/3rf_q.ll
@@ -0,0 +1,94 @@
+; Test the MSA fixed-point intrinsics that are encoded with the 3RF instruction
+; format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_mul_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mul_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mul_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mul_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mul_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mul_q_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mul_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mul_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mul_q_h_test
+;
+@llvm_mips_mul_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mul_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mul_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mul_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mul_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mul_q_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mul_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mul_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mul_q_w_test
+;
+@llvm_mips_mulr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mulr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mulr_q_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mulr_q_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mulr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mulr_q_h_test
+;
+@llvm_mips_mulr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mulr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mulr_q_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mulr_q_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mulr.q.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mulr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mulr_q_w_test
+;
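For reference, assumed from the MIPS MSA specification rather than taken from this patch: mul_q.h and mul_q.w treat the lanes as Q15 and Q31 fixed-point values and keep the renormalised high bits of the product, while mulr_q.h and mulr_q.w additionally round before the shift. A minimal scalar sketch of one Q15 lane, ignoring the saturating -1.0 * -1.0 corner case and the mulr_q rounding bias:

define i16 @mul_q15_lane_sketch(i16 %a, i16 %b) nounwind {
entry:
  ; sketch only: the real instruction also saturates the 0x8000 * 0x8000 case
  %a32 = sext i16 %a to i32
  %b32 = sext i16 %b to i32
  %prod = mul i32 %a32, %b32   ; Q30 product of two Q15 inputs
  %q15 = ashr i32 %prod, 15    ; renormalise back to Q15
  %res = trunc i32 %q15 to i16
  ret i16 %res
}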
diff --git a/test/CodeGen/Mips/msa/arithmetic.ll b/test/CodeGen/Mips/msa/arithmetic.ll
new file mode 100644
index 0000000000000..09ee5023c7b19
--- /dev/null
+++ b/test/CodeGen/Mips/msa/arithmetic.ll
@@ -0,0 +1,726 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: add_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <16 x i8> %1, %2
+ ; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v16i8
+}
+
+define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: add_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <8 x i16> %1, %2
+ ; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v8i16
+}
+
+define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: add_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <4 x i32> %1, %2
+ ; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v4i32
+}
+
+define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: add_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = add <2 x i64> %1, %2
+ ; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v2i64
+}
+
+define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: add_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: addvi.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v16i8_i
+}
+
+define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: add_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: addvi.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v8i16_i
+}
+
+define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: add_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: addvi.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v4i32_i
+}
+
+define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: add_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = add <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: addvi.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v2i64_i
+}
+
+define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: sub_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <16 x i8> %1, %2
+ ; CHECK-DAG: subv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v16i8
+}
+
+define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: sub_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <8 x i16> %1, %2
+ ; CHECK-DAG: subv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v8i16
+}
+
+define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: sub_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <4 x i32> %1, %2
+ ; CHECK-DAG: subv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v4i32
+}
+
+define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: sub_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = sub <2 x i64> %1, %2
+ ; CHECK-DAG: subv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v2i64
+}
+
+define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: sub_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: subvi.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v16i8_i
+}
+
+define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: sub_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: subvi.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v8i16_i
+}
+
+define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: sub_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: subvi.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v4i32_i
+}
+
+define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: sub_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = sub <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: subvi.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v2i64_i
+}
+
+define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: mul_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <16 x i8> %1, %2
+ ; CHECK-DAG: mulv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v16i8
+}
+
+define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: mul_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <8 x i16> %1, %2
+ ; CHECK-DAG: mulv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v8i16
+}
+
+define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: mul_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <4 x i32> %1, %2
+ ; CHECK-DAG: mulv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v4i32
+}
+
+define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: mul_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = mul <2 x i64> %1, %2
+ ; CHECK-DAG: mulv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v2i64
+}
+
+define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: maddv_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <16 x i8> %2, %3
+ %5 = add <16 x i8> %4, %1
+ ; CHECK-DAG: maddv.b [[R1]], [[R2]], [[R3]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v16i8
+}
+
+define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: maddv_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <8 x i16> %2, %3
+ %5 = add <8 x i16> %4, %1
+ ; CHECK-DAG: maddv.h [[R1]], [[R2]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v8i16
+}
+
+define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: maddv_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <4 x i32> %2, %3
+ %5 = add <4 x i32> %4, %1
+ ; CHECK-DAG: maddv.w [[R1]], [[R2]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v4i32
+}
+
+define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: maddv_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <2 x i64> %2, %3
+ %5 = add <2 x i64> %4, %1
+ ; CHECK-DAG: maddv.d [[R1]], [[R2]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size maddv_v2i64
+}
+
+define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: msubv_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <16 x i8> %2, %3
+ %5 = sub <16 x i8> %1, %4
+ ; CHECK-DAG: msubv.b [[R1]], [[R2]], [[R3]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v16i8
+}
+
+define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: msubv_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <8 x i16> %2, %3
+ %5 = sub <8 x i16> %1, %4
+ ; CHECK-DAG: msubv.h [[R1]], [[R2]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v8i16
+}
+
+define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: msubv_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <4 x i32> %2, %3
+ %5 = sub <4 x i32> %1, %4
+ ; CHECK-DAG: msubv.w [[R1]], [[R2]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v4i32
+}
+
+define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: msubv_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = mul <2 x i64> %2, %3
+ %5 = sub <2 x i64> %1, %4
+ ; CHECK-DAG: msubv.d [[R1]], [[R2]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size msubv_v2i64
+}
+
+define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: div_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <16 x i8> %1, %2
+ ; CHECK-DAG: div_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v16i8
+}
+
+define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: div_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <8 x i16> %1, %2
+ ; CHECK-DAG: div_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v8i16
+}
+
+define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: div_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <4 x i32> %1, %2
+ ; CHECK-DAG: div_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v4i32
+}
+
+define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: div_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = sdiv <2 x i64> %1, %2
+ ; CHECK-DAG: div_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_s_v2i64
+}
+
+define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: div_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <16 x i8> %1, %2
+ ; CHECK-DAG: div_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v16i8
+}
+
+define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: div_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <8 x i16> %1, %2
+ ; CHECK-DAG: div_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v8i16
+}
+
+define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: div_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <4 x i32> %1, %2
+ ; CHECK-DAG: div_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v4i32
+}
+
+define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: div_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = udiv <2 x i64> %1, %2
+ ; CHECK-DAG: div_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size div_u_v2i64
+}
+
+define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: mod_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <16 x i8> %1, %2
+ ; CHECK-DAG: mod_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v16i8
+}
+
+define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: mod_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <8 x i16> %1, %2
+ ; CHECK-DAG: mod_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v8i16
+}
+
+define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: mod_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <4 x i32> %1, %2
+ ; CHECK-DAG: mod_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v4i32
+}
+
+define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: mod_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = srem <2 x i64> %1, %2
+ ; CHECK-DAG: mod_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_s_v2i64
+}
+
+define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: mod_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <16 x i8> %1, %2
+ ; CHECK-DAG: mod_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v16i8
+}
+
+define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: mod_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <8 x i16> %1, %2
+ ; CHECK-DAG: mod_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v8i16
+}
+
+define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: mod_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <4 x i32> %1, %2
+ ; CHECK-DAG: mod_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v4i32
+}
+
+define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: mod_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = urem <2 x i64> %1, %2
+ ; CHECK-DAG: mod_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mod_u_v2i64
+}
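Unlike the intrinsic tests, the tests above select MSA instructions from generic IR, so the addvi/subvi forms only fire when every lane adds or subtracts the same small immediate splat. As a contrasting sketch (assumed behaviour, not part of the patch), a non-splat constant cannot use addvi.w and would be expected to materialise the constant vector and use addv.w instead:

define void @add_v4i32_nonsplat_sketch(<4 x i32>* %c, <4 x i32>* %a) nounwind {
  ; assumption: <1, 2, 3, 4> is not a splat, so no addvi.w encoding applies
  %1 = load <4 x i32>* %a
  %2 = add <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  store <4 x i32> %2, <4 x i32>* %c
  ret void
}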
diff --git a/test/CodeGen/Mips/msa/arithmetic_float.ll b/test/CodeGen/Mips/msa/arithmetic_float.ll
new file mode 100644
index 0000000000000..dc38721292056
--- /dev/null
+++ b/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -0,0 +1,456 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: add_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fadd <4 x float> %1, %2
+ ; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v4f32
+}
+
+define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: add_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fadd <2 x double> %1, %2
+ ; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size add_v2f64
+}
+
+define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: sub_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fsub <4 x float> %1, %2
+ ; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v4f32
+}
+
+define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: sub_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fsub <2 x double> %1, %2
+ ; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sub_v2f64
+}
+
+define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: mul_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fmul <4 x float> %1, %2
+ ; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v4f32
+}
+
+define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: mul_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fmul <2 x double> %1, %2
+ ; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mul_v2f64
+}
+
+define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: fma_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x float>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2,
+ <4 x float> %3)
+ ; CHECK-DAG: fmadd.w [[R1]], [[R2]], [[R3]]
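+ ; fmadd.w computes [[R1]] + ([[R2]] * [[R3]]) in place; the accumulator is
+ ; also the destination register, so no new register is bound for the result.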
+ store <4 x float> %4, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fma_v4f32
+}
+
+define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: fma_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x double>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2,
+ <2 x double> %3)
+ ; CHECK-DAG: fmadd.d [[R1]], [[R2]], [[R3]]
+ store <2 x double> %4, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fma_v2f64
+}
+
+define void @fmsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: fmsub_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x float>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = fmul <4 x float> %2, %3
+ %5 = fsub <4 x float> %1, %4
+ ; CHECK-DAG: fmsub.w [[R1]], [[R2]], [[R3]]
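+ ; fmsub.w computes [[R1]] - ([[R2]] * [[R3]]) in place, mirroring fmadd.w.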
+ store <4 x float> %5, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fmsub_v4f32
+}
+
+define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: fmsub_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x double>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = fmul <2 x double> %2, %3
+ %5 = fsub <2 x double> %1, %4
+ ; CHECK-DAG: fmsub.d [[R1]], [[R2]], [[R3]]
+ store <2 x double> %5, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size fmsub_v2f64
+}
+
+define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: fdiv_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fdiv <4 x float> %1, %2
+ ; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fdiv_v4f32
+}
+
+define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: fdiv_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fdiv <2 x double> %1, %2
+ ; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fdiv_v2f64
+}
+
+define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fabs_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
+ ; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
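+ ; MSA has no dedicated fabs instruction; fmax_a writes the larger of the
+ ; operands' absolute values, so fmax_a(x, x) yields |x|.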
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fabs_v4f32
+}
+
+define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: fabs_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
+ ; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fabs_v2f64
+}
+
+define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fexp2_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
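+ ; fexp2.w computes ws * 2^wt, so exp2(x) is selected as fexp2 with a splat
+ ; of 1.0 (built here with ldi.w/ffint_u.w) as the scaling operand.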
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: ffint_u.w [[R4:\$w[0-9]+]], [[R3]]
+ ; CHECK-DAG: fexp2.w [[R5:\$w[0-9]+]], [[R4]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R5]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v4f32
+}
+
+define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: fexp2_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: ffint_u.d [[R4:\$w[0-9]+]], [[R3]]
+ ; CHECK-DAG: fexp2.d [[R5:\$w[0-9]+]], [[R4]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R5]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v2f64
+}
+
+define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fexp2_v4f32_2:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
+ %3 = fmul <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, %2
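+ ; The multiply by 2.0 folds into fexp2's scaling operand: 16384 << 16 is
+ ; 0x40000000, the bit pattern of 2.0f, splatted into a vector with fill.w.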
+ ; CHECK-DAG: lui [[R3:\$[0-9]+]], 16384
+ ; CHECK-DAG: fill.w [[R4:\$w[0-9]+]], [[R3]]
+ ; CHECK-DAG: fexp2.w [[R5:\$w[0-9]+]], [[R4]], [[R1]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R5]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v4f32_2
+}
+
+define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: .8byte 4611686018427387904
+ ; CHECK-NEXT: .8byte 4611686018427387904
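+ ; (4611686018427387904 is 0x4000000000000000, the bit pattern of double
+ ; 2.0; the f64 splat comes from the constant pool rather than fill.)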
+ ; CHECK: fexp2_v2f64_2:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
+ %3 = fmul <2 x double> <double 2.0, double 2.0>, %2
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo(
+ ; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size fexp2_v2f64_2
+}
+
+define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: fsqrt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
+ ; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fsqrt_v4f32
+}
+
+define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: fsqrt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
+ ; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size fsqrt_v2f64
+}
+
+define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ffint_u_v4f32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = uitofp <4 x i32> %1 to <4 x float>
+ ; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_u_v4f32
+}
+
+define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ffint_u_v2f64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = uitofp <2 x i64> %1 to <2 x double>
+ ; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_u_v2f64
+}
+
+define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ffint_s_v4f32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = sitofp <4 x i32> %1 to <4 x float>
+ ; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x float> %2, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_s_v4f32
+}
+
+define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ffint_s_v2f64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = sitofp <2 x i64> %1 to <2 x double>
+ ; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x double> %2, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ffint_s_v2f64
+}
+
+define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: ftrunc_u_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptoui <4 x float> %1 to <4 x i32>
+ ; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_u_v4f32
+}
+
+define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: ftrunc_u_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptoui <2 x double> %1 to <2 x i64>
+ ; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_u_v2f64
+}
+
+define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+ ; CHECK: ftrunc_s_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptosi <4 x float> %1 to <4 x i32>
+ ; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_s_v4f32
+}
+
+define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+ ; CHECK: ftrunc_s_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = fptosi <2 x double> %1 to <2 x i64>
+ ; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ftrunc_s_v2f64
+}
+
+declare <4 x float> @llvm.fabs.v4f32(<4 x float> %Val)
+declare <2 x double> @llvm.fabs.v2f64(<2 x double> %Val)
+declare <4 x float> @llvm.exp2.v4f32(<4 x float> %val)
+declare <2 x double> @llvm.exp2.v2f64(<2 x double> %val)
+declare <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b,
+ <4 x float> %c)
+declare <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b,
+ <2 x double> %c)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %Val)
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double> %Val)
diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll
new file mode 100644
index 0000000000000..0169a0780d36e
--- /dev/null
+++ b/test/CodeGen/Mips/msa/basic_operations.ll
@@ -0,0 +1,481 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-BE %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-LE %s
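+
+; The MIPS32-AE prefix carries the checks shared by both endiannesses;
+; MIPS32-BE and MIPS32-LE carry the endian-specific output.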
+
+@v4i8 = global <4 x i8> <i8 0, i8 0, i8 0, i8 0>
+@v16i8 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+@v8i16 = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+@v4i32 = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+@v2i64 = global <2 x i64> <i64 0, i64 0>
+@i64 = global i64 0
+
+define void @const_v16i8() nounwind {
+ ; MIPS32-AE: const_v16i8:
+
+ store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
+
+ store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
+ ; MIPS32-BE: ldi.h [[R1:\$w[0-9]+]], 256
+ ; MIPS32-LE: ldi.h [[R1:\$w[0-9]+]], 1
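+ ; (The repeating byte pair <1, 0> forms one halfword: 0x0100 (256) when
+ ; read big-endian, 0x0001 when read little-endian.)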
+
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, <16 x i8>*@v16i8
+ ; MIPS32-BE-DAG: lui [[R2:\$[0-9]+]], 258
+ ; MIPS32-LE-DAG: lui [[R2:\$[0-9]+]], 1027
+ ; MIPS32-BE-DAG: ori [[R2]], [[R2]], 772
+ ; MIPS32-LE-DAG: ori [[R2]], [[R2]], 513
+ ; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
+
+ store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v16i8
+}
+
+define void @const_v8i16() nounwind {
+ ; MIPS32-AE: const_v8i16:
+
+ store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16
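+ ; 1028 is 0x0404, a splat of the byte 4, so the narrower ldi.b form suffices.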
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 4
+
+ store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, <8 x i16>*@v8i16
+ ; MIPS32-BE-DAG: lui [[R2:\$[0-9]+]], 1
+ ; MIPS32-LE-DAG: lui [[R2:\$[0-9]+]], 2
+ ; MIPS32-BE-DAG: ori [[R2]], [[R2]], 2
+ ; MIPS32-LE-DAG: ori [[R2]], [[R2]], 1
+ ; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
+
+ store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v8i16
+}
+
+define void @const_v4i32() nounwind {
+ ; MIPS32-AE: const_v4i32:
+
+ store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ldi.w [[R1:\$w[0-9]+]], 1
+
+ store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, <4 x i32>*@v4i32
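+ ; 16843009 is 0x01010101, a splat of the byte 1, and 65537 below is
+ ; 0x00010001, a splat of the halfword 1.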
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
+
+ store volatile <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v4i32
+}
+
+define void @const_v2i64() nounwind {
+ ; MIPS32-AE: const_v2i64:
+
+ store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
+
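+ ; The next three i64 constants are byte, halfword, and word splats
+ ; (0x0101010101010101, 0x0001000100010001, 0x0000000100000001), so the
+ ; narrowest ldi encoding is expected for each.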
+ store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 4294967297, i64 4294967297>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.w [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 1, i64 1>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ldi.d [[R1:\$w[0-9]+]], 1
+
+ store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32-AE: .size const_v2i64
+}
+
+define void @nonconst_v16i8(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g, i8 %h) nounwind {
+ ; MIPS32-AE: nonconst_v16i8:
+
+ %1 = insertelement <16 x i8> undef, i8 %a, i32 0
+ %2 = insertelement <16 x i8> %1, i8 %b, i32 1
+ %3 = insertelement <16 x i8> %2, i8 %c, i32 2
+ %4 = insertelement <16 x i8> %3, i8 %d, i32 3
+ %5 = insertelement <16 x i8> %4, i8 %e, i32 4
+ %6 = insertelement <16 x i8> %5, i8 %f, i32 5
+ %7 = insertelement <16 x i8> %6, i8 %g, i32 6
+ %8 = insertelement <16 x i8> %7, i8 %h, i32 7
+ %9 = insertelement <16 x i8> %8, i8 %h, i32 8
+ %10 = insertelement <16 x i8> %9, i8 %h, i32 9
+ %11 = insertelement <16 x i8> %10, i8 %h, i32 10
+ %12 = insertelement <16 x i8> %11, i8 %h, i32 11
+ %13 = insertelement <16 x i8> %12, i8 %h, i32 12
+ %14 = insertelement <16 x i8> %13, i8 %h, i32 13
+ %15 = insertelement <16 x i8> %14, i8 %h, i32 14
+ %16 = insertelement <16 x i8> %15, i8 %h, i32 15
+ ; MIPS32-AE-DAG: insert.b [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE-DAG: insert.b [[R1]][1], $5
+ ; MIPS32-AE-DAG: insert.b [[R1]][2], $6
+ ; MIPS32-AE-DAG: insert.b [[R1]][3], $7
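+ ; Arguments five onwards arrive in 4-byte stack slots starting at 16($sp);
+ ; big-endian reads the byte at offset +3 within each slot, little-endian at
+ ; offset +0.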
+ ; MIPS32-BE-DAG: lbu [[R2:\$[0-9]+]], 19($sp)
+ ; MIPS32-LE-DAG: lbu [[R2:\$[0-9]+]], 16($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][4], [[R2]]
+ ; MIPS32-BE-DAG: lbu [[R3:\$[0-9]+]], 23($sp)
+ ; MIPS32-LE-DAG: lbu [[R3:\$[0-9]+]], 20($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][5], [[R3]]
+ ; MIPS32-BE-DAG: lbu [[R4:\$[0-9]+]], 27($sp)
+ ; MIPS32-LE-DAG: lbu [[R4:\$[0-9]+]], 24($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][6], [[R4]]
+ ; MIPS32-BE-DAG: lbu [[R5:\$[0-9]+]], 31($sp)
+ ; MIPS32-LE-DAG: lbu [[R5:\$[0-9]+]], 28($sp)
+ ; MIPS32-AE-DAG: insert.b [[R1]][7], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][8], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][9], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][10], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][11], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][12], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][13], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][14], [[R5]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][15], [[R5]]
+
+ store volatile <16 x i8> %16, <16 x i8>*@v16i8
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v16i8
+}
+
+define void @nonconst_v8i16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h) nounwind {
+ ; MIPS32-AE: nonconst_v8i16:
+
+ %1 = insertelement <8 x i16> undef, i16 %a, i32 0
+ %2 = insertelement <8 x i16> %1, i16 %b, i32 1
+ %3 = insertelement <8 x i16> %2, i16 %c, i32 2
+ %4 = insertelement <8 x i16> %3, i16 %d, i32 3
+ %5 = insertelement <8 x i16> %4, i16 %e, i32 4
+ %6 = insertelement <8 x i16> %5, i16 %f, i32 5
+ %7 = insertelement <8 x i16> %6, i16 %g, i32 6
+ %8 = insertelement <8 x i16> %7, i16 %h, i32 7
+ ; MIPS32-AE-DAG: insert.h [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE-DAG: insert.h [[R1]][1], $5
+ ; MIPS32-AE-DAG: insert.h [[R1]][2], $6
+ ; MIPS32-AE-DAG: insert.h [[R1]][3], $7
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 18($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 16($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][4], [[R2]]
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 22($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 20($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][5], [[R2]]
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 26($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 24($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][6], [[R2]]
+ ; MIPS32-BE-DAG: lhu [[R2:\$[0-9]+]], 30($sp)
+ ; MIPS32-LE-DAG: lhu [[R2:\$[0-9]+]], 28($sp)
+ ; MIPS32-AE-DAG: insert.h [[R1]][7], [[R2]]
+
+ store volatile <8 x i16> %8, <8 x i16>*@v8i16
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v8i16
+}
+
+define void @nonconst_v4i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+ ; MIPS32-AE: nonconst_v4i32:
+
+ %1 = insertelement <4 x i32> undef, i32 %a, i32 0
+ %2 = insertelement <4 x i32> %1, i32 %b, i32 1
+ %3 = insertelement <4 x i32> %2, i32 %c, i32 2
+ %4 = insertelement <4 x i32> %3, i32 %d, i32 3
+ ; MIPS32-AE: insert.w [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE: insert.w [[R1]][1], $5
+ ; MIPS32-AE: insert.w [[R1]][2], $6
+ ; MIPS32-AE: insert.w [[R1]][3], $7
+
+ store volatile <4 x i32> %4, <4 x i32>*@v4i32
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v4i32
+}
+
+define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
+ ; MIPS32-AE: nonconst_v2i64:
+
+ %1 = insertelement <2 x i64> undef, i64 %a, i32 0
+ %2 = insertelement <2 x i64> %1, i64 %b, i32 1
+ ; MIPS32-AE: insert.w [[R1:\$w[0-9]+]][0], $4
+ ; MIPS32-AE: insert.w [[R1]][1], $5
+ ; MIPS32-AE: insert.w [[R1]][2], $6
+ ; MIPS32-AE: insert.w [[R1]][3], $7
+
+ store volatile <2 x i64> %2, <2 x i64>*@v2i64
+
+ ret void
+ ; MIPS32-AE: .size nonconst_v2i64
+}
+
+define i32 @extract_sext_v16i8() nounwind {
+ ; MIPS32-AE: extract_sext_v16i8:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %2 = add <16 x i8> %1, %1
+ ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <16 x i8> %2, i32 1
+ %4 = sext i8 %3 to i32
+ ; MIPS32-AE-DAG: copy_s.b [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: sll
+ ; MIPS32-AE-NOT: sra
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_sext_v16i8
+}
+
+define i32 @extract_sext_v8i16() nounwind {
+ ; MIPS32-AE: extract_sext_v8i16:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %2 = add <8 x i16> %1, %1
+ ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <8 x i16> %2, i32 1
+ %4 = sext i16 %3 to i32
+ ; MIPS32-AE-DAG: copy_s.h [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: sll
+ ; MIPS32-AE-NOT: sra
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_sext_v8i16
+}
+
+define i32 @extract_sext_v4i32() nounwind {
+ ; MIPS32-AE: extract_sext_v4i32:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = add <4 x i32> %1, %1
+ ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x i32> %2, i32 1
+ ; MIPS32-AE-DAG: copy_s.w [[R3:\$[0-9]+]], [[R1]][1]
+
+ ret i32 %3
+ ; MIPS32-AE: .size extract_sext_v4i32
+}
+
+define i64 @extract_sext_v2i64() nounwind {
+ ; MIPS32-AE: extract_sext_v2i64:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = add <2 x i64> %1, %1
+ ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <2 x i64> %2, i32 1
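+ ; An i64 is returned in two GPRs on MIPS32, so element 1 is extracted as
+ ; its two constituent words (lanes 2 and 3 of the v4i32 view).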
+ ; MIPS32-AE-DAG: copy_s.w [[R3:\$[0-9]+]], [[R1]][2]
+ ; MIPS32-AE-DAG: copy_s.w [[R4:\$[0-9]+]], [[R1]][3]
+ ; MIPS32-AE-NOT: sll
+ ; MIPS32-AE-NOT: sra
+
+ ret i64 %3
+ ; MIPS32-AE: .size extract_sext_v2i64
+}
+
+define i32 @extract_zext_v16i8() nounwind {
+ ; MIPS32-AE: extract_zext_v16i8:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %2 = add <16 x i8> %1, %1
+ ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <16 x i8> %2, i32 1
+ %4 = zext i8 %3 to i32
+ ; MIPS32-AE-DAG: copy_u.b [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: andi
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_zext_v16i8
+}
+
+define i32 @extract_zext_v8i16() nounwind {
+ ; MIPS32-AE: extract_zext_v8i16:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %2 = add <8 x i16> %1, %1
+ ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <8 x i16> %2, i32 1
+ %4 = zext i16 %3 to i32
+ ; MIPS32-AE-DAG: copy_u.h [[R3:\$[0-9]+]], [[R1]][1]
+ ; MIPS32-AE-NOT: andi
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_zext_v8i16
+}
+
+define i32 @extract_zext_v4i32() nounwind {
+ ; MIPS32-AE: extract_zext_v4i32:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = add <4 x i32> %1, %1
+ ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x i32> %2, i32 1
+ ; MIPS32-AE-DAG: copy_{{[su]}}.w [[R3:\$[0-9]+]], [[R1]][1]
+
+ ret i32 %3
+ ; MIPS32-AE: .size extract_zext_v4i32
+}
+
+define i64 @extract_zext_v2i64() nounwind {
+ ; MIPS32-AE: extract_zext_v2i64:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = add <2 x i64> %1, %1
+ ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <2 x i64> %2, i32 1
+ ; MIPS32-AE-DAG: copy_{{[su]}}.w [[R3:\$[0-9]+]], [[R1]][2]
+ ; MIPS32-AE-DAG: copy_{{[su]}}.w [[R4:\$[0-9]+]], [[R1]][3]
+ ; MIPS32-AE-NOT: andi
+
+ ret i64 %3
+ ; MIPS32-AE: .size extract_zext_v2i64
+}
+
+define void @insert_v16i8(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v16i8:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %a2 = trunc i32 %a to i8
+ %a3 = sext i8 %a2 to i32
+ %a4 = trunc i32 %a3 to i8
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <16 x i8> %1, i8 %a4, i32 1
+ ; MIPS32-AE-DAG: insert.b [[R1]][1], $4
+
+ store <16 x i8> %2, <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: st.b [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v16i8
+}
+
+define void @insert_v8i16(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v8i16:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %a2 = trunc i32 %a to i16
+ %a3 = sext i16 %a2 to i32
+ %a4 = trunc i32 %a3 to i16
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <8 x i16> %1, i16 %a4, i32 1
+ ; MIPS32-AE-DAG: insert.h [[R1]][1], $4
+
+ store <8 x i16> %2, <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: st.h [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v8i16
+}
+
+define void @insert_v4i32(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v4i32:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <4 x i32> %1, i32 %a, i32 1
+ ; MIPS32-AE-DAG: insert.w [[R1]][1], $4
+
+ store <4 x i32> %2, <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v4i32
+}
+
+define void @insert_v2i64(i64 %a) nounwind {
+ ; MIPS32-AE: insert_v2i64:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %2 = insertelement <2 x i64> %1, i64 %a, i32 1
+ ; MIPS32-AE-DAG: insert.w [[R1]][2], $4
+ ; MIPS32-AE-DAG: insert.w [[R1]][3], $5
+
+ store <2 x i64> %2, <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v2i64
+}
+
+define void @truncstore() nounwind {
+ ; MIPS32-AE: truncstore:
+
+ store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8>*@v4i8
+ ; TODO: What code should be emitted?
+
+ ret void
+ ; MIPS32-AE: .size truncstore
+}
diff --git a/test/CodeGen/Mips/msa/basic_operations_float.ll b/test/CodeGen/Mips/msa/basic_operations_float.ll
new file mode 100644
index 0000000000000..1f538108a1fae
--- /dev/null
+++ b/test/CodeGen/Mips/msa/basic_operations_float.ll
@@ -0,0 +1,207 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32 %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32 %s
+
+@v4f32 = global <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>
+@v2f64 = global <2 x double> <double 0.0, double 0.0>
+@f32 = global float 0.0
+@f64 = global double 0.0
+
+define void @const_v4f32() nounwind {
+ ; MIPS32: const_v4f32:
+
+ store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
+ ; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float>*@v4f32
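+ ; 16256 << 16 is 0x3F800000, the bit pattern of 1.0f, so the splat is
+ ; built with lui/fill.w rather than loaded from memory.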
+ ; MIPS32: lui [[R1:\$[0-9]+]], 16256
+ ; MIPS32: fill.w [[R2:\$w[0-9]+]], [[R1]]
+
+ store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
+ ; MIPS32: lui [[R1:\$[0-9]+]], 18304
+ ; MIPS32: ori [[R2:\$[0-9]+]], [[R1]], 128
+ ; MIPS32: fill.w [[R3:\$w[0-9]+]], [[R2]]
+
+ store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32: .size const_v4f32
+}
+
+define void @const_v2f64() nounwind {
+ ; MIPS32: const_v2f64:
+
+ store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
+ ; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0
+
+ store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+
+ ret void
+ ; MIPS32: .size const_v2f64
+}
+
+define void @nonconst_v4f32() nounwind {
+ ; MIPS32: nonconst_v4f32:
+
+ %1 = load float* @f32
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %3 = insertelement <4 x float> %2, float %1, i32 1
+ %4 = insertelement <4 x float> %3, float %1, i32 2
+ %5 = insertelement <4 x float> %4, float %1, i32 3
+ store volatile <4 x float> %5, <4 x float>*@v4f32
+ ; MIPS32: lwc1 $f[[R1:[0-9]+]], 0(
+ ; MIPS32: splati.w [[R2:\$w[0-9]+]], $w[[R1]]
+
+ ret void
+ ; MIPS32: .size nonconst_v4f32
+}
+
+define void @nonconst_v2f64() nounwind {
+ ; MIPS32: nonconst_v2f64:
+
+ %1 = load double* @f64
+ %2 = insertelement <2 x double> undef, double %1, i32 0
+ %3 = insertelement <2 x double> %2, double %1, i32 1
+ store volatile <2 x double> %3, <2 x double>*@v2f64
+ ; MIPS32: ldc1 $f[[R1:[0-9]+]], 0(
+ ; MIPS32: splati.d [[R2:\$w[0-9]+]], $w[[R1]]
+
+ ret void
+ ; MIPS32: .size nonconst_v2f64
+}
+
+define float @extract_v4f32() nounwind {
+ ; MIPS32: extract_v4f32:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = fadd <4 x float> %1, %1
+ ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x float> %2, i32 1
+ ; Element 1 can be obtained by splatting it across the vector and extracting
+ ; $w0:sub_lo
+ ; MIPS32-DAG: splati.w $w0, [[R1]][1]
+
+ ret float %3
+ ; MIPS32: .size extract_v4f32
+}
+
+define float @extract_v4f32_elt0() nounwind {
+ ; MIPS32: extract_v4f32_elt0:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = fadd <4 x float> %1, %1
+ ; MIPS32-DAG: fadd.w $w0, [[R1]], [[R1]]
+
+ %3 = extractelement <4 x float> %2, i32 0
+ ; Element 0 can be obtained by extracting $w0:sub_lo ($f0)
+ ; MIPS32-NOT: copy_u.w
+ ; MIPS32-NOT: mtc1
+
+ ret float %3
+ ; MIPS32: .size extract_v4f32_elt0
+}
+
+define double @extract_v2f64() nounwind {
+ ; MIPS32: extract_v2f64:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = fadd <2 x double> %1, %1
+ ; MIPS32-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <2 x double> %2, i32 1
+ ; Element 1 can be obtained by splatting it across the vector and extracting
+ ; $w0:sub_64
+ ; MIPS32-DAG: splati.d $w0, [[R1]][1]
+ ; MIPS32-NOT: copy_u.w
+ ; MIPS32-NOT: mtc1
+ ; MIPS32-NOT: mthc1
+ ; MIPS32-NOT: sll
+ ; MIPS32-NOT: sra
+
+ ret double %3
+ ; MIPS32: .size extract_v2f64
+}
+
+define double @extract_v2f64_elt0() nounwind {
+ ; MIPS32: extract_v2f64_elt0:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = fadd <2 x double> %1, %1
+ ; MIPS32-DAG: fadd.d $w0, [[R1]], [[R1]]
+
+ %3 = extractelement <2 x double> %2, i32 0
+ ; Element 0 can be obtained by extracting $w0:sub_64 ($f0)
+ ; MIPS32-NOT: copy_u.w
+ ; MIPS32-NOT: mtc1
+ ; MIPS32-NOT: mthc1
+ ; MIPS32-NOT: sll
+ ; MIPS32-NOT: sra
+
+ ret double %3
+ ; MIPS32: .size extract_v2f64_elt0
+}
+
+define void @insert_v4f32(float %a) nounwind {
+ ; MIPS32: insert_v4f32:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = insertelement <4 x float> %1, float %a, i32 1
+ ; float argument passed in $f12
+ ; MIPS32-DAG: insve.w [[R1]][1], $w12[0]
+
+ store <4 x float> %2, <4 x float>* @v4f32
+ ; MIPS32-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32: .size insert_v4f32
+}
+
+define void @insert_v2f64(double %a) nounwind {
+ ; MIPS32: insert_v2f64:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
+
+ %2 = insertelement <2 x double> %1, double %a, i32 1
+ ; double argument passed in $f12
+ ; MIPS32-DAG: insve.d [[R1]][1], $w12[0]
+
+ store <2 x double> %2, <2 x double>* @v2f64
+ ; MIPS32-DAG: st.d [[R1]]
+
+ ret void
+ ; MIPS32: .size insert_v2f64
+}
diff --git a/test/CodeGen/Mips/msa/bit.ll b/test/CodeGen/Mips/msa/bit.ll
new file mode 100644
index 0000000000000..59ddbe17a33f7
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bit.ll
@@ -0,0 +1,537 @@
+; Test the MSA intrinsics that are encoded with the BIT instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_sat_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sat_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sat_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_b_test:
+; CHECK: ld.b
+; CHECK: sat_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sat_s_b_test
+;
+@llvm_mips_sat_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sat_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sat_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_h_test:
+; CHECK: ld.h
+; CHECK: sat_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sat_s_h_test
+;
+@llvm_mips_sat_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sat_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sat_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_w_test:
+; CHECK: ld.w
+; CHECK: sat_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sat_s_w_test
+;
+@llvm_mips_sat_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sat_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sat_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_d_test:
+; CHECK: ld.d
+; CHECK: sat_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sat_s_d_test
+;
+@llvm_mips_sat_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sat_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sat_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_b_test:
+; CHECK: ld.b
+; CHECK: sat_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sat_u_b_test
+;
+@llvm_mips_sat_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sat_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sat_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_h_test:
+; CHECK: ld.h
+; CHECK: sat_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sat_u_h_test
+;
+@llvm_mips_sat_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sat_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sat_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_w_test:
+; CHECK: ld.w
+; CHECK: sat_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sat_u_w_test
+;
+@llvm_mips_sat_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sat_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sat_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_d_test:
+; CHECK: ld.d
+; CHECK: sat_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sat_u_d_test
+;
+@llvm_mips_slli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_slli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_slli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_slli_b_test:
+; CHECK: ld.b
+; CHECK: slli.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_slli_b_test
+;
+@llvm_mips_slli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_slli_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_slli_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_slli_h_test:
+; CHECK: ld.h
+; CHECK: slli.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_slli_h_test
+;
+@llvm_mips_slli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_slli_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_slli_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_slli_w_test:
+; CHECK: ld.w
+; CHECK: slli.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_slli_w_test
+;
+@llvm_mips_slli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_slli_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_slli_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_slli_d_test:
+; CHECK: ld.d
+; CHECK: slli.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_slli_d_test
+;
+@llvm_mips_srai_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srai_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srai_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srai_b_test:
+; CHECK: ld.b
+; CHECK: srai.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srai_b_test
+;
+@llvm_mips_srai_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srai_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srai_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srai_h_test:
+; CHECK: ld.h
+; CHECK: srai.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srai_h_test
+;
+@llvm_mips_srai_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srai_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srai_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srai_w_test:
+; CHECK: ld.w
+; CHECK: srai.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srai_w_test
+;
+@llvm_mips_srai_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srai_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srai_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srai_d_test:
+; CHECK: ld.d
+; CHECK: srai.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srai_d_test
+;
+@llvm_mips_srari_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srari_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srari_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srari_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srari_b_test:
+; CHECK: ld.b
+; CHECK: srari.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srari_b_test
+;
+@llvm_mips_srari_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srari_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srari_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srari_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srari_h_test:
+; CHECK: ld.h
+; CHECK: srari.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srari_h_test
+;
+@llvm_mips_srari_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srari_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srari_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srari_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srari_w_test:
+; CHECK: ld.w
+; CHECK: srari.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srari_w_test
+;
+@llvm_mips_srari_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srari_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srari_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srari_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srari_d_test:
+; CHECK: ld.d
+; CHECK: srari.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srari_d_test
+;
+@llvm_mips_srli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srli_b_test:
+; CHECK: ld.b
+; CHECK: srli.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srli_b_test
+;
+@llvm_mips_srli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srli_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srli_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srli_h_test:
+; CHECK: ld.h
+; CHECK: srli.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srli_h_test
+;
+@llvm_mips_srli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srli_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srli_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srli_w_test:
+; CHECK: ld.w
+; CHECK: srli.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srli_w_test
+;
+@llvm_mips_srli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srli_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srli_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srli_d_test:
+; CHECK: ld.d
+; CHECK: srli.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srli_d_test
+;
+@llvm_mips_srlri_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srlri_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srlri_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_srlri_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_b_test:
+; CHECK: ld.b
+; CHECK: srlri.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srlri_b_test
+;
+@llvm_mips_srlri_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srlri_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srlri_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_srlri_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_h_test:
+; CHECK: ld.h
+; CHECK: srlri.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srlri_h_test
+;
+@llvm_mips_srlri_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srlri_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srlri_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_srlri_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_w_test:
+; CHECK: ld.w
+; CHECK: srlri.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srlri_w_test
+;
+@llvm_mips_srlri_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srlri_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srlri_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_srlri_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.srlri.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srlri_d_test:
+; CHECK: ld.d
+; CHECK: srlri.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srlri_d_test
+;
diff --git a/test/CodeGen/Mips/msa/bitcast.ll b/test/CodeGen/Mips/msa/bitcast.ll
new file mode 100644
index 0000000000000..8e880ecd9afbc
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bitcast.ll
@@ -0,0 +1,1210 @@
+; Test the bitcast operation for big-endian and little-endian.
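+; On big-endian targets a bitcast that changes the element width must
+; reorder lanes, so shf.b/shf.w instructions are expected between the two
+; arithmetic operations; little-endian needs no reordering.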
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=BIGENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=LITENDIAN %s
+
+define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v16i8:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v16i8_to_v16i8
+
+; BIGENDIAN: v16i8_to_v16i8:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.b [[R3]],
+; BIGENDIAN: .size v16i8_to_v16i8
+
+define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v8i16:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v16i8_to_v8i16
+
+; BIGENDIAN: v16i8_to_v8i16:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v16i8_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v8f16:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.b [[R2]],
+; LITENDIAN: .size v16i8_to_v8f16
+
+; BIGENDIAN: v16i8_to_v8f16:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.b [[R2]],
+; BIGENDIAN: .size v16i8_to_v8f16
+
+define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v4i32:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v16i8_to_v4i32
+
+; BIGENDIAN: v16i8_to_v4i32:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v16i8_to_v4i32
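+
+; 27 is 0b00011011, selectors 3,2,1,0, which reverses each group of four
+; elements; shf.b ..., 27 reverses the four bytes of every word, the
+; reordering needed between v16i8 and v4i32 lanes on big-endian targets.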
+
+define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v4f32:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v16i8_to_v4f32
+
+; BIGENDIAN: v16i8_to_v4f32:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v16i8_to_v4f32
+
+define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v2i64:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v16i8_to_v2i64
+
+; BIGENDIAN: v16i8_to_v2i64:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v16i8_to_v2i64
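+
+; shf only has .b, .h and .w formats, so no single shuffle can reverse eight
+; bytes; the v16i8-to-v2i64 reordering is composed from shf.b 27 (reverse the
+; bytes of each word) and shf.w 177 (swap the two words of each doubleword).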
+
+define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <16 x i8>* %src
+ %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
+ %2 = bitcast <16 x i8> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v16i8_to_v2f64:
+; LITENDIAN: ld.b [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v16i8_to_v2f64
+
+; BIGENDIAN: v16i8_to_v2f64:
+; BIGENDIAN: ld.b [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v16i8_to_v2f64
+
+define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v16i8:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v8i16_to_v16i8
+
+; BIGENDIAN: v8i16_to_v16i8:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v8i16_to_v16i8
+
+define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v8i16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v8i16_to_v8i16
+
+; BIGENDIAN: v8i16_to_v8i16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.h [[R3]],
+; BIGENDIAN: .size v8i16_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v8f16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.h [[R2]],
+; LITENDIAN: .size v8i16_to_v8f16
+
+; BIGENDIAN: v8i16_to_v8f16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.h [[R2]],
+; BIGENDIAN: .size v8i16_to_v8f16
+
+define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v4i32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v8i16_to_v4i32
+
+; BIGENDIAN: v8i16_to_v4i32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v8i16_to_v4i32
+
+define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v4f32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v8i16_to_v4f32
+
+; BIGENDIAN: v8i16_to_v4f32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v8i16_to_v4f32
+
+define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v2i64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v8i16_to_v2i64
+
+; BIGENDIAN: v8i16_to_v2i64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v8i16_to_v2i64
+
+define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x i16>* %src
+ %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
+ %2 = bitcast <8 x i16> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v8i16_to_v2f64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v8i16_to_v2f64
+
+; BIGENDIAN: v8i16_to_v2f64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v8i16_to_v2f64
+
+;----
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <16 x i8>
+ %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v16i8:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.b [[R2]],
+; LITENDIAN: .size v8f16_to_v16i8
+
+; BIGENDIAN: v8f16_to_v16i8:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.b [[R2:\$w[0-9]+]], [[R1]], 177
+; BIGENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.b [[R3]],
+; BIGENDIAN: .size v8f16_to_v16i8
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <8 x i16>
+ %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v8i16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.h [[R2]],
+; LITENDIAN: .size v8f16_to_v8i16
+
+; BIGENDIAN: v8f16_to_v8i16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.h [[R2]],
+; BIGENDIAN: .size v8f16_to_v8i16
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <8 x half>
+ store <8 x half> %1, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v8f16:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: st.h [[R1]],
+; LITENDIAN: .size v8f16_to_v8f16
+
+; BIGENDIAN: v8f16_to_v8f16:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: st.h [[R1]],
+; BIGENDIAN: .size v8f16_to_v8f16
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v4i32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v8f16_to_v4i32
+
+; BIGENDIAN: v8f16_to_v4i32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 177
+; BIGENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v8f16_to_v4i32
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <4 x float>
+ %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
+ store <4 x float> %2, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v4f32:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v8f16_to_v4f32
+
+; BIGENDIAN: v8f16_to_v4f32:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 177
+; BIGENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v8f16_to_v4f32
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <2 x i64>
+ %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v2i64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v8f16_to_v2i64
+
+; BIGENDIAN: v8f16_to_v2i64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 27
+; BIGENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v8f16_to_v2i64
+
+; We can't prevent the (bitcast (load X)) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <8 x half>* %src
+ %1 = bitcast <8 x half> %0 to <2 x double>
+ %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
+ store <2 x double> %2, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v8f16_to_v2f64:
+; LITENDIAN: ld.h [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v8f16_to_v2f64
+
+; BIGENDIAN: v8f16_to_v2f64:
+; BIGENDIAN: ld.h [[R1:\$w[0-9]+]],
+; BIGENDIAN: shf.h [[R2:\$w[0-9]+]], [[R1]], 27
+; BIGENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v8f16_to_v2f64
+;----
+
+define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v16i8:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v4i32_to_v16i8
+
+; BIGENDIAN: v4i32_to_v16i8:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v4i32_to_v16i8
+
+define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v8i16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v4i32_to_v8i16
+
+; BIGENDIAN: v4i32_to_v8i16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v4i32_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v8f16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v4i32_to_v8f16
+
+; BIGENDIAN: v4i32_to_v8f16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.w [[R2]],
+; BIGENDIAN: .size v4i32_to_v8f16
+
+define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v4i32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4i32_to_v4i32
+
+; BIGENDIAN: v4i32_to_v4i32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4i32_to_v4i32
+
+define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v4f32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4i32_to_v4f32
+
+; BIGENDIAN: v4i32_to_v4f32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4i32_to_v4f32
+
+define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v2i64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4i32_to_v2i64
+
+; BIGENDIAN: v4i32_to_v2i64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4i32_to_v2i64
+
+define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x i32>* %src
+ %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
+ %2 = bitcast <4 x i32> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v4i32_to_v2f64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4i32_to_v2f64
+
+; BIGENDIAN: v4i32_to_v2f64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4i32_to_v2f64
+
+define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v16i8:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v4f32_to_v16i8
+
+; BIGENDIAN: v4f32_to_v16i8:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v4f32_to_v16i8
+
+define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v8i16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v4f32_to_v8i16
+
+; BIGENDIAN: v4f32_to_v8i16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v4f32_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v8f16:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.w [[R2]],
+; LITENDIAN: .size v4f32_to_v8f16
+
+; BIGENDIAN: v4f32_to_v8f16:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.w [[R2]],
+; BIGENDIAN: .size v4f32_to_v8f16
+
+define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v4i32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4f32_to_v4i32
+
+; BIGENDIAN: v4f32_to_v4i32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4f32_to_v4i32
+
+define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v4f32:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v4f32_to_v4f32
+
+; BIGENDIAN: v4f32_to_v4f32:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.w [[R3]],
+; BIGENDIAN: .size v4f32_to_v4f32
+
+define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v2i64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4f32_to_v2i64
+
+; BIGENDIAN: v4f32_to_v2i64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4f32_to_v2i64
+
+define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <4 x float>* %src
+ %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
+ %2 = bitcast <4 x float> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v4f32_to_v2f64:
+; LITENDIAN: ld.w [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v4f32_to_v2f64
+
+; BIGENDIAN: v4f32_to_v2f64:
+; BIGENDIAN: ld.w [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.d [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.d [[R4]],
+; BIGENDIAN: .size v4f32_to_v2f64
+
+define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v16i8:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v2i64_to_v16i8
+
+; BIGENDIAN: v2i64_to_v16i8:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v2i64_to_v16i8
+
+define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v8i16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v2i64_to_v8i16
+
+; BIGENDIAN: v2i64_to_v8i16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v2i64_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v8f16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v2i64_to_v8f16
+
+; BIGENDIAN: v2i64_to_v8f16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.d [[R2]],
+; BIGENDIAN: .size v2i64_to_v8f16
+
+define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v4i32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2i64_to_v4i32
+
+; BIGENDIAN: v2i64_to_v4i32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2i64_to_v4i32
+
+define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v4f32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2i64_to_v4f32
+
+; BIGENDIAN: v2i64_to_v4f32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2i64_to_v4f32
+
+define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v2i64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2i64_to_v2i64
+
+; BIGENDIAN: v2i64_to_v2i64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2i64_to_v2i64
+
+define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x i64>* %src
+ %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
+ %2 = bitcast <2 x i64> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v2i64_to_v2f64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2i64_to_v2f64
+
+; BIGENDIAN: v2i64_to_v2f64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2i64_to_v2f64
+
+define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <16 x i8>
+ %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
+ store <16 x i8> %3, <16 x i8>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v16i8:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.b [[R3]],
+; LITENDIAN: .size v2f64_to_v16i8
+
+; BIGENDIAN: v2f64_to_v16i8:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.b [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R3]], 177
+; BIGENDIAN: addv.b [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.b [[R4]],
+; BIGENDIAN: .size v2f64_to_v16i8
+
+define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <8 x i16>
+ %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
+ store <8 x i16> %3, <8 x i16>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v8i16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.h [[R3]],
+; LITENDIAN: .size v2f64_to_v8i16
+
+; BIGENDIAN: v2f64_to_v8i16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.h [[R3:\$w[0-9]+]], [[R2]], 27
+; BIGENDIAN: addv.h [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.h [[R4]],
+; BIGENDIAN: .size v2f64_to_v8i16
+
+; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
+; are no operations for v8f16 to put in the way.
+define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <8 x half>
+ store <8 x half> %2, <8 x half>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v8f16:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: st.d [[R2]],
+; LITENDIAN: .size v2f64_to_v8f16
+
+; BIGENDIAN: v2f64_to_v8f16:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: st.d [[R2]],
+; BIGENDIAN: .size v2f64_to_v8f16
+
+define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
+ store <4 x i32> %3, <4 x i32>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v4i32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2f64_to_v4i32
+
+; BIGENDIAN: v2f64_to_v4i32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: addv.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2f64_to_v4i32
+
+define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <4 x float>
+ %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
+ store <4 x float> %3, <4 x float>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v4f32:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.w [[R3]],
+; LITENDIAN: .size v2f64_to_v4f32
+
+; BIGENDIAN: v2f64_to_v4f32:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: shf.w [[R3:\$w[0-9]+]], [[R2]], 177
+; BIGENDIAN: fadd.w [[R4:\$w[0-9]+]], [[R3]], [[R3]]
+; BIGENDIAN: st.w [[R4]],
+; BIGENDIAN: .size v2f64_to_v4f32
+
+define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <2 x i64>
+ %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
+ store <2 x i64> %3, <2 x i64>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v2i64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2f64_to_v2i64
+
+; BIGENDIAN: v2f64_to_v2i64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: addv.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2f64_to_v2i64
+
+define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind {
+entry:
+ %0 = load volatile <2 x double>* %src
+ %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
+ %2 = bitcast <2 x double> %1 to <2 x double>
+ %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
+ store <2 x double> %3, <2 x double>* %dst
+ ret void
+}
+
+; LITENDIAN: v2f64_to_v2f64:
+; LITENDIAN: ld.d [[R1:\$w[0-9]+]],
+; LITENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; LITENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; LITENDIAN: st.d [[R3]],
+; LITENDIAN: .size v2f64_to_v2f64
+
+; BIGENDIAN: v2f64_to_v2f64:
+; BIGENDIAN: ld.d [[R1:\$w[0-9]+]],
+; BIGENDIAN: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+; BIGENDIAN: fadd.d [[R3:\$w[0-9]+]], [[R2]], [[R2]]
+; BIGENDIAN: st.d [[R3]],
+; BIGENDIAN: .size v2f64_to_v2f64
+
+declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
+declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
+declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
+declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
+declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
+declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll
new file mode 100644
index 0000000000000..9a88c47b7e1f2
--- /dev/null
+++ b/test/CodeGen/Mips/msa/bitwise.ll
@@ -0,0 +1,1639 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
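+
+; Bitwise operations act on individual bits, so lane order is irrelevant and a
+; single set of checks covers both endiannesses.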
+
+define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: and_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <16 x i8> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v16i8
+}
+
+define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: and_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v8i16
+}
+
+define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: and_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v4i32
+}
+
+define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: and_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, %2
+ ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v2i64
+}
+
+define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: and_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: andi.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v16i8_i
+}
+
+define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: and_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: and.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v8i16_i
+}
+
+define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: and_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: and.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v4i32_i
+}
+
+define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: and_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = and <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: and.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size and_v2i64_i
+}
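+
+; MSA only provides byte-element immediate forms of the logical operations
+; (andi.b, ori.b, nori.b, xori.b), so the v16i8 case above folds the splat
+; constant into andi.b while the wider element types materialize it with ldi
+; and use the register form.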
+
+define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: or_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <16 x i8> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v16i8
+}
+
+define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: or_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <8 x i16> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v8i16
+}
+
+define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: or_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <4 x i32> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v4i32
+}
+
+define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: or_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <2 x i64> %1, %2
+ ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v2i64
+}
+
+define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: or_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ; CHECK-DAG: ori.b [[R4:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v16i8_i
+}
+
+define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: or_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: or.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v8i16_i
+}
+
+define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: or_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: or.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v4i32_i
+}
+
+define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: or_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <2 x i64> %1, <i64 3, i64 3>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: or.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size or_v2i64_i
+}
+
+define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: nor_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <16 x i8> %1, %2
+ %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v16i8
+}
+
+define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: nor_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <8 x i16> %1, %2
+ %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v8i16
+}
+
+define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: nor_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <4 x i32> %1, %2
+ %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v4i32
+}
+
+define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: nor_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = or <2 x i64> %1, %2
+ %4 = xor <2 x i64> %3, <i64 -1, i64 -1>
+ ; CHECK-DAG: nor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v2i64
+}
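+
+; An or followed by an xor with an all-ones splat is selected as a single
+; nor.v in the tests above.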
+
+define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: nor_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ; CHECK-DAG: nori.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v16i8_i
+}
+
+define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: nor_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = xor <8 x i16> %2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: nor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v8i16_i
+}
+
+define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: nor_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = xor <4 x i32> %2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: nor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v4i32_i
+}
+
+define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: nor_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <2 x i64> %1, <i64 1, i64 1>
+ %3 = xor <2 x i64> %2, <i64 -1, i64 -1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: nor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size nor_v2i64_i
+}
+
+define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: xor_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <16 x i8> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v16i8
+}
+
+define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: xor_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <8 x i16> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v8i16
+}
+
+define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: xor_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <4 x i32> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v4i32
+}
+
+define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: xor_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = xor <2 x i64> %1, %2
+ ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v2i64
+}
+
+define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: xor_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ; CHECK-DAG: xori.b [[R4:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v16i8_i
+}
+
+define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: xor_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: xor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v8i16_i
+}
+
+define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: xor_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: xor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v4i32_i
+}
+
+define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: xor_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <2 x i64> %1, <i64 3, i64 3>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
+ ; CHECK-DAG: xor.v [[R4:\$w[0-9]+]], [[R1]], [[R3]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size xor_v2i64_i
+}
+
+define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: sll_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> %1, %2
+ ; CHECK-DAG: sll.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v16i8
+}
+
+define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: sll_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> %1, %2
+ ; CHECK-DAG: sll.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v8i16
+}
+
+define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: sll_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> %1, %2
+ ; CHECK-DAG: sll.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v4i32
+}
+
+define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: sll_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> %1, %2
+ ; CHECK-DAG: sll.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v2i64
+}
+
+define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: sll_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: slli.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v16i8_i
+}
+
+define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: sll_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: slli.h [[R4:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v8i16_i
+}
+
+define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: sll_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: slli.w [[R4:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v4i32_i
+}
+
+define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: sll_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shl <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: slli.d [[R4:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sll_v2i64_i
+}
+
+define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: sra_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <16 x i8> %1, %2
+ ; CHECK-DAG: sra.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v16i8
+}
+
+define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: sra_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <8 x i16> %1, %2
+ ; CHECK-DAG: sra.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v8i16
+}
+
+define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: sra_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <4 x i32> %1, %2
+ ; CHECK-DAG: sra.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v4i32
+}
+
+define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: sra_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = ashr <2 x i64> %1, %2
+ ; CHECK-DAG: sra.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v2i64
+}
+
+define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: sra_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: srai.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v16i8_i
+}
+
+define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: sra_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: srai.h [[R4:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v8i16_i
+}
+
+define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: sra_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: srai.w [[R4:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v4i32_i
+}
+
+define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: sra_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = ashr <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size sra_v2i64_i
+}
+
+define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: srl_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <16 x i8> %1, %2
+ ; CHECK-DAG: srl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v16i8
+}
+
+define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: srl_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <8 x i16> %1, %2
+ ; CHECK-DAG: srl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v8i16
+}
+
+define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: srl_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <4 x i32> %1, %2
+ ; CHECK-DAG: srl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v4i32
+}
+
+define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: srl_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = lshr <2 x i64> %1, %2
+ ; CHECK-DAG: srl.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v2i64
+}
+
+define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: srl_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: srli.b [[R4:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v16i8_i
+}
+
+define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: srl_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: srli.h [[R4:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v8i16_i
+}
+
+define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: srl_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: srli.w [[R4:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v4i32_i
+}
+
+define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: srl_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = lshr <2 x i64> %1, <i64 1, i64 1>
+ ; CHECK-DAG: srli.d [[R4:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size srl_v2i64_i
+}
+
+define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: ctpop_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %1)
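+ ; llvm.ctpop maps directly onto MSA's per-element population count.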
+ ; CHECK-DAG: pcnt.b [[R3:\$w[0-9]+]], [[R1]]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v16i8
+}
+
+define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: ctpop_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %1)
+ ; CHECK-DAG: pcnt.h [[R3:\$w[0-9]+]], [[R1]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v8i16
+}
+
+define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ctpop_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %1)
+ ; CHECK-DAG: pcnt.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v4i32
+}
+
+define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ctpop_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %1)
+ ; CHECK-DAG: pcnt.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctpop_v2i64
+}
+
+define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: ctlz_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %1)
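+ ; Similarly, llvm.ctlz maps directly onto nlzc (leading zero count).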
+ ; CHECK-DAG: nlzc.b [[R3:\$w[0-9]+]], [[R1]]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v16i8
+}
+
+define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: ctlz_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %1)
+ ; CHECK-DAG: nlzc.h [[R3:\$w[0-9]+]], [[R1]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v8i16
+}
+
+define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ctlz_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1)
+ ; CHECK-DAG: nlzc.w [[R3:\$w[0-9]+]], [[R1]]
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v4i32
+}
+
+define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ctlz_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %1)
+ ; CHECK-DAG: nlzc.d [[R3:\$w[0-9]+]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ctlz_v2i64
+}
+
+define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
+ ; CHECK: bsel_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %m
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %5 = and <16 x i8> %1, %3
+ %6 = and <16 x i8> %2, %4
+ %7 = or <16 x i8> %5, %6
+ ; bmnz.v performs the same operation as bsel.v, so it is an acceptable match
+ ; CHECK-DAG: bmnz.v [[R1]], [[R2]], [[R3]]
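+ ; (bsel.v, bmnz.v and bmz.v all compute the bitwise multiplex
+ ; (x & mask) | (y & ~mask); they differ only in which register holds the
+ ; mask and which one is overwritten.)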
+ store <16 x i8> %7, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R1]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v16i8
+}
+
+define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
+ ; CHECK: bsel_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %m
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($6)
+ %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %4 = and <16 x i8> %1, %3
+ %5 = and <16 x i8> <i8 6, i8 6, i8 6, i8 6,
+ i8 6, i8 6, i8 6, i8 6,
+ i8 6, i8 6, i8 6, i8 6,
+ i8 6, i8 6, i8 6, i8 6>, %2
+ %6 = or <16 x i8> %4, %5
+ ; CHECK-DAG: bseli.b [[R3]], [[R1]], 6
+ store <16 x i8> %6, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v16i8_i
+}
+
+define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bsel_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, <i16 6, i16 6, i16 6, i16 6,
+ i16 6, i16 6, i16 6, i16 6>
+ %4 = and <8 x i16> %2, <i16 65529, i16 65529, i16 65529, i16 65529,
+ i16 65529, i16 65529, i16 65529, i16 65529>
+ %5 = or <8 x i16> %3, %4
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 6
+ ; CHECK-DAG: bsel.v [[R3]], [[R2]], [[R1]]
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v8i16
+}
+
+define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bsel_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
+ %4 = and <4 x i32> %2, <i32 4294967289, i32 4294967289, i32 4294967289, i32 4294967289>
+ %5 = or <4 x i32> %3, %4
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 6
+ ; CHECK-DAG: bsel.v [[R3]], [[R2]], [[R1]]
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v4i32
+}
+
+define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bsel_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, <i64 6, i64 6>
+ %4 = and <2 x i64> %2, <i64 18446744073709551609, i64 18446744073709551609>
+ %5 = or <2 x i64> %3, %4
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 6
+ ; CHECK-DAG: bsel.v [[R3]], [[R2]], [[R1]]
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v2i64
+}
+
+define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: binsl_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <16 x i8> %1, <i8 192, i8 192, i8 192, i8 192,
+ i8 192, i8 192, i8 192, i8 192,
+ i8 192, i8 192, i8 192, i8 192,
+ i8 192, i8 192, i8 192, i8 192>
+ %4 = and <16 x i8> %2, <i8 63, i8 63, i8 63, i8 63,
+ i8 63, i8 63, i8 63, i8 63,
+ i8 63, i8 63, i8 63, i8 63,
+ i8 63, i8 63, i8 63, i8 63>
+ %5 = or <16 x i8> %3, %4
+ ; CHECK-DAG: binsli.b [[R2]], [[R1]], 2
+ store <16 x i8> %5, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v16i8_i
+}
+
+define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: binsl_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, <i16 49152, i16 49152, i16 49152, i16 49152,
+ i16 49152, i16 49152, i16 49152, i16 49152>
+ %4 = and <8 x i16> %2, <i16 16383, i16 16383, i16 16383, i16 16383,
+ i16 16383, i16 16383, i16 16383, i16 16383>
+ %5 = or <8 x i16> %3, %4
+ ; CHECK-DAG: binsli.h [[R2]], [[R1]], 2
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v8i16_i
+}
+
+define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: binsl_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
+ %4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
+ %5 = or <4 x i32> %3, %4
+ ; CHECK-DAG: binsli.w [[R2]], [[R1]], 2
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v4i32_i
+}
+
+define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: binsl_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, <i64 18446744073709551608, i64 18446744073709551608>
+ %4 = and <2 x i64> %2, <i64 7, i64 7>
+ %5 = or <2 x i64> %3, %4
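+ ; The mask 18446744073709551608 is 0xFFFFFFFFFFFFFFF8, so bits 63..3 of
+ ; the result come from %1 and bits 2..0 from %2, i.e. a 61-bit left insert.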
+ ; TODO: We use a particularly wide mask here to work around a legalization
+ ; issue. If the mask doesn't fit within a 10-bit immediate, it gets
+ ; legalized into a constant-pool load instead. We should add tests to
+ ; cover the other cases once they correctly select binsli.d.
+ ; CHECK-DAG: binsli.d [[R2]], [[R1]], 61
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsl_v2i64_i
+}
+
+define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: binsr_v16i8_i:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3,
+ i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %4 = and <16 x i8> %2, <i8 252, i8 252, i8 252, i8 252,
+ i8 252, i8 252, i8 252, i8 252,
+ i8 252, i8 252, i8 252, i8 252,
+ i8 252, i8 252, i8 252, i8 252>
+ %5 = or <16 x i8> %3, %4
+ ; CHECK-DAG: binsri.b [[R2]], [[R1]], 2
+ store <16 x i8> %5, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v16i8_i
+}
+
+define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: binsr_v8i16_i:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3,
+ i16 3, i16 3, i16 3, i16 3>
+ %4 = and <8 x i16> %2, <i16 65532, i16 65532, i16 65532, i16 65532,
+ i16 65532, i16 65532, i16 65532, i16 65532>
+ %5 = or <8 x i16> %3, %4
+ ; CHECK-DAG: binsri.h [[R2]], [[R1]], 2
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v8i16_i
+}
+
+define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: binsr_v4i32_i:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
+ %4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
+ %5 = or <4 x i32> %3, %4
+ ; CHECK-DAG: binsri.w [[R2]], [[R1]], 2
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v4i32_i
+}
+
+define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: binsr_v2i64_i:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = and <2 x i64> %1, <i64 3, i64 3>
+ %4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
+ %5 = or <2 x i64> %3, %4
+ ; CHECK-DAG: binsri.d [[R2]], [[R1]], 2
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R2]], 0($4)
+
+ ret void
+ ; CHECK: .size binsr_v2i64_i
+}
+
+define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: bclr_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
+ %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %5 = and <16 x i8> %1, %4
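+ ; The shl/xor/and sequence above is the clear-bit idiom %1 & ~(1 << %2),
+ ; which is selected to a single bclr.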
+ ; CHECK-DAG: bclr.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %5, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v16i8
+}
+
+define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bclr_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
+ %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %5 = and <8 x i16> %1, %4
+ ; CHECK-DAG: bclr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %5, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v8i16
+}
+
+define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bclr_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
+ %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %5 = and <4 x i32> %1, %4
+ ; CHECK-DAG: bclr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %5, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v4i32
+}
+
+define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bclr_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> <i64 1, i64 1>, %2
+ %4 = xor <2 x i64> %3, <i64 -1, i64 -1>
+ %5 = and <2 x i64> %1, %4
+ ; CHECK-DAG: bclr.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %5, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclr_v2i64
+}
+
+define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: bset_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
+ %4 = or <16 x i8> %1, %3
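+ ; %1 | (1 << %2) is the set-bit idiom and selects to a single bset.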
+ ; CHECK-DAG: bset.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v16i8
+}
+
+define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bset_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
+ %4 = or <8 x i16> %1, %3
+ ; CHECK-DAG: bset.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v8i16
+}
+
+define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bset_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
+ %4 = or <4 x i32> %1, %3
+ ; CHECK-DAG: bset.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v4i32
+}
+
+define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bset_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> <i64 1, i64 1>, %2
+ %4 = or <2 x i64> %1, %3
+ ; CHECK-DAG: bset.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bset_v2i64
+}
+
+define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: bneg_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
+ %4 = xor <16 x i8> %1, %3
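+ ; %1 ^ (1 << %2) is the flip-bit idiom and selects to a single bneg.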
+ ; CHECK-DAG: bneg.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v16i8
+}
+
+define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: bneg_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
+ %4 = xor <8 x i16> %1, %3
+ ; CHECK-DAG: bneg.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v8i16
+}
+
+define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: bneg_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
+ %4 = xor <4 x i32> %1, %3
+ ; CHECK-DAG: bneg.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v4i32
+}
+
+define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: bneg_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shl <2 x i64> <i64 1, i64 1>, %2
+ %4 = xor <2 x i64> %1, %3
+ ; CHECK-DAG: bneg.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bneg_v2i64
+}
+
+define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: bclri_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>,
+ <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <16 x i8> %1, %2
+ ; bclri.b and andi.b are exactly equivalent.
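+ ; Clearing bit 3 of each byte is an AND with ~(1 << 3), i.e. 0xf7 = 247.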
+ ; CHECK-DAG: andi.b [[R3:\$w[0-9]+]], [[R1]], 247
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v16i8
+}
+
+define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: bclri_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <8 x i16> <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>,
+ <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %3 = and <8 x i16> %1, %2
+ ; CHECK-DAG: bclri.h [[R3:\$w[0-9]+]], [[R1]], 3
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v8i16
+}
+
+define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: bclri_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <4 x i32> <i32 8, i32 8, i32 8, i32 8>,
+ <i32 -1, i32 -1, i32 -1, i32 -1>
+ %3 = and <4 x i32> %1, %2
+ ; CHECK-DAG: bclri.w [[R3:\$w[0-9]+]], [[R1]], 3
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v4i32
+}
+
+define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: bclri_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <2 x i64> <i64 8, i64 8>,
+ <i64 -1, i64 -1>
+ %3 = and <2 x i64> %1, %2
+ ; CHECK-DAG: bclri.d [[R3:\$w[0-9]+]], [[R1]], 3
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bclri_v2i64
+}
+
+define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: bseti_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
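+ ; 8 == 1 << 3, so OR-ing with 8 sets bit 3 of every element.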
+ ; CHECK-DAG: bseti.b [[R3:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v16i8
+}
+
+define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: bseti_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ; CHECK-DAG: bseti.h [[R3:\$w[0-9]+]], [[R1]], 3
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v8i16
+}
+
+define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: bseti_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
+ ; CHECK-DAG: bseti.w [[R3:\$w[0-9]+]], [[R1]], 3
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v4i32
+}
+
+define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: bseti_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = or <2 x i64> %1, <i64 8, i64 8>
+ ; CHECK-DAG: bseti.d [[R3:\$w[0-9]+]], [[R1]], 3
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bseti_v2i64
+}
+
+define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: bnegi_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+ ; CHECK-DAG: bnegi.b [[R3:\$w[0-9]+]], [[R1]], 3
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v16i8
+}
+
+define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: bnegi_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ; CHECK-DAG: bnegi.h [[R3:\$w[0-9]+]], [[R1]], 3
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v8i16
+}
+
+define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: bnegi_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
+ ; CHECK-DAG: bnegi.w [[R3:\$w[0-9]+]], [[R1]], 3
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v4i32
+}
+
+define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: bnegi_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = xor <2 x i64> %1, <i64 8, i64 8>
+ ; CHECK-DAG: bnegi.d [[R3:\$w[0-9]+]], [[R1]], 3
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bnegi_v2i64
+}
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %val)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %val)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %val)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %val)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val)
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %val)
diff --git a/test/CodeGen/Mips/msa/compare.ll b/test/CodeGen/Mips/msa/compare.ll
new file mode 100644
index 0000000000000..6408d7ba09f43
--- /dev/null
+++ b/test/CodeGen/Mips/msa/compare.ll
@@ -0,0 +1,2079 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ceq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
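+ ; The sext of the <16 x i1> compare result yields all-ones/all-zeros
+ ; lanes, which is exactly what ceq.b produces, so no extra instructions
+ ; are needed.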
+ ; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v16i8
+}
+
+define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ceq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v8i16
+}
+
+define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ceq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v4i32
+}
+
+define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ceq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp eq <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceq_v2i64
+}
+
+define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: cle_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: cle_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v16i8
+}
+
+define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: cle_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: cle_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v8i16
+}
+
+define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: cle_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: cle_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v4i32
+}
+
+define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: cle_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: cle_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_s_v2i64
+}
+
+define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: cle_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: cle_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v16i8
+}
+
+define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: cle_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: cle_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v8i16
+}
+
+define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: cle_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: cle_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v4i32
+}
+
+define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: cle_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: cle_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cle_u_v2i64
+}
+
+define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: clt_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: clt_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v16i8
+}
+
+define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: clt_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: clt_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v8i16
+}
+
+define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: clt_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: clt_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v4i32
+}
+
+define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: clt_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: clt_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_s_v2i64
+}
+
+define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: clt_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: clt_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v16i8
+}
+
+define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: clt_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: clt_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v8i16
+}
+
+define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: clt_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: clt_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v4i32
+}
+
+define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: clt_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: clt_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clt_u_v2i64
+}
+
+; There is no != comparison, but test it anyway since we've had legalizer
+; issues in this area.
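+; We expect an eq-compare followed by an inversion of the result.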
+define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: cne_v16i8:
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <16 x i8> %1, %2
+ %4 = sext <16 x i1> %3 to <16 x i8>
+ ; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; CHECK-DAG: xori.b [[R3]], [[R3]], 255
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v16i8
+}
+
+; There is no != comparison, but test it anyway since we've had legalizer
+; issues in this area.
+define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: cne_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <8 x i16> %1, %2
+ %4 = sext <8 x i1> %3 to <8 x i16>
+ ; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
+ ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
+ ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
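+ ; Inverting every byte with ldi.b -1 / xor.v also inverts every halfword,
+ ; so the result is still correct, just not the cheapest encoding.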
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v8i16
+}
+
+; There is no != comparison, but test it anyway since we've had legalizer
+; issues in this area.
+define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: cne_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <4 x i32> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
+ ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
+ ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v4i32
+}
+
+; There is no != comparison, but test it anyway since we've had legalizer
+; issues in this area.
+define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: cne_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ne <2 x i64> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
+ ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
+ ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size cne_v2i64
+}
+
+define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: ceqi_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: ceqi.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v16i8
+}
+
+define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: ceqi_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: ceqi.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v8i16
+}
+
+define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: ceqi_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: ceqi.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v4i32
+}
+
+define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: ceqi_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: ceqi.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ceqi_v2i64
+}
+
+define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clei_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clei_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v16i8
+}
+
+define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clei_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clei_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v8i16
+}
+
+define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clei_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clei_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v4i32
+}
+
+define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clei_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clei_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_s_v2i64
+}
+
+define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clei_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clei_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v16i8
+}
+
+define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clei_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clei_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v8i16
+}
+
+define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clei_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clei_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v4i32
+}
+
+define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clei_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clei_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clei_u_v2i64
+}
+
+define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clti_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clti_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v16i8
+}
+
+define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clti_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clti_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v8i16
+}
+
+define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clti_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clti_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v4i32
+}
+
+define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clti_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clti_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_s_v2i64
+}
+
+define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: clti_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ; CHECK-DAG: clti_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v16i8
+}
+
+define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: clti_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ; CHECK-DAG: clti_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v8i16
+}
+
+define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: clti_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ; CHECK-DAG: clti_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v4i32
+}
+
+define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: clti_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ; CHECK-DAG: clti_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size clti_u_v2i64
+}
+
+define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bsel_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
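+ ; The compare supplies the per-bit mask and the bit-select applies it, so
+ ; a vector select costs two instructions here.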
+ ; bmnz.v is the same operation as bsel.v, so it is an acceptable match
+ ; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v16i8
+}
+
+define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bsel_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v8i16
+}
+
+define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bsel_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v4i32
+}
+
+define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bsel_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp sgt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_s_v2i64
+}
+
+define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bsel_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <16 x i8>* %c
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
+ ; bmnz.v is the same operation as bsel.v (see bsel_s_v16i8)
+ ; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
+ store <16 x i8> %5, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v16i8
+}
+
+define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bsel_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <8 x i16>* %c
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %5, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v8i16
+}
+
+define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bsel_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x i32>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %5, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v4i32
+}
+
+define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bsel_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x i64>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = icmp ugt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %5, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_u_v2i64
+}
+
+define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bseli_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
+ store <16 x i8> %4, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v16i8
+}
+
+define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bseli_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
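+ ; (only the .b form of bseli exists, so the wider element types load the
+ ; constant with ldi and use bsel.v)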
+ store <8 x i16> %4, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v8i16
+}
+
+define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bseli_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %4, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v4i32
+}
+
+define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bseli_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %4, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_s_v2i64
+}
+
+define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+ <16 x i8>* %c) nounwind {
+ ; CHECK: bseli_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <16 x i8> %1, %2
+ ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
+ store <16 x i8> %4, <16 x i8>* %d
+ ; CHECK-DAG: st.b [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v16i8
+}
+
+define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+ <8 x i16>* %c) nounwind {
+ ; CHECK: bseli_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <8 x i16> %1, %2
+ ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <8 x i16> %4, <8 x i16>* %d
+ ; CHECK-DAG: st.h [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v8i16
+}
+
+define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+ <4 x i32>* %c) nounwind {
+ ; CHECK: bseli_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <4 x i32> %1, %2
+ ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x i32> %4, <4 x i32>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v4i32
+}
+
+define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+ <2 x i64>* %c) nounwind {
+ ; CHECK: bseli_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <2 x i64> %1, %2
+ ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x i64> %4, <2 x i64>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_u_v2i64
+}
+
+define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v16i8
+}
+
+define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v8i16
+}
+
+define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v4i32
+}
+
+define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sgt <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_v2i64
+}
+
+define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v16i8
+}
+
+define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v8i16
+}
+
+define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v4i32
+}
+
+define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ugt <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_v2i64
+}
+
+define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
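+ ; (the sge form gives the same result as sgt when the elements are equal,
+ ; so it also selects max_s.b)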
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v16i8
+}
+
+define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v8i16
+}
+
+define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v4i32
+}
+
+define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sge <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_s_eq_v2i64
+}
+
+define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: max_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v16i8
+}
+
+define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: max_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v8i16
+}
+
+define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: max_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v4i32
+}
+
+define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: max_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp uge <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_u_eq_v2i64
+}
+
+define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
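+ ; (maxi_s takes a small signed immediate, so the splat of 1 folds into the
+ ; instruction instead of being materialized in a register)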
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v16i8
+}
+
+define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v8i16
+}
+
+define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v4i32
+}
+
+define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sgt <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_v2i64
+}
+
+define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v16i8
+}
+
+define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v8i16
+}
+
+define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v4i32
+}
+
+define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ugt <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_v2i64
+}
+
+define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v16i8
+}
+
+define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v8i16
+}
+
+define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v4i32
+}
+
+define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sge <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_s_eq_v2i64
+}
+
+define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v16i8
+}
+
+define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v8i16
+}
+
+define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v4i32
+}
+
+define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: maxi_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp uge <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size maxi_u_eq_v2i64
+}
+
+define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v16i8
+}
+
+define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v8i16
+}
+
+define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v4i32
+}
+
+define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp slt <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_v2i64
+}
+
+define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v16i8
+}
+
+define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v8i16
+}
+
+define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v4i32
+}
+
+define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ult <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_v2i64
+}
+
+define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v16i8
+}
+
+define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v8i16
+}
+
+define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v4i32
+}
+
+define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp sle <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_s_eq_v2i64
+}
+
+define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: min_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <16 x i8> %1, %2
+ %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
+ ; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %4, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v16i8
+}
+
+define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: min_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <8 x i16> %1, %2
+ %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
+ ; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %4, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v8i16
+}
+
+define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: min_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <4 x i32> %1, %2
+ %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
+ ; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v4i32
+}
+
+define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: min_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = icmp ule <2 x i64> %1, %2
+ %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
+ ; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_u_eq_v2i64
+}
+
+define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_s_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v16i8
+}
+
+define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_s_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v8i16
+}
+
+define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_s_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v4i32
+}
+
+define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_s_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_v2i64
+}
+
+define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_u_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v16i8
+}
+
+define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_u_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v8i16
+}
+
+define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_u_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v4i32
+}
+
+define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_u_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_v2i64
+}
+
+define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_s_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v16i8
+}
+
+define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_s_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v8i16
+}
+
+define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_s_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v4i32
+}
+
+define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_s_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_s_eq_v2i64
+}
+
+define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: mini_u_eq_v16i8:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v16i8
+}
+
+define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: mini_u_eq_v8i16:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v8i16
+}
+
+define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: mini_u_eq_v4i32:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+ %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v4i32
+}
+
+define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: mini_u_eq_v2i64:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
+ %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ ; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size mini_u_eq_v2i64
+}
diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll
new file mode 100644
index 0000000000000..2fc61f89c7fa7
--- /dev/null
+++ b/test/CodeGen/Mips/msa/compare_float.ll
@@ -0,0 +1,663 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
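+; Test the lowering of vector fcmp condition codes to the MSA floating-point
+; compare instructions.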
+
+declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
+declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
+declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
+declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
+
+define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: false_v4f32:
+
+ %1 = load <4 x float>* %a
+ %2 = load <4 x float>* %b
+ %3 = fcmp false <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ store <4 x i32> %4, <4 x i32>* %c
+ ret void
+
+ ; (setcc $a, $b, SETFALSE) is always folded, so we won't get fcaf:
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+ ; CHECK: .size false_v4f32
+}
+
+define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: false_v2f64:
+
+ %1 = load <2 x double>* %a
+ %2 = load <2 x double>* %b
+ %3 = fcmp false <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ store <2 x i64> %4, <2 x i64>* %c
+ ret void
+
+ ; FIXME: This code is correct, but poor. Ideally it would be similar to
+ ; the code in @false_v4f32.
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
+ ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
+ ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; CHECK: .size false_v2f64
+}
+
+define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: oeq_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oeq <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oeq_v4f32
+}
+
+define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: oeq_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oeq <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oeq_v2f64
+}
+
+define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: oge_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oge <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
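+ ; (there is no fcge.w; fcle.w with the operands swapped is used instead)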
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oge_v4f32
+}
+
+define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: oge_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp oge <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size oge_v2f64
+}
+
+define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ogt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ogt_v4f32
+}
+
+define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ogt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ogt_v2f64
+}
+
+define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ole_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ole <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ole_v4f32
+}
+
+define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ole_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ole <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ole_v2f64
+}
+
+define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: olt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp olt <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size olt_v4f32
+}
+
+define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: olt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp olt <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size olt_v2f64
+}
+
+define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: one_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp one <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcne.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size one_v4f32
+}
+
+define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: one_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp one <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcne.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size one_v2f64
+}
+
+define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ord_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ord <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcor.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ord_v4f32
+}
+
+define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ord_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ord <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcor.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ord_v2f64
+}
+
+define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ueq_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ueq <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcueq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ueq_v4f32
+}
+
+define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ueq_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ueq <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcueq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ueq_v2f64
+}
+
+define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: uge_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uge <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
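+ ; (fcuge.w does not exist; fcule.w with the operands swapped is used instead)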
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uge_v4f32
+}
+
+define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: uge_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uge <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uge_v2f64
+}
+
+define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ugt_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ugt <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ugt_v4f32
+}
+
+define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ugt_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ugt <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ugt_v2f64
+}
+
+define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ule_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ule <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ule_v4f32
+}
+
+define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ule_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ule <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ule_v2f64
+}
+
+define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: ult_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ult <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ult_v4f32
+}
+
+define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: ult_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ult <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ult_v2f64
+}
+
+define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: uno_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uno <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ ; CHECK-DAG: fcun.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %4, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uno_v4f32
+}
+
+define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: uno_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp uno <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ ; CHECK-DAG: fcun.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %4, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size uno_v2f64
+}
+
+define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: true_v4f32:
+
+ %1 = load <4 x float>* %a
+ %2 = load <4 x float>* %b
+ %3 = fcmp true <4 x float> %1, %2
+ %4 = sext <4 x i1> %3 to <4 x i32>
+ store <4 x i32> %4, <4 x i32>* %c
+ ret void
+
+ ; (setcc $a, $b, SETTRUE) is always folded, so we won't get a compare instruction:
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+ ; CHECK: .size true_v4f32
+}
+
+define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: true_v2f64:
+
+ %1 = load <2 x double>* %a
+ %2 = load <2 x double>* %b
+ %3 = fcmp true <2 x double> %1, %2
+ %4 = sext <2 x i1> %3 to <2 x i64>
+ store <2 x i64> %4, <2 x i64>* %c
+ ret void
+
+ ; FIXME: This code is correct, but poor. Ideally it would be similar to
+ ; the code in @true_v4f32.
+ ; CHECK-DAG: ldi.d [[R1:\$w[0-9]+]], 1
+ ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
+ ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; CHECK: .size true_v2f64
+}
+
+define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: bsel_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <4 x float>* %c
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
+ %4 = fcmp ogt <4 x float> %1, %2
+ ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <4 x float> %5, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v4f32
+}
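+; Note: MSA has no greater-than compare instruction, so fcmp ogt is matched
+; as fclt with the operands swapped; that is why the CHECK lines above (and
+; in the bsel/bseli tests below) expect fclt.w/fclt.d with %b before %a.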
+
+define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: bsel_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = load <2 x double>* %c
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
+ %4 = fcmp ogt <2 x double> %1, %2
+ ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ store <2 x double> %5, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bsel_v2f64
+}
+
+define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
+ <4 x float>* %c) nounwind {
+ ; CHECK: bseli_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <4 x float> %1, %2
+ ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]]
+ store <4 x float> %4, <4 x float>* %d
+ ; CHECK-DAG: st.w [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_v4f32
+}
+
+define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
+ <2 x double>* %c) nounwind {
+ ; CHECK: bseli_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = fcmp ogt <2 x double> %1, %2
+ ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
+ %4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
+ ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]]
+ store <2 x double> %4, <2 x double>* %d
+ ; CHECK-DAG: st.d [[R4]], 0($4)
+
+ ret void
+ ; CHECK: .size bseli_v2f64
+}
+
+define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: max_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
+ ; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_v4f32
+}
+
+define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: max_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
+ ; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size max_v2f64
+}
+
+define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+ ; CHECK: min_v4f32:
+
+ %1 = load <4 x float>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x float>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
+ ; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x float> %3, <4 x float>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_v4f32
+}
+
+define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+ ; CHECK: min_v2f64:
+
+ %1 = load <2 x double>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x double>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
+ ; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x double> %3, <2 x double>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size min_v2f64
+}
diff --git a/test/CodeGen/Mips/msa/elm_copy.ll b/test/CodeGen/Mips/msa/elm_copy.ll
new file mode 100644
index 0000000000000..ed3e52cbffc29
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_copy.ll
@@ -0,0 +1,162 @@
+; Test the MSA intrinsics that are encoded with the ELM instruction format and
+; are element extraction operations.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_copy_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_copy_s_b_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_copy_s_b_ARG1
+ %1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_s_b_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_b_test:
+; CHECK: ld.b
+; CHECK: copy_s.b
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_b_test
+;
+@llvm_mips_copy_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_copy_s_h_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_copy_s_h_ARG1
+ %1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_s_h_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_h_test:
+; CHECK: ld.h
+; CHECK: copy_s.h
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_h_test
+;
+@llvm_mips_copy_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_copy_s_w_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_copy_s_w_ARG1
+ %1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_s_w_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_w_test:
+; CHECK: ld.w
+; CHECK: copy_s.w
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_w_test
+;
+@llvm_mips_copy_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_copy_s_d_RES = global i64 0, align 16
+
+define void @llvm_mips_copy_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_copy_s_d_ARG1
+ %1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1)
+ store i64 %1, i64* @llvm_mips_copy_s_d_RES
+ ret void
+}
+
+declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_copy_s_d_test:
+; CHECK: ld.w
+; CHECK: copy_s.w
+; CHECK: copy_s.w
+; CHECK: sw
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_s_d_test
+;
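+; Note: MIPS32 has no 64-bit GPRs, so the i64 element is presumably
+; extracted as two 32-bit halves, hence the two copy_s.w/sw pairs above
+; instead of a single copy_s.d. The same holds for copy_u.d below, where
+; copy_s.w is used because whole 32-bit halves are copied and no
+; zero-extension is required.
+;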
+@llvm_mips_copy_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_copy_u_b_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_copy_u_b_ARG1
+ %1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_u_b_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_b_test:
+; CHECK: ld.b
+; CHECK: copy_u.b
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_b_test
+;
+@llvm_mips_copy_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_copy_u_h_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_copy_u_h_ARG1
+ %1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_u_h_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_h_test:
+; CHECK: ld.h
+; CHECK: copy_u.h
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_h_test
+;
+@llvm_mips_copy_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_copy_u_w_RES = global i32 0, align 16
+
+define void @llvm_mips_copy_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_copy_u_w_ARG1
+ %1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1)
+ store i32 %1, i32* @llvm_mips_copy_u_w_RES
+ ret void
+}
+
+declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_w_test:
+; CHECK: ld.w
+; CHECK: copy_u.w
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_w_test
+;
+@llvm_mips_copy_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_copy_u_d_RES = global i64 0, align 16
+
+define void @llvm_mips_copy_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_copy_u_d_ARG1
+ %1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1)
+ store i64 %1, i64* @llvm_mips_copy_u_d_RES
+ ret void
+}
+
+declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_copy_u_d_test:
+; CHECK: ld.w
+; CHECK: copy_s.w
+; CHECK: copy_s.w
+; CHECK: sw
+; CHECK: sw
+; CHECK: .size llvm_mips_copy_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/elm_cxcmsa.ll b/test/CodeGen/Mips/msa/elm_cxcmsa.ll
new file mode 100644
index 0000000000000..8d6b0ee20ab84
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_cxcmsa.ll
@@ -0,0 +1,168 @@
+; Test the MSA ctcmsa and cfcmsa intrinsics (which are encoded with the ELM
+; instruction format).
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
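+
+; The test names below follow the MSA control registers: $0 is MSAIR and
+; $1 is MSACSR; $2-$7 appear to correspond to MSAAccess, MSASave, MSAModify,
+; MSARequest, MSAMap and MSAUnmap from the MSA specification.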
+
+define i32 @msa_ir_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 0)
+ ret i32 %0
+}
+
+; CHECK: msa_ir_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $0
+; CHECK: .size msa_ir_cfcmsa_test
+;
+define i32 @msa_csr_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 1)
+ ret i32 %0
+}
+
+; CHECK: msa_csr_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $1
+; CHECK: .size msa_csr_cfcmsa_test
+;
+define i32 @msa_access_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 2)
+ ret i32 %0
+}
+
+; CHECK: msa_access_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $2
+; CHECK: .size msa_access_cfcmsa_test
+;
+define i32 @msa_save_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 3)
+ ret i32 %0
+}
+
+; CHECK: msa_save_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $3
+; CHECK: .size msa_save_cfcmsa_test
+;
+define i32 @msa_modify_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 4)
+ ret i32 %0
+}
+
+; CHECK: msa_modify_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $4
+; CHECK: .size msa_modify_cfcmsa_test
+;
+define i32 @msa_request_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 5)
+ ret i32 %0
+}
+
+; CHECK: msa_request_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $5
+; CHECK: .size msa_request_cfcmsa_test
+;
+define i32 @msa_map_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 6)
+ ret i32 %0
+}
+
+; CHECK: msa_map_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $6
+; CHECK: .size msa_map_cfcmsa_test
+;
+define i32 @msa_unmap_cfcmsa_test() nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.cfcmsa(i32 7)
+ ret i32 %0
+}
+
+; CHECK: msa_unmap_cfcmsa_test:
+; CHECK: cfcmsa $[[R1:[0-9]+]], $7
+; CHECK: .size msa_unmap_cfcmsa_test
+;
+define void @msa_ir_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 0, i32 1)
+ ret void
+}
+
+; CHECK: msa_ir_ctcmsa_test:
+; CHECK: ctcmsa $0
+; CHECK: .size msa_ir_ctcmsa_test
+;
+define void @msa_csr_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 1, i32 1)
+ ret void
+}
+
+; CHECK: msa_csr_ctcmsa_test:
+; CHECK: ctcmsa $1
+; CHECK: .size msa_csr_ctcmsa_test
+;
+define void @msa_access_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 2, i32 1)
+ ret void
+}
+
+; CHECK: msa_access_ctcmsa_test:
+; CHECK: ctcmsa $2
+; CHECK: .size msa_access_ctcmsa_test
+;
+define void @msa_save_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 3, i32 1)
+ ret void
+}
+
+; CHECK: msa_save_ctcmsa_test:
+; CHECK: ctcmsa $3
+; CHECK: .size msa_save_ctcmsa_test
+;
+define void @msa_modify_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 4, i32 1)
+ ret void
+}
+
+; CHECK: msa_modify_ctcmsa_test:
+; CHECK: ctcmsa $4
+; CHECK: .size msa_modify_ctcmsa_test
+;
+define void @msa_request_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 5, i32 1)
+ ret void
+}
+
+; CHECK: msa_request_ctcmsa_test:
+; CHECK: ctcmsa $5
+; CHECK: .size msa_request_ctcmsa_test
+;
+define void @msa_map_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 6, i32 1)
+ ret void
+}
+
+; CHECK: msa_map_ctcmsa_test:
+; CHECK: ctcmsa $6
+; CHECK: .size msa_map_ctcmsa_test
+;
+define void @msa_unmap_ctcmsa_test() nounwind {
+entry:
+ tail call void @llvm.mips.ctcmsa(i32 7, i32 1)
+ ret void
+}
+
+; CHECK: msa_unmap_ctcmsa_test:
+; CHECK: ctcmsa $7
+; CHECK: .size msa_unmap_ctcmsa_test
+;
+declare i32 @llvm.mips.cfcmsa(i32) nounwind
+declare void @llvm.mips.ctcmsa(i32, i32) nounwind
diff --git a/test/CodeGen/Mips/msa/elm_insv.ll b/test/CodeGen/Mips/msa/elm_insv.ll
new file mode 100644
index 0000000000000..fa7ceaf0c6bf3
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_insv.ll
@@ -0,0 +1,192 @@
+; Test the MSA element insertion intrinsics that are encoded with the ELM
+; instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_insert_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_insert_b_ARG3 = global i32 27, align 16
+@llvm_mips_insert_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_insert_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_insert_b_ARG1
+ %1 = load i32* @llvm_mips_insert_b_ARG3
+ %2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind
+
+; CHECK: llvm_mips_insert_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0(
+; CHECK-DAG: insert.b [[R2]][1], [[R1]]
+; CHECK-DAG: st.b [[R2]], 0(
+; CHECK: .size llvm_mips_insert_b_test
+;
+@llvm_mips_insert_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_insert_h_ARG3 = global i32 27, align 16
+@llvm_mips_insert_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_insert_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_insert_h_ARG1
+ %1 = load i32* @llvm_mips_insert_h_ARG3
+ %2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind
+
+; CHECK: llvm_mips_insert_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0(
+; CHECK-DAG: insert.h [[R2]][1], [[R1]]
+; CHECK-DAG: st.h [[R2]], 0(
+; CHECK: .size llvm_mips_insert_h_test
+;
+@llvm_mips_insert_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_insert_w_ARG3 = global i32 27, align 16
+@llvm_mips_insert_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_insert_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_insert_w_ARG1
+ %1 = load i32* @llvm_mips_insert_w_ARG3
+ %2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind
+
+; CHECK: llvm_mips_insert_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0(
+; CHECK-DAG: insert.w [[R2]][1], [[R1]]
+; CHECK-DAG: st.w [[R2]], 0(
+; CHECK: .size llvm_mips_insert_w_test
+;
+@llvm_mips_insert_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_insert_d_ARG3 = global i64 27, align 16
+@llvm_mips_insert_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_insert_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_insert_d_ARG1
+ %1 = load i64* @llvm_mips_insert_d_ARG3
+ %2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind
+
+; CHECK: llvm_mips_insert_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], 4(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]],
+; CHECK-DAG: insert.w [[R3]][2], [[R1]]
+; CHECK-DAG: insert.w [[R3]][3], [[R2]]
+; CHECK-DAG: st.w [[R3]],
+; CHECK: .size llvm_mips_insert_d_test
+;
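+; Note: as with copy_s.d in elm_copy.ll, MIPS32 has no 64-bit GPR to insert
+; from, so the i64 value is presumably loaded as two words and placed with
+; two insert.w operations into adjacent 32-bit elements of the vector.
+;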
+@llvm_mips_insve_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_insve_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_insve_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_insve_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_insve_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_insve_b_ARG3
+ %2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_insve_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_b_ARG3)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.b [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.b [[R3]],
+; CHECK: .size llvm_mips_insve_b_test
+;
+@llvm_mips_insve_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_insve_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_insve_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_insve_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_insve_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_insve_h_ARG3
+ %2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_insve_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_h_ARG3)(
+; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.h [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.h [[R3]],
+; CHECK: .size llvm_mips_insve_h_test
+;
+@llvm_mips_insve_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_insve_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_insve_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_insve_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_insve_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_insve_w_ARG3
+ %2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_insve_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_w_ARG3)(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.w [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.w [[R3]],
+; CHECK: .size llvm_mips_insve_w_test
+;
+@llvm_mips_insve_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_insve_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_insve_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_insve_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_insve_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_insve_d_ARG3
+ %2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.insve.d(<2 x i64>, i32, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_insve_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_d_ARG3)(
+; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: insve.d [[R3]][1], [[R4]][0]
+; CHECK-DAG: st.d [[R3]],
+; CHECK: .size llvm_mips_insve_d_test
+;
diff --git a/test/CodeGen/Mips/msa/elm_move.ll b/test/CodeGen/Mips/msa/elm_move.ll
new file mode 100644
index 0000000000000..98c06c732c36c
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_move.ll
@@ -0,0 +1,25 @@
+; Test the MSA move intrinsics (which are encoded with the ELM instruction
+; format).
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_move_vb_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_move_vb_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_move_vb_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_move_vb_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.move.v(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_move_vb_test:
+; CHECK: ld.b
+; CHECK: move.v
+; CHECK: st.b
+; CHECK: .size llvm_mips_move_vb_test
+;
diff --git a/test/CodeGen/Mips/msa/elm_shift_slide.ll b/test/CodeGen/Mips/msa/elm_shift_slide.ll
new file mode 100644
index 0000000000000..39d670dac8419
--- /dev/null
+++ b/test/CodeGen/Mips/msa/elm_shift_slide.ll
@@ -0,0 +1,158 @@
+; Test the MSA intrinsics that are encoded with the ELM instruction format and
+; are either shifts or slides.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_sldi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sldi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sldi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, i32 1)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_sldi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_b_test:
+; CHECK: ld.b
+; CHECK: sldi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sldi_b_test
+;
+@llvm_mips_sldi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sldi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sldi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, i32 1)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_sldi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_h_test:
+; CHECK: ld.h
+; CHECK: sldi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sldi_h_test
+;
+@llvm_mips_sldi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sldi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sldi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, i32 1)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_sldi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_w_test:
+; CHECK: ld.w
+; CHECK: sldi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sldi_w_test
+;
+@llvm_mips_sldi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sldi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sldi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, i32 1)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_sldi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_d_test:
+; CHECK: ld.d
+; CHECK: sldi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sldi_d_test
+;
+@llvm_mips_splati_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_splati_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_splati_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_splati_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_splati_b_test:
+; CHECK: ld.b
+; CHECK: splati.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_splati_b_test
+;
+@llvm_mips_splati_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_splati_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_splati_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_splati_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_splati_h_test:
+; CHECK: ld.h
+; CHECK: splati.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_splati_h_test
+;
+@llvm_mips_splati_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_splati_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_splati_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_splati_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_splati_w_test:
+; CHECK: ld.w
+; CHECK: splati.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_splati_w_test
+;
+@llvm_mips_splati_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_splati_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_splati_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_splati_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.splati.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_splati_d_test:
+; CHECK: ld.d
+; CHECK: splati.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_splati_d_test
+;
diff --git a/test/CodeGen/Mips/msa/endian.ll b/test/CodeGen/Mips/msa/endian.ll
new file mode 100644
index 0000000000000..44d1925f1cff3
--- /dev/null
+++ b/test/CodeGen/Mips/msa/endian.ll
@@ -0,0 +1,107 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=BIGENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=LITENDIAN %s
+
+@v16i8 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+@v8i16 = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+@v4i32 = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+@v2i64 = global <2 x i64> <i64 0, i64 0>
+
+define void @const_v16i8() nounwind {
+ ; LITENDIAN: .byte 0
+ ; LITENDIAN: .byte 1
+ ; LITENDIAN: .byte 2
+ ; LITENDIAN: .byte 3
+ ; LITENDIAN: .byte 4
+ ; LITENDIAN: .byte 5
+ ; LITENDIAN: .byte 6
+ ; LITENDIAN: .byte 7
+ ; LITENDIAN: .byte 8
+ ; LITENDIAN: .byte 9
+ ; LITENDIAN: .byte 10
+ ; LITENDIAN: .byte 11
+ ; LITENDIAN: .byte 12
+ ; LITENDIAN: .byte 13
+ ; LITENDIAN: .byte 14
+ ; LITENDIAN: .byte 15
+ ; LITENDIAN: const_v16i8:
+ ; BIGENDIAN: .byte 0
+ ; BIGENDIAN: .byte 1
+ ; BIGENDIAN: .byte 2
+ ; BIGENDIAN: .byte 3
+ ; BIGENDIAN: .byte 4
+ ; BIGENDIAN: .byte 5
+ ; BIGENDIAN: .byte 6
+ ; BIGENDIAN: .byte 7
+ ; BIGENDIAN: .byte 8
+ ; BIGENDIAN: .byte 9
+ ; BIGENDIAN: .byte 10
+ ; BIGENDIAN: .byte 11
+ ; BIGENDIAN: .byte 12
+ ; BIGENDIAN: .byte 13
+ ; BIGENDIAN: .byte 14
+ ; BIGENDIAN: .byte 15
+ ; BIGENDIAN: const_v16i8:
+
+ store volatile <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>*@v16i8
+
+ ret void
+}
+
+define void @const_v8i16() nounwind {
+ ; LITENDIAN: .2byte 0
+ ; LITENDIAN: .2byte 1
+ ; LITENDIAN: .2byte 2
+ ; LITENDIAN: .2byte 3
+ ; LITENDIAN: .2byte 4
+ ; LITENDIAN: .2byte 5
+ ; LITENDIAN: .2byte 6
+ ; LITENDIAN: .2byte 7
+ ; LITENDIAN: const_v8i16:
+ ; BIGENDIAN: .2byte 0
+ ; BIGENDIAN: .2byte 1
+ ; BIGENDIAN: .2byte 2
+ ; BIGENDIAN: .2byte 3
+ ; BIGENDIAN: .2byte 4
+ ; BIGENDIAN: .2byte 5
+ ; BIGENDIAN: .2byte 6
+ ; BIGENDIAN: .2byte 7
+ ; BIGENDIAN: const_v8i16:
+
+ store volatile <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16>*@v8i16
+
+ ret void
+}
+
+define void @const_v4i32() nounwind {
+ ; LITENDIAN: .4byte 0
+ ; LITENDIAN: .4byte 1
+ ; LITENDIAN: .4byte 2
+ ; LITENDIAN: .4byte 3
+ ; LITENDIAN: const_v4i32:
+ ; BIGENDIAN: .4byte 0
+ ; BIGENDIAN: .4byte 1
+ ; BIGENDIAN: .4byte 2
+ ; BIGENDIAN: .4byte 3
+ ; BIGENDIAN: const_v4i32:
+
+ store volatile <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>*@v4i32
+
+ ret void
+}
+
+define void @const_v2i64() nounwind {
+ ; LITENDIAN: .4byte 1
+ ; LITENDIAN: .4byte 0
+ ; LITENDIAN: .4byte 2
+ ; LITENDIAN: .4byte 0
+ ; LITENDIAN: const_v2i64:
+ ; BIGENDIAN: .4byte 0
+ ; BIGENDIAN: .4byte 1
+ ; BIGENDIAN: .4byte 0
+ ; BIGENDIAN: .4byte 2
+ ; BIGENDIAN: const_v2i64:
+
+ store volatile <2 x i64> <i64 1, i64 2>, <2 x i64>*@v2i64
+
+ ret void
+}
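+
+; Note: each i64 above is emitted as two .4byte words, so the word order
+; (1,0 versus 0,1 for the value 1) is what distinguishes the little- and
+; big-endian layouts.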
diff --git a/test/CodeGen/Mips/msa/frameindex.ll b/test/CodeGen/Mips/msa/frameindex.ll
new file mode 100644
index 0000000000000..3088e1ba9893b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/frameindex.ll
@@ -0,0 +1,85 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-BE %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=MIPS32-AE -check-prefix=MIPS32-LE %s
+
+define void @loadstore_v16i8_near() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_near:
+
+ %1 = alloca <16 x i8>
+ %2 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0($sp)
+ store volatile <16 x i8> %2, <16 x i8>* %1
+ ; MIPS32-AE: st.b [[R1]], 0($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_near
+}
+
+define void @loadstore_v16i8_just_under_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_under_simm10:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [496 x i8] ; Push the frame right up to 512 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 496($sp)
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: st.b [[R1]], 496($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_under_simm10
+}
+
+define void @loadstore_v16i8_just_over_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_over_simm10:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [497 x i8] ; Push the frame just over 512 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 512
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 512
+ ; MIPS32-AE: st.b [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_over_simm10
+}
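+
+; Note: ld.b/st.b encode a signed 10-bit offset (a range of -512..511 for
+; byte elements), so offset 496 is directly encodable while 512 is not; the
+; 497-byte array rounds the frame up so the vector lands at offset 512,
+; forcing the base to be computed with addiu first.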
+
+define void @loadstore_v16i8_just_under_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_under_simm16:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.b [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_under_simm16
+}
+
+define void @loadstore_v16i8_just_over_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v16i8_just_over_simm16:
+
+ %1 = alloca <16 x i8>
+ %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
+
+ %3 = load volatile <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <16 x i8> %3, <16 x i8>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.b [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v16i8_just_over_simm16
+}
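+
+; Note: 32768 does not fit a signed 16-bit addiu immediate (max 32767), so
+; the offset is materialized with ori into a scratch register and added to
+; $sp with addu, as the CHECK lines above expect for both simm16 tests.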
diff --git a/test/CodeGen/Mips/msa/i10.ll b/test/CodeGen/Mips/msa/i10.ll
new file mode 100644
index 0000000000000..c5a96174a7349
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i10.ll
@@ -0,0 +1,89 @@
+; Test the MSA intrinsics that are encoded with the I10 instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
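+; The bnz.* instructions are branches, so each test below feeds the
+; intrinsic's i32 result into an icmp/br pair; presumably this is what lets
+; the branch form of the instruction be selected.
+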
+@llvm_mips_bnz_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+
+define i32 @llvm_mips_bnz_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bnz_b_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_bnz_b_test:
+; CHECK-DAG: ld.b [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.b [[R0]]
+; CHECK: .size llvm_mips_bnz_b_test
+
+@llvm_mips_bnz_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+
+define i32 @llvm_mips_bnz_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bnz_h_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_bnz_h_test:
+; CHECK-DAG: ld.h [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.h [[R0]]
+; CHECK: .size llvm_mips_bnz_h_test
+
+@llvm_mips_bnz_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+
+define i32 @llvm_mips_bnz_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bnz_w_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_bnz_w_test:
+; CHECK-DAG: ld.w [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.w [[R0]]
+; CHECK: .size llvm_mips_bnz_w_test
+
+@llvm_mips_bnz_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+
+define i32 @llvm_mips_bnz_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bnz_d_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_bnz_d_test:
+; CHECK-DAG: ld.d [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.d [[R0]]
+; CHECK: .size llvm_mips_bnz_d_test
+
diff --git a/test/CodeGen/Mips/msa/i5-a.ll b/test/CodeGen/Mips/msa/i5-a.ll
new file mode 100644
index 0000000000000..0b507208f4298
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-a.ll
@@ -0,0 +1,82 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 'a'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_addvi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_addvi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_addvi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_addvi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_b_test:
+; CHECK: ld.b
+; CHECK: addvi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_addvi_b_test
+;
+@llvm_mips_addvi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_addvi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_addvi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_addvi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_h_test:
+; CHECK: ld.h
+; CHECK: addvi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_addvi_h_test
+;
+@llvm_mips_addvi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_addvi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_addvi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_addvi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_w_test:
+; CHECK: ld.w
+; CHECK: addvi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_addvi_w_test
+;
+@llvm_mips_addvi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_addvi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_addvi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_addvi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_addvi_d_test:
+; CHECK: ld.d
+; CHECK: addvi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_addvi_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-b.ll b/test/CodeGen/Mips/msa/i5-b.ll
new file mode 100644
index 0000000000000..da6be669f0dda
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-b.ll
@@ -0,0 +1,439 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers the ones beginning with 'b'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_bclri_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bclri_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bclri_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bclri_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_b_test:
+; CHECK: ld.b
+; andi.b is equivalent to bclri.b here: clearing bit 7 is the same as ANDing with 127
+; CHECK: andi.b {{\$w[0-9]}}, {{\$w[0-9]}}, 127
+; CHECK: st.b
+; CHECK: .size llvm_mips_bclri_b_test
+;
+@llvm_mips_bclri_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bclri_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bclri_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bclri_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_h_test:
+; CHECK: ld.h
+; CHECK: bclri.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bclri_h_test
+;
+@llvm_mips_bclri_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bclri_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bclri_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bclri_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_w_test:
+; CHECK: ld.w
+; CHECK: bclri.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bclri_w_test
+;
+@llvm_mips_bclri_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bclri_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bclri_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bclri_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_bclri_d_test:
+; CHECK: ld.d
+; CHECK: bclri.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bclri_d_test
+;
+@llvm_mips_binsli_b_ARG1 = global <16 x i8> zeroinitializer, align 16
+@llvm_mips_binsli_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsli_b_RES = global <16 x i8> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsli_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsli_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.b [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_b_RES)(
+; CHECK-DAG: st.b [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_b_test
+
+@llvm_mips_binsli_h_ARG1 = global <8 x i16> zeroinitializer, align 16
+@llvm_mips_binsli_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsli_h_RES = global <8 x i16> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsli_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsli_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_h_ARG2)(
+; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.h [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_h_RES)(
+; CHECK-DAG: st.h [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_h_test
+
+@llvm_mips_binsli_w_ARG1 = global <4 x i32> zeroinitializer, align 16
+@llvm_mips_binsli_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsli_w_RES = global <4 x i32> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsli_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsli_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_w_ARG2)(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.w [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_w_RES)(
+; CHECK-DAG: st.w [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_w_test
+
+@llvm_mips_binsli_d_ARG1 = global <2 x i64> zeroinitializer, align 16
+@llvm_mips_binsli_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsli_d_RES = global <2 x i64> zeroinitializer, align 16
+
+define void @llvm_mips_binsli_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsli_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsli_d_ARG2
+ ; TODO: We use a particularly wide mask here to work around a legalization
+ ; issue. If the mask doesn't fit within a 10-bit immediate, it gets
+ ; legalized into a constant pool. We should add a test to cover the
+ ; other cases once they correctly select binsli.d.
+ %2 = tail call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %0, <2 x i64> %1, i32 61)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_binsli_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_binsli_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsli_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_d_ARG2)(
+; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsli.d [[R3]], [[R4]], 61
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_d_RES)(
+; CHECK-DAG: st.d [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsli_d_test
+
+@llvm_mips_binsri_b_ARG1 = global <16 x i8> zeroinitializer, align 16
+@llvm_mips_binsri_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_binsri_b_RES = global <16 x i8> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_binsri_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_binsri_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.b [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_b_RES)(
+; CHECK-DAG: st.b [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_b_test
+
+@llvm_mips_binsri_h_ARG1 = global <8 x i16> zeroinitializer, align 16
+@llvm_mips_binsri_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_binsri_h_RES = global <8 x i16> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_binsri_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_binsri_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_h_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_h_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_h_ARG2)(
+; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.h [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_h_RES)(
+; CHECK-DAG: st.h [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_h_test
+
+@llvm_mips_binsri_w_ARG1 = global <4 x i32> zeroinitializer, align 16
+@llvm_mips_binsri_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_binsri_w_RES = global <4 x i32> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_binsri_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_binsri_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_w_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_w_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_w_ARG2)(
+; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.w [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_w_RES)(
+; CHECK-DAG: st.w [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_w_test
+
+@llvm_mips_binsri_d_ARG1 = global <2 x i64> zeroinitializer, align 16
+@llvm_mips_binsri_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_binsri_d_RES = global <2 x i64> zeroinitializer, align 16
+
+define void @llvm_mips_binsri_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_binsri_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_binsri_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_binsri_d_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_binsri_d_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_d_ARG2)(
+; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: binsri.d [[R3]], [[R4]], 7
+; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_d_RES)(
+; CHECK-DAG: st.d [[R3]], 0([[R5]])
+; CHECK: .size llvm_mips_binsri_d_test
+
+@llvm_mips_bnegi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bnegi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bnegi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bnegi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_b_test:
+; CHECK: ld.b
+; CHECK: bnegi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bnegi_b_test
+;
+@llvm_mips_bnegi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bnegi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bnegi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bnegi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_h_test:
+; CHECK: ld.h
+; CHECK: bnegi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bnegi_h_test
+;
+@llvm_mips_bnegi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bnegi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bnegi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bnegi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_w_test:
+; CHECK: ld.w
+; CHECK: bnegi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bnegi_w_test
+;
+@llvm_mips_bnegi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bnegi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bnegi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bnegi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_bnegi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_bnegi_d_test:
+; CHECK: ld.d
+; CHECK: bnegi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bnegi_d_test
+;
+@llvm_mips_bseti_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bseti_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bseti_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bseti_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_b_test:
+; CHECK: ld.b
+; CHECK: bseti.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_bseti_b_test
+;
+@llvm_mips_bseti_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bseti_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bseti_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bseti_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_h_test:
+; CHECK: ld.h
+; CHECK: bseti.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_bseti_h_test
+;
+@llvm_mips_bseti_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bseti_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bseti_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bseti_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_w_test:
+; CHECK: ld.w
+; CHECK: bseti.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_bseti_w_test
+;
+@llvm_mips_bseti_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bseti_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bseti_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bseti_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_bseti_d_test:
+; CHECK: ld.d
+; CHECK: bseti.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_bseti_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-c.ll b/test/CodeGen/Mips/msa/i5-c.ll
new file mode 100644
index 0000000000000..bf1578f30f326
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-c.ll
@@ -0,0 +1,386 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers only those beginning with 'c'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_ceqi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ceqi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ceqi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ceqi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ceqi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_b_test:
+; CHECK: ld.b
+; CHECK: ceqi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ceqi_b_test
+;
+@llvm_mips_ceqi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ceqi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ceqi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_ceqi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ceqi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_h_test:
+; CHECK: ld.h
+; CHECK: ceqi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_ceqi_h_test
+;
+@llvm_mips_ceqi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ceqi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ceqi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_ceqi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ceqi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_w_test:
+; CHECK: ld.w
+; CHECK: ceqi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_ceqi_w_test
+;
+@llvm_mips_ceqi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ceqi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ceqi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_ceqi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ceqi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_ceqi_d_test:
+; CHECK: ld.d
+; CHECK: ceqi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_ceqi_d_test
+;
+@llvm_mips_clei_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clei_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clei_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clei_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_b_test:
+; CHECK: ld.b
+; CHECK: clei_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clei_s_b_test
+;
+@llvm_mips_clei_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clei_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clei_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clei_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_h_test:
+; CHECK: ld.h
+; CHECK: clei_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clei_s_h_test
+;
+@llvm_mips_clei_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clei_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clei_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clei_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_w_test:
+; CHECK: ld.w
+; CHECK: clei_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clei_s_w_test
+;
+@llvm_mips_clei_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clei_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clei_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clei_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clei_s_d_test:
+; CHECK: ld.d
+; CHECK: clei_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clei_s_d_test
+;
+@llvm_mips_clei_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clei_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clei_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clei_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_b_test:
+; CHECK: ld.b
+; CHECK: clei_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clei_u_b_test
+;
+@llvm_mips_clei_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clei_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clei_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clei_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_h_test:
+; CHECK: ld.h
+; CHECK: clei_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clei_u_h_test
+;
+@llvm_mips_clei_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clei_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clei_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clei_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_w_test:
+; CHECK: ld.w
+; CHECK: clei_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clei_u_w_test
+;
+@llvm_mips_clei_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clei_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clei_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clei_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clei_u_d_test:
+; CHECK: ld.d
+; CHECK: clei_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clei_u_d_test
+;
+@llvm_mips_clti_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clti_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clti_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clti_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_b_test:
+; CHECK: ld.b
+; CHECK: clti_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clti_s_b_test
+;
+@llvm_mips_clti_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clti_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clti_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clti_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_h_test:
+; CHECK: ld.h
+; CHECK: clti_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clti_s_h_test
+;
+@llvm_mips_clti_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clti_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clti_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clti_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_w_test:
+; CHECK: ld.w
+; CHECK: clti_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clti_s_w_test
+;
+@llvm_mips_clti_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clti_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clti_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clti_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clti_s_d_test:
+; CHECK: ld.d
+; CHECK: clti_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clti_s_d_test
+;
+@llvm_mips_clti_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_clti_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_clti_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_clti_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_b_test:
+; CHECK: ld.b
+; CHECK: clti_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_clti_u_b_test
+;
+@llvm_mips_clti_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_clti_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_clti_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_clti_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_h_test:
+; CHECK: ld.h
+; CHECK: clti_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_clti_u_h_test
+;
+@llvm_mips_clti_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_clti_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_clti_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_clti_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_w_test:
+; CHECK: ld.w
+; CHECK: clti_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_clti_u_w_test
+;
+@llvm_mips_clti_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_clti_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_clti_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_clti_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.clti.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_clti_u_d_test:
+; CHECK: ld.d
+; CHECK: clti_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_clti_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-m.ll b/test/CodeGen/Mips/msa/i5-m.ll
new file mode 100644
index 0000000000000..27663494324d9
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-m.ll
@@ -0,0 +1,310 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers only those beginning with 'm'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_maxi_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maxi_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maxi_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_maxi_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_b_test:
+; CHECK: ld.b
+; CHECK: maxi_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maxi_s_b_test
+;
+@llvm_mips_maxi_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maxi_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maxi_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maxi_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_h_test:
+; CHECK: ld.h
+; CHECK: maxi_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maxi_s_h_test
+;
+@llvm_mips_maxi_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maxi_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maxi_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maxi_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_w_test:
+; CHECK: ld.w
+; CHECK: maxi_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maxi_s_w_test
+;
+@llvm_mips_maxi_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maxi_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maxi_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_maxi_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_d_test:
+; CHECK: ld.d
+; CHECK: maxi_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maxi_s_d_test
+;
+@llvm_mips_maxi_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maxi_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maxi_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_maxi_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_b_test:
+; CHECK: ld.b
+; CHECK: maxi_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maxi_u_b_test
+;
+@llvm_mips_maxi_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maxi_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maxi_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_maxi_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_h_test:
+; CHECK: ld.h
+; CHECK: maxi_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maxi_u_h_test
+;
+@llvm_mips_maxi_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maxi_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maxi_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_maxi_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_w_test:
+; CHECK: ld.w
+; CHECK: maxi_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maxi_u_w_test
+;
+@llvm_mips_maxi_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maxi_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maxi_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_maxi_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_d_test:
+; CHECK: ld.d
+; CHECK: maxi_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maxi_u_d_test
+;
+@llvm_mips_mini_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mini_s_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mini_s_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mini_s_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_b_test:
+; CHECK: ld.b
+; CHECK: mini_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mini_s_b_test
+;
+@llvm_mips_mini_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mini_s_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mini_s_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mini_s_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_h_test:
+; CHECK: ld.h
+; CHECK: mini_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mini_s_h_test
+;
+@llvm_mips_mini_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mini_s_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mini_s_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mini_s_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_w_test:
+; CHECK: ld.w
+; CHECK: mini_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mini_s_w_test
+;
+@llvm_mips_mini_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mini_s_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mini_s_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mini_s_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_d_test:
+; CHECK: ld.d
+; CHECK: mini_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mini_s_d_test
+;
+@llvm_mips_mini_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mini_u_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mini_u_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_mini_u_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_b_test:
+; CHECK: ld.b
+; CHECK: mini_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mini_u_b_test
+;
+@llvm_mips_mini_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mini_u_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mini_u_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_mini_u_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_h_test:
+; CHECK: ld.h
+; CHECK: mini_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mini_u_h_test
+;
+@llvm_mips_mini_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mini_u_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mini_u_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_mini_u_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_w_test:
+; CHECK: ld.w
+; CHECK: mini_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mini_u_w_test
+;
+@llvm_mips_mini_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mini_u_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mini_u_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_mini_u_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.mini.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_d_test:
+; CHECK: ld.d
+; CHECK: mini_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mini_u_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5-s.ll b/test/CodeGen/Mips/msa/i5-s.ll
new file mode 100644
index 0000000000000..184172f63b852
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5-s.ll
@@ -0,0 +1,82 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format.
+; There are lots of these, so this file covers only those beginning with 's'.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_subvi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subvi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subvi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_subvi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_b_test:
+; CHECK: ld.b
+; CHECK: subvi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subvi_b_test
+;
+@llvm_mips_subvi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subvi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subvi_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_subvi_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_h_test:
+; CHECK: ld.h
+; CHECK: subvi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subvi_h_test
+;
+@llvm_mips_subvi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subvi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subvi_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_subvi_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_w_test:
+; CHECK: ld.w
+; CHECK: subvi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subvi_w_test
+;
+@llvm_mips_subvi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subvi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subvi_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_subvi_d_ARG1
+ %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_d_test:
+; CHECK: ld.d
+; CHECK: subvi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subvi_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i5_ld_st.ll b/test/CodeGen/Mips/msa/i5_ld_st.ll
new file mode 100644
index 0000000000000..7cc55f2904be7
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i5_ld_st.ll
@@ -0,0 +1,150 @@
+; Test the MSA intrinsics that are encoded with the I5 instruction format and
+; are loads or stores.
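+;
+; The immediate operand of these intrinsics is a byte offset (16 in every test
+; below), which is expected to fold into the load/store addressing mode, as the
+; '16(' in the CHECK patterns shows.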
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_ld_b_ARG = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ld_b_test() nounwind {
+entry:
+ %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
+ %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 16)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ld.b(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_b_test:
+; CHECK: ld.b [[R1:\$w[0-9]+]], 16(
+; CHECK: st.b
+; CHECK: .size llvm_mips_ld_b_test
+;
+@llvm_mips_ld_h_ARG = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_ld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_ld_h_test() nounwind {
+entry:
+ %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
+ %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 16)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ld.h(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_h_test:
+; CHECK: ld.h [[R1:\$w[0-9]+]], 16(
+; CHECK: st.h
+; CHECK: .size llvm_mips_ld_h_test
+;
+@llvm_mips_ld_w_ARG = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_ld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_ld_w_test() nounwind {
+entry:
+ %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
+ %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 16)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.ld.w(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_w_test:
+; CHECK: ld.w [[R1:\$w[0-9]+]], 16(
+; CHECK: st.w
+; CHECK: .size llvm_mips_ld_w_test
+;
+@llvm_mips_ld_d_ARG = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_ld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_ld_d_test() nounwind {
+entry:
+ %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
+ %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 16)
+ store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
+ ret void
+}
+
+declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
+
+; CHECK: llvm_mips_ld_d_test:
+; CHECK: ld.d [[R1:\$w[0-9]+]], 16(
+; CHECK: st.d
+; CHECK: .size llvm_mips_ld_d_test
+;
+@llvm_mips_st_b_ARG = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_st_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_st_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_st_b_ARG
+ %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
+ tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_b_test:
+; CHECK: ld.b
+; CHECK: st.b [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_b_test
+;
+@llvm_mips_st_h_ARG = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_st_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_st_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_st_h_ARG
+ %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
+ tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_h_test:
+; CHECK: ld.h
+; CHECK: st.h [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_h_test
+;
+@llvm_mips_st_w_ARG = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_st_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_st_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_st_w_ARG
+ %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
+ tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_w_test:
+; CHECK: ld.w
+; CHECK: st.w [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_w_test
+;
+@llvm_mips_st_d_ARG = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_st_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_st_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_st_d_ARG
+ %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
+ tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16)
+ ret void
+}
+
+declare void @llvm.mips.st.d(<2 x i64>, i8*, i32) nounwind
+
+; CHECK: llvm_mips_st_d_test:
+; CHECK: ld.d
+; CHECK: st.d [[R1:\$w[0-9]+]], 16(
+; CHECK: .size llvm_mips_st_d_test
+;
diff --git a/test/CodeGen/Mips/msa/i8.ll b/test/CodeGen/Mips/msa/i8.ll
new file mode 100644
index 0000000000000..d2931a72feaab
--- /dev/null
+++ b/test/CodeGen/Mips/msa/i8.ll
@@ -0,0 +1,211 @@
+; Test the MSA intrinsics that are encoded with the I8 instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_andi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_andi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_andi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_andi_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_andi_b_test:
+; CHECK: ld.b
+; CHECK: andi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_andi_b_test
+
+@llvm_mips_bmnzi_b_ARG1 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+@llvm_mips_bmnzi_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmnzi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmnzi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bmnzi_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: bmnzi.b [[R3]], [[R4]], 25
+; CHECK-DAG: st.b [[R3]], 0(
+; CHECK: .size llvm_mips_bmnzi_b_test
+
+@llvm_mips_bmzi_b_ARG1 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+@llvm_mips_bmzi_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmzi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmzi_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmzi_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmzi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bmzi_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmzi_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmzi_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; bmnzi.b is the same as bmzi.b with ws and wd_in swapped
+; CHECK-DAG: bmnzi.b [[R4]], [[R3]], 25
+; CHECK-DAG: st.b [[R4]], 0(
+; CHECK: .size llvm_mips_bmzi_b_test
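+;
+; A sketch of the equivalence noted above, assuming the usual MSA bit-move
+; semantics in which the immediate acts as a per-bit mask:
+;   bmzi.b  wd, ws, imm   ; wd = (ws & ~imm) | (wd & imm)
+;   bmnzi.b wd, ws, imm   ; wd = (ws & imm)  | (wd & ~imm)
+; Swapping ws and wd_in in one form yields the other, which is why the test
+; above accepts bmnzi.b with the operand order reversed.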
+
+@llvm_mips_bseli_b_ARG1 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+@llvm_mips_bseli_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bseli_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bseli_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bseli_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bseli_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_bseli_b_test:
+; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bseli_b_ARG1)(
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bseli_b_ARG2)(
+; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: bseli.b [[R3]], [[R4]], 25
+; CHECK-DAG: st.b [[R3]], 0(
+; CHECK: .size llvm_mips_bseli_b_test
+
+@llvm_mips_nori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nori_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nori_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nori_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_nori_b_test:
+; CHECK: ld.b
+; CHECK: nori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_nori_b_test
+;
+@llvm_mips_ori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ori_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ori_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_ori_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_ori_b_test:
+; CHECK: ld.b
+; CHECK: ori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ori_b_test
+;
+@llvm_mips_shf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_shf_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_shf_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_shf_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_shf_b_test:
+; CHECK: ld.b
+; CHECK: shf.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_shf_b_test
+;
+@llvm_mips_shf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_shf_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_shf_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_shf_h_ARG1
+ %1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25)
+ store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_shf_h_test:
+; CHECK: ld.h
+; CHECK: shf.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_shf_h_test
+;
+@llvm_mips_shf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_shf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_shf_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_shf_w_ARG1
+ %1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25)
+ store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES
+ ret void
+}
+
+declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_shf_w_test:
+; CHECK: ld.w
+; CHECK: shf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_shf_w_test
+;
+@llvm_mips_xori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_xori_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_xori_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_xori_b_ARG1
+ %1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25)
+ store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES
+ ret void
+}
+
+declare <16 x i8> @llvm.mips.xori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_xori_b_test:
+; CHECK: ld.b
+; CHECK: xori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_xori_b_test
+;
diff --git a/test/CodeGen/Mips/msa/inline-asm.ll b/test/CodeGen/Mips/msa/inline-asm.ll
new file mode 100644
index 0000000000000..4a34273f3c00c
--- /dev/null
+++ b/test/CodeGen/Mips/msa/inline-asm.ll
@@ -0,0 +1,34 @@
+; A basic inline assembly test
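+; The 'f' constraint selects a floating-point/MSA register and the '${N:w}'
+; operand modifier prints it in its MSA form ($w0-$w31), as the CHECK patterns
+; below show.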
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@v4i32_r = global <4 x i32> zeroinitializer, align 16
+
+define void @test1() nounwind {
+entry:
+ ; CHECK-LABEL: test1:
+ %0 = call <4 x i32> asm "ldi.w ${0:w}, 1", "=f"()
+ ; CHECK: ldi.w $w{{[1-3]?[0-9]}}, 1
+ store <4 x i32> %0, <4 x i32>* @v4i32_r
+ ret void
+}
+
+define void @test2() nounwind {
+entry:
+ ; CHECK-LABEL: test2:
+ %0 = load <4 x i32>* @v4i32_r
+ %1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0)
+ ; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1
+ store <4 x i32> %1, <4 x i32>* @v4i32_r
+ ret void
+}
+
+define void @test3() nounwind {
+entry:
+ ; CHECK-LABEL: test3:
+ %0 = load <4 x i32>* @v4i32_r
+ %1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0)
+ ; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1
+ store <4 x i32> %1, <4 x i32>* @v4i32_r
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
new file mode 100644
index 0000000000000..4beaaa9c18415
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
@@ -0,0 +1,134 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; "Unexpected illegal type!" assertion.
+; It should at least successfully build.
+
+define void @autogen_SD1704963983(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <4 x double>
+ %A3 = alloca <8 x i64>
+ %A2 = alloca <1 x double>
+ %A1 = alloca double
+ %A = alloca i32
+ %L = load i8* %0
+ store i8 77, i8* %0
+ %E = extractelement <8 x i64> zeroinitializer, i32 2
+ %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15, i32 1, i32 3>
+ %I = insertelement <8 x i64> zeroinitializer, i64 %E, i32 7
+ %Sl = select i1 false, i8* %0, i8* %0
+ %Cmp = icmp eq i32 434069, 272505
+ br label %CF
+
+CF: ; preds = %CF, %CF78, %BB
+ %L5 = load i8* %Sl
+ store i8 %L, i8* %Sl
+ %E6 = extractelement <8 x i32> zeroinitializer, i32 2
+ %Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
+ %I8 = insertelement <8 x i64> zeroinitializer, i64 %4, i32 7
+ %B = shl <1 x i16> zeroinitializer, zeroinitializer
+ %FC = sitofp <8 x i64> zeroinitializer to <8 x float>
+ %Sl9 = select i1 %Cmp, i8 77, i8 77
+ %Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer
+ %L11 = load i8* %0
+ store i8 %Sl9, i8* %0
+ %E12 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7>
+ %I14 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
+ %B15 = udiv <1 x i16> %B, zeroinitializer
+ %Tr = trunc <8 x i64> %Shuff to <8 x i32>
+ %Sl16 = select i1 %Cmp, i8 77, i8 %5
+ %Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10
+ %L18 = load i8* %Sl
+ store i8 -1, i8* %Sl
+ %E19 = extractelement <8 x i32> zeroinitializer, i32 3
+ %Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> <i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0, i32 2, i32 undef>
+ %I21 = insertelement <8 x i64> %Shuff13, i64 %E, i32 0
+ %B22 = urem <8 x i64> %Shuff7, %I21
+ %FC23 = sitofp i32 50347 to float
+ %Sl24 = select i1 %Cmp, double 0.000000e+00, double 0.000000e+00
+ %Cmp25 = icmp ugt i32 465489, 47533
+ br i1 %Cmp25, label %CF, label %CF78
+
+CF78: ; preds = %CF
+ %L26 = load i8* %Sl
+ store i32 50347, i32* %A
+ %E27 = extractelement <8 x i1> %Cmp10, i32 2
+ br i1 %E27, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF81, %CF78
+ %Shuff28 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
+ %I29 = insertelement <1 x i16> zeroinitializer, i16 -1, i32 0
+ %B30 = urem <8 x i32> %Tr, zeroinitializer
+ %Tr31 = trunc i32 0 to i16
+ %Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
+ %L33 = load i8* %Sl
+ store i8 %L26, i8* %Sl
+ %E34 = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef
+ %I36 = insertelement <8 x i64> %Shuff28, i64 %E, i32 7
+ %B37 = srem <1 x i16> %I29, zeroinitializer
+ %FC38 = sitofp <8 x i32> %B30 to <8 x double>
+ %Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24
+ %L40 = load i8* %Sl
+ store i8 %Sl16, i8* %Sl
+ %E41 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> <i32 14, i32 undef, i32 2, i32 4, i32 undef, i32 8, i32 10, i32 12>
+ %I43 = insertelement <4 x i32> zeroinitializer, i32 272505, i32 0
+ %B44 = urem <8 x i32> %B30, %Tr
+ %PC = bitcast i8* %0 to i64*
+ %Sl45 = select i1 %Cmp, <8 x i1> %Cmp10, <8 x i1> %Shuff42
+ %Cmp46 = fcmp ugt float 0xB856238A00000000, 0x47DA795E40000000
+ br i1 %Cmp46, label %CF77, label %CF80
+
+CF80: ; preds = %CF80, %CF77
+ %L47 = load i64* %PC
+ store i8 77, i8* %Sl
+ %E48 = extractelement <8 x i64> zeroinitializer, i32 2
+ %Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> <i32 5, i32 7, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 3>
+ %I50 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
+ %B51 = fdiv float 0x46CC2D8000000000, %FC23
+ %PC52 = bitcast <8 x i64>* %A3 to i64*
+ %Sl53 = select i1 %Cmp, <8 x i64> %Shuff, <8 x i64> %Shuff
+ %Cmp54 = fcmp ole float 0x47DA795E40000000, 0xB856238A00000000
+ br i1 %Cmp54, label %CF80, label %CF81
+
+CF81: ; preds = %CF80
+ %L55 = load i8* %Sl
+ store i8 %Sl16, i8* %Sl
+ %E56 = extractelement <1 x i16> %B, i32 0
+ %Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> <i32 1>
+ %I58 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
+ %B59 = srem i32 %E19, %E19
+ %Sl60 = select i1 %Cmp, i8 77, i8 77
+ %Cmp61 = icmp ult <1 x i16> zeroinitializer, %B
+ %L62 = load i8* %Sl
+ store i64 %L47, i64* %PC52
+ %E63 = extractelement <4 x i32> %I43, i32 2
+ %Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
+ %I65 = insertelement <8 x i64> %B22, i64 %L47, i32 7
+ %B66 = add <8 x i64> %I50, %I65
+ %FC67 = uitofp i16 %E12 to float
+ %Sl68 = select i1 %Cmp, <8 x i32> %B30, <8 x i32> zeroinitializer
+ %Cmp69 = fcmp ord double 0.000000e+00, 0.000000e+00
+ br i1 %Cmp69, label %CF77, label %CF79
+
+CF79: ; preds = %CF81
+ %L70 = load i32* %A
+ store i64 %4, i64* %PC
+ %E71 = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 undef, i32 7, i32 9>
+ %I73 = insertelement <8 x i16> zeroinitializer, i16 %E12, i32 5
+ %B74 = fsub double 0.000000e+00, 0.000000e+00
+ %Sl75 = select i1 %Cmp46, i32 %E6, i32 %E19
+ %Cmp76 = icmp ugt <4 x i32> %I43, zeroinitializer
+ store i8 %L, i8* %Sl
+ store i64 %L47, i64* %PC
+ store i64 %L47, i64* %PC
+ store i8 %L5, i8* %Sl
+ store i8 %L5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
new file mode 100644
index 0000000000000..f9cab037e7ccb
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
@@ -0,0 +1,138 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; `Opc && "Cannot copy registers"' assertion.
+; It should at least successfully build.
+
+define void @autogen_SD1935737938(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i64
+ %A3 = alloca <4 x i32>
+ %A2 = alloca i64
+ %A1 = alloca i32
+ %A = alloca <2 x i64>
+ %L = load i8* %0
+ store i8 -1, i8* %0
+ %E = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I = insertelement <1 x i64> <i64 -1>, i64 286689, i32 0
+ %B = lshr i8 %L, -69
+ %ZE = fpext float 0xBF2AA5FE80000000 to double
+ %Sl = select i1 true, <1 x i64> <i64 -1>, <1 x i64> <i64 -1>
+ %L5 = load i8* %0
+ store i8 -69, i8* %0
+ %E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
+ %Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I8 = insertelement <2 x i32> zeroinitializer, i32 135673, i32 1
+ %B9 = udiv i8 %B, %B
+ %FC = uitofp i32 %3 to double
+ %Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer
+ %Cmp = icmp ne <1 x i64> %I, <i64 -1>
+ %L11 = load i8* %0
+ store i8 %L11, i8* %0
+ %E12 = extractelement <1 x i64> <i64 -1>, i32 0
+ %Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> <i64 -1>, <1 x i32> <i32 1>
+ %I14 = insertelement <1 x i64> %I, i64 303290, i32 0
+ %B15 = frem float 0.000000e+00, 0.000000e+00
+ %Sl16 = select i1 true, <1 x i1> %Cmp, <1 x i1> zeroinitializer
+ %Cmp17 = fcmp one float 0xBD946F9840000000, %B15
+ br label %CF74
+
+CF74: ; preds = %CF74, %CF80, %CF76, %BB
+ %L18 = load i8* %0
+ store i8 -69, i8* %0
+ %E19 = extractelement <1 x i64> %Sl, i32 0
+ %Shuff20 = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i32> <i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10>
+ %I21 = insertelement <2 x i32> %Shuff, i32 135673, i32 0
+ %B22 = urem i32 135673, %3
+ %FC23 = sitofp i8 %L to float
+ %Sl24 = select i1 true, i8 %B, i8 %L18
+ %L25 = load i8* %0
+ store i8 %L, i8* %0
+ %E26 = extractelement <2 x i32> %Shuff, i32 1
+ %Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 2, i32 0>
+ %I28 = insertelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E12, i32 8
+ %B29 = frem double %ZE, 0x235104F0E94F406E
+ %Tr = trunc i64 286689 to i8
+ %Sl30 = select i1 true, float 0x45B13EA500000000, float %B15
+ %Cmp31 = icmp eq i32 %B22, %B22
+ br i1 %Cmp31, label %CF74, label %CF80
+
+CF80: ; preds = %CF74
+ %L32 = load i8* %0
+ store i8 -1, i8* %0
+ %E33 = extractelement <2 x i32> zeroinitializer, i32 1
+ %Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
+ %I35 = insertelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 -1, i32 0
+ %FC36 = sitofp <1 x i1> %Cmp to <1 x float>
+ %Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27
+ %L39 = load i8* %0
+ store i8 %Sl24, i8* %0
+ %E40 = extractelement <8 x i64> zeroinitializer, i32 1
+ %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> <i32 0, i32 2>
+ %I42 = insertelement <4 x i32> zeroinitializer, i32 414573, i32 2
+ %B43 = srem i8 %L5, %L39
+ %Sl44 = select i1 %Cmp17, i8 %L, i8 %L
+ %Cmp45 = fcmp une float 0x3AFCE1A0C0000000, 0.000000e+00
+ br i1 %Cmp45, label %CF74, label %CF76
+
+CF76: ; preds = %CF80
+ %L46 = load i8* %0
+ store i8 %L39, i8* %0
+ %E47 = extractelement <2 x i32> %Shuff27, i32 0
+ %Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> <i32 1>
+ %I49 = insertelement <1 x i64> <i64 -1>, i64 %E12, i32 0
+ %FC50 = fptosi double 0x235104F0E94F406E to i32
+ %Sl51 = select i1 %Cmp17, <16 x i64> %I28, <16 x i64> %I28
+ %Cmp52 = icmp ne i8 %Tr, %Sl24
+ br i1 %Cmp52, label %CF74, label %CF75
+
+CF75: ; preds = %CF75, %CF76
+ %L53 = load i8* %0
+ store i8 %L18, i8* %0
+ %E54 = extractelement <8 x i8> %Shuff20, i32 5
+ %Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ %I56 = insertelement <4 x i32> %I42, i32 %B22, i32 2
+ %B57 = sub i64 %E40, %E6
+ %Sl58 = select i1 true, i64 303290, i64 %E40
+ %Cmp59 = icmp slt i64 %E40, %E6
+ br i1 %Cmp59, label %CF75, label %CF78
+
+CF78: ; preds = %CF75
+ %L60 = load i8* %0
+ store i8 -69, i8* %0
+ %E61 = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> <i32 1, i32 3>
+ %I63 = insertelement <1 x i1> %Sl16, i1 %Cmp45, i32 0
+ %B64 = and i8 %Sl44, -69
+ %ZE65 = zext <1 x i1> %Shuff48 to <1 x i64>
+ %Sl66 = select i1 true, <1 x i64> %I, <1 x i64> %I49
+ %Cmp67 = icmp ugt i64 286689, %E40
+ br label %CF
+
+CF: ; preds = %CF, %CF78
+ %L68 = load i8* %0
+ store i64 %B57, i64* %2
+ %E69 = extractelement <2 x i1> %Shuff41, i32 1
+ br i1 %E69, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF
+ %Shuff70 = shufflevector <1 x i64> %Shuff34, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
+ %I71 = insertelement <2 x i32> %Shuff, i32 %E26, i32 0
+ %Se = sext i8 %L60 to i32
+ %Sl72 = select i1 %Cmp45, <2 x i32> %Shuff62, <2 x i32> %I71
+ %Cmp73 = fcmp ugt double 0x235104F0E94F406E, 0x235104F0E94F406E
+ br i1 %Cmp73, label %CF77, label %CF79
+
+CF79: ; preds = %CF77
+ store i8 %L18, i8* %0
+ store i8 %E54, i8* %0
+ store i8 %L39, i8* %0
+ store i8 %L39, i8* %0
+ store i8 %B, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll
new file mode 100644
index 0000000000000..38113143e6d59
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a "Cannot select ..." error.
+; This was because undefs are ignored when checking whether a vector constant
+; is a splat, but are legalized to zero if left in the DAG, which changes the
+; constant into a non-splat.
+;
+; It should at least successfully build.
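+;
+; As an illustrative sketch of the failure mode (not the exact constant used
+; below): <i32 1, i32 1, i32 undef, i32 1> is treated as a splat of 1 because
+; the undef lane is ignored, but legalizing that lane to zero produces
+; <1, 1, 0, 1>, which is no longer a splat.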
+
+define void @autogen_SD2090927243() {
+BB:
+ br label %CF77
+
+CF77:                                            ; preds = %CF77, %BB
+ %Shuff27 = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>,
+ <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>,
+ <16 x i32> <i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 17, i32 19, i32 21, i32 23, i32 undef>
+ %ZE30 = zext <16 x i8> %Shuff27 to <16 x i32>
+ %Cmp32 = fcmp ueq float undef, 0x3CDA6E5E40000000
+ br i1 %Cmp32, label %CF77, label %CF
+
+CF: ; preds = %CF, %CF77
+ %E48 = extractelement <16 x i32> %ZE30, i32 14
+ br i1 undef, label %CF, label %CF78
+
+CF78: ; preds = %CF
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll
new file mode 100644
index 0000000000000..564ad7436d30b
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a "Cannot select ..." error.
+; This happened because the legalizer treated undefs in the <4 x float>
+; constant as equivalent to the defined elements when checking whether it is a
+; constant splat, but then proceeded to legalize the undefs to zero, leaving it
+; as a non-splat that cannot be selected. It should have eliminated the undefs
+; by rewriting the splat constant.
+
+; It should at least successfully build.
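+;
+; As an illustration (the values here are hypothetical, not taken from this
+; test): <float 1.0, float undef, float 1.0, float undef> passes the splat
+; check, but zeroing the undefs yields
+; <float 1.0, float 0.0, float 1.0, float 0.0>, which is not a splat.
+; Rewriting the undefs to the splat value (1.0) would have kept it selectable.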
+
+define void @autogen_SD2501752154() {
+BB:
+ %BC = bitcast <4 x i32> <i32 -1, i32 -1, i32 undef, i32 undef> to <4 x float>
+ br label %CF74
+
+CF74: ; preds = %CF74, %BB
+ %E54 = extractelement <1 x i1> undef, i32 0
+ br i1 %E54, label %CF74, label %CF79
+
+CF79: ; preds = %CF74
+ %I63 = insertelement <4 x float> %BC, float undef, i32 0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
new file mode 100644
index 0000000000000..e14f405320cb8
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
@@ -0,0 +1,141 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA after dereferencing a null `this` pointer.
+; It should at least successfully build.
+
+define void @autogen_SD2704903805(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i32
+ %A3 = alloca i32
+ %A2 = alloca i8
+ %A1 = alloca i32
+ %A = alloca i8
+ %L = load i8* %0
+ store i8 %5, i8* %0
+ %E = extractelement <2 x i16> zeroinitializer, i32 0
+ %Shuff = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> undef
+ %I = insertelement <1 x i8> <i8 -1>, i8 85, i32 0
+ %B = lshr <2 x i16> zeroinitializer, zeroinitializer
+ %FC = sitofp <4 x i16> zeroinitializer to <4 x float>
+ %Sl = select i1 true, float 0.000000e+00, float 0x401E76A240000000
+ %Cmp = icmp ule i16 -25210, %E
+ br label %CF83
+
+CF83: ; preds = %BB
+ %L5 = load i8* %0
+ store i8 85, i8* %0
+ %E6 = extractelement <1 x i8> <i8 -1>, i32 0
+ %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I8 = insertelement <4 x i16> zeroinitializer, i16 %E, i32 3
+ %B9 = ashr <2 x i16> %Shuff7, zeroinitializer
+ %FC10 = sitofp i32 -1 to float
+ %Sl11 = select i1 %Cmp, i32 -1, i32 -1
+ %Cmp12 = icmp sgt i32 -1, -1
+ br label %CF
+
+CF: ; preds = %CF, %CF81, %CF83
+ %L13 = load i8* %0
+ store i8 0, i8* %0
+ %E14 = extractelement <2 x i64> zeroinitializer, i32 0
+ %Shuff15 = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 3, i32 5, i32 7, i32 undef>
+ %I16 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 81222, i32 1
+ %B17 = lshr <2 x i16> zeroinitializer, %B
+ %Tr = trunc i32 272597 to i1
+ br i1 %Tr, label %CF, label %CF80
+
+CF80: ; preds = %CF80, %CF
+ %Sl18 = select i1 %Cmp, <2 x i64> zeroinitializer, <2 x i64> zeroinitializer
+ %Cmp19 = icmp ne i1 %Cmp12, %Cmp
+ br i1 %Cmp19, label %CF80, label %CF81
+
+CF81: ; preds = %CF80
+ %L20 = load i8* %0
+ store i8 85, i8* %0
+ %E21 = extractelement <1 x i8> <i8 -1>, i32 0
+ %Shuff22 = shufflevector <1 x i8> <i8 -1>, <1 x i8> %Shuff, <1 x i32> zeroinitializer
+ %I23 = insertelement <1 x i8> <i8 -1>, i8 %L5, i32 0
+ %FC24 = fptoui <4 x float> %FC to <4 x i16>
+ %Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -1, i32 -1>
+ %Cmp26 = icmp ult <4 x i64> %I16, %Shuff15
+ %L27 = load i8* %0
+ store i8 %L, i8* %0
+ %E28 = extractelement <1 x i8> <i8 -1>, i32 0
+ %Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5, i32 undef, i32 9>
+ %I30 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E14, i32 1
+ %B31 = mul i8 %E28, 85
+ %PC = bitcast i32* %A3 to i32*
+ %Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000
+ %L33 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E34 = extractelement <2 x i16> zeroinitializer, i32 1
+ %Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> <i8 -1>, <1 x i32> zeroinitializer
+ %I36 = insertelement <1 x i8> <i8 -1>, i8 %L13, i32 0
+ %B37 = xor i8 %L27, %L
+ %Sl38 = select i1 %Cmp, i16 %E34, i16 %E
+ %Cmp39 = icmp eq i1 %Cmp19, %Cmp
+ br i1 %Cmp39, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF81
+ %L40 = load i32* %PC
+ store i32 %3, i32* %PC
+ %E41 = extractelement <2 x i32> zeroinitializer, i32 0
+ %Shuff42 = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I43 = insertelement <1 x i8> <i8 -1>, i8 0, i32 0
+ %B44 = or i16 %E, -25210
+ %Se = sext i32 %3 to i64
+ %Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43
+ %Cmp46 = icmp sge <1 x i8> %I36, %Shuff
+ %L47 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E48 = extractelement <2 x i16> zeroinitializer, i32 0
+ %Shuff49 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
+ %I50 = insertelement <2 x i32> %Sl25, i32 47963, i32 1
+ %B51 = srem <1 x i8> %I, %Shuff22
+ %FC52 = sitofp i8 %5 to double
+ %Sl53 = select i1 %Cmp39, i8 %L27, i8 85
+ %Cmp54 = icmp slt i16 %E34, %E34
+ br i1 %Cmp54, label %CF77, label %CF78
+
+CF78: ; preds = %CF78, %CF77
+ %L55 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E56 = extractelement <8 x i16> %Shuff29, i32 4
+ %Shuff57 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
+ %I58 = insertelement <1 x i8> %B51, i8 %Sl53, i32 0
+ %ZE = fpext float %FC10 to double
+ %Sl59 = select i1 %Cmp12, <2 x i16> %B9, <2 x i16> zeroinitializer
+ %Cmp60 = fcmp ult double 0.000000e+00, 0.000000e+00
+ br i1 %Cmp60, label %CF78, label %CF79
+
+CF79: ; preds = %CF79, %CF78
+ %L61 = load i32* %PC
+ store i32 %L33, i32* %A3
+ %E62 = extractelement <4 x i64> %Shuff15, i32 1
+ %Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> <i32 undef, i32 10, i32 12, i32 undef, i32 undef, i32 undef, i32 4, i32 6>
+ %I64 = insertelement <2 x i64> zeroinitializer, i64 %Se, i32 0
+ %B65 = shl i8 %5, 85
+ %ZE66 = zext <4 x i1> %Cmp26 to <4 x i32>
+ %Sl67 = select i1 %Tr, <1 x i8> %Shuff, <1 x i8> %I23
+ %Cmp68 = fcmp olt float 0x4712BFE680000000, 0x4712BFE680000000
+ br i1 %Cmp68, label %CF79, label %CF82
+
+CF82: ; preds = %CF79
+ %L69 = load i32* %PC
+ store i32 %L33, i32* %PC
+ %E70 = extractelement <8 x i16> zeroinitializer, i32 3
+ %Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 6, i32 undef, i32 2, i32 4>
+ %I72 = insertelement <1 x i8> <i8 -1>, i8 %L, i32 0
+ %B73 = srem i64 %E62, %Se
+ %ZE74 = zext <4 x i1> %Cmp26 to <4 x i32>
+ %Sl75 = select i1 %Cmp, i32 463279, i32 %L61
+ %Cmp76 = icmp sgt <1 x i8> %Shuff49, %Shuff22
+ store i8 %B31, i8* %0
+ store i8 85, i8* %0
+ store i32 %L33, i32* %PC
+ store i8 %B65, i8* %0
+ store i8 %L5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
new file mode 100644
index 0000000000000..1a03e55d9d540
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
@@ -0,0 +1,149 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; "Don't know how to expand this condition!" unreachable.
+; It should at least successfully build.
+
+define void @autogen_SD3861334421(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <2 x i32>
+ %A3 = alloca <2 x double>
+ %A2 = alloca i64
+ %A1 = alloca i64
+ %A = alloca double
+ %L = load i8* %0
+ store i8 -101, i8* %0
+ %E = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
+ %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1>
+ %I = insertelement <8 x i64> zeroinitializer, i64 %4, i32 5
+ %B = and i64 116376, 57247
+ %FC = uitofp i8 7 to double
+ %Sl = select i1 false, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %L5 = load i8* %0
+ store i8 %L, i8* %0
+ %E6 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 3
+ %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I8 = insertelement <8 x i8> %Sl, i8 7, i32 4
+ %B9 = or <8 x i64> zeroinitializer, zeroinitializer
+ %Sl10 = select i1 false, i64 116376, i64 380809
+ %Cmp = icmp sgt i32 394647, 17081
+ br label %CF
+
+CF: ; preds = %CF, %BB
+ %L11 = load i8* %0
+ store i8 -87, i8* %0
+ %E12 = extractelement <4 x i64> zeroinitializer, i32 0
+ %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1, i32 3, i32 5>
+ %I14 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 1
+ %B15 = srem i64 %Sl10, 380809
+ %FC16 = sitofp i64 57247 to float
+ %Sl17 = select i1 false, double 0x87A9374869A78EC6, double 0.000000e+00
+ %Cmp18 = icmp uge i8 %L, %5
+ br i1 %Cmp18, label %CF, label %CF80
+
+CF80: ; preds = %CF80, %CF88, %CF
+ %L19 = load i8* %0
+ store i8 -101, i8* %0
+ %E20 = extractelement <4 x i64> zeroinitializer, i32 0
+ %Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
+ %I22 = insertelement <4 x i64> zeroinitializer, i64 127438, i32 1
+ %B23 = fdiv double %Sl17, 0.000000e+00
+ %Sl24 = select i1 %Cmp18, i32 420510, i32 492085
+ %Cmp25 = icmp ugt i1 %Cmp18, false
+ br i1 %Cmp25, label %CF80, label %CF83
+
+CF83: ; preds = %CF83, %CF80
+ %L26 = load i8* %0
+ store i8 -87, i8* %0
+ %E27 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
+ %Shuff28 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
+ %I29 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 492085, i32 1
+ %B30 = lshr <8 x i8> %I8, %I8
+ %FC31 = sitofp <4 x i32> %Shuff28 to <4 x double>
+ %Sl32 = select i1 false, <8 x i8> %I8, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %Cmp33 = icmp eq i64 %B, 116376
+ br i1 %Cmp33, label %CF83, label %CF88
+
+CF88: ; preds = %CF83
+ %L34 = load i8* %0
+ store i8 -87, i8* %0
+ %E35 = extractelement <8 x i64> %Shuff, i32 7
+ %Shuff36 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %Shuff28, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
+ %I37 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 0
+ %B38 = xor <8 x i64> %B9, %B9
+ %ZE = zext i32 0 to i64
+ %Sl39 = select i1 %Cmp33, i8 %L11, i8 %L5
+ %Cmp40 = icmp sgt i1 %Cmp, false
+ br i1 %Cmp40, label %CF80, label %CF81
+
+CF81: ; preds = %CF81, %CF85, %CF87, %CF88
+ %L41 = load i8* %0
+ store i8 %L34, i8* %0
+ %E42 = extractelement <8 x i64> %Shuff13, i32 6
+ %Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 7>
+ %I44 = insertelement <4 x i64> zeroinitializer, i64 116376, i32 3
+ %B45 = fsub float %FC16, 0x3AC86DCC40000000
+ %Tr = trunc <4 x i64> %I14 to <4 x i32>
+ %Sl46 = select i1 false, <8 x i64> %B38, <8 x i64> zeroinitializer
+ %Cmp47 = icmp sgt i1 %Cmp18, %Cmp18
+ br i1 %Cmp47, label %CF81, label %CF85
+
+CF85: ; preds = %CF81
+ %L48 = load i8* %0
+ store i8 -101, i8* %0
+ %E49 = extractelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i32 2
+ %Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
+ %I51 = insertelement <4 x i64> zeroinitializer, i64 %E20, i32 3
+ %B52 = or i32 336955, %Sl24
+ %FC53 = uitofp i8 %L48 to double
+ %Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24
+ %Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer
+ %L56 = load i8* %0
+ store i8 %L11, i8* %0
+ %E57 = extractelement <4 x i64> %Shuff21, i32 1
+ %Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2>
+ %I59 = insertelement <4 x i64> zeroinitializer, i64 %E42, i32 2
+ %B60 = udiv <8 x i8> %Sl, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %Tr61 = trunc i8 49 to i1
+ br i1 %Tr61, label %CF81, label %CF84
+
+CF84: ; preds = %CF84, %CF85
+ %Sl62 = select i1 false, i8 %L, i8 %L48
+ %Cmp63 = icmp ne <8 x i64> %I, zeroinitializer
+ %L64 = load i8* %0
+ store i8 %5, i8* %0
+ %E65 = extractelement <8 x i1> %Cmp55, i32 0
+ br i1 %E65, label %CF84, label %CF87
+
+CF87: ; preds = %CF84
+ %Shuff66 = shufflevector <4 x i64> %Shuff21, <4 x i64> %I14, <4 x i32> <i32 3, i32 undef, i32 7, i32 1>
+ %I67 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %Sl54, i32 1
+ %B68 = frem double %B23, %Sl17
+ %ZE69 = zext <8 x i8> %Sl32 to <8 x i64>
+ %Sl70 = select i1 %Tr61, i64 %E20, i64 %E12
+ %Cmp71 = icmp slt <8 x i64> %I, %Shuff
+ %L72 = load i8* %0
+ store i8 %L72, i8* %0
+ %E73 = extractelement <8 x i1> %Cmp55, i32 6
+ br i1 %E73, label %CF81, label %CF82
+
+CF82: ; preds = %CF82, %CF87
+ %Shuff74 = shufflevector <4 x i32> %I67, <4 x i32> %I29, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
+ %I75 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 3
+ %B76 = fsub double 0.000000e+00, %FC53
+ %Tr77 = trunc i32 %E to i8
+ %Sl78 = select i1 %Cmp18, i64* %A2, i64* %2
+ %Cmp79 = icmp eq i32 394647, 492085
+ br i1 %Cmp79, label %CF82, label %CF86
+
+CF86: ; preds = %CF82
+ store i64 %Sl70, i64* %Sl78
+ store i64 %E57, i64* %Sl78
+ store i64 %Sl70, i64* %Sl78
+ store i64 %B, i64* %Sl78
+ store i64 %Sl10, i64* %Sl78
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
new file mode 100644
index 0000000000000..96547d90cb401
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
@@ -0,0 +1,143 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; "Type for zero vector elements is not legal" assertion.
+; It should at least successfully build.
+
+define void @autogen_SD3926023935(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i1
+ %A3 = alloca float
+ %A2 = alloca double
+ %A1 = alloca float
+ %A = alloca double
+ %L = load i8* %0
+ store i8 -123, i8* %0
+ %E = extractelement <4 x i64> zeroinitializer, i32 1
+ %Shuff = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 0
+ %BC = bitcast i64 181325 to double
+ %Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer
+ %Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer
+ %L5 = load i8* %0
+ store i8 %L, i8* %0
+ %E6 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 2, i32 0>
+ %I8 = insertelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 498254, i32 4
+ %B = shl i32 0, 364464
+ %Sl9 = select i1 false, i64 %E, i64 498254
+ %Cmp10 = icmp sge i8 -123, %5
+ br label %CF80
+
+CF80: ; preds = %BB
+ %L11 = load i8* %0
+ store i8 -123, i8* %0
+ %E12 = extractelement <2 x i16> zeroinitializer, i32 1
+ %Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %I14 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B, i32 2
+ %B15 = sdiv i64 334618, -1
+ %PC = bitcast i1* %A4 to i64*
+ %Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ %Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16
+ %L18 = load double* %A2
+ store i64 498254, i64* %PC
+ %E19 = extractelement <4 x i64> zeroinitializer, i32 0
+ %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
+ %I21 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
+ %B22 = fadd double 0.000000e+00, %BC
+ %ZE = zext <2 x i1> %Shuff20 to <2 x i32>
+ %Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer
+ %Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer
+ %L25 = load i8* %0
+ store i8 %L25, i8* %0
+ %E26 = extractelement <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, i32 3
+ %Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> <i32 6, i32 0, i32 undef, i32 4>
+ %I28 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 0
+ %B29 = lshr i8 %E26, -43
+ %Tr = trunc i8 %L5 to i1
+ br label %CF79
+
+CF79: ; preds = %CF80
+ %Sl30 = select i1 false, i8 %B29, i8 -123
+ %Cmp31 = icmp sge <2 x i1> %I, %I
+ %L32 = load i64* %PC
+ store i8 -123, i8* %0
+ %E33 = extractelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 2
+ %Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
+ %I35 = insertelement <4 x i64> zeroinitializer, i64 498254, i32 3
+ %B36 = sub <8 x i64> %I8, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %PC37 = bitcast i8* %0 to i1*
+ %Sl38 = select i1 %Cmp10, i8 -43, i8 %L5
+ %Cmp39 = icmp eq i64 498254, %B15
+ br label %CF
+
+CF: ; preds = %CF, %CF79
+ %L40 = load double* %A
+ store i1 %Cmp39, i1* %PC37
+ %E41 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> <i32 2, i32 undef>
+ %I43 = insertelement <4 x i32> %Shuff, i32 %3, i32 0
+ %B44 = shl i64 %E41, -1
+ %Se = sext <2 x i1> %I to <2 x i32>
+ %Sl45 = select i1 %Cmp10, i1 false, i1 false
+ br i1 %Sl45, label %CF, label %CF77
+
+CF77: ; preds = %CF77, %CF
+ %Cmp46 = fcmp uno double 0.000000e+00, 0.000000e+00
+ br i1 %Cmp46, label %CF77, label %CF78
+
+CF78: ; preds = %CF78, %CF83, %CF82, %CF77
+ %L47 = load i64* %PC
+ store i8 -123, i8* %0
+ %E48 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
+ %I50 = insertelement <2 x i1> zeroinitializer, i1 %Cmp10, i32 0
+ %B51 = sdiv i64 %E19, 463132
+ %Tr52 = trunc i64 %E48 to i32
+ %Sl53 = select i1 %Tr, i1 %Cmp46, i1 %Cmp10
+ br i1 %Sl53, label %CF78, label %CF83
+
+CF83: ; preds = %CF78
+ %Cmp54 = fcmp uge double %L40, %L40
+ br i1 %Cmp54, label %CF78, label %CF82
+
+CF82: ; preds = %CF83
+ %L55 = load i64* %PC
+ store i64 %L32, i64* %PC
+ %E56 = extractelement <2 x i16> %Shuff7, i32 1
+ %Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I58 = insertelement <2 x i32> %Sl, i32 %Tr52, i32 0
+ %B59 = or i32 %B, %3
+ %FC = sitofp i64 498254 to double
+ %Sl60 = select i1 false, i64 %E6, i64 -1
+ %Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43
+ %L62 = load i64* %PC
+ store i64 %Sl9, i64* %PC
+ %E63 = extractelement <2 x i32> %ZE, i32 0
+ %Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
+ %I65 = insertelement <4 x i32> %Shuff, i32 %3, i32 3
+ %B66 = sub i64 %L47, 53612
+ %Tr67 = trunc i64 %4 to i32
+ %Sl68 = select i1 %Cmp39, i1 %Cmp39, i1 false
+ br i1 %Sl68, label %CF78, label %CF81
+
+CF81: ; preds = %CF82
+ %Cmp69 = icmp ne <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
+ %L70 = load i8* %0
+ store i64 %L55, i64* %PC
+ %E71 = extractelement <4 x i32> %Shuff49, i32 1
+ %Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %I73 = insertelement <4 x i64> %Shuff64, i64 %E, i32 2
+ %B74 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
+ %Sl75 = select i1 %Sl68, i64 %B51, i64 %L55
+ %Cmp76 = icmp sgt <8 x i64> %B74, %B36
+ store i1 %Cmp39, i1* %PC37
+ store i64 %E41, i64* %PC
+ store i64 %L32, i64* %PC
+ store i64 %Sl75, i64* %2
+ store i64 %L32, i64* %PC
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
new file mode 100644
index 0000000000000..bef75f3645c8a
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
@@ -0,0 +1,152 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed to select instructions for extract_vector_elt for
+; v4f32 on MSA.
+; It should at least successfully build.
+
+define void @autogen_SD3997499501(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <1 x double>
+ %A3 = alloca double
+ %A2 = alloca float
+ %A1 = alloca double
+ %A = alloca double
+ %L = load i8* %0
+ store i8 97, i8* %0
+ %E = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
+ %Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I = insertelement <4 x i64> zeroinitializer, i64 0, i32 3
+ %Tr = trunc <1 x i64> zeroinitializer to <1 x i8>
+ %Sl = select i1 false, double* %A1, double* %A
+ %Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer
+ %L5 = load double* %Sl
+ store float -4.374162e+06, float* %A2
+ %E6 = extractelement <4 x i64> zeroinitializer, i32 3
+ %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
+ %I8 = insertelement <2 x i1> %Shuff, i1 false, i32 0
+ %B = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %PC = bitcast float* %A2 to float*
+ %Sl9 = select i1 false, i32 82299, i32 0
+ %Cmp10 = icmp slt i8 97, %5
+ br label %CF72
+
+CF72: ; preds = %CF72, %CF80, %CF78, %BB
+ %L11 = load double* %Sl
+ store double 0.000000e+00, double* %Sl
+ %E12 = extractelement <2 x i1> zeroinitializer, i32 0
+ br i1 %E12, label %CF72, label %CF80
+
+CF80: ; preds = %CF72
+ %Shuff13 = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 3, i32 1>
+ %I14 = insertelement <2 x i64> zeroinitializer, i64 %4, i32 1
+ %B15 = fadd double %L5, 0.000000e+00
+ %BC = bitcast i32 0 to float
+ %Sl16 = select i1 %E12, float 0xC7957ED940000000, float %BC
+ %Cmp17 = icmp eq i32 136082, 471909
+ br i1 %Cmp17, label %CF72, label %CF77
+
+CF77: ; preds = %CF77, %CF80
+ %L18 = load double* %Sl
+ store double 0.000000e+00, double* %Sl
+ %E19 = extractelement <2 x i1> zeroinitializer, i32 0
+ br i1 %E19, label %CF77, label %CF78
+
+CF78: ; preds = %CF77
+ %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
+ %I21 = insertelement <8 x i1> zeroinitializer, i1 %Cmp10, i32 7
+ %B22 = sdiv <4 x i64> %Shuff7, zeroinitializer
+ %FC = uitofp i8 97 to double
+ %Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
+ %L24 = load double* %Sl
+ store float %Sl16, float* %PC
+ %E25 = extractelement <2 x i1> %Shuff, i32 1
+ br i1 %E25, label %CF72, label %CF76
+
+CF76: ; preds = %CF78
+ %Shuff26 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %B22, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
+ %I27 = insertelement <4 x i64> zeroinitializer, i64 %E, i32 2
+ %B28 = mul <4 x i64> %I27, zeroinitializer
+ %ZE = zext <8 x i1> zeroinitializer to <8 x i64>
+ %Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06
+ %L30 = load i8* %0
+ store double %L5, double* %Sl
+ %E31 = extractelement <8 x i1> zeroinitializer, i32 5
+ br label %CF
+
+CF: ; preds = %CF, %CF81, %CF76
+ %Shuff32 = shufflevector <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <16 x i32> <i32 8, i32 undef, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 undef, i32 26, i32 28, i32 30, i32 undef, i32 2, i32 4, i32 6>
+ %I33 = insertelement <8 x i1> zeroinitializer, i1 false, i32 2
+ %BC34 = bitcast <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> to <4 x float>
+ %Sl35 = select i1 %E12, <2 x i1> %I8, <2 x i1> zeroinitializer
+ %Cmp36 = fcmp oge double 0xC2C3BAE2D5C18360, 0xC2C3BAE2D5C18360
+ br i1 %Cmp36, label %CF, label %CF74
+
+CF74: ; preds = %CF74, %CF
+ %L37 = load float* %PC
+ store double 0.000000e+00, double* %Sl
+ %E38 = extractelement <2 x i1> %Sl23, i32 1
+ br i1 %E38, label %CF74, label %CF75
+
+CF75: ; preds = %CF75, %CF82, %CF74
+ %Shuff39 = shufflevector <2 x i1> %Shuff13, <2 x i1> zeroinitializer, <2 x i32> <i32 undef, i32 2>
+ %I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2
+ %Sl41 = select i1 %Cmp10, i32 0, i32 %3
+ %Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer
+ %L43 = load double* %Sl
+ store i64 %4, i64* %2
+ %E44 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E44, label %CF75, label %CF82
+
+CF82: ; preds = %CF75
+ %Shuff45 = shufflevector <2 x i1> %Sl23, <2 x i1> %Sl23, <2 x i32> <i32 2, i32 0>
+ %I46 = insertelement <4 x i64> zeroinitializer, i64 0, i32 0
+ %B47 = sub i64 %E, %E6
+ %Sl48 = select i1 %Cmp10, double %L5, double %L43
+ %Cmp49 = icmp uge i64 %4, %B47
+ br i1 %Cmp49, label %CF75, label %CF81
+
+CF81: ; preds = %CF82
+ %L50 = load i8* %0
+ store double %L43, double* %Sl
+ %E51 = extractelement <4 x i64> %Shuff7, i32 3
+ %Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
+ %I53 = insertelement <2 x i1> %Cmp, i1 %E25, i32 0
+ %B54 = fdiv double %L24, %L43
+ %BC55 = bitcast <4 x i64> zeroinitializer to <4 x double>
+ %Sl56 = select i1 false, i8 %5, i8 97
+ %L57 = load i8* %0
+ store i8 %L50, i8* %0
+ %E58 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E58, label %CF, label %CF73
+
+CF73: ; preds = %CF73, %CF81
+ %Shuff59 = shufflevector <2 x i1> %Shuff13, <2 x i1> %Shuff45, <2 x i32> <i32 undef, i32 0>
+ %I60 = insertelement <4 x float> %Shuff52, float -4.374162e+06, i32 0
+ %B61 = mul <4 x i64> %I46, zeroinitializer
+ %PC62 = bitcast double* %A3 to float*
+ %Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer
+ %Cmp64 = icmp ne <2 x i1> %Cmp, %Shuff
+ %L65 = load double* %A1
+ store float -4.374162e+06, float* %PC62
+ %E66 = extractelement <8 x i1> %I21, i32 3
+ br i1 %E66, label %CF73, label %CF79
+
+CF79: ; preds = %CF79, %CF73
+ %Shuff67 = shufflevector <8 x i1> %I21, <8 x i1> %I21, <8 x i32> <i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 undef, i32 4>
+ %I68 = insertelement <1 x i1> %Cmp42, i1 %E25, i32 0
+ %B69 = sdiv <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+ %Sl70 = select i1 %Cmp49, <2 x i1> %Sl23, <2 x i1> %Shuff45
+ %Cmp71 = icmp ne i1 false, false
+ br i1 %Cmp71, label %CF79, label %CF83
+
+CF83: ; preds = %CF79
+ store double 0.000000e+00, double* %Sl
+ store float %BC, float* %PC62
+ store double %Sl48, double* %Sl
+ store double %FC, double* %Sl
+ store float %BC, float* %PC62
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
new file mode 100644
index 0000000000000..24e27cbf14b83
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test is based on an llvm-stress generated test case with seed=449609655
+
+; This test originally failed for MSA with a
+; "Comparison requires equal bit widths" assertion.
+; The legalizer legalized the <4 x i8>'s into <4 x i32>'s, then a call to
+; isVSplat() returned the splat value for <i8 -1, i8 -1, ...> as a 32-bit APInt
+; (255), but the zeroinitializer splat value as an 8-bit APInt (0). The
+; assertion occurred when trying to check that the values were bitwise inverses
+; of each other.
+;
+; It should at least successfully build.
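+;
+; A rough sketch of the mismatch (our reading, with APInt values written as
+; (bit-width, value) pairs): the all-ones operand's splat value comes back as
+; (32, 255) because its i8 elements were widened to i32, while the
+; zeroinitializer's splat value comes back as (8, 0); checking that one is the
+; bitwise inverse of the other compares APInts of different widths and asserts.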
+
+define void @autogen_SD449609655(i8) {
+BB:
+ %Cmp = icmp ult i8 -3, %0
+ br label %CF78
+
+CF78: ; preds = %CF81, %CF78, %BB
+ %Sl31 = select i1 %Cmp, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8> zeroinitializer
+ br i1 undef, label %CF78, label %CF81
+
+CF81: ; preds = %CF78
+ br i1 undef, label %CF78, label %CF80
+
+CF80: ; preds = %CF81
+ %I59 = insertelement <4 x i8> %Sl31, i8 undef, i32 1
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
new file mode 100644
index 0000000000000..697871df797df
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
@@ -0,0 +1,139 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed for MSA with a
+; `Num < NumOperands && "Invalid child # of SDNode!"' assertion.
+; It should at least successfully build.
+
+define void @autogen_SD525530439(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca i32
+ %A3 = alloca double
+ %A2 = alloca <1 x double>
+ %A1 = alloca <8 x double>
+ %A = alloca i64
+ %L = load i8* %0
+ store i64 33695, i64* %A
+ %E = extractelement <4 x i32> zeroinitializer, i32 3
+ %Shuff = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 2, i32 0>
+ %I = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
+ %B = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %ZE = fpext float 0x3B64A2B880000000 to double
+ %Sl = select i1 true, i16 -1, i16 -11642
+ %L5 = load i8* %0
+ store i8 0, i8* %0
+ %E6 = extractelement <4 x i32> zeroinitializer, i32 2
+ %Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> <i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 undef>
+ %I8 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
+ %B9 = sub i32 71140, 439732
+ %BC = bitcast <2 x i32> <i32 -1, i32 -1> to <2 x float>
+ %Sl10 = select i1 true, i32* %1, i32* %1
+ %Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer
+ %L11 = load i32* %Sl10
+ store <1 x double> zeroinitializer, <1 x double>* %A2
+ %E12 = extractelement <4 x i16> zeroinitializer, i32 0
+ %Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef
+ %I14 = insertelement <1 x i16> zeroinitializer, i16 %Sl, i32 0
+ %B15 = or i16 -1, %E12
+ %BC16 = bitcast <4 x i32> zeroinitializer to <4 x float>
+ %Sl17 = select i1 true, i64 %4, i64 %4
+ %Cmp18 = fcmp ugt float 0xC5ABB1BF80000000, 0x3EEF3D6300000000
+ br label %CF75
+
+CF75: ; preds = %CF75, %BB
+ %L19 = load i32* %Sl10
+ store i32 %L11, i32* %Sl10
+ %E20 = extractelement <4 x i32> zeroinitializer, i32 1
+ %Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
+ %I22 = insertelement <4 x float> %BC16, float 0x3EEF3D6300000000, i32 2
+ %B23 = shl i32 71140, 439732
+ %ZE24 = fpext <4 x float> %I22 to <4 x double>
+ %Sl25 = select i1 %Cmp18, i32 %L11, i32 %L11
+ %Cmp26 = icmp ne i32 %E20, %L19
+ br i1 %Cmp26, label %CF75, label %CF76
+
+CF76: ; preds = %CF75
+ %L27 = load i32* %Sl10
+ store i32 439732, i32* %Sl10
+ %E28 = extractelement <4 x i32> %Shuff21, i32 3
+ %Shuff29 = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0>
+ %I30 = insertelement <8 x i1> %Shuff7, i1 %Cmp18, i32 4
+ %Sl31 = select i1 %Cmp18, i32 %3, i32 %B23
+ %Cmp32 = icmp ugt i32 0, %3
+ br label %CF74
+
+CF74: ; preds = %CF74, %CF80, %CF78, %CF76
+ %L33 = load i64* %2
+ store i32 71140, i32* %Sl10
+ %E34 = extractelement <4 x i32> zeroinitializer, i32 1
+ %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef
+ %I36 = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
+ %B37 = mul <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %Shuff29
+ %Sl38 = select i1 %Cmp18, double 0.000000e+00, double 0x2BA9DB480DA732C6
+ %Cmp39 = icmp sgt i16 -11642, %Sl
+ br i1 %Cmp39, label %CF74, label %CF80
+
+CF80: ; preds = %CF74
+ %L40 = load i8* %0
+ store i32 0, i32* %Sl10
+ %E41 = extractelement <8 x i64> zeroinitializer, i32 1
+ %Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef
+ %I43 = insertelement <4 x i16> %I36, i16 -11642, i32 0
+ %FC = fptoui float 0x455CA2B080000000 to i16
+ %Sl44 = select i1 %Cmp18, i1 %Cmp18, i1 %Cmp39
+ br i1 %Sl44, label %CF74, label %CF78
+
+CF78: ; preds = %CF80
+ %L45 = load i32* %Sl10
+ store i8 %L5, i8* %0
+ %E46 = extractelement <8 x i1> %Shuff7, i32 2
+ br i1 %E46, label %CF74, label %CF77
+
+CF77: ; preds = %CF77, %CF78
+ %Shuff47 = shufflevector <4 x i16> %I43, <4 x i16> zeroinitializer, <4 x i32> <i32 5, i32 undef, i32 1, i32 3>
+ %I48 = insertelement <1 x i16> %Shuff42, i16 %Sl, i32 0
+ %B49 = mul i8 0, %L40
+ %FC50 = uitofp i32 %3 to double
+ %Sl51 = select i1 %Sl44, i32 %L27, i32 0
+ %Cmp52 = icmp sge i8 %B49, 0
+ br i1 %Cmp52, label %CF77, label %CF79
+
+CF79: ; preds = %CF77
+ %L53 = load i32* %Sl10
+ store i8 %L40, i8* %0
+ %E54 = extractelement <4 x i32> zeroinitializer, i32 1
+ %Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> <i32 4, i32 6, i32 undef, i32 2>
+ %I56 = insertelement <4 x i32> zeroinitializer, i32 %Sl51, i32 2
+ %Tr = trunc <1 x i64> %Shuff13 to <1 x i16>
+ %Sl57 = select i1 %Cmp18, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>
+ %Cmp58 = icmp uge <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %I56
+ %L59 = load i8* %0
+ store <1 x double> zeroinitializer, <1 x double>* %A2
+ %E60 = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef>
+ %I62 = insertelement <4 x i16> zeroinitializer, i16 %E12, i32 1
+ %B63 = and <4 x i32> %Shuff61, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %PC = bitcast double* %A3 to i32*
+ %Sl64 = select i1 %Cmp18, <4 x i32> %Shuff61, <4 x i32> %Shuff55
+ %Cmp65 = icmp sgt i32 439732, %3
+ br label %CF
+
+CF: ; preds = %CF79
+ %L66 = load i32* %Sl10
+ store i32 %E6, i32* %PC
+ %E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
+ %Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef>
+ %I69 = insertelement <4 x i16> %Shuff47, i16 %Sl, i32 3
+ %B70 = sdiv <4 x i64> zeroinitializer, zeroinitializer
+ %FC71 = sitofp i32 %L66 to double
+ %Sl72 = select i1 %Cmp18, i64 %4, i64 %4
+ %Cmp73 = icmp eq <4 x i64> zeroinitializer, %B70
+ store i32 %B23, i32* %PC
+ store i32 %3, i32* %PC
+ store i32 %3, i32* %Sl10
+ store i32 %L27, i32* %1
+ store i32 0, i32* %PC
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
new file mode 100644
index 0000000000000..dc4200ad42851
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
@@ -0,0 +1,143 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed to select instructions for extract_vector_elt for
+; v2f64 on MSA.
+; It should at least successfully build.
+
+define void @autogen_SD997348632(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca <2 x i32>
+ %A3 = alloca <16 x i16>
+ %A2 = alloca <4 x i1>
+ %A1 = alloca <4 x i16>
+ %A = alloca <2 x i32>
+ %L = load i8* %0
+ store i8 %L, i8* %0
+ %E = extractelement <4 x i32> zeroinitializer, i32 0
+ %Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 1, i32 3, i32 5>
+ %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
+ %FC = sitofp <4 x i32> zeroinitializer to <4 x double>
+ %Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff
+ %L5 = load i8* %0
+ store i8 %5, i8* %0
+ %E6 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> <i32 1, i32 undef>
+ %I8 = insertelement <1 x i16> zeroinitializer, i16 0, i32 0
+ %B = xor i32 376034, %3
+ %FC9 = fptoui float 0x406DB70180000000 to i64
+ %Sl10 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer
+ %L11 = load i8* %0
+ store i8 %L, i8* %0
+ %E12 = extractelement <4 x i64> zeroinitializer, i32 2
+ %Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 undef, i32 3>
+ %I14 = insertelement <8 x i32> zeroinitializer, i32 -1, i32 7
+ %B15 = fdiv <4 x double> %FC, %FC
+ %Tr = trunc i32 376034 to i16
+ %Sl16 = select i1 false, <8 x i32> %Sl10, <8 x i32> zeroinitializer
+ %Cmp17 = icmp uge i32 233658, %E
+ br label %CF
+
+CF: ; preds = %CF, %CF79, %CF84, %BB
+ %L18 = load i8* %0
+ store i8 %L, i8* %0
+ %E19 = extractelement <4 x i64> %Sl, i32 3
+ %Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> <i32 2, i32 0>
+ %I21 = insertelement <4 x i64> zeroinitializer, i64 %FC9, i32 0
+ %B22 = xor <8 x i32> %I14, %I14
+ %Tr23 = trunc i16 0 to i8
+ %Sl24 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> zeroinitializer
+ %Cmp25 = icmp eq i1 false, false
+ br i1 %Cmp25, label %CF, label %CF79
+
+CF79: ; preds = %CF
+ %L26 = load i8* %0
+ store i8 %L26, i8* %0
+ %E27 = extractelement <1 x i16> zeroinitializer, i32 0
+ %Shuff28 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
+ %I29 = insertelement <16 x i32> %Shuff28, i32 %B, i32 15
+ %B30 = fdiv float 0.000000e+00, -6.749110e+06
+ %Sl31 = select i1 false, i32 %3, i32 %3
+ %Cmp32 = fcmp uno float 0.000000e+00, 0x406DB70180000000
+ br i1 %Cmp32, label %CF, label %CF78
+
+CF78: ; preds = %CF78, %CF79
+ %L33 = load i8* %0
+ store i8 %L, i8* %0
+ %E34 = extractelement <16 x i32> %Shuff28, i32 1
+ %Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> <i32 undef, i32 6, i32 0, i32 2>
+ %I36 = insertelement <4 x double> %FC, double 0xA4A57F449CA36CC2, i32 2
+ %Se = sext <4 x i1> %Cmp to <4 x i32>
+ %Sl37 = select i1 %Cmp17, i32 0, i32 0
+ %Cmp38 = icmp ne i32 440284, 376034
+ br i1 %Cmp38, label %CF78, label %CF80
+
+CF80: ; preds = %CF80, %CF82, %CF78
+ %L39 = load i8* %0
+ store i8 %L, i8* %0
+ %E40 = extractelement <2 x i1> %Shuff20, i32 1
+ br i1 %E40, label %CF80, label %CF82
+
+CF82: ; preds = %CF80
+ %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff20, <2 x i32> <i32 2, i32 0>
+ %I42 = insertelement <2 x i1> %Shuff41, i1 false, i32 0
+ %B43 = sub i32 %E, 0
+ %Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28
+ %Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21
+ %L46 = load i8* %0
+ store i8 %L11, i8* %0
+ %E47 = extractelement <8 x i32> %Sl16, i32 4
+ %Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> <i32 undef, i32 1>
+ %I49 = insertelement <2 x i1> %Shuff48, i1 %Cmp17, i32 1
+ %B50 = and <8 x i32> %I14, %Sl10
+ %FC51 = fptoui float -6.749110e+06 to i1
+ br i1 %FC51, label %CF80, label %CF81
+
+CF81: ; preds = %CF81, %CF82
+ %Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000
+ %Cmp53 = icmp uge <2 x i32> <i32 -1, i32 -1>, <i32 -1, i32 -1>
+ %L54 = load i8* %0
+ store i8 %L5, i8* %0
+ %E55 = extractelement <8 x i32> zeroinitializer, i32 7
+ %Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 4, i32 6, i32 0>
+ %I57 = insertelement <2 x i1> %Shuff7, i1 false, i32 0
+ %B58 = fmul <4 x double> %FC, %FC
+ %FC59 = fptoui <4 x double> %I36 to <4 x i16>
+ %Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57
+ %Cmp61 = icmp ule <8 x i32> %B50, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %L62 = load i8* %0
+ store i8 %L33, i8* %0
+ %E63 = extractelement <4 x i64> %Shuff, i32 2
+ %Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef>
+ %I65 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
+ %B66 = sdiv i32 %B, %E55
+ %Tr67 = trunc i8 %L54 to i1
+ br i1 %Tr67, label %CF81, label %CF83
+
+CF83: ; preds = %CF83, %CF81
+ %Sl68 = select i1 %Cmp17, i1 %Cmp25, i1 %Tr67
+ br i1 %Sl68, label %CF83, label %CF84
+
+CF84: ; preds = %CF83
+ %Cmp69 = icmp uge i32 %E, %E34
+ br i1 %Cmp69, label %CF, label %CF77
+
+CF77: ; preds = %CF84
+ %L70 = load i8* %0
+ store i8 %L, i8* %0
+ %E71 = extractelement <4 x i64> %Shuff, i32 0
+ %Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
+ %I73 = insertelement <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B66, i32 1
+ %FC74 = uitofp i1 %Cmp32 to double
+ %Sl75 = select i1 %FC51, i16 9704, i16 0
+ %Cmp76 = icmp ugt <1 x i16> %I8, %I8
+ store i8 %L39, i8* %0
+ store i8 %5, i8* %0
+ store i8 %Tr23, i8* %0
+ store i8 %L, i8* %0
+ store i8 %5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll b/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
new file mode 100644
index 0000000000000..8c4fcbad65b44
--- /dev/null
+++ b/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=mips < %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s
+; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s
+
+; This test originally failed to select code for a truncstore of a
+; build_vector.
+; It should at least successfully build.
+
+define void @autogen_SD742806235(i8*, i32*, i64*, i32, i64, i8) {
+BB:
+ %A4 = alloca double
+ %A3 = alloca double
+ %A2 = alloca <8 x i8>
+ %A1 = alloca <4 x float>
+ %A = alloca i1
+ store i8 %5, i8* %0
+ store i8 %5, i8* %0
+ store i8 %5, i8* %0
+ store <8 x i8> <i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1>, <8 x i8>* %A2
+ store i8 %5, i8* %0
+ ret void
+}
diff --git a/test/CodeGen/Mips/msa/shift-dagcombine.ll b/test/CodeGen/Mips/msa/shift-dagcombine.ll
new file mode 100644
index 0000000000000..0d809fb4fbf17
--- /dev/null
+++ b/test/CodeGen/Mips/msa/shift-dagcombine.ll
@@ -0,0 +1,70 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @ashr_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: ashr_v4i32:
+
+ %1 = ashr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sra
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
+ ; CHECK-NOT: sra
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = ashr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sra
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -2
+ ; CHECK-NOT: sra
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size ashr_v4i32
+}
+
+define void @lshr_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: lshr_v4i32:
+
+ %1 = lshr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: srl
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
+ ; CHECK-NOT: srl
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
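+ ; Note: lshr of <-2, -4, -8, -16> by <0, 1, 2, 3> folds to
+ ; <0xFFFFFFFE, 0x7FFFFFFE, 0x3FFFFFFE, 0x1FFFFFFE>, which is not a splat, so
+ ; unlike the splat results above it cannot be materialized with ldi.w and is
+ ; instead loaded from memory (the ld.w from a %lo address checked below).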
+ %2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: srl
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], %lo
+ ; CHECK-NOT: srl
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size lshr_v4i32
+}
+
+define void @shl_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: shl_v4i32:
+
+ %1 = shl <4 x i32> <i32 8, i32 4, i32 2, i32 1>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sll
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 8
+ ; CHECK-NOT: sll
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = shl <4 x i32> <i32 -8, i32 -4, i32 -2, i32 -1>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sll
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -8
+ ; CHECK-NOT: sll
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size shl_v4i32
+}
diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll
new file mode 100644
index 0000000000000..316c669c3ac68
--- /dev/null
+++ b/test/CodeGen/Mips/msa/shuffle.ll
@@ -0,0 +1,803 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_0
+}
+
+define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_1:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_1
+}
+
+define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_2:
+
+ %1 = load <16 x i8>* %a
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_2
+}
+
+define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_3:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_3
+}
+
+define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: vshf_v16i8_4:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
+ ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v16i8_4
+}
+
+define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_0
+}
+
+define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_1:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_1
+}
+
+define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_2:
+
+ %1 = load <8 x i16>* %a
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_2
+}
+
+define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_3:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_3
+}
+
+define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: vshf_v8i16_4:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
+ ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v8i16_4
+}
+
+; Note: a v4i32 vector contains only one 4-element set, so every single-vector
+; shuffle can already be encoded in shf.w's immediate; it is therefore
+; impossible to get a vshf.w instruction when using a single vector.
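+;
+; For reference (our reading of the shf encoding, not stated in this file):
+; the 8-bit immediate packs four 2-bit element selectors, with result element
+; i taken from source element (imm >> (2 * i)) & 3. E.g. the reversal mask
+; <3, 2, 1, 0> encodes as 0b00011011 = 27 and the splat mask <1, 1, 1, 1> as
+; 0b01010101 = 85, matching the shf.w immediates checked in the tests below.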
+
+define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_0
+}
+
+define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_1:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_1
+}
+
+define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_2:
+
+ %1 = load <4 x i32>* %a
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_2
+}
+
+define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_3:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.w [[R3]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_3
+}
+
+define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: vshf_v4i32_4:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v4i32_4
+}
+
+define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_0
+}
+
+define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_1:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_1
+}
+
+define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_2:
+
+ %1 = load <2 x i64>* %a
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_2
+}
+
+define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_3:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_3
+}
+
+define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: vshf_v2i64_4:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
+ ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size vshf_v2i64_4
+}
+
+define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: shf_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 2, i32 0, i32 5, i32 7, i32 6, i32 4, i32 9, i32 11, i32 10, i32 8, i32 13, i32 15, i32 14, i32 12>
+ ; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size shf_v16i8_0
+}
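+; Worked encoding: the per-group selectors (1, 3, 2, 0) pack to
+; 0b00_10_11_01 = 45, applied to each group of four bytes.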
+
+define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: shf_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ; CHECK-DAG: shf.h [[R3:\$w[0-9]+]], [[R1]], 27
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size shf_v8i16_0
+}
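+; Worked encoding: selectors (3, 2, 1, 0) pack to 0b00_01_10_11 = 27,
+; reversing each group of four halfwords here (and each group of four
+; words in the test below).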
+
+define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: shf_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size shf_v4i32_0
+}
+
+; shf.d does not exist
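+; (The four 2-bit selectors in the immediate can only describe a permutation
+; of four elements, so the format exists for .b, .h, and .w but a .d variant
+; cannot be encoded; v2i64 shuffles use vshf.d, splati.d, or the ilv*/pck*
+; instructions instead.)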
+
+define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvev_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v16i8_0
+}
+
+define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvev_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v8i16_0
+}
+
+define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvev_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v4i32_0
+}
+
+define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvev_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
+ ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvev_v2i64_0
+}
+
+define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvod_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v16i8_0
+}
+
+define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvod_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v8i16_0
+}
+
+define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvod_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v4i32_0
+}
+
+define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvod_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
+ ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvod_v2i64_0
+}
+
+define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvl_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v16i8_0
+}
+
+define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvl_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v8i16_0
+}
+
+define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvl_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v4i32_0
+}
+
+define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvl_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
+ ; ilvl.d and ilvev.d are equivalent for v2i64
+ ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvl_v2i64_0
+}
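+; With only two elements per operand, the ilvl.d mask <0, 2> selects element 0
+; of each vector, which is exactly what ilvev.d produces, so a single
+; instruction covers both; ilvr.d and pckod.d degenerate to ilvod.d, and
+; pckev.d to ilvev.d, in the same way in the tests below.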
+
+define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: ilvr_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v16i8_0
+}
+
+define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: ilvr_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v8i16_0
+}
+
+define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: ilvr_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v4i32_0
+}
+
+define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: ilvr_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
+ ; ilvr.d and ilvod.d are equivalent for v2i64
+ ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size ilvr_v2i64_0
+}
+
+define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: pckev_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v16i8_0
+}
+
+define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: pckev_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v8i16_0
+}
+
+define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: pckev_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v4i32_0
+}
+
+define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: pckev_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
+ ; pckev.d and ilvev.d are equivalent for v2i64
+ ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckev_v2i64_0
+}
+
+define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+ ; CHECK: pckod_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <16 x i8>* %b
+ ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
+ <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <16 x i8> %3, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v16i8_0
+}
+
+define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+ ; CHECK: pckod_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <8 x i16>* %b
+ ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <8 x i16> %3, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v8i16_0
+}
+
+define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+ ; CHECK: pckod_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <4 x i32>* %b
+ ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <4 x i32> %3, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v4i32_0
+}
+
+define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+ ; CHECK: pckod_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = load <2 x i64>* %b
+ ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
+ %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
+ ; pckod.d and ilvod.d are equivalent for v2i64
+ ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
+ store <2 x i64> %3, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size pckod_v2i64_0
+}
+
+define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+ ; CHECK: splati_v16i8_0:
+
+ %1 = load <16 x i8>* %a
+ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <16 x i8> %1, <16 x i8> undef,
+ <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][4]
+ store <16 x i8> %2, <16 x i8>* %c
+ ; CHECK-DAG: st.b [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v16i8_0
+}
+
+define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+ ; CHECK: splati_v8i16_0:
+
+ %1 = load <8 x i16>* %a
+ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4]
+ store <8 x i16> %2, <8 x i16>* %c
+ ; CHECK-DAG: st.h [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v8i16_0
+}
+
+define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+ ; CHECK: splati_v4i32_0:
+
+ %1 = load <4 x i32>* %a
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ; shf.w and splati.w are equivalent
+ ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 255
+ store <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v4i32_0
+}
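+; Worked encoding: broadcasting element 3 needs selectors (3, 3, 3, 3), i.e.
+; an immediate of 0b11_11_11_11 = 255, which is why shf.w can stand in for
+; splati.w here.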
+
+define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+ ; CHECK: splati_v2i64_0:
+
+ %1 = load <2 x i64>* %a
+ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
+ %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
+ store <2 x i64> %2, <2 x i64>* %c
+ ; CHECK-DAG: st.d [[R3]], 0($4)
+
+ ret void
+ ; CHECK: .size splati_v2i64_0
+}
diff --git a/test/CodeGen/Mips/msa/special.ll b/test/CodeGen/Mips/msa/special.ll
new file mode 100644
index 0000000000000..60a4369dfb1c5
--- /dev/null
+++ b/test/CodeGen/Mips/msa/special.ll
@@ -0,0 +1,26 @@
+; Test the MSA intrinsics that are encoded with the SPECIAL instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define i32 @llvm_mips_lsa_test(i32 %a, i32 %b) nounwind {
+entry:
+ %0 = tail call i32 @llvm.mips.lsa(i32 %a, i32 %b, i32 2)
+ ret i32 %0
+}
+
+declare i32 @llvm.mips.lsa(i32, i32, i32) nounwind
+
+; CHECK: llvm_mips_lsa_test:
+; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2
+; CHECK: .size llvm_mips_lsa_test
+
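+; The same shifted-add should be selected for the equivalent shl-plus-add
+; pattern: lsa adds one operand to the other shifted left by the immediate
+; scale, so %a + (%b << 2) below maps onto a single lsa with scale 2.
+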
+define i32 @lsa_test(i32 %a, i32 %b) nounwind {
+entry:
+ %0 = shl i32 %b, 2
+ %1 = add i32 %a, %0
+ ret i32 %1
+}
+
+; CHECK: lsa_test:
+; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2
+; CHECK: .size lsa_test
diff --git a/test/CodeGen/Mips/msa/spill.ll b/test/CodeGen/Mips/msa/spill.ll
new file mode 100644
index 0000000000000..66f896ac4684c
--- /dev/null
+++ b/test/CodeGen/Mips/msa/spill.ll
@@ -0,0 +1,601 @@
+; Test that the correct instructions are chosen for spill and reload by
+; keeping 33 MSA registers live simultaneously, one more than the 32
+; ($w0-$w31) that the register set provides.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <16 x i8>* %p0, i32 1
+ %p2 = getelementptr <16 x i8>* %p0, i32 2
+ %p3 = getelementptr <16 x i8>* %p0, i32 3
+ %p4 = getelementptr <16 x i8>* %p0, i32 4
+ %p5 = getelementptr <16 x i8>* %p0, i32 5
+ %p6 = getelementptr <16 x i8>* %p0, i32 6
+ %p7 = getelementptr <16 x i8>* %p0, i32 7
+ %p8 = getelementptr <16 x i8>* %p0, i32 8
+ %p9 = getelementptr <16 x i8>* %p0, i32 9
+ %p10 = getelementptr <16 x i8>* %p0, i32 10
+ %p11 = getelementptr <16 x i8>* %p0, i32 11
+ %p12 = getelementptr <16 x i8>* %p0, i32 12
+ %p13 = getelementptr <16 x i8>* %p0, i32 13
+ %p14 = getelementptr <16 x i8>* %p0, i32 14
+ %p15 = getelementptr <16 x i8>* %p0, i32 15
+ %p16 = getelementptr <16 x i8>* %p0, i32 16
+ %p17 = getelementptr <16 x i8>* %p0, i32 17
+ %p18 = getelementptr <16 x i8>* %p0, i32 18
+ %p19 = getelementptr <16 x i8>* %p0, i32 19
+ %p20 = getelementptr <16 x i8>* %p0, i32 20
+ %p21 = getelementptr <16 x i8>* %p0, i32 21
+ %p22 = getelementptr <16 x i8>* %p0, i32 22
+ %p23 = getelementptr <16 x i8>* %p0, i32 23
+ %p24 = getelementptr <16 x i8>* %p0, i32 24
+ %p25 = getelementptr <16 x i8>* %p0, i32 25
+ %p26 = getelementptr <16 x i8>* %p0, i32 26
+ %p27 = getelementptr <16 x i8>* %p0, i32 27
+ %p28 = getelementptr <16 x i8>* %p0, i32 28
+ %p29 = getelementptr <16 x i8>* %p0, i32 29
+ %p30 = getelementptr <16 x i8>* %p0, i32 30
+ %p31 = getelementptr <16 x i8>* %p0, i32 31
+ %p32 = getelementptr <16 x i8>* %p0, i32 32
+ %p33 = getelementptr <16 x i8>* %p0, i32 33
+ %0 = load <16 x i8>* %p0, align 16
+ %1 = load <16 x i8>* %p1, align 16
+ %2 = load <16 x i8>* %p2, align 16
+ %3 = load <16 x i8>* %p3, align 16
+ %4 = load <16 x i8>* %p4, align 16
+ %5 = load <16 x i8>* %p5, align 16
+ %6 = load <16 x i8>* %p6, align 16
+ %7 = load <16 x i8>* %p7, align 16
+ %8 = load <16 x i8>* %p8, align 16
+ %9 = load <16 x i8>* %p9, align 16
+ %10 = load <16 x i8>* %p10, align 16
+ %11 = load <16 x i8>* %p11, align 16
+ %12 = load <16 x i8>* %p12, align 16
+ %13 = load <16 x i8>* %p13, align 16
+ %14 = load <16 x i8>* %p14, align 16
+ %15 = load <16 x i8>* %p15, align 16
+ %16 = load <16 x i8>* %p16, align 16
+ %17 = load <16 x i8>* %p17, align 16
+ %18 = load <16 x i8>* %p18, align 16
+ %19 = load <16 x i8>* %p19, align 16
+ %20 = load <16 x i8>* %p20, align 16
+ %21 = load <16 x i8>* %p21, align 16
+ %22 = load <16 x i8>* %p22, align 16
+ %23 = load <16 x i8>* %p23, align 16
+ %24 = load <16 x i8>* %p24, align 16
+ %25 = load <16 x i8>* %p25, align 16
+ %26 = load <16 x i8>* %p26, align 16
+ %27 = load <16 x i8>* %p27, align 16
+ %28 = load <16 x i8>* %p28, align 16
+ %29 = load <16 x i8>* %p29, align 16
+ %30 = load <16 x i8>* %p30, align 16
+ %31 = load <16 x i8>* %p31, align 16
+ %32 = load <16 x i8>* %p32, align 16
+ %33 = load <16 x i8>* %p33, align 16
+ %r1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
+ %r2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1, <16 x i8> %2)
+ %r3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2, <16 x i8> %3)
+ %r4 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r3, <16 x i8> %4)
+ %r5 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r4, <16 x i8> %5)
+ %r6 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r5, <16 x i8> %6)
+ %r7 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r6, <16 x i8> %7)
+ %r8 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r7, <16 x i8> %8)
+ %r9 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r8, <16 x i8> %9)
+ %r10 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r9, <16 x i8> %10)
+ %r11 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r10, <16 x i8> %11)
+ %r12 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r11, <16 x i8> %12)
+ %r13 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r12, <16 x i8> %13)
+ %r14 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r13, <16 x i8> %14)
+ %r15 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r14, <16 x i8> %15)
+ %r16 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r15, <16 x i8> %16)
+ %r17 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r16, <16 x i8> %17)
+ %r18 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r17, <16 x i8> %18)
+ %r19 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r18, <16 x i8> %19)
+ %r20 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r19, <16 x i8> %20)
+ %r21 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r20, <16 x i8> %21)
+ %r22 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r21, <16 x i8> %22)
+ %r23 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r22, <16 x i8> %23)
+ %r24 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r23, <16 x i8> %24)
+ %r25 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r24, <16 x i8> %25)
+ %r26 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r25, <16 x i8> %26)
+ %r27 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r26, <16 x i8> %27)
+ %r28 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r27, <16 x i8> %28)
+ %r29 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r28, <16 x i8> %29)
+ %r30 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r29, <16 x i8> %30)
+ %r31 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r30, <16 x i8> %31)
+ %r32 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r31, <16 x i8> %32)
+ %r33 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r32, <16 x i8> %33)
+ %rx1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r33, <16 x i8> %1)
+ %rx2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx1, <16 x i8> %2)
+ %rx3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx2, <16 x i8> %3)
+ %rx4 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx3, <16 x i8> %4)
+ %rx5 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx4, <16 x i8> %5)
+ %rx6 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx5, <16 x i8> %6)
+ %rx7 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx6, <16 x i8> %7)
+ %rx8 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx7, <16 x i8> %8)
+ %rx9 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx8, <16 x i8> %9)
+ %rx10 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx9, <16 x i8> %10)
+ %rx11 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx10, <16 x i8> %11)
+ %rx12 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx11, <16 x i8> %12)
+ %rx13 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx12, <16 x i8> %13)
+ %rx14 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx13, <16 x i8> %14)
+ %rx15 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx14, <16 x i8> %15)
+ %rx16 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx15, <16 x i8> %16)
+ %rx17 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx16, <16 x i8> %17)
+ %rx18 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx17, <16 x i8> %18)
+ %rx19 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx18, <16 x i8> %19)
+ %rx20 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx19, <16 x i8> %20)
+ %rx21 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx20, <16 x i8> %21)
+ %rx22 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx21, <16 x i8> %22)
+ %rx23 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx22, <16 x i8> %23)
+ %rx24 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx23, <16 x i8> %24)
+ %rx25 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx24, <16 x i8> %25)
+ %rx26 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx25, <16 x i8> %26)
+ %rx27 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx26, <16 x i8> %27)
+ %rx28 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx27, <16 x i8> %28)
+ %rx29 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx28, <16 x i8> %29)
+ %rx30 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx29, <16 x i8> %30)
+ %rx31 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx30, <16 x i8> %31)
+ %rx32 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx31, <16 x i8> %32)
+ %rx33 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %rx32, <16 x i8> %33)
+ %res = call i32 @llvm.mips.copy.s.b(<16 x i8> %rx33, i32 0)
+ ret i32 %res
+}
+
+declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
+declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: test_i8:
+; CHECK: st.b {{.*}} Spill
+; CHECK: st.b {{.*}} Spill
+; CHECK: ld.b {{.*}} Reload
+; CHECK: ld.b {{.*}} Reload
+; CHECK: .size
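+; The {{.*}} Spill and {{.*}} Reload patterns match the frame-access comments
+; the assembly printer appends to spill and reload instructions, so each test
+; verifies that the MSA st.*/ld.* forms are used for the stack traffic.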
+
+define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <8 x i16>* %p0, i32 1
+ %p2 = getelementptr <8 x i16>* %p0, i32 2
+ %p3 = getelementptr <8 x i16>* %p0, i32 3
+ %p4 = getelementptr <8 x i16>* %p0, i32 4
+ %p5 = getelementptr <8 x i16>* %p0, i32 5
+ %p6 = getelementptr <8 x i16>* %p0, i32 6
+ %p7 = getelementptr <8 x i16>* %p0, i32 7
+ %p8 = getelementptr <8 x i16>* %p0, i32 8
+ %p9 = getelementptr <8 x i16>* %p0, i32 9
+ %p10 = getelementptr <8 x i16>* %p0, i32 10
+ %p11 = getelementptr <8 x i16>* %p0, i32 11
+ %p12 = getelementptr <8 x i16>* %p0, i32 12
+ %p13 = getelementptr <8 x i16>* %p0, i32 13
+ %p14 = getelementptr <8 x i16>* %p0, i32 14
+ %p15 = getelementptr <8 x i16>* %p0, i32 15
+ %p16 = getelementptr <8 x i16>* %p0, i32 16
+ %p17 = getelementptr <8 x i16>* %p0, i32 17
+ %p18 = getelementptr <8 x i16>* %p0, i32 18
+ %p19 = getelementptr <8 x i16>* %p0, i32 19
+ %p20 = getelementptr <8 x i16>* %p0, i32 20
+ %p21 = getelementptr <8 x i16>* %p0, i32 21
+ %p22 = getelementptr <8 x i16>* %p0, i32 22
+ %p23 = getelementptr <8 x i16>* %p0, i32 23
+ %p24 = getelementptr <8 x i16>* %p0, i32 24
+ %p25 = getelementptr <8 x i16>* %p0, i32 25
+ %p26 = getelementptr <8 x i16>* %p0, i32 26
+ %p27 = getelementptr <8 x i16>* %p0, i32 27
+ %p28 = getelementptr <8 x i16>* %p0, i32 28
+ %p29 = getelementptr <8 x i16>* %p0, i32 29
+ %p30 = getelementptr <8 x i16>* %p0, i32 30
+ %p31 = getelementptr <8 x i16>* %p0, i32 31
+ %p32 = getelementptr <8 x i16>* %p0, i32 32
+ %p33 = getelementptr <8 x i16>* %p0, i32 33
+ %0 = load <8 x i16>* %p0, align 16
+ %1 = load <8 x i16>* %p1, align 16
+ %2 = load <8 x i16>* %p2, align 16
+ %3 = load <8 x i16>* %p3, align 16
+ %4 = load <8 x i16>* %p4, align 16
+ %5 = load <8 x i16>* %p5, align 16
+ %6 = load <8 x i16>* %p6, align 16
+ %7 = load <8 x i16>* %p7, align 16
+ %8 = load <8 x i16>* %p8, align 16
+ %9 = load <8 x i16>* %p9, align 16
+ %10 = load <8 x i16>* %p10, align 16
+ %11 = load <8 x i16>* %p11, align 16
+ %12 = load <8 x i16>* %p12, align 16
+ %13 = load <8 x i16>* %p13, align 16
+ %14 = load <8 x i16>* %p14, align 16
+ %15 = load <8 x i16>* %p15, align 16
+ %16 = load <8 x i16>* %p16, align 16
+ %17 = load <8 x i16>* %p17, align 16
+ %18 = load <8 x i16>* %p18, align 16
+ %19 = load <8 x i16>* %p19, align 16
+ %20 = load <8 x i16>* %p20, align 16
+ %21 = load <8 x i16>* %p21, align 16
+ %22 = load <8 x i16>* %p22, align 16
+ %23 = load <8 x i16>* %p23, align 16
+ %24 = load <8 x i16>* %p24, align 16
+ %25 = load <8 x i16>* %p25, align 16
+ %26 = load <8 x i16>* %p26, align 16
+ %27 = load <8 x i16>* %p27, align 16
+ %28 = load <8 x i16>* %p28, align 16
+ %29 = load <8 x i16>* %p29, align 16
+ %30 = load <8 x i16>* %p30, align 16
+ %31 = load <8 x i16>* %p31, align 16
+ %32 = load <8 x i16>* %p32, align 16
+ %33 = load <8 x i16>* %p33, align 16
+ %r1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
+ %r2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1, <8 x i16> %2)
+ %r3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2, <8 x i16> %3)
+ %r4 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r3, <8 x i16> %4)
+ %r5 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r4, <8 x i16> %5)
+ %r6 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r5, <8 x i16> %6)
+ %r7 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r6, <8 x i16> %7)
+ %r8 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r7, <8 x i16> %8)
+ %r9 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r8, <8 x i16> %9)
+ %r10 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r9, <8 x i16> %10)
+ %r11 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r10, <8 x i16> %11)
+ %r12 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r11, <8 x i16> %12)
+ %r13 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r12, <8 x i16> %13)
+ %r14 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r13, <8 x i16> %14)
+ %r15 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r14, <8 x i16> %15)
+ %r16 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r15, <8 x i16> %16)
+ %r17 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r16, <8 x i16> %17)
+ %r18 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r17, <8 x i16> %18)
+ %r19 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r18, <8 x i16> %19)
+ %r20 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r19, <8 x i16> %20)
+ %r21 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r20, <8 x i16> %21)
+ %r22 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r21, <8 x i16> %22)
+ %r23 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r22, <8 x i16> %23)
+ %r24 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r23, <8 x i16> %24)
+ %r25 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r24, <8 x i16> %25)
+ %r26 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r25, <8 x i16> %26)
+ %r27 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r26, <8 x i16> %27)
+ %r28 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r27, <8 x i16> %28)
+ %r29 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r28, <8 x i16> %29)
+ %r30 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r29, <8 x i16> %30)
+ %r31 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r30, <8 x i16> %31)
+ %r32 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r31, <8 x i16> %32)
+ %r33 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r32, <8 x i16> %33)
+ %rx1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r33, <8 x i16> %1)
+ %rx2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx1, <8 x i16> %2)
+ %rx3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx2, <8 x i16> %3)
+ %rx4 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx3, <8 x i16> %4)
+ %rx5 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx4, <8 x i16> %5)
+ %rx6 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx5, <8 x i16> %6)
+ %rx7 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx6, <8 x i16> %7)
+ %rx8 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx7, <8 x i16> %8)
+ %rx9 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx8, <8 x i16> %9)
+ %rx10 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx9, <8 x i16> %10)
+ %rx11 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx10, <8 x i16> %11)
+ %rx12 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx11, <8 x i16> %12)
+ %rx13 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx12, <8 x i16> %13)
+ %rx14 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx13, <8 x i16> %14)
+ %rx15 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx14, <8 x i16> %15)
+ %rx16 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx15, <8 x i16> %16)
+ %rx17 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx16, <8 x i16> %17)
+ %rx18 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx17, <8 x i16> %18)
+ %rx19 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx18, <8 x i16> %19)
+ %rx20 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx19, <8 x i16> %20)
+ %rx21 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx20, <8 x i16> %21)
+ %rx22 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx21, <8 x i16> %22)
+ %rx23 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx22, <8 x i16> %23)
+ %rx24 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx23, <8 x i16> %24)
+ %rx25 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx24, <8 x i16> %25)
+ %rx26 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx25, <8 x i16> %26)
+ %rx27 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx26, <8 x i16> %27)
+ %rx28 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx27, <8 x i16> %28)
+ %rx29 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx28, <8 x i16> %29)
+ %rx30 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx29, <8 x i16> %30)
+ %rx31 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx30, <8 x i16> %31)
+ %rx32 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx31, <8 x i16> %32)
+ %rx33 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %rx32, <8 x i16> %33)
+ %res = call i32 @llvm.mips.copy.s.h(<8 x i16> %rx33, i32 0)
+ ret i32 %res
+}
+
+declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
+declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: test_i16:
+; CHECK: st.h {{.*}} Spill
+; CHECK: st.h {{.*}} Spill
+; CHECK: ld.h {{.*}} Reload
+; CHECK: ld.h {{.*}} Reload
+; CHECK: .size
+
+define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <4 x i32>* %p0, i32 1
+ %p2 = getelementptr <4 x i32>* %p0, i32 2
+ %p3 = getelementptr <4 x i32>* %p0, i32 3
+ %p4 = getelementptr <4 x i32>* %p0, i32 4
+ %p5 = getelementptr <4 x i32>* %p0, i32 5
+ %p6 = getelementptr <4 x i32>* %p0, i32 6
+ %p7 = getelementptr <4 x i32>* %p0, i32 7
+ %p8 = getelementptr <4 x i32>* %p0, i32 8
+ %p9 = getelementptr <4 x i32>* %p0, i32 9
+ %p10 = getelementptr <4 x i32>* %p0, i32 10
+ %p11 = getelementptr <4 x i32>* %p0, i32 11
+ %p12 = getelementptr <4 x i32>* %p0, i32 12
+ %p13 = getelementptr <4 x i32>* %p0, i32 13
+ %p14 = getelementptr <4 x i32>* %p0, i32 14
+ %p15 = getelementptr <4 x i32>* %p0, i32 15
+ %p16 = getelementptr <4 x i32>* %p0, i32 16
+ %p17 = getelementptr <4 x i32>* %p0, i32 17
+ %p18 = getelementptr <4 x i32>* %p0, i32 18
+ %p19 = getelementptr <4 x i32>* %p0, i32 19
+ %p20 = getelementptr <4 x i32>* %p0, i32 20
+ %p21 = getelementptr <4 x i32>* %p0, i32 21
+ %p22 = getelementptr <4 x i32>* %p0, i32 22
+ %p23 = getelementptr <4 x i32>* %p0, i32 23
+ %p24 = getelementptr <4 x i32>* %p0, i32 24
+ %p25 = getelementptr <4 x i32>* %p0, i32 25
+ %p26 = getelementptr <4 x i32>* %p0, i32 26
+ %p27 = getelementptr <4 x i32>* %p0, i32 27
+ %p28 = getelementptr <4 x i32>* %p0, i32 28
+ %p29 = getelementptr <4 x i32>* %p0, i32 29
+ %p30 = getelementptr <4 x i32>* %p0, i32 30
+ %p31 = getelementptr <4 x i32>* %p0, i32 31
+ %p32 = getelementptr <4 x i32>* %p0, i32 32
+ %p33 = getelementptr <4 x i32>* %p0, i32 33
+ %0 = load <4 x i32>* %p0, align 16
+ %1 = load <4 x i32>* %p1, align 16
+ %2 = load <4 x i32>* %p2, align 16
+ %3 = load <4 x i32>* %p3, align 16
+ %4 = load <4 x i32>* %p4, align 16
+ %5 = load <4 x i32>* %p5, align 16
+ %6 = load <4 x i32>* %p6, align 16
+ %7 = load <4 x i32>* %p7, align 16
+ %8 = load <4 x i32>* %p8, align 16
+ %9 = load <4 x i32>* %p9, align 16
+ %10 = load <4 x i32>* %p10, align 16
+ %11 = load <4 x i32>* %p11, align 16
+ %12 = load <4 x i32>* %p12, align 16
+ %13 = load <4 x i32>* %p13, align 16
+ %14 = load <4 x i32>* %p14, align 16
+ %15 = load <4 x i32>* %p15, align 16
+ %16 = load <4 x i32>* %p16, align 16
+ %17 = load <4 x i32>* %p17, align 16
+ %18 = load <4 x i32>* %p18, align 16
+ %19 = load <4 x i32>* %p19, align 16
+ %20 = load <4 x i32>* %p20, align 16
+ %21 = load <4 x i32>* %p21, align 16
+ %22 = load <4 x i32>* %p22, align 16
+ %23 = load <4 x i32>* %p23, align 16
+ %24 = load <4 x i32>* %p24, align 16
+ %25 = load <4 x i32>* %p25, align 16
+ %26 = load <4 x i32>* %p26, align 16
+ %27 = load <4 x i32>* %p27, align 16
+ %28 = load <4 x i32>* %p28, align 16
+ %29 = load <4 x i32>* %p29, align 16
+ %30 = load <4 x i32>* %p30, align 16
+ %31 = load <4 x i32>* %p31, align 16
+ %32 = load <4 x i32>* %p32, align 16
+ %33 = load <4 x i32>* %p33, align 16
+ %r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
+ %r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2)
+ %r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3)
+ %r4 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r3, <4 x i32> %4)
+ %r5 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r4, <4 x i32> %5)
+ %r6 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r5, <4 x i32> %6)
+ %r7 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r6, <4 x i32> %7)
+ %r8 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r7, <4 x i32> %8)
+ %r9 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r8, <4 x i32> %9)
+ %r10 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r9, <4 x i32> %10)
+ %r11 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r10, <4 x i32> %11)
+ %r12 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r11, <4 x i32> %12)
+ %r13 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r12, <4 x i32> %13)
+ %r14 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r13, <4 x i32> %14)
+ %r15 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r14, <4 x i32> %15)
+ %r16 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r15, <4 x i32> %16)
+ %r17 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r16, <4 x i32> %17)
+ %r18 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r17, <4 x i32> %18)
+ %r19 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r18, <4 x i32> %19)
+ %r20 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r19, <4 x i32> %20)
+ %r21 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r20, <4 x i32> %21)
+ %r22 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r21, <4 x i32> %22)
+ %r23 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r22, <4 x i32> %23)
+ %r24 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r23, <4 x i32> %24)
+ %r25 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r24, <4 x i32> %25)
+ %r26 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r25, <4 x i32> %26)
+ %r27 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r26, <4 x i32> %27)
+ %r28 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r27, <4 x i32> %28)
+ %r29 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r28, <4 x i32> %29)
+ %r30 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r29, <4 x i32> %30)
+ %r31 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r30, <4 x i32> %31)
+ %r32 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r31, <4 x i32> %32)
+ %r33 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r32, <4 x i32> %33)
+ %rx1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r33, <4 x i32> %1)
+ %rx2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx1, <4 x i32> %2)
+ %rx3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx2, <4 x i32> %3)
+ %rx4 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx3, <4 x i32> %4)
+ %rx5 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx4, <4 x i32> %5)
+ %rx6 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx5, <4 x i32> %6)
+ %rx7 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx6, <4 x i32> %7)
+ %rx8 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx7, <4 x i32> %8)
+ %rx9 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx8, <4 x i32> %9)
+ %rx10 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx9, <4 x i32> %10)
+ %rx11 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx10, <4 x i32> %11)
+ %rx12 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx11, <4 x i32> %12)
+ %rx13 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx12, <4 x i32> %13)
+ %rx14 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx13, <4 x i32> %14)
+ %rx15 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx14, <4 x i32> %15)
+ %rx16 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx15, <4 x i32> %16)
+ %rx17 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx16, <4 x i32> %17)
+ %rx18 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx17, <4 x i32> %18)
+ %rx19 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx18, <4 x i32> %19)
+ %rx20 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx19, <4 x i32> %20)
+ %rx21 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx20, <4 x i32> %21)
+ %rx22 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx21, <4 x i32> %22)
+ %rx23 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx22, <4 x i32> %23)
+ %rx24 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx23, <4 x i32> %24)
+ %rx25 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx24, <4 x i32> %25)
+ %rx26 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx25, <4 x i32> %26)
+ %rx27 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx26, <4 x i32> %27)
+ %rx28 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx27, <4 x i32> %28)
+ %rx29 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx28, <4 x i32> %29)
+ %rx30 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx29, <4 x i32> %30)
+ %rx31 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx30, <4 x i32> %31)
+ %rx32 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx31, <4 x i32> %32)
+ %rx33 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %rx32, <4 x i32> %33)
+ %res = call i32 @llvm.mips.copy.s.w(<4 x i32> %rx33, i32 0)
+ ret i32 %res
+}
+
+declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
+declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: test_i32:
+; CHECK: st.w {{.*}} Spill
+; CHECK: st.w {{.*}} Spill
+; CHECK: ld.w {{.*}} Reload
+; CHECK: ld.w {{.*}} Reload
+; CHECK: .size
+
+define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
+entry:
+ %p1 = getelementptr <2 x i64>* %p0, i32 1
+ %p2 = getelementptr <2 x i64>* %p0, i32 2
+ %p3 = getelementptr <2 x i64>* %p0, i32 3
+ %p4 = getelementptr <2 x i64>* %p0, i32 4
+ %p5 = getelementptr <2 x i64>* %p0, i32 5
+ %p6 = getelementptr <2 x i64>* %p0, i32 6
+ %p7 = getelementptr <2 x i64>* %p0, i32 7
+ %p8 = getelementptr <2 x i64>* %p0, i32 8
+ %p9 = getelementptr <2 x i64>* %p0, i32 9
+ %p10 = getelementptr <2 x i64>* %p0, i32 10
+ %p11 = getelementptr <2 x i64>* %p0, i32 11
+ %p12 = getelementptr <2 x i64>* %p0, i32 12
+ %p13 = getelementptr <2 x i64>* %p0, i32 13
+ %p14 = getelementptr <2 x i64>* %p0, i32 14
+ %p15 = getelementptr <2 x i64>* %p0, i32 15
+ %p16 = getelementptr <2 x i64>* %p0, i32 16
+ %p17 = getelementptr <2 x i64>* %p0, i32 17
+ %p18 = getelementptr <2 x i64>* %p0, i32 18
+ %p19 = getelementptr <2 x i64>* %p0, i32 19
+ %p20 = getelementptr <2 x i64>* %p0, i32 20
+ %p21 = getelementptr <2 x i64>* %p0, i32 21
+ %p22 = getelementptr <2 x i64>* %p0, i32 22
+ %p23 = getelementptr <2 x i64>* %p0, i32 23
+ %p24 = getelementptr <2 x i64>* %p0, i32 24
+ %p25 = getelementptr <2 x i64>* %p0, i32 25
+ %p26 = getelementptr <2 x i64>* %p0, i32 26
+ %p27 = getelementptr <2 x i64>* %p0, i32 27
+ %p28 = getelementptr <2 x i64>* %p0, i32 28
+ %p29 = getelementptr <2 x i64>* %p0, i32 29
+ %p30 = getelementptr <2 x i64>* %p0, i32 30
+ %p31 = getelementptr <2 x i64>* %p0, i32 31
+ %p32 = getelementptr <2 x i64>* %p0, i32 32
+ %p33 = getelementptr <2 x i64>* %p0, i32 33
+ %0 = load <2 x i64>* %p0, align 16
+ %1 = load <2 x i64>* %p1, align 16
+ %2 = load <2 x i64>* %p2, align 16
+ %3 = load <2 x i64>* %p3, align 16
+ %4 = load <2 x i64>* %p4, align 16
+ %5 = load <2 x i64>* %p5, align 16
+ %6 = load <2 x i64>* %p6, align 16
+ %7 = load <2 x i64>* %p7, align 16
+ %8 = load <2 x i64>* %p8, align 16
+ %9 = load <2 x i64>* %p9, align 16
+ %10 = load <2 x i64>* %p10, align 16
+ %11 = load <2 x i64>* %p11, align 16
+ %12 = load <2 x i64>* %p12, align 16
+ %13 = load <2 x i64>* %p13, align 16
+ %14 = load <2 x i64>* %p14, align 16
+ %15 = load <2 x i64>* %p15, align 16
+ %16 = load <2 x i64>* %p16, align 16
+ %17 = load <2 x i64>* %p17, align 16
+ %18 = load <2 x i64>* %p18, align 16
+ %19 = load <2 x i64>* %p19, align 16
+ %20 = load <2 x i64>* %p20, align 16
+ %21 = load <2 x i64>* %p21, align 16
+ %22 = load <2 x i64>* %p22, align 16
+ %23 = load <2 x i64>* %p23, align 16
+ %24 = load <2 x i64>* %p24, align 16
+ %25 = load <2 x i64>* %p25, align 16
+ %26 = load <2 x i64>* %p26, align 16
+ %27 = load <2 x i64>* %p27, align 16
+ %28 = load <2 x i64>* %p28, align 16
+ %29 = load <2 x i64>* %p29, align 16
+ %30 = load <2 x i64>* %p30, align 16
+ %31 = load <2 x i64>* %p31, align 16
+ %32 = load <2 x i64>* %p32, align 16
+ %33 = load <2 x i64>* %p33, align 16
+ %r1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
+ %r2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1, <2 x i64> %2)
+ %r3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2, <2 x i64> %3)
+ %r4 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r3, <2 x i64> %4)
+ %r5 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r4, <2 x i64> %5)
+ %r6 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r5, <2 x i64> %6)
+ %r7 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r6, <2 x i64> %7)
+ %r8 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r7, <2 x i64> %8)
+ %r9 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r8, <2 x i64> %9)
+ %r10 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r9, <2 x i64> %10)
+ %r11 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r10, <2 x i64> %11)
+ %r12 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r11, <2 x i64> %12)
+ %r13 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r12, <2 x i64> %13)
+ %r14 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r13, <2 x i64> %14)
+ %r15 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r14, <2 x i64> %15)
+ %r16 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r15, <2 x i64> %16)
+ %r17 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r16, <2 x i64> %17)
+ %r18 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r17, <2 x i64> %18)
+ %r19 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r18, <2 x i64> %19)
+ %r20 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r19, <2 x i64> %20)
+ %r21 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r20, <2 x i64> %21)
+ %r22 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r21, <2 x i64> %22)
+ %r23 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r22, <2 x i64> %23)
+ %r24 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r23, <2 x i64> %24)
+ %r25 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r24, <2 x i64> %25)
+ %r26 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r25, <2 x i64> %26)
+ %r27 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r26, <2 x i64> %27)
+ %r28 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r27, <2 x i64> %28)
+ %r29 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r28, <2 x i64> %29)
+ %r30 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r29, <2 x i64> %30)
+ %r31 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r30, <2 x i64> %31)
+ %r32 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r31, <2 x i64> %32)
+ %r33 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r32, <2 x i64> %33)
+ %rx1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r33, <2 x i64> %1)
+ %rx2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx1, <2 x i64> %2)
+ %rx3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx2, <2 x i64> %3)
+ %rx4 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx3, <2 x i64> %4)
+ %rx5 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx4, <2 x i64> %5)
+ %rx6 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx5, <2 x i64> %6)
+ %rx7 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx6, <2 x i64> %7)
+ %rx8 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx7, <2 x i64> %8)
+ %rx9 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx8, <2 x i64> %9)
+ %rx10 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx9, <2 x i64> %10)
+ %rx11 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx10, <2 x i64> %11)
+ %rx12 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx11, <2 x i64> %12)
+ %rx13 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx12, <2 x i64> %13)
+ %rx14 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx13, <2 x i64> %14)
+ %rx15 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx14, <2 x i64> %15)
+ %rx16 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx15, <2 x i64> %16)
+ %rx17 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx16, <2 x i64> %17)
+ %rx18 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx17, <2 x i64> %18)
+ %rx19 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx18, <2 x i64> %19)
+ %rx20 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx19, <2 x i64> %20)
+ %rx21 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx20, <2 x i64> %21)
+ %rx22 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx21, <2 x i64> %22)
+ %rx23 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx22, <2 x i64> %23)
+ %rx24 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx23, <2 x i64> %24)
+ %rx25 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx24, <2 x i64> %25)
+ %rx26 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx25, <2 x i64> %26)
+ %rx27 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx26, <2 x i64> %27)
+ %rx28 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx27, <2 x i64> %28)
+ %rx29 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx28, <2 x i64> %29)
+ %rx30 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx29, <2 x i64> %30)
+ %rx31 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx30, <2 x i64> %31)
+ %rx32 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx31, <2 x i64> %32)
+ %rx33 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %rx32, <2 x i64> %33)
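+ ; copy_s.d needs a 64-bit GPR, so on MIPS32 the result is read out by
+ ; bitcasting to <4 x i32> and using copy_s.w on element 0.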
+ %res1 = bitcast <2 x i64> %rx33 to <4 x i32>
+ %res = call i32 @llvm.mips.copy.s.w(<4 x i32> %res1, i32 0)
+ ret i32 %res
+}
+
+declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: test_i64:
+; CHECK: st.d {{.*}} Spill
+; CHECK: st.d {{.*}} Spill
+; CHECK: ld.d {{.*}} Reload
+; CHECK: ld.d {{.*}} Reload
+; CHECK: .size
diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll
new file mode 100644
index 0000000000000..5bddf5aea4059
--- /dev/null
+++ b/test/CodeGen/Mips/msa/vec.ll
@@ -0,0 +1,946 @@
+; Test the MSA intrinsics that are encoded with the VEC instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
+
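+; A single ANYENDIAN prefix is used for both targets because the intrinsic
+; forms load and store their operands as untyped <16 x i8> data, giving
+; identical code on big- and little-endian hosts. Note that the RUN lines
+; above only enable the ANYENDIAN prefix; the plain CHECK directives later in
+; this file are not exercised by these invocations.
+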
+@llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_and_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_and_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_b_test
+;
+@llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_and_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_and_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_h_test
+;
+@llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_and_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_and_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_w_test
+;
+@llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_and_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_and_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_d_test
+;
+define void @and_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
+ %2 = and <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
+ ret void
+}
+
+; CHECK: and_v_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: and.v
+; CHECK: st.b
+; CHECK: .size and_v_b_test
+;
+define void @and_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+ %2 = and <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
+ ret void
+}
+
+; CHECK: and_v_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: and.v
+; CHECK: st.h
+; CHECK: .size and_v_h_test
+;
+
+define void @and_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+ %2 = and <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
+ ret void
+}
+
+; CHECK: and_v_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: and.v
+; CHECK: st.w
+; CHECK: .size and_v_w_test
+;
+
+define void @and_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+ %2 = and <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
+ ret void
+}
+
+; CHECK: and_v_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: and.v
+; CHECK: st.d
+; CHECK: .size and_v_d_test
+;
+@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bmnz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmnz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmnz_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
+ %3 = bitcast <16 x i8> %0 to <16 x i8>
+ %4 = bitcast <16 x i8> %1 to <16 x i8>
+ %5 = bitcast <16 x i8> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <16 x i8>
+ store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_b_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
+
+@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bmnz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmnz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bmnz_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
+ %3 = bitcast <8 x i16> %0 to <16 x i8>
+ %4 = bitcast <8 x i16> %1 to <16 x i8>
+ %5 = bitcast <8 x i16> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_h_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
+
+@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bmnz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmnz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bmnz_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
+ %3 = bitcast <4 x i32> %0 to <16 x i8>
+ %4 = bitcast <4 x i32> %1 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_w_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
+
+@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bmnz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmnz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bmnz_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
+ %3 = bitcast <2 x i64> %0 to <16 x i8>
+ %4 = bitcast <2 x i64> %1 to <16 x i8>
+ %5 = bitcast <2 x i64> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_d_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R4]], 0(
+; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
+
+@llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bmz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bmz_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bmz_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3
+ %3 = bitcast <16 x i8> %0 to <16 x i8>
+ %4 = bitcast <16 x i8> %1 to <16 x i8>
+ %5 = bitcast <16 x i8> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <16 x i8>
+ store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_b_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
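+; (bmz.v wd, ws, wt computes wd = (ws & ~wt) | (wd & wt) and bmnz.v computes
+; wd = (ws & wt) | (wd & ~wt); both mask on wt, so exchanging the wd_in and
+; ws operands turns one operation into the other.)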
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_b_test
+
+@llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bmz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bmz_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bmz_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3
+ %3 = bitcast <8 x i16> %0 to <16 x i8>
+ %4 = bitcast <8 x i16> %1 to <16 x i8>
+ %5 = bitcast <8 x i16> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_h_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_h_test
+
+@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bmz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bmz_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bmz_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3
+ %3 = bitcast <4 x i32> %0 to <16 x i8>
+ %4 = bitcast <4 x i32> %1 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_w_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_w_test
+
+@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bmz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bmz_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bmz_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3
+ %3 = bitcast <2 x i64> %0 to <16 x i8>
+ %4 = bitcast <2 x i64> %1 to <16 x i8>
+ %5 = bitcast <2 x i64> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_d_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bmz.v with ws and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
+; ANYENDIAN: .size llvm_mips_bmz_v_d_test
+
+@llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_bsel_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_bsel_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_bsel_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
+ %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3
+ %3 = bitcast <16 x i8> %0 to <16 x i8>
+ %4 = bitcast <16 x i8> %1 to <16 x i8>
+ %5 = bitcast <16 x i8> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <16 x i8>
+ store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_b_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
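+; (bmnz.v masks on its wt operand, while bsel.v masks on wd_in; exchanging
+; those two operands therefore lets the backend emit bmnz.v for the bsel.v
+; intrinsic, as the swapped register order below checks.)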
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_b_test
+
+@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_bsel_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_bsel_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_bsel_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
+ %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3
+ %3 = bitcast <8 x i16> %0 to <16 x i8>
+ %4 = bitcast <8 x i16> %1 to <16 x i8>
+ %5 = bitcast <8 x i16> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <8 x i16>
+ store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_h_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_h_test
+
+@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_bsel_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_bsel_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_bsel_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
+ %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3
+ %3 = bitcast <4 x i32> %0 to <16 x i8>
+ %4 = bitcast <4 x i32> %1 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <4 x i32>
+ store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_w_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_w_test
+
+@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_bsel_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_bsel_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_bsel_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
+ %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3
+ %3 = bitcast <2 x i64> %0 to <16 x i8>
+ %4 = bitcast <2 x i64> %1 to <16 x i8>
+ %5 = bitcast <2 x i64> %2 to <16 x i8>
+ %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
+ %7 = bitcast <16 x i8> %6 to <2 x i64>
+ store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_d_test:
+; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG1)(
+; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG2)(
+; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG3)(
+; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
+; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
+; bmnz.v is the same as bsel.v with wt and wd_in swapped
+; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R6]], 0(
+; ANYENDIAN: .size llvm_mips_bsel_v_d_test
+
+@llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_nor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nor_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_b_test
+;
+@llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_nor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nor_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_h_test
+;
+@llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_nor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nor_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_w_test
+;
+@llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_nor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nor_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_d_test
+;
+@llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_or_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_or_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_b_test
+;
+@llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_or_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_or_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_h_test
+;
+@llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_or_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_or_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_w_test
+;
+@llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_or_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_or_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_d_test
+;
+define void @or_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
+ %2 = or <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
+ ret void
+}
+
+; CHECK: or_v_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: or.v
+; CHECK: st.b
+; CHECK: .size or_v_b_test
+;
+define void @or_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+ %2 = or <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
+ ret void
+}
+
+; CHECK: or_v_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: or.v
+; CHECK: st.h
+; CHECK: .size or_v_h_test
+;
+
+define void @or_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+ %2 = or <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
+ ret void
+}
+
+; CHECK: or_v_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: or.v
+; CHECK: st.w
+; CHECK: .size or_v_w_test
+;
+
+define void @or_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+ %2 = or <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
+ ret void
+}
+
+; CHECK: or_v_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: or.v
+; CHECK: st.d
+; CHECK: .size or_v_d_test
+;
+@llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_xor_v_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_xor_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %2 = bitcast <16 x i8> %0 to <16 x i8>
+ %3 = bitcast <16 x i8> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <16 x i8>
+ store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_b_test
+;
+@llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_xor_v_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_xor_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %2 = bitcast <8 x i16> %0 to <16 x i8>
+ %3 = bitcast <8 x i16> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <8 x i16>
+ store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_h_test
+;
+@llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_xor_v_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_xor_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %2 = bitcast <4 x i32> %0 to <16 x i8>
+ %3 = bitcast <4 x i32> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <4 x i32>
+ store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_w_test
+;
+@llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_xor_v_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_xor_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %2 = bitcast <2 x i64> %0 to <16 x i8>
+ %3 = bitcast <2 x i64> %1 to <16 x i8>
+ %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+ %5 = bitcast <16 x i8> %4 to <2 x i64>
+ store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
+ ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_d_test
+;
+define void @xor_v_b_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
+ %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
+ %2 = xor <16 x i8> %0, %1
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
+ ret void
+}
+
+; CHECK: xor_v_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: xor.v
+; CHECK: st.b
+; CHECK: .size xor_v_b_test
+;
+define void @xor_v_h_test() nounwind {
+entry:
+ %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
+ %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+ %2 = xor <8 x i16> %0, %1
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
+ ret void
+}
+
+; CHECK: xor_v_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: xor.v
+; CHECK: st.h
+; CHECK: .size xor_v_h_test
+;
+
+define void @xor_v_w_test() nounwind {
+entry:
+ %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
+ %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+ %2 = xor <4 x i32> %0, %1
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
+ ret void
+}
+
+; CHECK: xor_v_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: xor.v
+; CHECK: st.w
+; CHECK: .size xor_v_w_test
+;
+
+define void @xor_v_d_test() nounwind {
+entry:
+ %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
+ %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+ %2 = xor <2 x i64> %0, %1
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
+ ret void
+}
+
+; CHECK: xor_v_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: xor.v
+; CHECK: st.d
+; CHECK: .size xor_v_d_test
+;
+declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
+declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind
diff --git a/test/CodeGen/Mips/msa/vecs10.ll b/test/CodeGen/Mips/msa/vecs10.ll
new file mode 100644
index 0000000000000..e22e0755ef00f
--- /dev/null
+++ b/test/CodeGen/Mips/msa/vecs10.ll
@@ -0,0 +1,47 @@
+; Test the MSA intrinsics that are encoded with the VECS10 instruction format.
+
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+
+@llvm_mips_bnz_v_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+
+define i32 @llvm_mips_bnz_v_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bnz_v_ARG1
+ %1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bnz.v(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_bnz_v_test:
+; CHECK-DAG: ld.b [[R0:\$w[0-9]+]]
+; CHECK-DAG: bnz.v [[R0]]
+; CHECK: .size llvm_mips_bnz_v_test
+
+@llvm_mips_bz_v_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+
+define i32 @llvm_mips_bz_v_test() nounwind {
+entry:
+ %0 = load <16 x i8>* @llvm_mips_bz_v_ARG1
+ %1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0)
+ %2 = icmp eq i32 %1, 0
+ br i1 %2, label %true, label %false
+true:
+ ret i32 2
+false:
+ ret i32 3
+}
+
+declare i32 @llvm.mips.bz.v(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_bz_v_test:
+; CHECK-DAG: ld.b [[R0:\$w[0-9]+]]
+; CHECK-DAG: bz.v [[R0]]
+; CHECK: .size llvm_mips_bz_v_test
+;
diff --git a/test/CodeGen/Mips/nomips16.ll b/test/CodeGen/Mips/nomips16.ll
new file mode 100644
index 0000000000000..bf7c667d057f6
--- /dev/null
+++ b/test/CodeGen/Mips/nomips16.ll
@@ -0,0 +1,38 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s
+
+@x = global float 0.000000e+00, align 4
+@.str = private unnamed_addr constant [20 x i8] c"in main: mips16 %f\0A\00", align 1
+
+; Function Attrs: nounwind
+define void @foo() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %conv = fpext float %0 to double
+ %add = fadd double %conv, 1.500000e+00
+ %conv1 = fptrunc double %add to float
+ store float %conv1, float* @x, align 4
+ ret void
+}
+; CHECK: .ent foo
+; CHECK: jal __mips16_extendsfdf2
+; CHECK: .end foo
+
+; Function Attrs: nounwind
+define void @nofoo() #1 {
+entry:
+ %0 = load float* @x, align 4
+ %conv = fpext float %0 to double
+ %add = fadd double %conv, 3.900000e+00
+ %conv1 = fptrunc double %add to float
+ store float %conv1, float* @x, align 4
+ ret void
+}
+
+; CHECK: .ent nofoo
+; CHECK: cvt.d.s $f{{.+}}, $f{{.+}}
+; CHECK: .end nofoo
+
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
diff --git a/test/CodeGen/Mips/o32_cc.ll b/test/CodeGen/Mips/o32_cc.ll
index 70b66efee9a87..08e5aab4f7ac4 100644
--- a/test/CodeGen/Mips/o32_cc.ll
+++ b/test/CodeGen/Mips/o32_cc.ll
@@ -1,11 +1,12 @@
-; RUN: llc -march=mips < %s | FileCheck %s
-
-; FIXME: Disabled because it unpredictably fails on certain platforms.
-; REQUIRES: disabled
+; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s
+; RUN: llc -march=mipsel < %s | FileCheck -check-prefix=FP32EL %s
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck -check-prefix=FP64EL %s
; $f12, $f14
-; CHECK: ldc1 $f12, %lo
-; CHECK: ldc1 $f14, %lo
+; CHECK-LABEL: testlowercall0:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: ldc1 $f14, %lo
define void @testlowercall0() nounwind {
entry:
tail call void @f0(double 5.000000e+00, double 6.000000e+00) nounwind
@@ -15,8 +16,9 @@ entry:
declare void @f0(double, double)
; $f12, $f14
-; CHECK: lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
+; CHECK-LABEL: testlowercall1:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
define void @testlowercall1() nounwind {
entry:
tail call void @f1(float 8.000000e+00, float 9.000000e+00) nounwind
@@ -26,8 +28,9 @@ entry:
declare void @f1(float, float)
; $f12, $f14
-; CHECK: lwc1 $f12, %lo
-; CHECK: ldc1 $f14, %lo
+; CHECK-LABEL: testlowercall2:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: ldc1 $f14, %lo
define void @testlowercall2() nounwind {
entry:
tail call void @f2(float 8.000000e+00, double 6.000000e+00) nounwind
@@ -37,8 +40,9 @@ entry:
declare void @f2(float, double)
; $f12, $f14
-; CHECK: ldc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
+; CHECK-LABEL: testlowercall3:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
define void @testlowercall3() nounwind {
entry:
tail call void @f3(double 5.000000e+00, float 9.000000e+00) nounwind
@@ -48,10 +52,11 @@ entry:
declare void @f3(double, float)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 12
-; CHECK: addiu $5, $zero, 13
-; CHECK: addiu $6, $zero, 14
-; CHECK: addiu $7, $zero, 15
+; CHECK-LABEL: testlowercall4:
+; CHECK-DAG: addiu $4, $zero, 12
+; CHECK-DAG: addiu $5, $zero, 13
+; CHECK-DAG: addiu $6, $zero, 14
+; CHECK-DAG: addiu $7, $zero, 15
define void @testlowercall4() nounwind {
entry:
tail call void @f4(i32 12, i32 13, i32 14, i32 15) nounwind
@@ -61,10 +66,11 @@ entry:
declare void @f4(i32, i32, i32, i32)
; $f12, $6, stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 23
+; CHECK-LABEL: testlowercall5:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 23
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall5() nounwind {
entry:
tail call void @f5(double 1.500000e+01, i32 23, double 1.700000e+01) nounwind
@@ -74,9 +80,10 @@ entry:
declare void @f5(double, i32, double)
; $f12, $6, $7
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 33
-; CHECK: addiu $7, $zero, 24
+; CHECK-LABEL: testlowercall6:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 33
+; CHECK-DAG: addiu $7, $zero, 24
define void @testlowercall6() nounwind {
entry:
tail call void @f6(double 2.500000e+01, i32 33, i32 24) nounwind
@@ -86,9 +93,10 @@ entry:
declare void @f6(double, i32, i32)
; $f12, $5, $6
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 43
-; CHECK: addiu $6, $zero, 34
+; CHECK-LABEL: testlowercall7:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 43
+; CHECK-DAG: addiu $6, $zero, 34
define void @testlowercall7() nounwind {
entry:
tail call void @f7(float 1.800000e+01, i32 43, i32 34) nounwind
@@ -98,11 +106,12 @@ entry:
declare void @f7(float, i32, i32)
; $4, $5, $6, stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: addiu $4, $zero, 22
-; CHECK: addiu $5, $zero, 53
-; CHECK: addiu $6, $zero, 44
+; CHECK-LABEL: testlowercall8:
+; CHECK-DAG: addiu $4, $zero, 22
+; CHECK-DAG: addiu $5, $zero, 53
+; CHECK-DAG: addiu $6, $zero, 44
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall8() nounwind {
entry:
tail call void @f8(i32 22, i32 53, i32 44, double 4.000000e+00) nounwind
@@ -112,10 +121,11 @@ entry:
declare void @f8(i32, i32, i32, double)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 32
-; CHECK: addiu $5, $zero, 63
-; CHECK: addiu $6, $zero, 54
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall9:
+; CHECK-DAG: addiu $4, $zero, 32
+; CHECK-DAG: addiu $5, $zero, 63
+; CHECK-DAG: addiu $6, $zero, 54
+; CHECK-DAG: lui $7, 16688
define void @testlowercall9() nounwind {
entry:
tail call void @f9(i32 32, i32 63, i32 54, float 1.100000e+01) nounwind
@@ -125,10 +135,15 @@ entry:
declare void @f9(i32, i32, i32, float)
; $4, $5, ($6, $7)
-; CHECK: addiu $4, $zero, 42
-; CHECK: addiu $5, $zero, 73
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall10:
+; CHECK-DAG: addiu $4, $zero, 42
+; CHECK-DAG: addiu $5, $zero, 73
+; FP32EL-LABEL: testlowercall10:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall10:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall10() nounwind {
entry:
tail call void @f10(i32 42, i32 73, double 2.700000e+01) nounwind
@@ -138,9 +153,14 @@ entry:
declare void @f10(i32, i32, double)
; $4, ($6, $7)
-; CHECK: addiu $4, $zero, 52
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall11:
+; CHECK-DAG: addiu $4, $zero, 52
+; FP32EL-LABEL: testlowercall11:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall11:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall11() nounwind {
entry:
tail call void @f11(i32 52, double 1.600000e+01) nounwind
@@ -150,10 +170,11 @@ entry:
declare void @f11(i32, double)
; $f12, $f14, $6, $7
-; CHECK: lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: ori $6
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall12:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; CHECK-DAG: lui $6, 16672
+; CHECK-DAG: lui $7, 16808
define void @testlowercall12() nounwind {
entry:
tail call void @f12(float 2.800000e+01, float 1.900000e+01, float 1.000000e+01, float 2.100000e+01) nounwind
@@ -163,10 +184,11 @@ entry:
declare void @f12(float, float, float, float)
; $f12, $5, $6, $7
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 83
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 25
+; CHECK-LABEL: testlowercall13:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 83
+; CHECK-DAG: lui $6, 16800
+; CHECK-DAG: addiu $7, $zero, 25
define void @testlowercall13() nounwind {
entry:
tail call void @f13(float 3.800000e+01, i32 83, float 2.000000e+01, i32 25) nounwind
@@ -177,9 +199,10 @@ entry:
declare void @f13(float, i32, float, i32)
; $f12, $f14, $7
-; CHECK: ldc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall14:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; CHECK-DAG: lui $7, 16880
define void @testlowercall14() nounwind {
entry:
tail call void @f14(double 3.500000e+01, float 2.900000e+01, float 3.000000e+01) nounwind
@@ -189,10 +212,15 @@ entry:
declare void @f14(double, float, float)
; $f12, $f14, ($6, $7)
-; CHECK: lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall15:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; FP32EL-LABEL: testlowercall15:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall15:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall15() nounwind {
entry:
tail call void @f15(float 4.800000e+01, float 3.900000e+01, double 3.700000e+01) nounwind
@@ -202,10 +230,11 @@ entry:
declare void @f15(float, float, double)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 62
-; CHECK: ori $5
-; CHECK: addiu $6, $zero, 64
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall16:
+; CHECK-DAG: addiu $4, $zero, 62
+; CHECK-DAG: lui $5, 16964
+; CHECK-DAG: addiu $6, $zero, 64
+; CHECK-DAG: lui $7, 16888
define void @testlowercall16() nounwind {
entry:
tail call void @f16(i32 62, float 4.900000e+01, i32 64, float 3.100000e+01) nounwind
@@ -215,10 +244,11 @@ entry:
declare void @f16(i32, float, i32, float)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 72
-; CHECK: ori $5
-; CHECK: addiu $6, $zero, 74
-; CHECK: addiu $7, $zero, 35
+; CHECK-LABEL: testlowercall17:
+; CHECK-DAG: addiu $4, $zero, 72
+; CHECK-DAG: lui $5, 17004
+; CHECK-DAG: addiu $6, $zero, 74
+; CHECK-DAG: addiu $7, $zero, 35
define void @testlowercall17() nounwind {
entry:
tail call void @f17(i32 72, float 5.900000e+01, i32 74, i32 35) nounwind
@@ -228,10 +258,11 @@ entry:
declare void @f17(i32, float, i32, i32)
; $4, $5, $6, $7
-; CHECK: addiu $4, $zero, 82
-; CHECK: addiu $5, $zero, 93
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 45
+; CHECK-LABEL: testlowercall18:
+; CHECK-DAG: addiu $4, $zero, 82
+; CHECK-DAG: addiu $5, $zero, 93
+; CHECK-DAG: lui $6, 16928
+; CHECK-DAG: addiu $7, $zero, 45
define void @testlowercall18() nounwind {
entry:
tail call void @f18(i32 82, i32 93, float 4.000000e+01, i32 45) nounwind
@@ -242,11 +273,16 @@ declare void @f18(i32, i32, float, i32)
; $4, ($6, $7), stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: addiu $4, $zero, 92
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall20:
+; CHECK-DAG: addiu $4, $zero, 92
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
+; FP32EL-LABEL: testlowercall20:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall20:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall20() nounwind {
entry:
tail call void @f20(i32 92, double 2.600000e+01, double 4.700000e+01) nounwind
@@ -256,8 +292,9 @@ entry:
declare void @f20(i32, double, double)
; $f12, $5
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 103
+; CHECK-LABEL: testlowercall21:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 103
define void @testlowercall21() nounwind {
entry:
tail call void @f21(float 5.800000e+01, i32 103) nounwind
@@ -267,10 +304,15 @@ entry:
declare void @f21(float, i32)
; $f12, $5, ($6, $7)
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 113
-; CHECK: addiu $6, $zero, 0
-; CHECK: ori $7
+; CHECK-LABEL: testlowercall22:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 113
+; FP32EL-LABEL: testlowercall22:
+; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
+; FP64EL-LABEL: testlowercall22:
+; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
+; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall22() nounwind {
entry:
tail call void @f22(float 6.800000e+01, i32 113, double 5.700000e+01) nounwind
@@ -280,8 +322,9 @@ entry:
declare void @f22(float, i32, double)
; $f12, $6
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 123
+; CHECK-LABEL: testlowercall23:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 123
define void @testlowercall23() nounwind {
entry:
tail call void @f23(double 4.500000e+01, i32 123) nounwind
@@ -291,10 +334,11 @@ entry:
declare void @f23(double, i32)
; $f12, $6, stack
-; CHECK: sw
-; CHECK: sw
-; CHECK: ldc1 $f12, %lo
-; CHECK: addiu $6, $zero, 133
+; CHECK-LABEL: testlowercall24:
+; CHECK-DAG: ldc1 $f12, %lo
+; CHECK-DAG: addiu $6, $zero, 133
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall24() nounwind {
entry:
tail call void @f24(double 5.500000e+01, i32 133, double 6.700000e+01) nounwind
@@ -303,19 +347,19 @@ entry:
declare void @f24(double, i32, double)
-; CHECK: lwc1 $f12, %lo
-; lwc1 $f12, %lo
-; CHECK: lwc1 $f14, %lo
-; CHECK: ori $6
-; CHECK: ori $7
-; CHECK: lwc1 $f12, %lo
-; CHECK: addiu $5, $zero, 83
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 25
-; CHECK: addiu $4, $zero, 82
-; CHECK: addiu $5, $zero, 93
-; CHECK: ori $6
-; CHECK: addiu $7, $zero, 45
+; CHECK-LABEL: testlowercall25:
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: lwc1 $f14, %lo
+; CHECK-DAG: lui $6
+; CHECK-DAG: lui $7
+; CHECK-DAG: lwc1 $f12, %lo
+; CHECK-DAG: addiu $5, $zero, 83
+; CHECK-DAG: lui $6
+; CHECK-DAG: addiu $7, $zero, 25
+; CHECK-DAG: addiu $4, $zero, 82
+; CHECK-DAG: addiu $5, $zero, 93
+; CHECK-DAG: lui $6
+; CHECK-DAG: addiu $7, $zero, 45
define void @testlowercall25() nounwind {
entry:
tail call void @f12(float 2.800000e+01, float 1.900000e+01, float 1.000000e+01, float 2.100000e+01) nounwind
diff --git a/test/CodeGen/Mips/o32_cc_byval.ll b/test/CodeGen/Mips/o32_cc_byval.ll
index 0a8f85f4825d6..5db47acc5a85d 100644
--- a/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/test/CodeGen/Mips/o32_cc_byval.ll
@@ -10,22 +10,23 @@
define void @f1() nounwind {
entry:
-; CHECK: lw $[[R1:[0-9]+]], %got(f1.s1)
-; CHECK: addiu $[[R0:[0-9]+]], $[[R1]], %lo(f1.s1)
-; CHECK: lw $[[R7:[0-9]+]], 12($[[R0]])
-; CHECK: lw $[[R3:[0-9]+]], 16($[[R0]])
-; CHECK: lw $[[R4:[0-9]+]], 20($[[R0]])
-; CHECK: lw $[[R5:[0-9]+]], 24($[[R0]])
-; CHECK: lw $[[R6:[0-9]+]], 28($[[R0]])
-; CHECK: sw $[[R6]], 36($sp)
-; CHECK: sw $[[R5]], 32($sp)
-; CHECK: sw $[[R4]], 28($sp)
-; CHECK: sw $[[R3]], 24($sp)
-; CHECK: sw $[[R7]], 20($sp)
-; CHECK: lw $[[R2:[0-9]+]], 8($[[R0]])
-; CHECK: sw $[[R2]], 16($sp)
-; CHECK: lw $6, %lo(f1.s1)($[[R1]])
-; CHECK: lw $7, 4($[[R0]])
+; CHECK-LABEL: f1:
+; CHECK-DAG: lw $[[R1:[0-9]+]], %got(f1.s1)
+; CHECK-DAG: addiu $[[R0:[0-9]+]], $[[R1]], %lo(f1.s1)
+; CHECK-DAG: lw $[[R7:[0-9]+]], 12($[[R0]])
+; CHECK-DAG: lw $[[R3:[0-9]+]], 16($[[R0]])
+; CHECK-DAG: lw $[[R4:[0-9]+]], 20($[[R0]])
+; CHECK-DAG: lw $[[R5:[0-9]+]], 24($[[R0]])
+; CHECK-DAG: lw $[[R6:[0-9]+]], 28($[[R0]])
+; CHECK-DAG: sw $[[R6]], 36($sp)
+; CHECK-DAG: sw $[[R5]], 32($sp)
+; CHECK-DAG: sw $[[R4]], 28($sp)
+; CHECK-DAG: sw $[[R3]], 24($sp)
+; CHECK-DAG: sw $[[R7]], 20($sp)
+; CHECK-DAG: lw $[[R2:[0-9]+]], 8($[[R0]])
+; CHECK-DAG: sw $[[R2]], 16($sp)
+; CHECK-DAG: lw $6, %lo(f1.s1)($[[R1]])
+; CHECK-DAG: lw $7, 4($[[R0]])
%agg.tmp10 = alloca %struct.S3, align 4
call void @callee1(float 2.000000e+01, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
call void @callee2(%struct.S2* byval @f1.s2) nounwind
@@ -61,17 +62,17 @@ entry:
; CHECK: mfc1 $6, $f[[F0]]
%i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp = load i32* %i2, align 4, !tbaa !0
+ %tmp = load i32* %i2, align 4
%d = getelementptr inbounds %struct.S1* %s1, i32 0, i32 4
- %tmp1 = load double* %d, align 8, !tbaa !3
+ %tmp1 = load double* %d, align 8
%ll = getelementptr inbounds %struct.S1* %s1, i32 0, i32 3
- %tmp2 = load i64* %ll, align 8, !tbaa !4
+ %tmp2 = load i64* %ll, align 8
%i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp3 = load i32* %i, align 4, !tbaa !0
+ %tmp3 = load i32* %i, align 4
%s = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1
- %tmp4 = load i16* %s, align 2, !tbaa !5
+ %tmp4 = load i16* %s, align 2
%c = getelementptr inbounds %struct.S1* %s1, i32 0, i32 0
- %tmp5 = load i8* %c, align 1, !tbaa !1
+ %tmp5 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
ret void
}
@@ -90,9 +91,9 @@ entry:
; CHECK: sw $[[R0]], 24($sp)
%arrayidx = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 0
- %tmp = load i32* %arrayidx, align 4, !tbaa !0
+ %tmp = load i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 3
- %tmp3 = load i32* %arrayidx2, align 4, !tbaa !0
+ %tmp3 = load i32* %arrayidx2, align 4
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
ret void
}
@@ -110,11 +111,11 @@ entry:
; CHECK: sw $[[R1]], 24($sp)
%i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
- %tmp = load i32* %i, align 4, !tbaa !0
+ %tmp = load i32* %i, align 4
%i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
- %tmp1 = load i32* %i2, align 4, !tbaa !0
+ %tmp1 = load i32* %i2, align 4
%c = getelementptr inbounds %struct.S3* %s3, i32 0, i32 0
- %tmp2 = load i8* %c, align 1, !tbaa !1
+ %tmp2 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
ret void
}
@@ -128,10 +129,3 @@ entry:
}
declare void @f6(%struct.S4* nocapture byval, i64)
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"double", metadata !1}
-!4 = metadata !{metadata !"long long", metadata !1}
-!5 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/Mips/o32_cc_vararg.ll b/test/CodeGen/Mips/o32_cc_vararg.ll
index 35332b6550d82..10972e884ac38 100644
--- a/test/CodeGen/Mips/o32_cc_vararg.ll
+++ b/test/CodeGen/Mips/o32_cc_vararg.ll
@@ -27,7 +27,7 @@ entry:
%tmp = load i32* %b, align 4
ret i32 %tmp
-; CHECK: va1:
+; CHECK-LABEL: va1:
; CHECK: addiu $sp, $sp, -16
; CHECK: sw $7, 28($sp)
; CHECK: sw $6, 24($sp)
@@ -53,7 +53,7 @@ entry:
%tmp = load double* %b, align 8
ret double %tmp
-; CHECK: va2:
+; CHECK-LABEL: va2:
; CHECK: addiu $sp, $sp, -16
; CHECK: sw $7, 28($sp)
; CHECK: sw $6, 24($sp)
@@ -81,7 +81,7 @@ entry:
%tmp = load i32* %b, align 4
ret i32 %tmp
-; CHECK: va3:
+; CHECK-LABEL: va3:
; CHECK: addiu $sp, $sp, -16
; CHECK: sw $7, 28($sp)
; CHECK: sw $6, 24($sp)
@@ -104,7 +104,7 @@ entry:
%tmp = load double* %b, align 8
ret double %tmp
-; CHECK: va4:
+; CHECK-LABEL: va4:
; CHECK: addiu $sp, $sp, -24
; CHECK: sw $7, 36($sp)
; CHECK: sw $6, 32($sp)
@@ -132,7 +132,7 @@ entry:
%tmp = load i32* %d, align 4
ret i32 %tmp
-; CHECK: va5:
+; CHECK-LABEL: va5:
; CHECK: addiu $sp, $sp, -24
; CHECK: sw $7, 36($sp)
; CHECK: lw $2, 36($sp)
@@ -158,7 +158,7 @@ entry:
%tmp = load double* %d, align 8
ret double %tmp
-; CHECK: va6:
+; CHECK-LABEL: va6:
; CHECK: addiu $sp, $sp, -24
; CHECK: sw $7, 36($sp)
; CHECK: addiu $[[R0:[0-9]+]], $sp, 36
@@ -186,7 +186,7 @@ entry:
%tmp = load i32* %c, align 4
ret i32 %tmp
-; CHECK: va7:
+; CHECK-LABEL: va7:
; CHECK: addiu $sp, $sp, -24
; CHECK: lw $2, 40($sp)
}
@@ -209,7 +209,7 @@ entry:
%tmp = load double* %c, align 8
ret double %tmp
-; CHECK: va8:
+; CHECK-LABEL: va8:
; CHECK: addiu $sp, $sp, -32
; CHECK: addiu ${{[0-9]+}}, $sp, 48
; CHECK: ldc1 $f0, 48($sp)
@@ -235,7 +235,7 @@ entry:
%tmp = load i32* %d, align 4
ret i32 %tmp
-; CHECK: va9:
+; CHECK-LABEL: va9:
; CHECK: addiu $sp, $sp, -32
; CHECK: lw $2, 52($sp)
}
@@ -260,7 +260,7 @@ entry:
%tmp = load double* %d, align 8
ret double %tmp
-; CHECK: va10:
+; CHECK-LABEL: va10:
; CHECK: addiu $sp, $sp, -32
; CHECK: addiu $[[R0:[0-9]+]], $sp, 52
; CHECK: addiu $[[R1:[0-9]+]], $[[R0]], 7
diff --git a/test/CodeGen/Mips/optimize-fp-math.ll b/test/CodeGen/Mips/optimize-fp-math.ll
new file mode 100644
index 0000000000000..8b71dc42344c1
--- /dev/null
+++ b/test/CodeGen/Mips/optimize-fp-math.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
+
+; 32-LABEL: test_sqrtf_float_:
+; 32: sqrt.s $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 32: c.un.s $f[[R0]], $f[[R0]]
+; 64-LABEL: test_sqrtf_float_:
+; 64: sqrt.s $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 64: c.un.s $f[[R0]], $f[[R0]]
+
+define float @test_sqrtf_float_(float %a) {
+entry:
+ %call = tail call float @sqrtf(float %a)
+ ret float %call
+}
+
+declare float @sqrtf(float)
+
+; 32-LABEL: test_sqrt_double_:
+; 32: sqrt.d $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 32: c.un.d $f[[R0]], $f[[R0]]
+; 64-LABEL: test_sqrt_double_:
+; 64: sqrt.d $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 64: c.un.d $f[[R0]], $f[[R0]]
+
+define double @test_sqrt_double_(double %a) {
+entry:
+ %call = tail call double @sqrt(double %a)
+ ret double %call
+}
+
+declare double @sqrt(double)
diff --git a/test/CodeGen/Mips/powif64_16.ll b/test/CodeGen/Mips/powif64_16.ll
new file mode 100644
index 0000000000000..35a7ca9201e27
--- /dev/null
+++ b/test/CodeGen/Mips/powif64_16.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s
+
+declare float @llvm.powi.f32(float %Val, i32 %power)
+declare double @llvm.powi.f64(double %Val, i32 %power)
+
+define float @foo_pow_f32(float %y, i32 %p) {
+ %1 = tail call float @llvm.powi.f32(float %y, i32 %p)
+; CHECK-NOT: .ent __call_stub_fp_llvm.powi.f32
+; CHECK-NOT: {{.*}} jal llvm.powi.f32
+ ret float %1
+}
+
+define double @foo_pow_f64(double %y, i32 %p) {
+ %1 = tail call double @llvm.powi.f64(double %y, i32 %p)
+; CHECK-NOT: .ent __call_stub_fp_llvm.powi.f64
+; CHECK-NOT: {{.*}} jal llvm.powi.f64
+ ret double %1
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readonly }
+
+!0 = metadata !{metadata !"double", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"int", metadata !1}
diff --git a/test/CodeGen/Mips/private.ll b/test/CodeGen/Mips/private.ll
index d1a67fd9f4bf6..058db0bb977ad 100644
--- a/test/CodeGen/Mips/private.ll
+++ b/test/CodeGen/Mips/private.ll
@@ -3,14 +3,14 @@
; RUN: llc -march=mips < %s | FileCheck %s
define private void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
ret void
}
@baz = private global i32 4
define i32 @bar() {
-; CHECK: bar:
+; CHECK-LABEL: bar:
; CHECK: call16($foo)
; CHECK: lw $[[R0:[0-9]+]], %got($baz)($
; CHECK: lw ${{[0-9]+}}, %lo($baz)($[[R0]])
diff --git a/test/CodeGen/Mips/ra-allocatable.ll b/test/CodeGen/Mips/ra-allocatable.ll
index 76217886829a4..afc5cb0c2556d 100644
--- a/test/CodeGen/Mips/ra-allocatable.ll
+++ b/test/CodeGen/Mips/ra-allocatable.ll
@@ -98,191 +98,186 @@ entry:
; CHECK: lw $ra, {{[0-9]+}}($sp) # 4-byte Folded Reload
; CHECK: jr $ra
- %0 = load i32* @a0, align 4, !tbaa !0
- %1 = load i32** @b0, align 4, !tbaa !3
- store i32 %0, i32* %1, align 4, !tbaa !0
- %2 = load i32* @a1, align 4, !tbaa !0
- %3 = load i32** @b1, align 4, !tbaa !3
- store i32 %2, i32* %3, align 4, !tbaa !0
- %4 = load i32* @a2, align 4, !tbaa !0
- %5 = load i32** @b2, align 4, !tbaa !3
- store i32 %4, i32* %5, align 4, !tbaa !0
- %6 = load i32* @a3, align 4, !tbaa !0
- %7 = load i32** @b3, align 4, !tbaa !3
- store i32 %6, i32* %7, align 4, !tbaa !0
- %8 = load i32* @a4, align 4, !tbaa !0
- %9 = load i32** @b4, align 4, !tbaa !3
- store i32 %8, i32* %9, align 4, !tbaa !0
- %10 = load i32* @a5, align 4, !tbaa !0
- %11 = load i32** @b5, align 4, !tbaa !3
- store i32 %10, i32* %11, align 4, !tbaa !0
- %12 = load i32* @a6, align 4, !tbaa !0
- %13 = load i32** @b6, align 4, !tbaa !3
- store i32 %12, i32* %13, align 4, !tbaa !0
- %14 = load i32* @a7, align 4, !tbaa !0
- %15 = load i32** @b7, align 4, !tbaa !3
- store i32 %14, i32* %15, align 4, !tbaa !0
- %16 = load i32* @a8, align 4, !tbaa !0
- %17 = load i32** @b8, align 4, !tbaa !3
- store i32 %16, i32* %17, align 4, !tbaa !0
- %18 = load i32* @a9, align 4, !tbaa !0
- %19 = load i32** @b9, align 4, !tbaa !3
- store i32 %18, i32* %19, align 4, !tbaa !0
- %20 = load i32* @a10, align 4, !tbaa !0
- %21 = load i32** @b10, align 4, !tbaa !3
- store i32 %20, i32* %21, align 4, !tbaa !0
- %22 = load i32* @a11, align 4, !tbaa !0
- %23 = load i32** @b11, align 4, !tbaa !3
- store i32 %22, i32* %23, align 4, !tbaa !0
- %24 = load i32* @a12, align 4, !tbaa !0
- %25 = load i32** @b12, align 4, !tbaa !3
- store i32 %24, i32* %25, align 4, !tbaa !0
- %26 = load i32* @a13, align 4, !tbaa !0
- %27 = load i32** @b13, align 4, !tbaa !3
- store i32 %26, i32* %27, align 4, !tbaa !0
- %28 = load i32* @a14, align 4, !tbaa !0
- %29 = load i32** @b14, align 4, !tbaa !3
- store i32 %28, i32* %29, align 4, !tbaa !0
- %30 = load i32* @a15, align 4, !tbaa !0
- %31 = load i32** @b15, align 4, !tbaa !3
- store i32 %30, i32* %31, align 4, !tbaa !0
- %32 = load i32* @a16, align 4, !tbaa !0
- %33 = load i32** @b16, align 4, !tbaa !3
- store i32 %32, i32* %33, align 4, !tbaa !0
- %34 = load i32* @a17, align 4, !tbaa !0
- %35 = load i32** @b17, align 4, !tbaa !3
- store i32 %34, i32* %35, align 4, !tbaa !0
- %36 = load i32* @a18, align 4, !tbaa !0
- %37 = load i32** @b18, align 4, !tbaa !3
- store i32 %36, i32* %37, align 4, !tbaa !0
- %38 = load i32* @a19, align 4, !tbaa !0
- %39 = load i32** @b19, align 4, !tbaa !3
- store i32 %38, i32* %39, align 4, !tbaa !0
- %40 = load i32* @a20, align 4, !tbaa !0
- %41 = load i32** @b20, align 4, !tbaa !3
- store i32 %40, i32* %41, align 4, !tbaa !0
- %42 = load i32* @a21, align 4, !tbaa !0
- %43 = load i32** @b21, align 4, !tbaa !3
- store i32 %42, i32* %43, align 4, !tbaa !0
- %44 = load i32* @a22, align 4, !tbaa !0
- %45 = load i32** @b22, align 4, !tbaa !3
- store i32 %44, i32* %45, align 4, !tbaa !0
- %46 = load i32* @a23, align 4, !tbaa !0
- %47 = load i32** @b23, align 4, !tbaa !3
- store i32 %46, i32* %47, align 4, !tbaa !0
- %48 = load i32* @a24, align 4, !tbaa !0
- %49 = load i32** @b24, align 4, !tbaa !3
- store i32 %48, i32* %49, align 4, !tbaa !0
- %50 = load i32* @a25, align 4, !tbaa !0
- %51 = load i32** @b25, align 4, !tbaa !3
- store i32 %50, i32* %51, align 4, !tbaa !0
- %52 = load i32* @a26, align 4, !tbaa !0
- %53 = load i32** @b26, align 4, !tbaa !3
- store i32 %52, i32* %53, align 4, !tbaa !0
- %54 = load i32* @a27, align 4, !tbaa !0
- %55 = load i32** @b27, align 4, !tbaa !3
- store i32 %54, i32* %55, align 4, !tbaa !0
- %56 = load i32* @a28, align 4, !tbaa !0
- %57 = load i32** @b28, align 4, !tbaa !3
- store i32 %56, i32* %57, align 4, !tbaa !0
- %58 = load i32* @a29, align 4, !tbaa !0
- %59 = load i32** @b29, align 4, !tbaa !3
- store i32 %58, i32* %59, align 4, !tbaa !0
- %60 = load i32* @a0, align 4, !tbaa !0
- %61 = load i32** @c0, align 4, !tbaa !3
- store i32 %60, i32* %61, align 4, !tbaa !0
- %62 = load i32* @a1, align 4, !tbaa !0
- %63 = load i32** @c1, align 4, !tbaa !3
- store i32 %62, i32* %63, align 4, !tbaa !0
- %64 = load i32* @a2, align 4, !tbaa !0
- %65 = load i32** @c2, align 4, !tbaa !3
- store i32 %64, i32* %65, align 4, !tbaa !0
- %66 = load i32* @a3, align 4, !tbaa !0
- %67 = load i32** @c3, align 4, !tbaa !3
- store i32 %66, i32* %67, align 4, !tbaa !0
- %68 = load i32* @a4, align 4, !tbaa !0
- %69 = load i32** @c4, align 4, !tbaa !3
- store i32 %68, i32* %69, align 4, !tbaa !0
- %70 = load i32* @a5, align 4, !tbaa !0
- %71 = load i32** @c5, align 4, !tbaa !3
- store i32 %70, i32* %71, align 4, !tbaa !0
- %72 = load i32* @a6, align 4, !tbaa !0
- %73 = load i32** @c6, align 4, !tbaa !3
- store i32 %72, i32* %73, align 4, !tbaa !0
- %74 = load i32* @a7, align 4, !tbaa !0
- %75 = load i32** @c7, align 4, !tbaa !3
- store i32 %74, i32* %75, align 4, !tbaa !0
- %76 = load i32* @a8, align 4, !tbaa !0
- %77 = load i32** @c8, align 4, !tbaa !3
- store i32 %76, i32* %77, align 4, !tbaa !0
- %78 = load i32* @a9, align 4, !tbaa !0
- %79 = load i32** @c9, align 4, !tbaa !3
- store i32 %78, i32* %79, align 4, !tbaa !0
- %80 = load i32* @a10, align 4, !tbaa !0
- %81 = load i32** @c10, align 4, !tbaa !3
- store i32 %80, i32* %81, align 4, !tbaa !0
- %82 = load i32* @a11, align 4, !tbaa !0
- %83 = load i32** @c11, align 4, !tbaa !3
- store i32 %82, i32* %83, align 4, !tbaa !0
- %84 = load i32* @a12, align 4, !tbaa !0
- %85 = load i32** @c12, align 4, !tbaa !3
- store i32 %84, i32* %85, align 4, !tbaa !0
- %86 = load i32* @a13, align 4, !tbaa !0
- %87 = load i32** @c13, align 4, !tbaa !3
- store i32 %86, i32* %87, align 4, !tbaa !0
- %88 = load i32* @a14, align 4, !tbaa !0
- %89 = load i32** @c14, align 4, !tbaa !3
- store i32 %88, i32* %89, align 4, !tbaa !0
- %90 = load i32* @a15, align 4, !tbaa !0
- %91 = load i32** @c15, align 4, !tbaa !3
- store i32 %90, i32* %91, align 4, !tbaa !0
- %92 = load i32* @a16, align 4, !tbaa !0
- %93 = load i32** @c16, align 4, !tbaa !3
- store i32 %92, i32* %93, align 4, !tbaa !0
- %94 = load i32* @a17, align 4, !tbaa !0
- %95 = load i32** @c17, align 4, !tbaa !3
- store i32 %94, i32* %95, align 4, !tbaa !0
- %96 = load i32* @a18, align 4, !tbaa !0
- %97 = load i32** @c18, align 4, !tbaa !3
- store i32 %96, i32* %97, align 4, !tbaa !0
- %98 = load i32* @a19, align 4, !tbaa !0
- %99 = load i32** @c19, align 4, !tbaa !3
- store i32 %98, i32* %99, align 4, !tbaa !0
- %100 = load i32* @a20, align 4, !tbaa !0
- %101 = load i32** @c20, align 4, !tbaa !3
- store i32 %100, i32* %101, align 4, !tbaa !0
- %102 = load i32* @a21, align 4, !tbaa !0
- %103 = load i32** @c21, align 4, !tbaa !3
- store i32 %102, i32* %103, align 4, !tbaa !0
- %104 = load i32* @a22, align 4, !tbaa !0
- %105 = load i32** @c22, align 4, !tbaa !3
- store i32 %104, i32* %105, align 4, !tbaa !0
- %106 = load i32* @a23, align 4, !tbaa !0
- %107 = load i32** @c23, align 4, !tbaa !3
- store i32 %106, i32* %107, align 4, !tbaa !0
- %108 = load i32* @a24, align 4, !tbaa !0
- %109 = load i32** @c24, align 4, !tbaa !3
- store i32 %108, i32* %109, align 4, !tbaa !0
- %110 = load i32* @a25, align 4, !tbaa !0
- %111 = load i32** @c25, align 4, !tbaa !3
- store i32 %110, i32* %111, align 4, !tbaa !0
- %112 = load i32* @a26, align 4, !tbaa !0
- %113 = load i32** @c26, align 4, !tbaa !3
- store i32 %112, i32* %113, align 4, !tbaa !0
- %114 = load i32* @a27, align 4, !tbaa !0
- %115 = load i32** @c27, align 4, !tbaa !3
- store i32 %114, i32* %115, align 4, !tbaa !0
- %116 = load i32* @a28, align 4, !tbaa !0
- %117 = load i32** @c28, align 4, !tbaa !3
- store i32 %116, i32* %117, align 4, !tbaa !0
- %118 = load i32* @a29, align 4, !tbaa !0
- %119 = load i32** @c29, align 4, !tbaa !3
- store i32 %118, i32* %119, align 4, !tbaa !0
- %120 = load i32* @a0, align 4, !tbaa !0
+ %0 = load i32* @a0, align 4
+ %1 = load i32** @b0, align 4
+ store i32 %0, i32* %1, align 4
+ %2 = load i32* @a1, align 4
+ %3 = load i32** @b1, align 4
+ store i32 %2, i32* %3, align 4
+ %4 = load i32* @a2, align 4
+ %5 = load i32** @b2, align 4
+ store i32 %4, i32* %5, align 4
+ %6 = load i32* @a3, align 4
+ %7 = load i32** @b3, align 4
+ store i32 %6, i32* %7, align 4
+ %8 = load i32* @a4, align 4
+ %9 = load i32** @b4, align 4
+ store i32 %8, i32* %9, align 4
+ %10 = load i32* @a5, align 4
+ %11 = load i32** @b5, align 4
+ store i32 %10, i32* %11, align 4
+ %12 = load i32* @a6, align 4
+ %13 = load i32** @b6, align 4
+ store i32 %12, i32* %13, align 4
+ %14 = load i32* @a7, align 4
+ %15 = load i32** @b7, align 4
+ store i32 %14, i32* %15, align 4
+ %16 = load i32* @a8, align 4
+ %17 = load i32** @b8, align 4
+ store i32 %16, i32* %17, align 4
+ %18 = load i32* @a9, align 4
+ %19 = load i32** @b9, align 4
+ store i32 %18, i32* %19, align 4
+ %20 = load i32* @a10, align 4
+ %21 = load i32** @b10, align 4
+ store i32 %20, i32* %21, align 4
+ %22 = load i32* @a11, align 4
+ %23 = load i32** @b11, align 4
+ store i32 %22, i32* %23, align 4
+ %24 = load i32* @a12, align 4
+ %25 = load i32** @b12, align 4
+ store i32 %24, i32* %25, align 4
+ %26 = load i32* @a13, align 4
+ %27 = load i32** @b13, align 4
+ store i32 %26, i32* %27, align 4
+ %28 = load i32* @a14, align 4
+ %29 = load i32** @b14, align 4
+ store i32 %28, i32* %29, align 4
+ %30 = load i32* @a15, align 4
+ %31 = load i32** @b15, align 4
+ store i32 %30, i32* %31, align 4
+ %32 = load i32* @a16, align 4
+ %33 = load i32** @b16, align 4
+ store i32 %32, i32* %33, align 4
+ %34 = load i32* @a17, align 4
+ %35 = load i32** @b17, align 4
+ store i32 %34, i32* %35, align 4
+ %36 = load i32* @a18, align 4
+ %37 = load i32** @b18, align 4
+ store i32 %36, i32* %37, align 4
+ %38 = load i32* @a19, align 4
+ %39 = load i32** @b19, align 4
+ store i32 %38, i32* %39, align 4
+ %40 = load i32* @a20, align 4
+ %41 = load i32** @b20, align 4
+ store i32 %40, i32* %41, align 4
+ %42 = load i32* @a21, align 4
+ %43 = load i32** @b21, align 4
+ store i32 %42, i32* %43, align 4
+ %44 = load i32* @a22, align 4
+ %45 = load i32** @b22, align 4
+ store i32 %44, i32* %45, align 4
+ %46 = load i32* @a23, align 4
+ %47 = load i32** @b23, align 4
+ store i32 %46, i32* %47, align 4
+ %48 = load i32* @a24, align 4
+ %49 = load i32** @b24, align 4
+ store i32 %48, i32* %49, align 4
+ %50 = load i32* @a25, align 4
+ %51 = load i32** @b25, align 4
+ store i32 %50, i32* %51, align 4
+ %52 = load i32* @a26, align 4
+ %53 = load i32** @b26, align 4
+ store i32 %52, i32* %53, align 4
+ %54 = load i32* @a27, align 4
+ %55 = load i32** @b27, align 4
+ store i32 %54, i32* %55, align 4
+ %56 = load i32* @a28, align 4
+ %57 = load i32** @b28, align 4
+ store i32 %56, i32* %57, align 4
+ %58 = load i32* @a29, align 4
+ %59 = load i32** @b29, align 4
+ store i32 %58, i32* %59, align 4
+ %60 = load i32* @a0, align 4
+ %61 = load i32** @c0, align 4
+ store i32 %60, i32* %61, align 4
+ %62 = load i32* @a1, align 4
+ %63 = load i32** @c1, align 4
+ store i32 %62, i32* %63, align 4
+ %64 = load i32* @a2, align 4
+ %65 = load i32** @c2, align 4
+ store i32 %64, i32* %65, align 4
+ %66 = load i32* @a3, align 4
+ %67 = load i32** @c3, align 4
+ store i32 %66, i32* %67, align 4
+ %68 = load i32* @a4, align 4
+ %69 = load i32** @c4, align 4
+ store i32 %68, i32* %69, align 4
+ %70 = load i32* @a5, align 4
+ %71 = load i32** @c5, align 4
+ store i32 %70, i32* %71, align 4
+ %72 = load i32* @a6, align 4
+ %73 = load i32** @c6, align 4
+ store i32 %72, i32* %73, align 4
+ %74 = load i32* @a7, align 4
+ %75 = load i32** @c7, align 4
+ store i32 %74, i32* %75, align 4
+ %76 = load i32* @a8, align 4
+ %77 = load i32** @c8, align 4
+ store i32 %76, i32* %77, align 4
+ %78 = load i32* @a9, align 4
+ %79 = load i32** @c9, align 4
+ store i32 %78, i32* %79, align 4
+ %80 = load i32* @a10, align 4
+ %81 = load i32** @c10, align 4
+ store i32 %80, i32* %81, align 4
+ %82 = load i32* @a11, align 4
+ %83 = load i32** @c11, align 4
+ store i32 %82, i32* %83, align 4
+ %84 = load i32* @a12, align 4
+ %85 = load i32** @c12, align 4
+ store i32 %84, i32* %85, align 4
+ %86 = load i32* @a13, align 4
+ %87 = load i32** @c13, align 4
+ store i32 %86, i32* %87, align 4
+ %88 = load i32* @a14, align 4
+ %89 = load i32** @c14, align 4
+ store i32 %88, i32* %89, align 4
+ %90 = load i32* @a15, align 4
+ %91 = load i32** @c15, align 4
+ store i32 %90, i32* %91, align 4
+ %92 = load i32* @a16, align 4
+ %93 = load i32** @c16, align 4
+ store i32 %92, i32* %93, align 4
+ %94 = load i32* @a17, align 4
+ %95 = load i32** @c17, align 4
+ store i32 %94, i32* %95, align 4
+ %96 = load i32* @a18, align 4
+ %97 = load i32** @c18, align 4
+ store i32 %96, i32* %97, align 4
+ %98 = load i32* @a19, align 4
+ %99 = load i32** @c19, align 4
+ store i32 %98, i32* %99, align 4
+ %100 = load i32* @a20, align 4
+ %101 = load i32** @c20, align 4
+ store i32 %100, i32* %101, align 4
+ %102 = load i32* @a21, align 4
+ %103 = load i32** @c21, align 4
+ store i32 %102, i32* %103, align 4
+ %104 = load i32* @a22, align 4
+ %105 = load i32** @c22, align 4
+ store i32 %104, i32* %105, align 4
+ %106 = load i32* @a23, align 4
+ %107 = load i32** @c23, align 4
+ store i32 %106, i32* %107, align 4
+ %108 = load i32* @a24, align 4
+ %109 = load i32** @c24, align 4
+ store i32 %108, i32* %109, align 4
+ %110 = load i32* @a25, align 4
+ %111 = load i32** @c25, align 4
+ store i32 %110, i32* %111, align 4
+ %112 = load i32* @a26, align 4
+ %113 = load i32** @c26, align 4
+ store i32 %112, i32* %113, align 4
+ %114 = load i32* @a27, align 4
+ %115 = load i32** @c27, align 4
+ store i32 %114, i32* %115, align 4
+ %116 = load i32* @a28, align 4
+ %117 = load i32** @c28, align 4
+ store i32 %116, i32* %117, align 4
+ %118 = load i32* @a29, align 4
+ %119 = load i32** @c29, align 4
+ store i32 %118, i32* %119, align 4
+ %120 = load i32* @a0, align 4
ret i32 %120
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/CodeGen/Mips/return-vector.ll b/test/CodeGen/Mips/return-vector.ll
index 739c43c68a55e..0e0d515875416 100644
--- a/test/CodeGen/Mips/return-vector.ll
+++ b/test/CodeGen/Mips/return-vector.ll
@@ -30,7 +30,7 @@ entry:
%add7 = add i32 %add5, %add6
ret i32 %add7
-; CHECK: call_i8:
+; CHECK-LABEL: call_i8:
; CHECK: call16(i8)
; CHECK: addiu $4, $sp, 32
; CHECK: lw $[[R0:[a-z0-9]+]], 60($sp)
@@ -56,7 +56,7 @@ entry:
%add3 = fadd float %add1, %add2
ret float %add3
-; CHECK: call_f4:
+; CHECK-LABEL: call_f4:
; CHECK: call16(f4)
; CHECK: addiu $4, $sp, 16
; CHECK: lwc1 $[[R0:[a-z0-9]+]], 28($sp)
@@ -78,7 +78,7 @@ entry:
%add3 = fadd double %add1, %add2
ret double %add3
-; CHECK: call_d4:
+; CHECK-LABEL: call_d4:
; CHECK: call16(d4)
; CHECK: addiu $4, $sp, 32
; CHECK: ldc1 $[[R0:[a-z0-9]+]], 56($sp)
@@ -109,7 +109,7 @@ entry:
%add3 = add i32 %add1, %add2
ret i32 %add3
-; CHECK: call_i4:
+; CHECK-LABEL: call_i4:
; CHECK: call16(i4)
; CHECK-NOT: lw
; CHECK: addu $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]]
@@ -126,7 +126,7 @@ entry:
%add1 = fadd float %v0, %v1
ret float %add1
-; CHECK: call_f2:
+; CHECK-LABEL: call_f2:
; CHECK: call16(f2)
; CHECK-NOT: lwc1
; CHECK: add.s $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]]
@@ -141,7 +141,7 @@ entry:
%add1 = fadd double %v0, %v1
ret double %add1
-; CHECK: call_d2:
+; CHECK-LABEL: call_d2:
; CHECK: call16(d2)
; CHECK-NOT: ldc1
; CHECK: add.d $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]]
@@ -158,7 +158,7 @@ define <8 x i32> @return_i8() {
entry:
ret <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK: return_i8:
+; CHECK-LABEL: return_i8:
; CHECK: sw $[[R0:[a-z0-9]+]], 28($4)
; CHECK: sw $[[R1:[a-z0-9]+]], 24($4)
; CHECK: sw $[[R2:[a-z0-9]+]], 20($4)
@@ -178,12 +178,12 @@ entry:
%vecins4 = insertelement <4 x float> %vecins3, float %d, i32 3
ret <4 x float> %vecins4
-; CHECK: return_f4:
-; CHECK: lwc1 $[[R0:[a-z0-9]+]], 16($sp)
-; CHECK: swc1 $[[R0]], 12($4)
-; CHECK: sw $7, 8($4)
-; CHECK: sw $6, 4($4)
-; CHECK: sw $5, 0($4)
+; CHECK-LABEL: return_f4:
+; CHECK-DAG: lwc1 $[[R0:[a-z0-9]+]], 16($sp)
+; CHECK-DAG: swc1 $[[R0]], 12($4)
+; CHECK-DAG: sw $7, 8($4)
+; CHECK-DAG: sw $6, 4($4)
+; CHECK-DAG: sw $5, 0($4)
}
@@ -195,11 +195,11 @@ entry:
%vecins4 = insertelement <4 x double> %vecins3, double %d, i32 3
ret <4 x double> %vecins4
-; CHECK: return_d4:
-; CHECK: sdc1 $[[R0:[a-z0-9]+]], 24($4)
-; CHECK: sdc1 $[[R1:[a-z0-9]+]], 16($4)
-; CHECK: sdc1 $[[R2:[a-z0-9]+]], 8($4)
-; CHECK: sdc1 $[[R3:[a-z0-9]+]], 0($4)
+; CHECK-LABEL: return_d4:
+; CHECK-DAG: sdc1 $[[R0:[a-z0-9]+]], 24($4)
+; CHECK-DAG: sdc1 $[[R1:[a-z0-9]+]], 16($4)
+; CHECK-DAG: sdc1 $[[R2:[a-z0-9]+]], 8($4)
+; CHECK-DAG: sdc1 $[[R3:[a-z0-9]+]], 0($4)
}
@@ -212,7 +212,7 @@ define <4 x i32> @return_i4() {
entry:
ret <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK: return_i4:
+; CHECK-LABEL: return_i4:
; CHECK: addiu $2, $zero, 0
; CHECK: addiu $3, $zero, 1
; CHECK: addiu $4, $zero, 2
@@ -226,7 +226,7 @@ entry:
%vecins2 = insertelement <2 x float> %vecins1, float %b, i32 1
ret <2 x float> %vecins2
-; CHECK: return_f2:
+; CHECK-LABEL: return_f2:
; CHECK: mov.s $f0, $f12
; CHECK: mov.s $f2, $f14
}
@@ -238,7 +238,7 @@ entry:
%vecins2 = insertelement <2 x double> %vecins1, double %b, i32 1
ret <2 x double> %vecins2
-; CHECK: return_d2:
+; CHECK-LABEL: return_d2:
; CHECK: mov.d $f0, $f12
; CHECK: mov.d $f2, $f14
}
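The return_f4/return_d4 hunks switch from CHECK to CHECK-DAG because the element stores through the sret pointer in $4 are mutually independent, so the scheduler may legally emit them in any order; the test should pin the set of stores, not a sequence. A small sketch of the difference (the registers and offsets assume the O32 convention and vector scalarization; treat the exact operands as an assumption):

; Either order is accepted below; with plain CHECK the reversed
; ordering would fail even though the generated code is correct.
; CHECK-DAG: sw $5, 0($4)
; CHECK-DAG: sw $6, 4($4)

define void @store_pair(<2 x i32>* %p, i32 %a, i32 %b) {
entry:
  %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
  store <2 x i32> %v1, <2 x i32>* %p, align 8
  ret void
}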
diff --git a/test/CodeGen/Mips/rotate.ll b/test/CodeGen/Mips/rotate.ll
index 4f3cfb7df41c5..813bbdf18bbda 100644
--- a/test/CodeGen/Mips/rotate.ll
+++ b/test/CodeGen/Mips/rotate.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=mips16
; CHECK: rotrv $2, $4
+; mips16: .ent rot0
define i32 @rot0(i32 %a, i32 %b) nounwind readnone {
entry:
%shl = shl i32 %a, %b
@@ -11,6 +13,7 @@ entry:
}
; CHECK: rotr $2, $4, 22
+; mips16: .ent rot1
define i32 @rot1(i32 %a) nounwind readnone {
entry:
%shl = shl i32 %a, 10
@@ -20,6 +23,7 @@ entry:
}
; CHECK: rotrv $2, $4, $5
+; mips16: .ent rot2
define i32 @rot2(i32 %a, i32 %b) nounwind readnone {
entry:
%shr = lshr i32 %a, %b
@@ -30,6 +34,7 @@ entry:
}
; CHECK: rotr $2, $4, 10
+; mips16: .ent rot3
define i32 @rot3(i32 %a) nounwind readnone {
entry:
%shr = lshr i32 %a, 10
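All four rot* functions hand ISel the same canonical rotate idiom: a left shift by n, a logical right shift by 32-n (rotrv covering the variable-amount case), and an or of the two halves, which mips32r2 folds into a single rotr/rotrv. A minimal sketch of the fixed-amount idiom (function name hypothetical), mirroring the rot3 expectation above:

; rotate right by 10: folded to "rotr $2, $4, 10" on mips32r2
define i32 @ror10(i32 %a) nounwind readnone {
entry:
  %shr = lshr i32 %a, 10
  %shl = shl i32 %a, 22
  %or = or i32 %shr, %shl
  ret i32 %or
}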
diff --git a/test/CodeGen/Mips/sel1c.ll b/test/CodeGen/Mips/sel1c.ll
new file mode 100644
index 0000000000000..4c4784de6aa84
--- /dev/null
+++ b/test/CodeGen/Mips/sel1c.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 1, align 4
+@j = global i32 2, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define void @t() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %1 = load i32* @j, align 4
+ %cmp = icmp eq i32 %0, %1
+ %cond = select i1 %cmp, i32 1, i32 2
+ store i32 %cond, i32* @k, align 4
+ ret void
+; cond-b-short: bteqz $BB0_{{[0-9]+}} # 16 bit inst
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/sel2c.ll b/test/CodeGen/Mips/sel2c.ll
new file mode 100644
index 0000000000000..25dfaa9ba87ea
--- /dev/null
+++ b/test/CodeGen/Mips/sel2c.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic -mips16-constant-islands < %s | FileCheck %s -check-prefix=cond-b-short
+
+@i = global i32 1, align 4
+@j = global i32 2, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define void @t() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %1 = load i32* @j, align 4
+ %cmp = icmp ne i32 %0, %1
+ %cond = select i1 %cmp, i32 1, i32 2
+ store i32 %cond, i32* @k, align 4
+; cond-b-short: btnez $BB0_{{[0-9]+}} # 16 bit inst
+ ret void
+}
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
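sel1c.ll and sel2c.ll are a matched pair: with -mips16-constant-islands, the icmp eq select is expected to use the 16-bit bteqz form (branch if the T register is zero) and the icmp ne select the btnez form. A sketch of a third variant under the same RUN line and globals; since a signed compare goes through slt, which short branch gets picked is an assumption here, so the check accepts either:

define void @t2() #0 {
entry:
  %0 = load i32* @i, align 4
  %1 = load i32* @j, align 4
  %cmp = icmp slt i32 %0, %1
  %cond = select i1 %cmp, i32 1, i32 2
  store i32 %cond, i32* @k, align 4
  ret void
; cond-b-short: bt{{eqz|nez}} $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
}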
diff --git a/test/CodeGen/Mips/selectcc.ll b/test/CodeGen/Mips/selectcc.ll
index a17517e7d1455..aeef60ecb806e 100644
--- a/test/CodeGen/Mips/selectcc.ll
+++ b/test/CodeGen/Mips/selectcc.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=mipsel < %s
+; RUN: llc -march=mipsel -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
@gf0 = external global float
@gf1 = external global float
@@ -7,6 +8,21 @@
define float @select_cc_f32(float %a, float %b) nounwind {
entry:
+; SOURCE-SCHED: lui
+; SOURCE-SCHED: addiu
+; SOURCE-SCHED: addu
+; SOURCE-SCHED: lw
+; SOURCE-SCHED: sw
+; SOURCE-SCHED: lw
+; SOURCE-SCHED: lui
+; SOURCE-SCHED: sw
+; SOURCE-SCHED: addiu
+; SOURCE-SCHED: addiu
+; SOURCE-SCHED: c.olt.s
+; SOURCE-SCHED: movt
+; SOURCE-SCHED: mtc1
+; SOURCE-SCHED: jr
+
store float 0.000000e+00, float* @gf0, align 4
store float 1.000000e+00, float* @gf1, align 4
%cmp = fcmp olt float %a, %b
diff --git a/test/CodeGen/Mips/selnek.ll b/test/CodeGen/Mips/selnek.ll
index 26015523106dd..64834b256fe56 100644
--- a/test/CodeGen/Mips/selnek.ll
+++ b/test/CodeGen/Mips/selnek.ll
@@ -104,4 +104,4 @@ attributes #1 = { "target-cpu"="mips16" "target-features"="+mips16,+o32" }
; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
; 16: cmpi ${{[0-9]+}}, 1000
-; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
\ No newline at end of file
+; 16: bteqz $BB{{[0-9]+}}_{{[0-9]}}
diff --git a/test/CodeGen/Mips/setcc-se.ll b/test/CodeGen/Mips/setcc-se.ll
new file mode 100644
index 0000000000000..99071c42f1694
--- /dev/null
+++ b/test/CodeGen/Mips/setcc-se.ll
@@ -0,0 +1,155 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+@g1 = external global i32
+
+; CHECK-LABEL: seteq0:
+; CHECK: sltiu ${{[0-9]+}}, $4, 1
+
+define i32 @seteq0(i32 %a) {
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: setne0:
+; CHECK: sltu ${{[0-9]+}}, $zero, $4
+
+define i32 @setne0(i32 %a) {
+entry:
+ %cmp = icmp ne i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: slti_beq0:
+; CHECK: slti $[[R0:[0-9]+]], $4, -32768
+; CHECK: beqz $[[R0]]
+
+define void @slti_beq0(i32 %a) {
+entry:
+ %cmp = icmp slt i32 %a, -32768
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: slti_beq1:
+; CHECK: slt ${{[0-9]+}}
+
+define void @slti_beq1(i32 %a) {
+entry:
+ %cmp = icmp slt i32 %a, -32769
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: slti_beq2:
+; CHECK: slti $[[R0:[0-9]+]], $4, 32767
+; CHECK: beqz $[[R0]]
+
+define void @slti_beq2(i32 %a) {
+entry:
+ %cmp = icmp slt i32 %a, 32767
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: slti_beq3:
+; CHECK: slt ${{[0-9]+}}
+
+define void @slti_beq3(i32 %a) {
+entry:
+ %cmp = icmp slt i32 %a, 32768
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: sltiu_beq0:
+; CHECK: sltiu $[[R0:[0-9]+]], $4, 32767
+; CHECK: beqz $[[R0]]
+
+define void @sltiu_beq0(i32 %a) {
+entry:
+ %cmp = icmp ult i32 %a, 32767
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: sltiu_beq1:
+; CHECK: sltu ${{[0-9]+}}
+
+define void @sltiu_beq1(i32 %a) {
+entry:
+ %cmp = icmp ult i32 %a, 32768
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: sltiu_beq2:
+; CHECK: sltiu $[[R0:[0-9]+]], $4, -32768
+; CHECK: beqz $[[R0]]
+
+define void @sltiu_beq2(i32 %a) {
+entry:
+ %cmp = icmp ult i32 %a, -32768
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: sltiu_beq3:
+; CHECK: sltu ${{[0-9]+}}
+
+define void @sltiu_beq3(i32 %a) {
+entry:
+ %cmp = icmp ult i32 %a, -32769
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 %a, i32* @g1, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
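The pairing in this file is driven by the 16-bit immediate field: slti/sltiu sign-extend their immediate, so only constants in [-32768, 32767] can use the immediate form, and one step past either boundary (the *_beq1/*_beq3 cases) forces the constant into a register and a register-register slt/sltu. sltiu_beq2 still gets an immediate because -32768 is sign-extended to 0xffff8000 first and only then compared unsigned. A boundary sketch (function name hypothetical) that should still take the immediate form:

; CHECK-LABEL: setlt_min_imm:
; CHECK: slti ${{[0-9]+}}, $4, -32768

define i32 @setlt_min_imm(i32 %a) {
entry:
  %cmp = icmp slt i32 %a, -32768
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}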
diff --git a/test/CodeGen/Mips/simplebr.ll b/test/CodeGen/Mips/simplebr.ll
new file mode 100644
index 0000000000000..a1d63671b4eee
--- /dev/null
+++ b/test/CodeGen/Mips/simplebr.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s -check-prefix=CHECK-STATIC16
+
+; ModuleID = 'simplebr.c'
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@i = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @foo() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ call void bitcast (void (...)* @goo to void ()*)()
+ br label %if.end
+
+if.else: ; preds = %entry
+ call void bitcast (void (...)* @hoo to void ()*)()
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+; CHECK-STATIC16: b $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+
+declare void @goo(...) #1
+
+declare void @hoo(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+
+
diff --git a/test/CodeGen/Mips/sint-fp-store_pattern.ll b/test/CodeGen/Mips/sint-fp-store_pattern.ll
new file mode 100644
index 0000000000000..c44ea080a886b
--- /dev/null
+++ b/test/CodeGen/Mips/sint-fp-store_pattern.ll
@@ -0,0 +1,52 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
+
+@gint_ = external global i32
+@gLL_ = external global i64
+
+; 32-LABEL: store_int_float_:
+; 32: trunc.w.s $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 32: swc1 $f[[R0]],
+
+define void @store_int_float_(float %a) {
+entry:
+ %conv = fptosi float %a to i32
+ store i32 %conv, i32* @gint_, align 4
+ ret void
+}
+
+; 32-LABEL: store_int_double_:
+; 32: trunc.w.d $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 32: swc1 $f[[R0]],
+; 64-LABEL: store_int_double_:
+; 64: trunc.w.d $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 64: swc1 $f[[R0]],
+
+define void @store_int_double_(double %a) {
+entry:
+ %conv = fptosi double %a to i32
+ store i32 %conv, i32* @gint_, align 4
+ ret void
+}
+
+; 64-LABEL: store_LL_float_:
+; 64: trunc.l.s $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 64: sdc1 $f[[R0]],
+
+define void @store_LL_float_(float %a) {
+entry:
+ %conv = fptosi float %a to i64
+ store i64 %conv, i64* @gLL_, align 8
+ ret void
+}
+
+; 64-LABEL: store_LL_double_:
+; 64: trunc.l.d $f[[R0:[0-9]+]], $f{{[0-9]+}}
+; 64: sdc1 $f[[R0]],
+
+define void @store_LL_double_(double %a) {
+entry:
+ %conv = fptosi double %a to i64
+ store i64 %conv, i64* @gLL_, align 8
+ ret void
+}
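What these patterns buy is keeping the converted integer in an FPU register: fptosi leaves its result in an FPR (trunc.w.*/trunc.l.*), and when the only use is a store, the value goes straight out through swc1/sdc1 instead of detouring through mfc1 and sw. For contrast, a sketch (function name hypothetical) where the value escapes to a GPR, so the move out of the FPU is unavoidable:

; 32-LABEL: return_int_float_:
; 32: trunc.w.s $f[[R0:[0-9]+]], $f{{[0-9]+}}
; 32: mfc1 ${{[0-9]+}}, $f[[R0]]

define i32 @return_int_float_(float %a) {
entry:
  %conv = fptosi float %a to i32
  ret i32 %conv
}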
diff --git a/test/CodeGen/Mips/stack-alignment.ll b/test/CodeGen/Mips/stack-alignment.ll
new file mode 100644
index 0000000000000..b18f96695ff57
--- /dev/null
+++ b/test/CodeGen/Mips/stack-alignment.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
+
+; 32: addiu $sp, $sp, -8
+; 64: addiu $sp, $sp, -16
+
+define i32 @foo1() #0 {
+entry:
+ ret i32 14
+}
+
+attributes #0 = { "no-frame-pointer-elim"="true" }
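The two constants are the ABI minimum stack alignments: 8 bytes under O32 and 16 under N64; with frame-pointer elimination disabled even this empty function gets a frame, and its size is rounded up to that alignment, hence -8 versus -16. A sketch (same RUN lines assumed; the exact byte counts are deliberately left open, only the multiple matters) showing the round-up surviving an actual local:

; 32: addiu $sp, $sp, -{{8|16}}
; 64: addiu $sp, $sp, -{{16|32}}

define i32 @foo2() #0 {
entry:
  %x = alloca i32, align 4
  store i32 14, i32* %x, align 4
  %0 = load i32* %x, align 4
  ret i32 %0
}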
diff --git a/test/CodeGen/Mips/stackcoloring.ll b/test/CodeGen/Mips/stackcoloring.ll
new file mode 100644
index 0000000000000..4987dad5338ba
--- /dev/null
+++ b/test/CodeGen/Mips/stackcoloring.ll
@@ -0,0 +1,39 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+@g1 = external global i32*
+
+; CHECK-LABEL: foo1:
+; CHECK: lw ${{[0-9]+}}, %got(g1)
+; CHECK: # %for.body
+; CHECK: # %for.end
+
+define i32 @foo1() {
+entry:
+ %b = alloca [16 x i32], align 4
+ %0 = bitcast [16 x i32]* %b to i8*
+ call void @llvm.lifetime.start(i64 64, i8* %0)
+ %arraydecay = getelementptr inbounds [16 x i32]* %b, i32 0, i32 0
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %v.04 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %1 = load i32** @g1, align 4
+ %arrayidx = getelementptr inbounds i32* %1, i32 %i.05
+ %2 = load i32* %arrayidx, align 4
+ %call = call i32 @foo2(i32 %2, i32* %arraydecay)
+ %add = add nsw i32 %call, %v.04
+ %inc = add nsw i32 %i.05, 1
+ %exitcond = icmp eq i32 %inc, 10000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ call void @llvm.lifetime.end(i64 64, i8* %0)
+ ret i32 %add
+}
+
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare i32 @foo2(i32, i32*)
+
+declare void @llvm.lifetime.end(i64, i8* nocapture)
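The llvm.lifetime.start/llvm.lifetime.end pair is what lets stack coloring release the 64-byte slot for %b once the loop is done; without the markers the array stays live for the whole frame. A hedged sketch (names hypothetical) of the classic payoff, two arrays with disjoint live ranges that can share one slot, so the frame should stay near 64 bytes rather than 128:

define void @foo3() {
entry:
  %a = alloca [16 x i32], align 4
  %b = alloca [16 x i32], align 4
  %0 = bitcast [16 x i32]* %a to i8*
  %1 = bitcast [16 x i32]* %b to i8*
  call void @llvm.lifetime.start(i64 64, i8* %0)
  call void @foo4(i8* %0)
  call void @llvm.lifetime.end(i64 64, i8* %0)
  call void @llvm.lifetime.start(i64 64, i8* %1)
  call void @foo4(i8* %1)
  call void @llvm.lifetime.end(i64 64, i8* %1)
  ret void
}

declare void @foo4(i8*)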
diff --git a/test/CodeGen/Mips/stchar.ll b/test/CodeGen/Mips/stchar.ll
index c00c9fd9d2a13..12eae3487ff1d 100644
--- a/test/CodeGen/Mips/stchar.ll
+++ b/test/CodeGen/Mips/stchar.ll
@@ -50,8 +50,8 @@ entry:
%conv1.i = sext i8 %3 to i32
%call.i = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
ret void
-; 16_b: test:
-; 16_h: test:
+; 16_b-LABEL: test:
+; 16_h-LABEL: test:
; 16_b: sb ${{[0-9]+}}, [[offset1:[0-9]+]](${{[0-9]+}})
; 16_b: lb ${{[0-9]+}}, [[offset1]](${{[0-9]+}})
; 16_h: sh ${{[0-9]+}}, [[offset2:[0-9]+]](${{[0-9]+}})
diff --git a/test/CodeGen/Mips/tailcall.ll b/test/CodeGen/Mips/tailcall.ll
index bcd33fca70ed6..30f47abc06cbe 100644
--- a/test/CodeGen/Mips/tailcall.ll
+++ b/test/CodeGen/Mips/tailcall.ll
@@ -243,3 +243,16 @@ entry:
ret i32 %call
}
+; Check that there is a chain edge between the load and store nodes.
+;
+; PIC32-LABEL: caller14:
+; PIC32: lw ${{[0-9]+}}, 16($sp)
+; PIC32: sw $4, 16($sp)
+
+define void @caller14(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+entry:
+ tail call void @callee14(i32 %e, i32 %b, i32 %c, i32 %d, i32 %a)
+ ret void
+}
+
+declare void @callee14(i32, i32, i32, i32, i32)
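The hazard being tested: %e arrives in the fifth argument slot at 16($sp), and the outgoing tail call wants %a in that same slot, so the lw of the incoming value must be chained before the sw that overwrites it; without the edge the scheduler could clobber %e first. A sketch (callee name hypothetical) of the two-slot version of the same hazard, where 16($sp) and 20($sp) are each read and then overwritten crosswise:

define void @caller15(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
entry:
  tail call void @callee15(i32 %a, i32 %b, i32 %c, i32 %d, i32 %f, i32 %e)
  ret void
}

declare void @callee15(i32, i32, i32, i32, i32, i32)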
diff --git a/test/CodeGen/Mips/tls-alias.ll b/test/CodeGen/Mips/tls-alias.ll
index ce98cc8262239..3c810542cca31 100644
--- a/test/CodeGen/Mips/tls-alias.ll
+++ b/test/CodeGen/Mips/tls-alias.ll
@@ -4,7 +4,7 @@
@bar = hidden alias i32* @foo
define i32* @zed() {
-; CHECK: __tls_get_addr
-; CHECK-NEXT: %tlsgd(bar)
+; CHECK-DAG: __tls_get_addr
+; CHECK-DAG: %tlsgd(bar)
ret i32* @bar
}
diff --git a/test/CodeGen/Mips/tls-models.ll b/test/CodeGen/Mips/tls-models.ll
index 8f5789ec7995b..1a958dceaa28c 100644
--- a/test/CodeGen/Mips/tls-models.ll
+++ b/test/CodeGen/Mips/tls-models.ll
@@ -20,9 +20,9 @@ entry:
ret i32* @external_gd
; Non-PIC code can use initial-exec, PIC code has to use general dynamic.
- ; CHECK-NONPIC: f1:
+ ; CHECK-NONPIC-LABEL: f1:
; CHECK-NONPIC: %gottprel
- ; CHECK-PIC: f1:
+ ; CHECK-PIC-LABEL: f1:
; CHECK-PIC: %tlsgd
}
@@ -31,9 +31,9 @@ entry:
ret i32* @internal_gd
; Non-PIC code can use local exec, PIC code can use local dynamic.
- ; CHECK-NONPIC: f2:
+ ; CHECK-NONPIC-LABEL: f2:
; CHECK-NONPIC: %tprel_hi
- ; CHECK-PIC: f2:
+ ; CHECK-PIC-LABEL: f2:
; CHECK-PIC: %tlsldm
}
@@ -45,9 +45,9 @@ entry:
ret i32* @external_ld
; Non-PIC code can use initial exec, PIC should use local dynamic.
- ; CHECK-NONPIC: f3:
+ ; CHECK-NONPIC-LABEL: f3:
; CHECK-NONPIC: %gottprel
- ; CHECK-PIC: f3:
+ ; CHECK-PIC-LABEL: f3:
; CHECK-PIC: %tlsldm
}
@@ -56,9 +56,9 @@ entry:
ret i32* @internal_ld
; Non-PIC code can use local exec, PIC code can use local dynamic.
- ; CHECK-NONPIC: f4:
+ ; CHECK-NONPIC-LABEL: f4:
; CHECK-NONPIC: %tprel_hi
- ; CHECK-PIC: f4:
+ ; CHECK-PIC-LABEL: f4:
; CHECK-PIC: %tlsldm
}
@@ -70,9 +70,9 @@ entry:
ret i32* @external_ie
; Non-PIC and PIC code will use initial exec as specified.
- ; CHECK-NONPIC: f5:
+ ; CHECK-NONPIC-LABEL: f5:
; CHECK-NONPIC: %gottprel
- ; CHECK-PIC: f5:
+ ; CHECK-PIC-LABEL: f5:
; CHECK-PIC: %gottprel
}
@@ -81,9 +81,9 @@ entry:
ret i32* @internal_ie
; Non-PIC code can use local exec; PIC code uses initial exec as specified.
- ; CHECK-NONPIC: f6:
+ ; CHECK-NONPIC-LABEL: f6:
; CHECK-NONPIC: %tprel_hi
- ; CHECK-PIC: f6:
+ ; CHECK-PIC-LABEL: f6:
; CHECK-PIC: %gottprel
}
@@ -95,9 +95,9 @@ entry:
ret i32* @external_le
; Non-PIC and PIC code will use local exec as specified.
- ; CHECK-NONPIC: f7:
+ ; CHECK-NONPIC-LABEL: f7:
; CHECK-NONPIC: %tprel_hi
- ; CHECK-PIC: f7:
+ ; CHECK-PIC-LABEL: f7:
; CHECK-PIC: %tprel_hi
}
@@ -106,8 +106,8 @@ entry:
ret i32* @internal_le
; Non-PIC and PIC code will use local exec as specified.
- ; CHECK-NONPIC: f8:
+ ; CHECK-NONPIC-LABEL: f8:
; CHECK-NONPIC: %tprel_hi
- ; CHECK-PIC: f8:
+ ; CHECK-PIC-LABEL: f8:
; CHECK-PIC: %tprel_hi
}
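The pattern across f1-f8: an explicitly requested model is a promise from the frontend, so it is honored even under PIC (f5 through f8), while unannotated TLS falls back to whatever the relocation model can prove, with general dynamic as the safe default (f1) and cheaper models used when the symbol is known local or the code is non-PIC (f2 through f4). For reference, a sketch of how a specific model is requested in the IR (symbol and function names hypothetical):

@x_ie = external thread_local(initialexec) global i32

define i32* @get_x_ie() {
entry:
  ret i32* @x_ie
}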
diff --git a/test/CodeGen/Mips/tls.ll b/test/CodeGen/Mips/tls.ll
index b86d25e5e5e83..23a8f93a9d7cf 100644
--- a/test/CodeGen/Mips/tls.ll
+++ b/test/CodeGen/Mips/tls.ll
@@ -13,14 +13,14 @@ entry:
%tmp = load i32* @t1, align 4
ret i32 %tmp
-; CHECK: f1:
-
-; PIC: addu $[[R0:[a-z0-9]+]], $2, $25
-; PIC: lw $25, %call16(__tls_get_addr)($[[R0]])
-; PIC: addiu $4, $[[R0]], %tlsgd(t1)
-; PIC: jalr $25
-; PIC: lw $2, 0($2)
-
+; PIC-LABEL: f1:
+; PIC-DAG: addu $[[R0:[a-z0-9]+]], $2, $25
+; PIC-DAG: lw $25, %call16(__tls_get_addr)($[[R0]])
+; PIC-DAG: addiu $4, $[[R0]], %tlsgd(t1)
+; PIC-DAG: jalr $25
+; PIC-DAG: lw $2, 0($2)
+
+; STATIC-LABEL: f1:
; STATIC: lui $[[R0:[0-9]+]], %tprel_hi(t1)
; STATIC: addiu $[[R1:[0-9]+]], $[[R0]], %tprel_lo(t1)
; STATIC: rdhwr $3, $29
@@ -36,17 +36,19 @@ entry:
%tmp = load i32* @t2, align 4
ret i32 %tmp
-; CHECK: f2:
-
-; PIC: addu $[[R0:[a-z0-9]+]], $2, $25
-; PIC: lw $25, %call16(__tls_get_addr)($[[R0]])
-; PIC: addiu $4, $[[R0]], %tlsgd(t2)
-; PIC: jalr $25
-; PIC: lw $2, 0($2)
+; PIC-LABEL: f2:
+; PIC-DAG: addu $[[R0:[a-z0-9]+]], $2, $25
+; PIC-DAG: lw $25, %call16(__tls_get_addr)($[[R0]])
+; PIC-DAG: addiu $4, $[[R0]], %tlsgd(t2)
+; PIC-DAG: jalr $25
+; PIC-DAG: lw $2, 0($2)
+; STATICGP-LABEL: f2:
; STATICGP: lui $[[R0:[0-9]+]], %hi(__gnu_local_gp)
; STATICGP: addiu $[[GP:[0-9]+]], $[[R0]], %lo(__gnu_local_gp)
; STATICGP: lw ${{[0-9]+}}, %gottprel(t2)($[[GP]])
+
+; STATIC-LABEL: f2:
; STATIC: lui $[[R0:[0-9]+]], %hi(__gnu_local_gp)
; STATIC: addiu $[[GP:[0-9]+]], $[[R0]], %lo(__gnu_local_gp)
; STATIC: rdhwr $3, $29
@@ -59,7 +61,7 @@ entry:
define i32 @f3() nounwind {
entry:
-; CHECK: f3:
+; CHECK-LABEL: f3:
; PIC: addiu $4, ${{[a-z0-9]+}}, %tlsldm(f3.i)
; PIC: jalr $25
diff --git a/test/CodeGen/Mips/tnaked.ll b/test/CodeGen/Mips/tnaked.ll
index f5bdd915b28cd..08f1ab5be86ee 100644
--- a/test/CodeGen/Mips/tnaked.ll
+++ b/test/CodeGen/Mips/tnaked.ll
@@ -7,7 +7,7 @@ entry:
}
; CHECK: .ent tnaked
-; CHECK: tnaked:
+; CHECK-LABEL: tnaked:
; CHECK-NOT: .frame {{.*}}
; CHECK-NOT: .mask {{.*}}
; CHECK-NOT: .fmask {{.*}}
@@ -19,11 +19,11 @@ entry:
}
; CHECK: .ent tnonaked
-; CHECK: tnonaked:
+; CHECK-LABEL: tnonaked:
; CHECK: .frame $fp,8,$ra
; CHECK: .mask 0x40000000,-4
; CHECK: .fmask 0x00000000,0
; CHECK: addiu $sp, $sp, -8
-attributes #0 = { naked noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { naked noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/trap.ll b/test/CodeGen/Mips/trap.ll
new file mode 100644
index 0000000000000..beb4b894632b8
--- /dev/null
+++ b/test/CodeGen/Mips/trap.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s
+
+declare void @llvm.trap()
+
+define void @f1() {
+entry:
+ call void @llvm.trap()
+ unreachable
+
+; CHECK: break
+}
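On MIPS, llvm.trap lowers to the break instruction. A sketch (function name hypothetical) of the more common guarded use, where only the failing path should contain the break:

; CHECK-LABEL: f2:
; CHECK: break

define i32 @f2(i32 %a) {
entry:
  %tobool = icmp eq i32 %a, 0
  br i1 %tobool, label %trap, label %cont

trap:                                             ; preds = %entry
  call void @llvm.trap()
  unreachable

cont:                                             ; preds = %entry
  ret i32 %a
}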
diff --git a/test/CodeGen/Mips/trap1.ll b/test/CodeGen/Mips/trap1.ll
new file mode 100644
index 0000000000000..bfcd7fed30d9b
--- /dev/null
+++ b/test/CodeGen/Mips/trap1.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
+
+declare void @llvm.trap()
+
+; Function Attrs: nounwind optsize readnone
+define i32 @main() {
+entry:
+ call void @llvm.trap()
+ unreachable
+; pic: break 0
+ ret i32 0
+}
+
diff --git a/test/CodeGen/Mips/unalignedload.ll b/test/CodeGen/Mips/unalignedload.ll
index 7f880b6fe3799..19f3af7f344a7 100644
--- a/test/CodeGen/Mips/unalignedload.ll
+++ b/test/CodeGen/Mips/unalignedload.ll
@@ -9,17 +9,17 @@
define void @foo1() nounwind {
entry:
-; CHECK-EL: lbu ${{[0-9]+}}, 2($[[R0:[0-9]+]])
-; CHECK-EL: lbu ${{[0-9]+}}, 3($[[R0]])
-; CHECK-EL: jalr
-; CHECK-EL: lwl $[[R1:[0-9]+]], 3($[[R2:[0-9]+]])
-; CHECK-EL: lwr $[[R1]], 0($[[R2]])
+; CHECK-EL-DAG: lbu ${{[0-9]+}}, 2($[[R0:[0-9]+]])
+; CHECK-EL-DAG: lbu ${{[0-9]+}}, 3($[[R0]])
+; CHECK-EL: jalr
+; CHECK-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[R2:[0-9]+]])
+; CHECK-EL-DAG: lwr $[[R1]], 0($[[R2]])
-; CHECK-EB: lbu ${{[0-9]+}}, 3($[[R0:[0-9]+]])
-; CHECK-EB: lbu ${{[0-9]+}}, 2($[[R0]])
-; CHECK-EB: jalr
-; CHECK-EB: lwl $[[R1:[0-9]+]], 0($[[R2:[0-9]+]])
-; CHECK-BE: lwr $[[R1]], 3($[[R2]])
+; CHECK-EB-DAG: lbu ${{[0-9]+}}, 3($[[R0:[0-9]+]])
+; CHECK-EB-DAG: lbu ${{[0-9]+}}, 2($[[R0]])
+; CHECK-EB: jalr
+; CHECK-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[R2:[0-9]+]])
+; CHECK-EB-DAG: lwr $[[R1]], 3($[[R2]])
tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2* @s2, i32 0, i32 1)) nounwind
tail call void @foo4(%struct.S4* byval @s4) nounwind