summaryrefslogtreecommitdiff
path: root/test/CodeGen/SystemZ
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/SystemZ')
-rw-r--r--test/CodeGen/SystemZ/branch-11.ll56
-rw-r--r--test/CodeGen/SystemZ/fp-abs-03.ll43
-rw-r--r--test/CodeGen/SystemZ/fp-abs-04.ll46
-rw-r--r--test/CodeGen/SystemZ/fp-add-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-add-04.ll17
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-01.ll102
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-06.ll33
-rw-r--r--test/CodeGen/SystemZ/fp-const-11.ll40
-rw-r--r--test/CodeGen/SystemZ/fp-conv-15.ll50
-rw-r--r--test/CodeGen/SystemZ/fp-conv-16.ll99
-rw-r--r--test/CodeGen/SystemZ/fp-copysign-02.ll81
-rw-r--r--test/CodeGen/SystemZ/fp-div-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-div-04.ll17
-rw-r--r--test/CodeGen/SystemZ/fp-move-13.ll46
-rw-r--r--test/CodeGen/SystemZ/fp-mul-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-mul-06.ll31
-rw-r--r--test/CodeGen/SystemZ/fp-mul-08.ll31
-rw-r--r--test/CodeGen/SystemZ/fp-mul-10.ll43
-rw-r--r--test/CodeGen/SystemZ/fp-mul-11.ll32
-rw-r--r--test/CodeGen/SystemZ/fp-mul-12.ll72
-rw-r--r--test/CodeGen/SystemZ/fp-neg-02.ll41
-rw-r--r--test/CodeGen/SystemZ/fp-round-03.ll207
-rw-r--r--test/CodeGen/SystemZ/fp-sqrt-01.ll8
-rw-r--r--test/CodeGen/SystemZ/fp-sqrt-04.ll17
-rw-r--r--test/CodeGen/SystemZ/fp-sub-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-sub-04.ll17
-rw-r--r--test/CodeGen/SystemZ/int-add-17.ll95
-rw-r--r--test/CodeGen/SystemZ/int-mul-09.ll95
-rw-r--r--test/CodeGen/SystemZ/int-mul-10.ll165
-rw-r--r--test/CodeGen/SystemZ/int-mul-11.ll32
-rw-r--r--test/CodeGen/SystemZ/int-sub-10.ll95
-rw-r--r--test/CodeGen/SystemZ/tdc-07.ll18
-rw-r--r--test/CodeGen/SystemZ/vec-abs-06.ll47
-rw-r--r--test/CodeGen/SystemZ/vec-add-02.ll24
-rw-r--r--test/CodeGen/SystemZ/vec-and-04.ll47
-rw-r--r--test/CodeGen/SystemZ/vec-cmp-07.ll349
-rw-r--r--test/CodeGen/SystemZ/vec-ctpop-02.ll45
-rw-r--r--test/CodeGen/SystemZ/vec-div-02.ll24
-rw-r--r--test/CodeGen/SystemZ/vec-intrinsics-01.ll (renamed from test/CodeGen/SystemZ/vec-intrinsics.ll)0
-rw-r--r--test/CodeGen/SystemZ/vec-intrinsics-02.ll441
-rw-r--r--test/CodeGen/SystemZ/vec-max-05.ll175
-rw-r--r--test/CodeGen/SystemZ/vec-min-05.ll175
-rw-r--r--test/CodeGen/SystemZ/vec-move-18.ll24
-rw-r--r--test/CodeGen/SystemZ/vec-mul-03.ll24
-rw-r--r--test/CodeGen/SystemZ/vec-mul-04.ll31
-rw-r--r--test/CodeGen/SystemZ/vec-mul-05.ll63
-rw-r--r--test/CodeGen/SystemZ/vec-neg-02.ll23
-rw-r--r--test/CodeGen/SystemZ/vec-or-03.ll91
-rw-r--r--test/CodeGen/SystemZ/vec-round-02.ll118
-rw-r--r--test/CodeGen/SystemZ/vec-sqrt-02.ll23
-rw-r--r--test/CodeGen/SystemZ/vec-sub-02.ll31
-rw-r--r--test/CodeGen/SystemZ/vec-xor-02.ll47
52 files changed, 3384 insertions, 71 deletions
diff --git a/test/CodeGen/SystemZ/branch-11.ll b/test/CodeGen/SystemZ/branch-11.ll
new file mode 100644
index 000000000000..ce7b3ef267b4
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-11.ll
@@ -0,0 +1,56 @@
+; Test indirect jumps on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define i32 @f1(i32 %x, i32 %y, i32 %op) {
+; CHECK-LABEL: f1:
+; CHECK: ahi %r4, -1
+; CHECK: clibh %r4, 5, 0(%r14)
+; CHECK: llgfr [[OP64:%r[0-5]]], %r4
+; CHECK: sllg [[INDEX:%r[1-5]]], [[OP64]], 3
+; CHECK: larl [[BASE:%r[1-5]]]
+; CHECK: bi 0([[BASE]],[[INDEX]])
+entry:
+ switch i32 %op, label %exit [
+ i32 1, label %b.add
+ i32 2, label %b.sub
+ i32 3, label %b.and
+ i32 4, label %b.or
+ i32 5, label %b.xor
+ i32 6, label %b.mul
+ ]
+
+b.add:
+ %add = add i32 %x, %y
+ br label %exit
+
+b.sub:
+ %sub = sub i32 %x, %y
+ br label %exit
+
+b.and:
+ %and = and i32 %x, %y
+ br label %exit
+
+b.or:
+ %or = or i32 %x, %y
+ br label %exit
+
+b.xor:
+ %xor = xor i32 %x, %y
+ br label %exit
+
+b.mul:
+ %mul = mul i32 %x, %y
+ br label %exit
+
+exit:
+ %res = phi i32 [ %x, %entry ],
+ [ %add, %b.add ],
+ [ %sub, %b.sub ],
+ [ %and, %b.and ],
+ [ %or, %b.or ],
+ [ %xor, %b.xor ],
+ [ %mul, %b.mul ]
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-abs-03.ll b/test/CodeGen/SystemZ/fp-abs-03.ll
new file mode 100644
index 000000000000..cab6c116bc08
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-abs-03.ll
@@ -0,0 +1,43 @@
+; Test floating-point absolute on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f32.
+declare float @llvm.fabs.f32(float %f)
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: lpdfr %f0, %f0
+; CHECK: br %r14
+ %res = call float @llvm.fabs.f32(float %f)
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.fabs.f64(double %f)
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: lpdfr %f0, %f0
+; CHECK: br %r14
+ %res = call double @llvm.fabs.f64(double %f)
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure absolute would probably
+; be better implemented using an NI on the upper byte. Do some extra
+; processing so that using FPRs is unequivocally better.
+declare fp128 @llvm.fabs.f128(fp128 %f)
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: wflpxb [[POSREG1:%v[0-9]+]], [[REG1]]
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[POSREG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %orig = load fp128 , fp128 *%ptr
+ %abs = call fp128 @llvm.fabs.f128(fp128 %orig)
+ %op2 = load fp128 , fp128 *%ptr2
+ %res = fdiv fp128 %abs, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-abs-04.ll b/test/CodeGen/SystemZ/fp-abs-04.ll
new file mode 100644
index 000000000000..606bce3de36e
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-abs-04.ll
@@ -0,0 +1,46 @@
+; Test negated floating-point absolute on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f32.
+declare float @llvm.fabs.f32(float %f)
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: lndfr %f0, %f0
+; CHECK: br %r14
+ %abs = call float @llvm.fabs.f32(float %f)
+ %res = fsub float -0.0, %abs
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.fabs.f64(double %f)
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: lndfr %f0, %f0
+; CHECK: br %r14
+ %abs = call double @llvm.fabs.f64(double %f)
+ %res = fsub double -0.0, %abs
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure negative-absolute would
+; probably be better implemented using an OI on the upper byte. Do some
+; extra processing so that using FPRs is unequivocally better.
+declare fp128 @llvm.fabs.f128(fp128 %f)
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: wflnxb [[NEGREG1:%v[0-9]+]], [[REG1]]
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[NEGREG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %orig = load fp128 , fp128 *%ptr
+ %abs = call fp128 @llvm.fabs.f128(fp128 %orig)
+ %negabs = fsub fp128 0xL00000000000000008000000000000000, %abs
+ %op2 = load fp128 , fp128 *%ptr2
+ %res = fdiv fp128 %negabs, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-add-01.ll b/test/CodeGen/SystemZ/fp-add-01.ll
index 5b0ed0513a37..219607d628d7 100644
--- a/test/CodeGen/SystemZ/fp-add-01.ll
+++ b/test/CodeGen/SystemZ/fp-add-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit floating-point addition.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: aeb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: aeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-add-04.ll b/test/CodeGen/SystemZ/fp-add-04.ll
new file mode 100644
index 000000000000..186f37ca5182
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-add-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point addition on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfaxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fadd fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-01.ll b/test/CodeGen/SystemZ/fp-cmp-01.ll
index 075c7aa3dd84..146b16bc695f 100644
--- a/test/CodeGen/SystemZ/fp-cmp-01.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-01.ll
@@ -1,7 +1,10 @@
; Test 32-bit floating-point comparison. The tests assume a z10 implementation
; of select, using conditional branches rather than LOCGR.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
declare float @foo()
@@ -9,8 +12,9 @@ declare float @foo()
define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
; CHECK-LABEL: f1:
; CHECK: cebr %f0, %f2
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%cond = fcmp oeq float %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
@@ -21,8 +25,9 @@ define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
define i64 @f2(i64 %a, i64 %b, float %f1, float *%ptr) {
; CHECK-LABEL: f2:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%f2 = load float , float *%ptr
%cond = fcmp oeq float %f1, %f2
@@ -34,8 +39,9 @@ define i64 @f2(i64 %a, i64 %b, float %f1, float *%ptr) {
define i64 @f3(i64 %a, i64 %b, float %f1, float *%base) {
; CHECK-LABEL: f3:
; CHECK: ceb %f0, 4092(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float , float *%ptr
@@ -50,8 +56,9 @@ define i64 @f4(i64 %a, i64 %b, float %f1, float *%base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r4, 4096
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float , float *%ptr
@@ -65,8 +72,9 @@ define i64 @f5(i64 %a, i64 %b, float %f1, float *%base) {
; CHECK-LABEL: f5:
; CHECK: aghi %r4, -4
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float , float *%ptr
@@ -80,8 +88,9 @@ define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r5, 2
; CHECK: ceb %f0, 400(%r1,%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%ptr1 = getelementptr float, float *%base, i64 %index
%ptr2 = getelementptr float, float *%ptr1, i64 100
@@ -95,7 +104,7 @@ define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK-SCALAR: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
@@ -153,8 +162,9 @@ define float @f7(float *%ptr0) {
define i64 @f8(i64 %a, i64 %b, float %f) {
; CHECK-LABEL: f8:
; CHECK: ltebr %f0, %f0
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%cond = fcmp oeq float %f, 0.0
%res = select i1 %cond, i64 %a, i64 %b
@@ -166,8 +176,9 @@ define i64 @f8(i64 %a, i64 %b, float %f) {
define i64 @f9(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f9:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: ber %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: ber %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp oeq float %f1, %f2
@@ -179,8 +190,9 @@ define i64 @f9(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f10(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f10:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: blhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: blhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnlh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp one float %f1, %f2
@@ -192,8 +204,9 @@ define i64 @f10(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f11(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f11:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp olt float %f1, %f2
@@ -205,8 +218,9 @@ define i64 @f11(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f12(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f12:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bher %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bher %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnhe %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ole float %f1, %f2
@@ -218,8 +232,9 @@ define i64 @f12(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f13(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f13:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bler %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bler %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnle %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp oge float %f1, %f2
@@ -231,8 +246,9 @@ define i64 @f13(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f14(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f14:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: blr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: blr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrnl %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ogt float %f1, %f2
@@ -244,8 +260,9 @@ define i64 @f14(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f15(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f15:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnlhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnlhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrlh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ueq float %f1, %f2
@@ -257,8 +274,9 @@ define i64 @f15(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f16(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f16:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bner %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bner %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgre %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp une float %f1, %f2
@@ -270,8 +288,9 @@ define i64 @f16(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f17(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f17:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnler %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnler %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrle %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ult float %f1, %f2
@@ -283,8 +302,9 @@ define i64 @f17(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f18(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f18:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnlr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnlr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrl %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ule float %f1, %f2
@@ -296,8 +316,9 @@ define i64 @f18(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f19(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f19:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnhr %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnhr %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrh %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp uge float %f1, %f2
@@ -309,8 +330,9 @@ define i64 @f19(i64 %a, i64 %b, float %f2, float *%ptr) {
define i64 @f20(i64 %a, i64 %b, float %f2, float *%ptr) {
; CHECK-LABEL: f20:
; CHECK: ceb %f0, 0(%r4)
-; CHECK-NEXT: bnher %r14
-; CHECK: lgr %r2, %r3
+; CHECK-SCALAR-NEXT: bnher %r14
+; CHECK-SCALAR: lgr %r2, %r3
+; CHECK-VECTOR-NEXT: locgrhe %r2, %r3
; CHECK: br %r14
%f1 = load float , float *%ptr
%cond = fcmp ugt float %f1, %f2
diff --git a/test/CodeGen/SystemZ/fp-cmp-06.ll b/test/CodeGen/SystemZ/fp-cmp-06.ll
new file mode 100644
index 000000000000..e146b51e4fb2
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-cmp-06.ll
@@ -0,0 +1,33 @@
+; Test f128 comparisons on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; There is no memory form of 128-bit comparison.
+define i64 @f1(i64 %a, i64 %b, fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r4)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r5)
+; CHECK: wfcxb [[REG1]], [[REG2]]
+; CHECK-NEXT: locgrne %r2, %r3
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %cond = fcmp oeq fp128 %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check comparison with zero -- it is not worthwhile to copy to
+; FP pairs just so we can use LTXBR, so simply load up a zero.
+define i64 @f2(i64 %a, i64 %b, fp128 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r4)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfcxb [[REG1]], [[REG2]]
+; CHECK-NEXT: locgrne %r2, %r3
+; CHECK: br %r14
+ %f = load fp128, fp128 *%ptr
+ %cond = fcmp oeq fp128 %f, 0xL00000000000000000000000000000000
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-const-11.ll b/test/CodeGen/SystemZ/fp-const-11.ll
new file mode 100644
index 000000000000..8523f2786c34
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-11.ll
@@ -0,0 +1,40 @@
+; Test loads of f128 floating-point constants on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s -check-prefix=CONST
+
+; Test loading zero.
+define void @f1(fp128 *%x) {
+; CHECK-LABEL: f1:
+; CHECK: vzero [[REG:%v[0-9]+]]
+; CHECK: vst [[REG]], 0(%r2)
+; CHECK: br %r14
+ store fp128 0xL00000000000000000000000000000000, fp128 *%x
+ ret void
+}
+
+; Test loading of negative floating-point zero.
+define void @f2(fp128 *%x) {
+; CHECK-LABEL: f2:
+; CHECK: vzero [[REG:%v[0-9]+]]
+; CHECK: wflnxb [[REG]], [[REG]]
+; CHECK: vst [[REG]], 0(%r2)
+; CHECK: br %r14
+ store fp128 0xL00000000000000008000000000000000, fp128 *%x
+ ret void
+}
+
+; Test loading of a 128-bit floating-point constant. This value would
+; actually fit within the 32-bit format, but we don't have extending
+; loads into vector registers.
+define void @f3(fp128 *%x) {
+; CHECK-LABEL: f3:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: vl [[REG:%v[0-9]+]], 0([[REGISTER]])
+; CHECK: vst [[REG]], 0(%r2)
+; CHECK: br %r14
+; CONST: .quad 4611404543484231680
+; CONST: .quad 0
+ store fp128 0xL00000000000000003fff000002000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-15.ll b/test/CodeGen/SystemZ/fp-conv-15.ll
new file mode 100644
index 000000000000..61100016c426
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-15.ll
@@ -0,0 +1,50 @@
+; Test f128 floating-point truncations/extensions on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f128->f64.
+define double @f1(fp128 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wflrx %f0, [[REG]], 0, 0
+; CHECK: br %r14
+ %val = load fp128, fp128 *%ptr
+ %res = fptrunc fp128 %val to double
+ ret double %res
+}
+
+; Test f128->f32.
+define float @f2(fp128 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wflrx %f0, [[REG]], 0, 3
+; CHECK: ledbra %f0, 0, %f0, 0
+; CHECK: br %r14
+ %val = load fp128, fp128 *%ptr
+ %res = fptrunc fp128 %val to float
+ ret float %res
+}
+
+; Test f64->f128.
+define void @f3(fp128 *%dst, double %val) {
+; CHECK-LABEL: f3:
+; CHECK: wflld [[RES:%v[0-9]+]], %f0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Test f32->f128.
+define void @f4(fp128 *%dst, float %val) {
+; CHECK-LABEL: f4:
+; CHECK: ldebr %f0, %f0
+; CHECK: wflld [[RES:%v[0-9]+]], %f0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-conv-16.ll b/test/CodeGen/SystemZ/fp-conv-16.ll
new file mode 100644
index 000000000000..4f9bb865694b
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-16.ll
@@ -0,0 +1,99 @@
+; Test f128 floating-point conversion to/from integers on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test signed i32->f128.
+define void @f1(i32 %i, fp128 *%dst) {
+; CHECK-LABEL: f1:
+; CHECK: cxfbr %f0, %r2
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = sitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test signed i64->f128.
+define void @f2(i64 %i, fp128 *%dst) {
+; CHECK-LABEL: f2:
+; CHECK: cxgbr %f0, %r2
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = sitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test unsigned i32->f128.
+define void @f3(i32 %i, fp128 *%dst) {
+; CHECK-LABEL: f3:
+; CHECK: cxlfbr %f0, 0, %r2, 0
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = uitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test unsigned i64->f128.
+define void @f4(i64 %i, fp128 *%dst) {
+; CHECK-LABEL: f4:
+; CHECK: cxlgbr %f0, 0, %r2, 0
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = uitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test signed f128->i32.
+define i32 @f5(fp128 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: cfxbr %r2, 5, %f0
+; CHECK: br %r14
+ %f = load fp128, fp128 *%src
+ %conv = fptosi fp128 %f to i32
+ ret i32 %conv
+}
+
+; Test signed f128->i64.
+define i64 @f6(fp128 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: cgxbr %r2, 5, %f0
+; CHECK: br %r14
+ %f = load fp128, fp128 *%src
+ %conv = fptosi fp128 %f to i64
+ ret i64 %conv
+}
+
+; Test unsigned f128->i32.
+define i32 @f7(fp128 *%src) {
+; CHECK-LABEL: f7:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: clfxbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %f = load fp128 , fp128 *%src
+ %conv = fptoui fp128 %f to i32
+ ret i32 %conv
+}
+
+; Test unsigned f128->i64.
+define i64 @f8(fp128 *%src) {
+; CHECK-LABEL: f8:
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: clgxbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %f = load fp128 , fp128 *%src
+ %conv = fptoui fp128 %f to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/SystemZ/fp-copysign-02.ll b/test/CodeGen/SystemZ/fp-copysign-02.ll
new file mode 100644
index 000000000000..657c0e18767b
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-copysign-02.ll
@@ -0,0 +1,81 @@
+; Test f128 copysign operations on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @copysignf(float, float) readnone
+declare double @copysign(double, double) readnone
+; FIXME: not really the correct prototype for SystemZ.
+declare fp128 @copysignl(fp128, fp128) readnone
+
+; Test f32 copies in which the sign comes from an f128.
+define float @f1(float %a, fp128 *%bptr) {
+; CHECK-LABEL: f1:
+; CHECK: vl %v[[REG:[0-9]+]], 0(%r2)
+; CHECK: cpsdr %f0, %f[[REG]], %f0
+; CHECK: br %r14
+ %bl = load volatile fp128, fp128 *%bptr
+ %b = fptrunc fp128 %bl to float
+ %res = call float @copysignf(float %a, float %b) readnone
+ ret float %res
+}
+
+; Test f64 copies in which the sign comes from an f128.
+define double @f2(double %a, fp128 *%bptr) {
+; CHECK-LABEL: f2:
+; CHECK: vl %v[[REG:[0-9]+]], 0(%r2)
+; CHECK: cpsdr %f0, %f[[REG]], %f0
+; CHECK: br %r14
+ %bl = load volatile fp128, fp128 *%bptr
+ %b = fptrunc fp128 %bl to double
+ %res = call double @copysign(double %a, double %b) readnone
+ ret double %res
+}
+
+; Test f128 copies in which the sign comes from an f32.
+define void @f7(fp128 *%cptr, fp128 *%aptr, float %bf) {
+; CHECK-LABEL: f7:
+; CHECK: vl [[REG1:%v[0-7]+]], 0(%r3)
+; CHECK: tmlh
+; CHECK: wflnxb [[REG1]], [[REG1]]
+; CHECK: wflpxb [[REG1]], [[REG1]]
+; CHECK: vst [[REG1]], 0(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128, fp128 *%aptr
+ %b = fpext float %bf to fp128
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
+
+; As above, but the sign comes from an f64.
+define void @f8(fp128 *%cptr, fp128 *%aptr, double %bd) {
+; CHECK-LABEL: f8:
+; CHECK: vl [[REG1:%v[0-7]+]], 0(%r3)
+; CHECK: tmhh
+; CHECK: wflnxb [[REG1]], [[REG1]]
+; CHECK: wflpxb [[REG1]], [[REG1]]
+; CHECK: vst [[REG1]], 0(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128, fp128 *%aptr
+ %b = fpext double %bd to fp128
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
+
+; As above, but the sign comes from an f128.
+define void @f9(fp128 *%cptr, fp128 *%aptr, fp128 *%bptr) {
+; CHECK-LABEL: f9:
+; CHECK: vl [[REG1:%v[0-7]+]], 0(%r3)
+; CHECK: vl [[REG2:%v[0-7]+]], 0(%r4)
+; CHECK: tm
+; CHECK: wflnxb [[REG1]], [[REG1]]
+; CHECK: wflpxb [[REG1]], [[REG1]]
+; CHECK: vst [[REG1]], 0(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128, fp128 *%aptr
+ %b = load volatile fp128, fp128 *%bptr
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-div-01.ll b/test/CodeGen/SystemZ/fp-div-01.ll
index 0791e8db93f8..ee514dc474e9 100644
--- a/test/CodeGen/SystemZ/fp-div-01.ll
+++ b/test/CodeGen/SystemZ/fp-div-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit floating-point division.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: deb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: deb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-div-04.ll b/test/CodeGen/SystemZ/fp-div-04.ll
new file mode 100644
index 000000000000..54e87f46c84a
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-div-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point division on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fdiv fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-move-13.ll b/test/CodeGen/SystemZ/fp-move-13.ll
new file mode 100644
index 000000000000..d6c53eaceeef
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-13.ll
@@ -0,0 +1,46 @@
+; Test f128 moves on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; VR-to-VR moves. Since f128s are passed by reference,
+; we need to force a copy by other means.
+define void @f1(fp128 *%x) {
+; CHECK-LABEL: f1:
+; CHECK: vlr
+; CHECK: vleig
+; CHECK: br %r14
+ %val = load volatile fp128 , fp128 *%x
+ %t1 = bitcast fp128 %val to <2 x i64>
+ %t2 = insertelement <2 x i64> %t1, i64 0, i32 0
+ %res = bitcast <2 x i64> %t2 to fp128
+ store volatile fp128 %res, fp128 *%x
+ store volatile fp128 %val, fp128 *%x
+ ret void
+}
+
+; Test 128-bit moves from GPRs to VRs. i128 isn't a legitimate type,
+; so this goes through memory.
+define void @f2(fp128 *%a, i128 *%b) {
+; CHECK-LABEL: f2:
+; CHECK: lg
+; CHECK: lg
+; CHECK: stg
+; CHECK: stg
+; CHECK: br %r14
+ %val = load i128 , i128 *%b
+ %res = bitcast i128 %val to fp128
+ store fp128 %res, fp128 *%a
+ ret void
+}
+
+; Test 128-bit moves from VRs to GPRs, with the same restriction as f2.
+define void @f3(fp128 *%a, i128 *%b) {
+; CHECK-LABEL: f3:
+; CHECK: vl
+; CHECK: vst
+ %val = load fp128 , fp128 *%a
+ %res = bitcast fp128 %val to i128
+ store i128 %res, i128 *%b
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-mul-01.ll b/test/CodeGen/SystemZ/fp-mul-01.ll
index 3b72d25e0b5c..126567b218ab 100644
--- a/test/CodeGen/SystemZ/fp-mul-01.ll
+++ b/test/CodeGen/SystemZ/fp-mul-01.ll
@@ -1,6 +1,8 @@
; Test multiplication of two f32s, producing an f32 result.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: meeb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: meeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-mul-06.ll b/test/CodeGen/SystemZ/fp-mul-06.ll
index 896fafecbdaf..581e44eeaa2f 100644
--- a/test/CodeGen/SystemZ/fp-mul-06.ll
+++ b/test/CodeGen/SystemZ/fp-mul-06.ll
@@ -1,11 +1,15 @@
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
define float @f1(float %f1, float %f2, float %acc) {
; CHECK-LABEL: f1:
-; CHECK: maebr %f4, %f0, %f2
-; CHECK: ler %f0, %f4
+; CHECK-SCALAR: maebr %f4, %f0, %f2
+; CHECK-SCALAR: ler %f0, %f4
+; CHECK-VECTOR: wfmasb %f0, %f0, %f2, %f4
; CHECK: br %r14
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
@@ -14,7 +18,8 @@ define float @f1(float %f1, float %f2, float %acc) {
define float @f2(float %f1, float *%ptr, float %acc) {
; CHECK-LABEL: f2:
; CHECK: maeb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%f2 = load float , float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
@@ -24,7 +29,8 @@ define float @f2(float %f1, float *%ptr, float %acc) {
define float @f3(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f3:
; CHECK: maeb %f2, %f0, 4092(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float , float *%ptr
@@ -39,7 +45,8 @@ define float @f4(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: maeb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float , float *%ptr
@@ -54,7 +61,8 @@ define float @f5(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: maeb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float , float *%ptr
@@ -66,7 +74,8 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 0(%r1,%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 %index
%f2 = load float , float *%ptr
@@ -78,7 +87,8 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1023
%ptr = getelementptr float, float *%base, i64 %index2
@@ -92,7 +102,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
; CHECK: maeb %f2, %f0, 0(%r1)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1024
%ptr = getelementptr float, float *%base, i64 %index2
diff --git a/test/CodeGen/SystemZ/fp-mul-08.ll b/test/CodeGen/SystemZ/fp-mul-08.ll
index 5e5538bfacc9..5b1f9b96c089 100644
--- a/test/CodeGen/SystemZ/fp-mul-08.ll
+++ b/test/CodeGen/SystemZ/fp-mul-08.ll
@@ -1,11 +1,15 @@
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
define float @f1(float %f1, float %f2, float %acc) {
; CHECK-LABEL: f1:
-; CHECK: msebr %f4, %f0, %f2
-; CHECK: ler %f0, %f4
+; CHECK-SCALAR: msebr %f4, %f0, %f2
+; CHECK-SCALAR: ler %f0, %f4
+; CHECK-VECTOR: wfmssb %f0, %f0, %f2, %f4
; CHECK: br %r14
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
@@ -15,7 +19,8 @@ define float @f1(float %f1, float %f2, float %acc) {
define float @f2(float %f1, float *%ptr, float %acc) {
; CHECK-LABEL: f2:
; CHECK: mseb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%f2 = load float , float *%ptr
%negacc = fsub float -0.0, %acc
@@ -26,7 +31,8 @@ define float @f2(float %f1, float *%ptr, float %acc) {
define float @f3(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f3:
; CHECK: mseb %f2, %f0, 4092(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%f2 = load float , float *%ptr
@@ -42,7 +48,8 @@ define float @f4(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mseb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1024
%f2 = load float , float *%ptr
@@ -58,7 +65,8 @@ define float @f5(float %f1, float *%base, float %acc) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: mseb %f2, %f0, 0(%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 -1
%f2 = load float , float *%ptr
@@ -71,7 +79,8 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 0(%r1,%r2)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 %index
%f2 = load float , float *%ptr
@@ -84,7 +93,8 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1023
%ptr = getelementptr float, float *%base, i64 %index2
@@ -99,7 +109,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
; CHECK: mseb %f2, %f0, 0(%r1)
-; CHECK: ler %f0, %f2
+; CHECK-SCALAR: ler %f0, %f2
+; CHECK-VECTOR: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1024
%ptr = getelementptr float, float *%base, i64 %index2
diff --git a/test/CodeGen/SystemZ/fp-mul-10.ll b/test/CodeGen/SystemZ/fp-mul-10.ll
new file mode 100644
index 000000000000..c23a6a202ad5
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-10.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare double @llvm.fma.f64(double %f1, double %f2, double %f3)
+declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
+
+define double @f1(double %f1, double %f2, double %acc) {
+; CHECK-LABEL: f1:
+; CHECK: wfnmadb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ %negres = fsub double -0.0, %res
+ ret double %negres
+}
+
+define double @f2(double %f1, double %f2, double %acc) {
+; CHECK-LABEL: f2:
+; CHECK: wfnmsdb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ %negres = fsub double -0.0, %res
+ ret double %negres
+}
+
+define float @f3(float %f1, float %f2, float %acc) {
+; CHECK-LABEL: f3:
+; CHECK: wfnmasb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ %negres = fsub float -0.0, %res
+ ret float %negres
+}
+
+define float @f4(float %f1, float %f2, float %acc) {
+; CHECK-LABEL: f4:
+; CHECK: wfnmssb %f0, %f0, %f2, %f4
+; CHECK: br %r14
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ %negres = fsub float -0.0, %res
+ ret float %negres
+}
+
diff --git a/test/CodeGen/SystemZ/fp-mul-11.ll b/test/CodeGen/SystemZ/fp-mul-11.ll
new file mode 100644
index 000000000000..ef45bf184a4c
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-11.ll
@@ -0,0 +1,32 @@
+; Test 128-bit floating-point multiplication on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfmxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fmul fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
+
+define void @f2(double %f1, double %f2, fp128 *%dst) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: wflld [[REG1:%v[0-9]+]], %f0
+; CHECK-DAG: wflld [[REG2:%v[0-9]+]], %f2
+; CHECK: wfmxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-mul-12.ll b/test/CodeGen/SystemZ/fp-mul-12.ll
new file mode 100644
index 000000000000..331f9a30c274
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-12.ll
@@ -0,0 +1,72 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare fp128 @llvm.fma.f128(fp128 %f1, fp128 %f2, fp128 %f3)
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfmaxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %f3)
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+define void @f2(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfmsxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %f3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %neg)
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+define void @f3(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfnmaxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %f3)
+ %negres = fsub fp128 0xL00000000000000008000000000000000, %res
+ store fp128 %negres, fp128 *%dst
+ ret void
+}
+
+define void @f4(fp128 *%ptr1, fp128 *%ptr2, fp128 *%ptr3, fp128 *%dst) {
+; CHECK-LABEL: f4:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: vl [[REG3:%v[0-9]+]], 0(%r4)
+; CHECK: wfnmsxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], [[REG3]]
+; CHECK: vst [[RES]], 0(%r5)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %f3 = load fp128, fp128 *%ptr3
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %f3
+ %res = call fp128 @llvm.fma.f128 (fp128 %f1, fp128 %f2, fp128 %neg)
+ %negres = fsub fp128 0xL00000000000000008000000000000000, %res
+ store fp128 %negres, fp128 *%dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-neg-02.ll b/test/CodeGen/SystemZ/fp-neg-02.ll
new file mode 100644
index 000000000000..38fb3a58d404
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-neg-02.ll
@@ -0,0 +1,41 @@
+; Test floating-point negation on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test f32.
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: lcdfr %f0, %f0
+; CHECK: br %r14
+ %res = fsub float -0.0, %f
+ ret float %res
+}
+
+; Test f64.
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: lcdfr %f0, %f0
+; CHECK: br %r14
+ %res = fsub double -0.0, %f
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure negation would probably
+; be better implemented using an XI on the upper byte. Do some extra
+; processing so that using FPRs is unequivocally better.
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK-DAG: wflcxb [[NEGREG1:%v[0-9]+]], [[REG1]]
+; CHECK: wfdxb [[RES:%v[0-9]+]], [[NEGREG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %orig = load fp128 , fp128 *%ptr
+ %negzero = fpext float -0.0 to fp128
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %orig
+ %op2 = load fp128 , fp128 *%ptr2
+ %res = fdiv fp128 %neg, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-round-03.ll b/test/CodeGen/SystemZ/fp-round-03.ll
new file mode 100644
index 000000000000..762e793701d1
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-round-03.ll
@@ -0,0 +1,207 @@
+; Test rounding functions for z14 and above.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test rint for f32.
+declare float @llvm.rint.f32(float %f)
+define float @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: fiebra %f0, 0, %f0, 0
+; CHECK: br %r14
+ %res = call float @llvm.rint.f32(float %f)
+ ret float %res
+}
+
+; Test rint for f64.
+declare double @llvm.rint.f64(double %f)
+define double @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: fidbra %f0, 0, %f0, 0
+; CHECK: br %r14
+ %res = call double @llvm.rint.f64(double %f)
+ ret double %res
+}
+
+; Test rint for f128.
+declare fp128 @llvm.rint.f128(fp128 %f)
+define void @f3(fp128 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 0, 0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.rint.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test nearbyint for f32.
+declare float @llvm.nearbyint.f32(float %f)
+define float @f4(float %f) {
+; CHECK-LABEL: f4:
+; CHECK: fiebra %f0, 0, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.nearbyint.f32(float %f)
+ ret float %res
+}
+
+; Test nearbyint for f64.
+declare double @llvm.nearbyint.f64(double %f)
+define double @f5(double %f) {
+; CHECK-LABEL: f5:
+; CHECK: fidbra %f0, 0, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.nearbyint.f64(double %f)
+ ret double %res
+}
+
+; Test nearbyint for f128.
+declare fp128 @llvm.nearbyint.f128(fp128 %f)
+define void @f6(fp128 *%ptr) {
+; CHECK-LABEL: f6:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 0
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.nearbyint.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test floor for f32.
+declare float @llvm.floor.f32(float %f)
+define float @f7(float %f) {
+; CHECK-LABEL: f7:
+; CHECK: fiebra %f0, 7, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.floor.f32(float %f)
+ ret float %res
+}
+
+; Test floor for f64.
+declare double @llvm.floor.f64(double %f)
+define double @f8(double %f) {
+; CHECK-LABEL: f8:
+; CHECK: fidbra %f0, 7, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.floor.f64(double %f)
+ ret double %res
+}
+
+; Test floor for f128.
+declare fp128 @llvm.floor.f128(fp128 %f)
+define void @f9(fp128 *%ptr) {
+; CHECK-LABEL: f9:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 7
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.floor.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test ceil for f32.
+declare float @llvm.ceil.f32(float %f)
+define float @f10(float %f) {
+; CHECK-LABEL: f10:
+; CHECK: fiebra %f0, 6, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.ceil.f32(float %f)
+ ret float %res
+}
+
+; Test ceil for f64.
+declare double @llvm.ceil.f64(double %f)
+define double @f11(double %f) {
+; CHECK-LABEL: f11:
+; CHECK: fidbra %f0, 6, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.ceil.f64(double %f)
+ ret double %res
+}
+
+; Test ceil for f128.
+declare fp128 @llvm.ceil.f128(fp128 %f)
+define void @f12(fp128 *%ptr) {
+; CHECK-LABEL: f12:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 6
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.ceil.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test trunc for f32.
+declare float @llvm.trunc.f32(float %f)
+define float @f13(float %f) {
+; CHECK-LABEL: f13:
+; CHECK: fiebra %f0, 5, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.trunc.f32(float %f)
+ ret float %res
+}
+
+; Test trunc for f64.
+declare double @llvm.trunc.f64(double %f)
+define double @f14(double %f) {
+; CHECK-LABEL: f14:
+; CHECK: fidbra %f0, 5, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.trunc.f64(double %f)
+ ret double %res
+}
+
+; Test trunc for f128.
+declare fp128 @llvm.trunc.f128(fp128 %f)
+define void @f15(fp128 *%ptr) {
+; CHECK-LABEL: f15:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 5
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.trunc.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
+
+; Test round for f32.
+declare float @llvm.round.f32(float %f)
+define float @f16(float %f) {
+; CHECK-LABEL: f16:
+; CHECK: fiebra %f0, 1, %f0, 4
+; CHECK: br %r14
+ %res = call float @llvm.round.f32(float %f)
+ ret float %res
+}
+
+; Test round for f64.
+declare double @llvm.round.f64(double %f)
+define double @f17(double %f) {
+; CHECK-LABEL: f17:
+; CHECK: fidbra %f0, 1, %f0, 4
+; CHECK: br %r14
+ %res = call double @llvm.round.f64(double %f)
+ ret double %res
+}
+
+; Test round for f128.
+declare fp128 @llvm.round.f128(fp128 %f)
+define void @f18(fp128 *%ptr) {
+; CHECK-LABEL: f18:
+; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 1
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %src = load fp128 , fp128 *%ptr
+ %res = call fp128 @llvm.round.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sqrt-01.ll b/test/CodeGen/SystemZ/fp-sqrt-01.ll
index 3680207e7f20..85a46bc2d7fc 100644
--- a/test/CodeGen/SystemZ/fp-sqrt-01.ll
+++ b/test/CodeGen/SystemZ/fp-sqrt-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit square root.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @llvm.sqrt.f32(float)
declare float @sqrtf(float)
@@ -77,7 +79,7 @@ define float @f6(float *%base, i64 %index) {
; to use SQEB if possible.
define void @f7(float *%ptr) {
; CHECK-LABEL: f7:
-; CHECK: sqeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK-SCALAR: sqeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
%val0 = load volatile float , float *%ptr
%val1 = load volatile float , float *%ptr
@@ -160,7 +162,7 @@ define float @f8(float %dummy, float %val) {
; CHECK: sqebr %f0, %f2
; CHECK: cebr %f0, %f0
; CHECK: bnor %r14
-; CHECK: ler %f0, %f2
+; CHECK: {{ler|ldr}} %f0, %f2
; CHECK: jg sqrtf@PLT
%res = tail call float @sqrtf(float %val)
ret float %res
diff --git a/test/CodeGen/SystemZ/fp-sqrt-04.ll b/test/CodeGen/SystemZ/fp-sqrt-04.ll
new file mode 100644
index 000000000000..e0fb2569b39a
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sqrt-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point square root on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare fp128 @llvm.sqrt.f128(fp128 %f)
+
+define void @f1(fp128 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG:%v[0-9]+]], 0(%r2)
+; CHECK: wfsqxb [[RES:%v[0-9]+]], [[REG]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f = load fp128, fp128 *%ptr
+ %res = call fp128 @llvm.sqrt.f128(fp128 %f)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-01.ll b/test/CodeGen/SystemZ/fp-sub-01.ll
index f4185ca3108d..41f72e1810e9 100644
--- a/test/CodeGen/SystemZ/fp-sub-01.ll
+++ b/test/CodeGen/SystemZ/fp-sub-01.ll
@@ -1,6 +1,8 @@
; Test 32-bit floating-point subtraction.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
+; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
declare float @foo()
@@ -76,7 +78,7 @@ define float @f6(float %f1, float *%base, i64 %index) {
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
-; CHECK: seb %f0, 16{{[04]}}(%r15)
+; CHECK-SCALAR: seb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
%ptr1 = getelementptr float, float *%ptr0, i64 2
%ptr2 = getelementptr float, float *%ptr0, i64 4
diff --git a/test/CodeGen/SystemZ/fp-sub-04.ll b/test/CodeGen/SystemZ/fp-sub-04.ll
new file mode 100644
index 000000000000..5f88132664ef
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sub-04.ll
@@ -0,0 +1,17 @@
+; Test 128-bit floating-point subtraction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfsxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK: vst [[RES]], 0(%r2)
+; CHECK: br %r14
+ %f1 = load fp128, fp128 *%ptr1
+ %f2 = load fp128, fp128 *%ptr2
+ %sum = fsub fp128 %f1, %f2
+ store fp128 %sum, fp128 *%ptr1
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-add-17.ll b/test/CodeGen/SystemZ/int-add-17.ll
new file mode 100644
index 000000000000..fd245871c652
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-17.ll
@@ -0,0 +1,95 @@
+; Test additions between an i64 and a sign-extended i16 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check AGH with no displacement.
+define i64 @f1(i64 %a, i16 *%src) {
+; CHECK-LABEL: f1:
+; CHECK: agh %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i16, i16 *%src
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the aligned AGH range.
+define i64 @f2(i64 %a, i16 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: agh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262143
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %a, i16 *%src) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: agh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the negative aligned AGH range.
+define i64 @f4(i64 %a, i16 *%src) {
+; CHECK-LABEL: f4:
+; CHECK: agh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -1
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the low end of the AGH range.
+define i64 @f5(i64 %a, i16 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: agh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %a, i16 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: agh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check that AGH allows an index.
+define i64 @f7(i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f7:
+; CHECK: agh %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i16 *
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
diff --git a/test/CodeGen/SystemZ/int-mul-09.ll b/test/CodeGen/SystemZ/int-mul-09.ll
new file mode 100644
index 000000000000..3e384e72db5d
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-09.ll
@@ -0,0 +1,95 @@
+; Test multiplications between an i64 and a sign-extended i16 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check MGH with no displacement.
+define i64 @f1(i64 %a, i16 *%src) {
+; CHECK-LABEL: f1:
+; CHECK: mgh %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i16, i16 *%src
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the high end of the aligned MGH range.
+define i64 @f2(i64 %a, i16 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: mgh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262143
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %a, i16 *%src) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: mgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the high end of the negative aligned MGH range.
+define i64 @f4(i64 %a, i16 *%src) {
+; CHECK-LABEL: f4:
+; CHECK: mgh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -1
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the low end of the MGH range.
+define i64 @f5(i64 %a, i16 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: mgh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %a, i16 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: mgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check that MGH allows an index.
+define i64 @f7(i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f7:
+; CHECK: mgh %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i16 *
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
diff --git a/test/CodeGen/SystemZ/int-mul-10.ll b/test/CodeGen/SystemZ/int-mul-10.ll
new file mode 100644
index 000000000000..a4d80af36a3c
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-10.ll
@@ -0,0 +1,165 @@
+; Test signed high-part i64->i128 multiplications on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check sign-extended multiplication in which only the high part is used.
+define i64 @f1(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mgrk %r2, %r3, %r4
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check sign-extended multiplication in which only part of the high half
+; is used.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f2:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mgrk [[REG:%r[0-9]+]], %r3, %r4
+; CHECK: srlg %r2, [[REG]], 3
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 67
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check sign-extended multiplication in which the result is split into
+; high and low halves.
+define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mgrk %r2, %r3, %r4
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ %low = trunc i128 %mulx to i64
+ %or = or i64 %high, %low
+ ret i64 %or
+}
+
+; Check MG with no displacement.
+define i64 @f4(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f4:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mg %r2, 0(%r4)
+; CHECK: br %r14
+ %b = load i64 , i64 *%src
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the high end of the aligned MG range.
+define i64 @f5(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: mg %r2, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 65535
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the next doubleword up, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: mg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 65536
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the high end of the negative aligned MG range.
+define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f7:
+; CHECK: mg %r2, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 -1
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the low end of the MG range.
+define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK-LABEL: f8:
+; CHECK: mg %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f9(i64 *%dest, i64 %a, i64 *%src) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524296
+; CHECK: mg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check that MG allows an index.
+define i64 @f10(i64 *%dest, i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: mg %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 , i64 *%ptr
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
diff --git a/test/CodeGen/SystemZ/int-mul-11.ll b/test/CodeGen/SystemZ/int-mul-11.ll
new file mode 100644
index 000000000000..f26251982518
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-11.ll
@@ -0,0 +1,32 @@
+; Test three-operand multiplication instructions on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Check MSRKC.
+define i32 @f1(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: msrkc %r2, %r3, %r4
+; CHECK: br %r14
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check MSGRKC.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f2:
+; CHECK: msgrkc %r2, %r3, %r4
+; CHECK: br %r14
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Verify that we still use MSGFR for i32->i64 multiplies.
+define i64 @f3(i64 %a, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: msgfr %r2, %r3
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
diff --git a/test/CodeGen/SystemZ/int-sub-10.ll b/test/CodeGen/SystemZ/int-sub-10.ll
new file mode 100644
index 000000000000..bf6638575e55
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-10.ll
@@ -0,0 +1,95 @@
+; Test subtractions of a sign-extended i16 from an i64 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check SGH with no displacement.
+define i64 @f1(i64 %a, i16 *%src) {
+; CHECK-LABEL: f1:
+; CHECK: sgh %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i16, i16 *%src
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the aligned SGH range.
+define i64 @f2(i64 %a, i16 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: sgh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262143
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %a, i16 *%src) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: sgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the negative aligned SGH range.
+define i64 @f4(i64 %a, i16 *%src) {
+; CHECK-LABEL: f4:
+; CHECK: sgh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -1
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the low end of the SGH range.
+define i64 @f5(i64 %a, i16 *%src) {
+; CHECK-LABEL: f5:
+; CHECK: sgh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %a, i16 *%src) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: sgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check that SGH allows an index.
+define i64 @f7(i64 %a, i64 %src, i64 %index) {
+; CHECK-LABEL: f7:
+; CHECK: sgh %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i16 *
+ %b = load i16, i16 *%ptr
+ %bext = sext i16 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
diff --git a/test/CodeGen/SystemZ/tdc-07.ll b/test/CodeGen/SystemZ/tdc-07.ll
new file mode 100644
index 000000000000..6651410e7c66
--- /dev/null
+++ b/test/CodeGen/SystemZ/tdc-07.ll
@@ -0,0 +1,18 @@
+; Test the Test Data Class instruction on z14
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i32 @llvm.s390.tdc.f128(fp128, i64)
+
+; Check TDC with an fp128 operand and an i32 result.
+define i32 @f3(fp128 %x) {
+; CHECK-LABEL: f3
+; CHECK: vl %v0, 0(%r2)
+; CHECK: vrepg %v2, %v0, 1
+; CHECK: tcxb %f0, 123
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+ %res = call i32 @llvm.s390.tdc.f128(fp128 %x, i64 123)
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/SystemZ/vec-abs-06.ll b/test/CodeGen/SystemZ/vec-abs-06.ll
new file mode 100644
index 000000000000..8eee1d9d2507
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-abs-06.ll
@@ -0,0 +1,47 @@
+; Test f32 and v4f32 absolute on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @llvm.fabs.f32(float)
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
+
+; Test a plain absolute.
+define <4 x float> @f1(<4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vflpsb %v24, %v24
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.fabs.v4f32(<4 x float> %val)
+ ret <4 x float> %ret
+}
+
+; Test a negative absolute.
+define <4 x float> @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vflnsb %v24, %v24
+; CHECK: br %r14
+ %abs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %val)
+ %ret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %abs
+ ret <4 x float> %ret
+}
+
+; Test an f32 absolute that uses vector registers.
+define float @f3(<4 x float> %val) {
+; CHECK-LABEL: f3:
+; CHECK: wflpsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %ret = call float @llvm.fabs.f32(float %scalar)
+ ret float %ret
+}
+
+; Test an f32 negative absolute that uses vector registers.
+define float @f4(<4 x float> %val) {
+; CHECK-LABEL: f4:
+; CHECK: wflnsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %abs = call float @llvm.fabs.f32(float %scalar)
+ %ret = fsub float -0.0, %abs
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-add-02.ll b/test/CodeGen/SystemZ/vec-add-02.ll
new file mode 100644
index 000000000000..97a9b84a063c
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-add-02.ll
@@ -0,0 +1,24 @@
+; Test vector addition on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 addition.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfasb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fadd <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 addition that uses vector registers.
+define float @f2(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfasb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fadd float %scalar1, %scalar2
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-and-04.ll b/test/CodeGen/SystemZ/vec-and-04.ll
new file mode 100644
index 000000000000..e9355beb4296
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-and-04.ll
@@ -0,0 +1,47 @@
+; Test vector NAND on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v16i8 NAND.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <16 x i8> %val1, %val2
+ %not = xor <16 x i8> %ret, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <16 x i8> %not
+}
+
+; Test a v8i16 NAND.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <8 x i16> %val1, %val2
+ %not = xor <8 x i16> %ret, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %not
+}
+
+; Test a v4i32 NAND.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <4 x i32> %val1, %val2
+ %not = xor <4 x i32> %ret, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %not
+}
+
+; Test a v2i64 NAND.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vnn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <2 x i64> %val1, %val2
+ %not = xor <2 x i64> %ret, <i64 -1, i64 -1>
+ ret <2 x i64> %not
+}
diff --git a/test/CodeGen/SystemZ/vec-cmp-07.ll b/test/CodeGen/SystemZ/vec-cmp-07.ll
new file mode 100644
index 000000000000..f272ba4bd755
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-cmp-07.ll
@@ -0,0 +1,349 @@
+; Test f32 and v4f32 comparisons on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test oeq.
+define <4 x i32> @f1(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfcesb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oeq <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test one.
+define <4 x i32> @f2(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vo %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp one <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ogt.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vfchsb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test oge.
+define <4 x i32> @f4(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vfchesb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oge <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ole.
+define <4 x i32> @f5(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vfchesb %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ole <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test olt.
+define <4 x i32> @f6(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vfchsb %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = fcmp olt <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ueq.
+define <4 x i32> @f7(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f7:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vno %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ueq <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test une.
+define <4 x i32> @f8(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vfcesb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp une <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ugt.
+define <4 x i32> @f9(<4 x i32> %dummy, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ugt <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test uge.
+define <4 x i32> @f10(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uge <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ule.
+define <4 x i32> @f11(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f11:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ule <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ult.
+define <4 x i32> @f12(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f12:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ult <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ord.
+define <4 x i32> @f13(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f13:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vo %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ord <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test uno.
+define <4 x i32> @f14(<4 x i32> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f14:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v28, %v26
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK: vno %v24, [[REG1]], [[REG2]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uno <4 x float> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test oeq selects.
+define <4 x float> @f15(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f15:
+; CHECK: vfcesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oeq <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test one selects.
+define <4 x float> @f16(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f16:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp one <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ogt selects.
+define <4 x float> @f17(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f17:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ogt <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test oge selects.
+define <4 x float> @f18(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f18:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp oge <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ole selects.
+define <4 x float> @f19(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f19:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ole <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test olt selects.
+define <4 x float> @f20(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f20:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp olt <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ueq selects.
+define <4 x float> @f21(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f21:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchsb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ueq <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test une selects.
+define <4 x float> @f22(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f22:
+; CHECK: vfcesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp une <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ugt selects.
+define <4 x float> @f23(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f23:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ugt <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test uge selects.
+define <4 x float> @f24(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f24:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uge <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ule selects.
+define <4 x float> @f25(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f25:
+; CHECK: vfchsb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ule <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ult selects.
+define <4 x float> @f26(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f26:
+; CHECK: vfchesb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ult <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test ord selects.
+define <4 x float> @f27(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f27:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp ord <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test uno selects.
+define <4 x float> @f28(<4 x float> %val1, <4 x float> %val2,
+ <4 x float> %val3, <4 x float> %val4) {
+; CHECK-LABEL: f28:
+; CHECK-DAG: vfchsb [[REG1:%v[0-9]+]], %v26, %v24
+; CHECK-DAG: vfchesb [[REG2:%v[0-9]+]], %v24, %v26
+; CHECK: vo [[REG:%v[0-9]+]], [[REG1]], [[REG2]]
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = fcmp uno <4 x float> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
+ ret <4 x float> %ret
+}
+
+; Test an f32 comparison that uses vector registers.
+define i64 @f29(i64 %a, i64 %b, float %f1, <4 x float> %vec) {
+; CHECK-LABEL: f29:
+; CHECK: wfcsb %f0, %v24
+; CHECK-NEXT: locgrne %r2, %r3
+; CHECK: br %r14
+ %f2 = extractelement <4 x float> %vec, i32 0
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/vec-ctpop-02.ll b/test/CodeGen/SystemZ/vec-ctpop-02.ll
new file mode 100644
index 000000000000..ee50e88d0430
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-ctpop-02.ll
@@ -0,0 +1,45 @@
+; Test vector population-count instruction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
+
+define <16 x i8> @f1(<16 x i8> %a) {
+; CHECK-LABEL: f1:
+; CHECK: vpopctb %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
+ ret <16 x i8> %popcnt
+}
+
+define <8 x i16> @f2(<8 x i16> %a) {
+; CHECK-LABEL: f2:
+; CHECK: vpopcth %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
+ ret <8 x i16> %popcnt
+}
+
+define <4 x i32> @f3(<4 x i32> %a) {
+; CHECK-LABEL: f3:
+; CHECK: vpopctf %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+ ret <4 x i32> %popcnt
+}
+
+define <2 x i64> @f4(<2 x i64> %a) {
+; CHECK-LABEL: f4:
+; CHECK: vpopctg %v24, %v24
+; CHECK: br %r14
+
+ %popcnt = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
+ ret <2 x i64> %popcnt
+}
+
diff --git a/test/CodeGen/SystemZ/vec-div-02.ll b/test/CodeGen/SystemZ/vec-div-02.ll
new file mode 100644
index 000000000000..74e3b5148ad5
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-div-02.ll
@@ -0,0 +1,24 @@
+; Test vector division on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 division.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfdsb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fdiv <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 division that uses vector registers.
+define float @f2(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfdsb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fdiv float %scalar1, %scalar2
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-intrinsics.ll b/test/CodeGen/SystemZ/vec-intrinsics-01.ll
index 6f5eb0691aa8..6f5eb0691aa8 100644
--- a/test/CodeGen/SystemZ/vec-intrinsics.ll
+++ b/test/CodeGen/SystemZ/vec-intrinsics-01.ll
diff --git a/test/CodeGen/SystemZ/vec-intrinsics-02.ll b/test/CodeGen/SystemZ/vec-intrinsics-02.ll
new file mode 100644
index 000000000000..84c6a0784031
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-intrinsics-02.ll
@@ -0,0 +1,441 @@
+; Test vector intrinsics added with z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <2 x i64> @llvm.s390.vbperm(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.s390.vmslg(<2 x i64>, <2 x i64>, <16 x i8>, i32)
+declare <16 x i8> @llvm.s390.vlrl(i32, i8 *)
+declare void @llvm.s390.vstrl(<16 x i8>, i32, i8 *)
+
+declare {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float>, <4 x float>)
+declare {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float>, <4 x float>)
+declare {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float>, <4 x float>)
+declare {<4 x i32>, i32} @llvm.s390.vftcisb(<4 x float>, i32)
+declare <4 x float> @llvm.s390.vfisb(<4 x float>, i32, i32)
+
+declare <2 x double> @llvm.s390.vfmaxdb(<2 x double>, <2 x double>, i32)
+declare <2 x double> @llvm.s390.vfmindb(<2 x double>, <2 x double>, i32)
+declare <4 x float> @llvm.s390.vfmaxsb(<4 x float>, <4 x float>, i32)
+declare <4 x float> @llvm.s390.vfminsb(<4 x float>, <4 x float>, i32)
+
+; VBPERM.
+define <2 x i64> @test_vbperm(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vbperm:
+; CHECK: vbperm %v24, %v24, %v26
+; CHECK: br %r14
+ %res = call <2 x i64> @llvm.s390.vbperm(<16 x i8> %a, <16 x i8> %b)
+ ret <2 x i64> %res
+}
+
+; VMSLG with no shifts.
+define <16 x i8> @test_vmslg1(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmslg1:
+; CHECK: vmslg %v24, %v24, %v26, %v28, 0
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vmslg(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c, i32 0)
+ ret <16 x i8> %res
+}
+
+; VMSLG with both shifts.
+define <16 x i8> @test_vmslg2(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmslg2:
+; CHECK: vmslg %v24, %v24, %v26, %v28, 12
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vmslg(<2 x i64> %a, <2 x i64> %b, <16 x i8> %c, i32 12)
+ ret <16 x i8> %res
+}
+
+; VLRLR with the lowest in-range displacement.
+define <16 x i8> @test_vlrlr1(i8 *%ptr, i32 %length) {
+; CHECK-LABEL: test_vlrlr1:
+; CHECK: vlrlr %v24, %r3, 0(%r2)
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRLR with the highest in-range displacement.
+define <16 x i8> @test_vlrlr2(i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vlrlr2:
+; CHECK: vlrlr %v24, %r3, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRLR with an out-of-range displacement.
+define <16 x i8> @test_vlrlr3(i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vlrlr3:
+; CHECK: vlrlr %v24, %r3, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; Check that VLRLR doesn't allow an index.
+define <16 x i8> @test_vlrlr4(i8 *%base, i64 %index, i32 %length) {
+; CHECK-LABEL: test_vlrlr4:
+; CHECK: vlrlr %v24, %r4, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 %length, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRL with the lowest in-range displacement.
+define <16 x i8> @test_vlrl1(i8 *%ptr) {
+; CHECK-LABEL: test_vlrl1:
+; CHECK: vlrl %v24, 0(%r2), 0
+; CHECK: br %r14
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRL with the highest in-range displacement.
+define <16 x i8> @test_vlrl2(i8 *%base) {
+; CHECK-LABEL: test_vlrl2:
+; CHECK: vlrl %v24, 4095(%r2), 0
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VLRL with an out-of-range displacement.
+define <16 x i8> @test_vlrl3(i8 *%base) {
+; CHECK-LABEL: test_vlrl3:
+; CHECK: vlrl %v24, 0({{%r[1-5]}}), 0
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; Check that VLRL doesn't allow an index.
+define <16 x i8> @test_vlrl4(i8 *%base, i64 %index) {
+; CHECK-LABEL: test_vlrl4:
+; CHECK: vlrl %v24, 0({{%r[1-5]}}), 0
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ %res = call <16 x i8> @llvm.s390.vlrl(i32 0, i8 *%ptr)
+ ret <16 x i8> %res
+}
+
+; VSTRLR with the lowest in-range displacement.
+define void @test_vstrlr1(<16 x i8> %vec, i8 *%ptr, i32 %length) {
+; CHECK-LABEL: test_vstrlr1:
+; CHECK: vstrlr %v24, %r3, 0(%r2)
+; CHECK: br %r14
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; VSTRLR with the highest in-range displacement.
+define void @test_vstrlr2(<16 x i8> %vec, i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vstrlr2:
+; CHECK: vstrlr %v24, %r3, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; VSTRLR with an out-of-range displacement.
+define void @test_vstrlr3(<16 x i8> %vec, i8 *%base, i32 %length) {
+; CHECK-LABEL: test_vstrlr3:
+; CHECK: vstrlr %v24, %r3, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; Check that VSTRLR doesn't allow an index.
+define void @test_vstrlr4(<16 x i8> %vec, i8 *%base, i64 %index, i32 %length) {
+; CHECK-LABEL: test_vstrlr4:
+; CHECK: vstrlr %v24, %r4, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 %length, i8 *%ptr)
+ ret void
+}
+
+; VSTRL with the lowest in-range displacement.
+define void @test_vstrl1(<16 x i8> %vec, i8 *%ptr) {
+; CHECK-LABEL: test_vstrl1:
+; CHECK: vstrl %v24, 0(%r2), 8
+; CHECK: br %r14
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; VSTRL with the highest in-range displacement.
+define void @test_vstrl2(<16 x i8> %vec, i8 *%base) {
+; CHECK-LABEL: test_vstrl2:
+; CHECK: vstrl %v24, 4095(%r2), 8
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; VSTRL with an out-of-range displacement.
+define void @test_vstrl3(<16 x i8> %vec, i8 *%base) {
+; CHECK-LABEL: test_vstrl3:
+; CHECK: vstrl %v24, 0({{%r[1-5]}}), 8
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; Check that VSTRL doesn't allow an index.
+define void @test_vstrl4(<16 x i8> %vec, i8 *%base, i64 %index) {
+; CHECK-LABEL: test_vstrl4:
+; CHECK: vstrl %v24, 0({{%r[1-5]}}), 8
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ call void @llvm.s390.vstrl(<16 x i8> %vec, i32 8, i8 *%ptr)
+ ret void
+}
+
+; VFCESBS with no processing of the result.
+define i32 @test_vfcesbs(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfcesbs:
+; CHECK: vfcesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFCESBS, returning 1 if any elements are equal (CC != 3).
+define i32 @test_vfcesbs_any_bool(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfcesbs_any_bool:
+; CHECK: vfcesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: afi %r2, -536870912
+; CHECK: srl %r2, 31
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp ne i32 %res, 3
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFCESBS, storing to %ptr if any elements are equal.
+define <4 x i32> @test_vfcesbs_any_store(<4 x float> %a, <4 x float> %b,
+ i32 *%ptr) {
+; CHECK-LABEL: test_vfcesbs_any_store:
+; CHECK-NOT: %r
+; CHECK: vfcesbs %v24, %v24, %v26
+; CHECK-NEXT: {{bor|bnler}} %r14
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfcesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 0
+ %cc = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp ule i32 %cc, 2
+ br i1 %cmp, label %store, label %exit
+
+store:
+ store i32 0, i32 *%ptr
+ br label %exit
+
+exit:
+ ret <4 x i32> %res
+}
+
+; VFCHSBS with no processing of the result.
+define i32 @test_vfchsbs(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchsbs:
+; CHECK: vfchsbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFCHSBS, returning 1 if not all elements are higher.
+define i32 @test_vfchsbs_notall_bool(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchsbs_notall_bool:
+; CHECK: vfchsbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risblg %r2, [[REG]], 31, 159, 36
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp sge i32 %res, 1
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFCHSBS, storing to %ptr if not all elements are higher.
+define <4 x i32> @test_vfchsbs_notall_store(<4 x float> %a, <4 x float> %b,
+ i32 *%ptr) {
+; CHECK-LABEL: test_vfchsbs_notall_store:
+; CHECK-NOT: %r
+; CHECK: vfchsbs %v24, %v24, %v26
+; CHECK-NEXT: {{bher|ber}} %r14
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchsbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 0
+ %cc = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp ugt i32 %cc, 0
+ br i1 %cmp, label %store, label %exit
+
+store:
+ store i32 0, i32 *%ptr
+ br label %exit
+
+exit:
+ ret <4 x i32> %res
+}
+
+; VFCHESBS with no processing of the result.
+define i32 @test_vfchesbs(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchesbs:
+; CHECK: vfchesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFCHESBS, returning 1 if no elements are higher or equal.
+define i32 @test_vfchesbs_none_bool(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfchesbs_none_bool:
+; CHECK: vfchesbs {{%v[0-9]+}}, %v24, %v26
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risblg %r2, [[REG]], 31, 159, 35
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp eq i32 %res, 3
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFCHESBS, storing to %ptr if no elements are higher or equal.
+define <4 x i32> @test_vfchesbs_none_store(<4 x float> %a, <4 x float> %b,
+ i32 *%ptr) {
+; CHECK-LABEL: test_vfchesbs_none_store:
+; CHECK-NOT: %r
+; CHECK: vfchesbs %v24, %v24, %v26
+; CHECK-NEXT: {{bnor|bler}} %r14
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vfchesbs(<4 x float> %a,
+ <4 x float> %b)
+ %res = extractvalue {<4 x i32>, i32} %call, 0
+ %cc = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp uge i32 %cc, 3
+ br i1 %cmp, label %store, label %exit
+
+store:
+ store i32 0, i32 *%ptr
+ br label %exit
+
+exit:
+ ret <4 x i32> %res
+}
+
+; VFTCISB with the lowest useful class selector and no processing of the result.
+define i32 @test_vftcisb(<4 x float> %a) {
+; CHECK-LABEL: test_vftcisb:
+; CHECK: vftcisb {{%v[0-9]+}}, %v24, 1
+; CHECK: ipm %r2
+; CHECK: srl %r2, 28
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vftcisb(<4 x float> %a, i32 1)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ ret i32 %res
+}
+
+; VFTCISB with the highest useful class selector, returning 1 if all elements
+; have the right class (CC == 0).
+define i32 @test_vftcisb_all_bool(<4 x float> %a) {
+; CHECK-LABEL: test_vftcisb_all_bool:
+; CHECK: vftcisb {{%v[0-9]+}}, %v24, 4094
+; CHECK: afi %r2, -268435456
+; CHECK: srl %r2, 31
+; CHECK: br %r14
+ %call = call {<4 x i32>, i32} @llvm.s390.vftcisb(<4 x float> %a, i32 4094)
+ %res = extractvalue {<4 x i32>, i32} %call, 1
+ %cmp = icmp eq i32 %res, 0
+ %ext = zext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; VFISB with a rounding mode not usable via standard intrinsics.
+define <4 x float> @test_vfisb_0_4(<4 x float> %a) {
+; CHECK-LABEL: test_vfisb_0_4:
+; CHECK: vfisb %v24, %v24, 0, 4
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfisb(<4 x float> %a, i32 0, i32 4)
+ ret <4 x float> %res
+}
+
+; VFISB with IEEE-inexact exception suppressed.
+define <4 x float> @test_vfisb_4_0(<4 x float> %a) {
+; CHECK-LABEL: test_vfisb_4_0:
+; CHECK: vfisb %v24, %v24, 4, 0
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfisb(<4 x float> %a, i32 4, i32 0)
+ ret <4 x float> %res
+}
+
+; VFMAXDB.
+define <2 x double> @test_vfmaxdb(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_vfmaxdb:
+; CHECK: vfmaxdb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <2 x double> @llvm.s390.vfmaxdb(<2 x double> %a, <2 x double> %b, i32 4)
+ ret <2 x double> %res
+}
+
+; VFMINDB.
+define <2 x double> @test_vfmindb(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_vfmindb:
+; CHECK: vfmindb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <2 x double> @llvm.s390.vfmindb(<2 x double> %a, <2 x double> %b, i32 4)
+ ret <2 x double> %res
+}
+
+; VFMAXSB.
+define <4 x float> @test_vfmaxsb(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfmaxsb:
+; CHECK: vfmaxsb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfmaxsb(<4 x float> %a, <4 x float> %b, i32 4)
+ ret <4 x float> %res
+}
+
+; VFMINSB.
+define <4 x float> @test_vfminsb(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_vfminsb:
+; CHECK: vfminsb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.s390.vfminsb(<4 x float> %a, <4 x float> %b, i32 4)
+ ret <4 x float> %res
+}
+
diff --git a/test/CodeGen/SystemZ/vec-max-05.ll b/test/CodeGen/SystemZ/vec-max-05.ll
new file mode 100644
index 000000000000..591d3bf36f16
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-max-05.ll
@@ -0,0 +1,175 @@
+; Test vector maximum on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare double @fmax(double, double)
+declare double @llvm.maxnum.f64(double, double)
+declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
+
+declare float @fmaxf(float, float)
+declare float @llvm.maxnum.f32(float, float)
+declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+
+declare fp128 @fmaxl(fp128, fp128)
+declare fp128 @llvm.maxnum.f128(fp128, fp128)
+
+; Test the fmax library function.
+define double @f1(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f1:
+; CHECK: wfmaxdb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @fmax(double %val1, double %val2) readnone
+ ret double %ret
+}
+
+; Test the f64 maxnum intrinsic.
+define double @f2(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfmaxdb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @llvm.maxnum.f64(double %val1, double %val2)
+ ret double %ret
+}
+
+; Test a f64 constant compare/select resulting in maxnum.
+define double @f3(double %dummy, double %val) {
+; CHECK-LABEL: f3:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmaxdb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp ogt double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test a f64 constant compare/select resulting in maxnan.
+define double @f4(double %dummy, double %val) {
+; CHECK-LABEL: f4:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmaxdb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ugt double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test the v2f64 maxnum intrinsic.
+define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vfmaxdb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %val1, <2 x double> %val2)
+ ret <2 x double> %ret
+}
+
+; Test the fmaxf library function.
+define float @f11(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f11:
+; CHECK: wfmaxsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @fmaxf(float %val1, float %val2) readnone
+ ret float %ret
+}
+
+; Test the f32 maxnum intrinsic.
+define float @f12(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f12:
+; CHECK: wfmaxsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @llvm.maxnum.f32(float %val1, float %val2)
+ ret float %ret
+}
+
+; Test a f32 constant compare/select resulting in maxnum.
+define float @f13(float %dummy, float %val) {
+; CHECK-LABEL: f13:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfmaxsb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp ogt float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test a f32 constant compare/select resulting in maxnan.
+define float @f14(float %dummy, float %val) {
+; CHECK-LABEL: f14:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfmaxsb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ugt float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test the v4f32 maxnum intrinsic.
+define <4 x float> @f15(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f15:
+; CHECK: vfmaxsb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %val1, <4 x float> %val2)
+ ret <4 x float> %ret
+}
+
+; Test the fmaxl library function.
+define void @f21(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f21:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @fmaxl(fp128 %val1, fp128 %val2) readnone
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test the f128 maxnum intrinsic.
+define void @f22(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f22:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @llvm.maxnum.f128(fp128 %val1, fp128 %val2)
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test a f128 constant compare/select resulting in maxnum.
+define void @f23(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f23:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp ogt fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test a f128 constant compare/select resulting in maxnan.
+define void @f24(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f24:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfmaxxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 1
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp ugt fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/vec-min-05.ll b/test/CodeGen/SystemZ/vec-min-05.ll
new file mode 100644
index 000000000000..3eef9016cd08
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-min-05.ll
@@ -0,0 +1,175 @@
+; Test vector minimum on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare double @fmin(double, double)
+declare double @llvm.minnum.f64(double, double)
+declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
+
+declare float @fminf(float, float)
+declare float @llvm.minnum.f32(float, float)
+declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
+
+declare fp128 @fminl(fp128, fp128)
+declare fp128 @llvm.minnum.f128(fp128, fp128)
+
+; Test the fmin library function.
+define double @f1(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f1:
+; CHECK: wfmindb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @fmin(double %val1, double %val2) readnone
+ ret double %ret
+}
+
+; Test the f64 minnum intrinsic.
+define double @f2(double %dummy, double %val1, double %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfmindb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call double @llvm.minnum.f64(double %val1, double %val2)
+ ret double %ret
+}
+
+; Test a f64 constant compare/select resulting in minnum.
+define double @f3(double %dummy, double %val) {
+; CHECK-LABEL: f3:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmindb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp olt double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test a f64 constant compare/select resulting in minnan.
+define double @f4(double %dummy, double %val) {
+; CHECK-LABEL: f4:
+; CHECK: lzdr [[REG:%f[0-9]+]]
+; CHECK: wfmindb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ult double %val, 0.0
+ %ret = select i1 %cmp, double %val, double 0.0
+ ret double %ret
+}
+
+; Test the v2f64 minnum intrinsic.
+define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vfmindb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <2 x double> @llvm.minnum.v2f64(<2 x double> %val1, <2 x double> %val2)
+ ret <2 x double> %ret
+}
+
+; Test the fminf library function.
+define float @f11(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f11:
+; CHECK: wfminsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @fminf(float %val1, float %val2) readnone
+ ret float %ret
+}
+
+; Test the f32 minnum intrinsic.
+define float @f12(float %dummy, float %val1, float %val2) {
+; CHECK-LABEL: f12:
+; CHECK: wfminsb %f0, %f2, %f4, 4
+; CHECK: br %r14
+ %ret = call float @llvm.minnum.f32(float %val1, float %val2)
+ ret float %ret
+}
+
+; Test a f32 constant compare/select resulting in minnum.
+define float @f13(float %dummy, float %val) {
+; CHECK-LABEL: f13:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfminsb %f0, %f2, [[REG]], 4
+; CHECK: br %r14
+ %cmp = fcmp olt float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test a f32 constant compare/select resulting in minnan.
+define float @f14(float %dummy, float %val) {
+; CHECK-LABEL: f14:
+; CHECK: lzer [[REG:%f[0-9]+]]
+; CHECK: wfminsb %f0, %f2, [[REG]], 1
+; CHECK: br %r14
+ %cmp = fcmp ult float %val, 0.0
+ %ret = select i1 %cmp, float %val, float 0.0
+ ret float %ret
+}
+
+; Test the v4f32 minnum intrinsic.
+define <4 x float> @f15(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f15:
+; CHECK: vfminsb %v24, %v26, %v28, 4
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.minnum.v4f32(<4 x float> %val1, <4 x float> %val2)
+ ret <4 x float> %ret
+}
+
+; Test the fminl library function.
+define void @f21(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f21:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @fminl(fp128 %val1, fp128 %val2) readnone
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test the f128 minnum intrinsic.
+define void @f22(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+; CHECK-LABEL: f22:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r4)
+; CHECK: br %r14
+ %val1 = load fp128, fp128* %ptr1
+ %val2 = load fp128, fp128* %ptr2
+ %res = call fp128 @llvm.minnum.f128(fp128 %val1, fp128 %val2)
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test a f128 constant compare/select resulting in minnum.
+define void @f23(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f23:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 4
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp olt fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
+; Test a f128 constant compare/select resulting in minnan.
+define void @f24(fp128 *%ptr, fp128 *%dst) {
+; CHECK-LABEL: f24:
+; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
+; CHECK-DAG: vzero [[REG2:%v[0-9]+]]
+; CHECK: wfminxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]], 1
+; CHECK: vst [[RES]], 0(%r3)
+; CHECK: br %r14
+ %val = load fp128, fp128* %ptr
+ %cmp = fcmp ult fp128 %val, 0xL00000000000000000000000000000000
+ %res = select i1 %cmp, fp128 %val, fp128 0xL00000000000000000000000000000000
+ store fp128 %res, fp128* %dst
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/vec-move-18.ll b/test/CodeGen/SystemZ/vec-move-18.ll
new file mode 100644
index 000000000000..5d3d09d83ef1
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-move-18.ll
@@ -0,0 +1,24 @@
+; Test insertions of memory values into 0 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test VLLEZLF.
+define <4 x i32> @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vllezlf %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test VLLEZLF with a float.
+define <4 x float> @f2(float *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vllezlf %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load float, float *%ptr
+ %ret = insertelement <4 x float> zeroinitializer, float %val, i32 0
+ ret <4 x float> %ret
+}
+
diff --git a/test/CodeGen/SystemZ/vec-mul-03.ll b/test/CodeGen/SystemZ/vec-mul-03.ll
new file mode 100644
index 000000000000..3733db9fb339
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-mul-03.ll
@@ -0,0 +1,24 @@
+; Test vector multiplication on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 multiplication.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vfmsb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fmul <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 multiplication that uses vector registers.
+define float @f2(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: wfmsb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fmul float %scalar1, %scalar2
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-mul-04.ll b/test/CodeGen/SystemZ/vec-mul-04.ll
new file mode 100644
index 000000000000..d96f0b6a745a
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-mul-04.ll
@@ -0,0 +1,31 @@
+; Test vector multiply-and-add on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+; Test a v4f32 multiply-and-add.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f1:
+; CHECK: vfmasb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %val3)
+ ret <4 x float> %ret
+}
+
+; Test a v4f32 multiply-and-subtract.
+define <4 x float> @f2(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f2:
+; CHECK: vfmssb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %negval3 = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %val3
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %negval3)
+ ret <4 x float> %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-mul-05.ll b/test/CodeGen/SystemZ/vec-mul-05.ll
new file mode 100644
index 000000000000..90a1f7a7efdf
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-mul-05.ll
@@ -0,0 +1,63 @@
+; Test vector negative multiply-and-add on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+; Test a v2f64 negative multiply-and-add.
+define <2 x double> @f1(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2, <2 x double> %val3) {
+; CHECK-LABEL: f1:
+; CHECK: vfnmadb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %ret = call <2 x double> @llvm.fma.v2f64 (<2 x double> %val1,
+ <2 x double> %val2,
+ <2 x double> %val3)
+ %negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
+ ret <2 x double> %negret
+}
+
+; Test a v2f64 negative multiply-and-subtract.
+define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
+ <2 x double> %val2, <2 x double> %val3) {
+; CHECK-LABEL: f2:
+; CHECK: vfnmsdb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %negval3 = fsub <2 x double> <double -0.0, double -0.0>, %val3
+ %ret = call <2 x double> @llvm.fma.v2f64 (<2 x double> %val1,
+ <2 x double> %val2,
+ <2 x double> %negval3)
+ %negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
+ ret <2 x double> %negret
+}
+
+; Test a v4f32 negative multiply-and-add.
+define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f3:
+; CHECK: vfnmasb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %val3)
+ %negret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %ret
+ ret <4 x float> %negret
+}
+
+; Test a v4f32 negative multiply-and-subtract.
+define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2, <4 x float> %val3) {
+; CHECK-LABEL: f4:
+; CHECK: vfnmssb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %negval3 = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %val3
+ %ret = call <4 x float> @llvm.fma.v4f32 (<4 x float> %val1,
+ <4 x float> %val2,
+ <4 x float> %negval3)
+ %negret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %ret
+ ret <4 x float> %negret
+}
diff --git a/test/CodeGen/SystemZ/vec-neg-02.ll b/test/CodeGen/SystemZ/vec-neg-02.ll
new file mode 100644
index 000000000000..07ce037542fd
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-neg-02.ll
@@ -0,0 +1,23 @@
+; Test vector negation on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 negation.
+define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vflcsb %v24, %v26
+; CHECK: br %r14
+ %ret = fsub <4 x float> <float -0.0, float -0.0,
+ float -0.0, float -0.0>, %val
+ ret <4 x float> %ret
+}
+
+; Test an f32 negation that uses vector registers.
+define float @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: wflcsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %ret = fsub float -0.0, %scalar
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-or-03.ll b/test/CodeGen/SystemZ/vec-or-03.ll
new file mode 100644
index 000000000000..010629d880d1
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-or-03.ll
@@ -0,0 +1,91 @@
+; Test vector OR-NOT on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v16i8 OR-NOT.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <16 x i8> %val2, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %ret = or <16 x i8> %val1, %not
+ ret <16 x i8> %ret
+}
+
+; ...and again with the reverse.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <16 x i8> %val1, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %ret = or <16 x i8> %not, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 OR-NOT.
+define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <8 x i16> %val2, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret = or <8 x i16> %val1, %not
+ ret <8 x i16> %ret
+}
+
+; ...and again with the reverse.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <8 x i16> %val1, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret = or <8 x i16> %not, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 OR-NOT.
+define <4 x i32> @f5(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <4 x i32> %val2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret = or <4 x i32> %val1, %not
+ ret <4 x i32> %ret
+}
+
+; ...and again with the reverse.
+define <4 x i32> @f6(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <4 x i32> %val1, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret = or <4 x i32> %not, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 OR-NOT.
+define <2 x i64> @f7(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: voc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <2 x i64> %val2, <i64 -1, i64 -1>
+ %ret = or <2 x i64> %val1, %not
+ ret <2 x i64> %ret
+}
+
+; ...and again with the reverse.
+define <2 x i64> @f8(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: voc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <2 x i64> %val1, <i64 -1, i64 -1>
+ %ret = or <2 x i64> %not, %val2
+ ret <2 x i64> %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-round-02.ll b/test/CodeGen/SystemZ/vec-round-02.ll
new file mode 100644
index 000000000000..bcd66ea803d1
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-round-02.ll
@@ -0,0 +1,118 @@
+; Test v4f32 rounding on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @llvm.rint.f32(float)
+declare float @llvm.nearbyint.f32(float)
+declare float @llvm.floor.f32(float)
+declare float @llvm.ceil.f32(float)
+declare float @llvm.trunc.f32(float)
+declare float @llvm.round.f32(float)
+declare <4 x float> @llvm.rint.v4f32(<4 x float>)
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
+declare <4 x float> @llvm.floor.v4f32(<4 x float>)
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
+declare <4 x float> @llvm.round.v4f32(<4 x float>)
+
+define <4 x float> @f1(<4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vfisb %v24, %v24, 0, 0
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.rint.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vfisb %v24, %v24, 4, 0
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f3(<4 x float> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vfisb %v24, %v24, 4, 7
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.floor.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f4(<4 x float> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vfisb %v24, %v24, 4, 6
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.ceil.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f5(<4 x float> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vfisb %v24, %v24, 4, 5
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define <4 x float> @f6(<4 x float> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vfisb %v24, %v24, 4, 1
+; CHECK: br %r14
+ %res = call <4 x float> @llvm.round.v4f32(<4 x float> %val)
+ ret <4 x float> %res
+}
+
+define float @f7(<4 x float> %val) {
+; CHECK-LABEL: f7:
+; CHECK: wfisb %f0, %v24, 0, 0
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.rint.f32(float %scalar)
+ ret float %res
+}
+
+define float @f8(<4 x float> %val) {
+; CHECK-LABEL: f8:
+; CHECK: wfisb %f0, %v24, 4, 0
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.nearbyint.f32(float %scalar)
+ ret float %res
+}
+
+define float @f9(<4 x float> %val) {
+; CHECK-LABEL: f9:
+; CHECK: wfisb %f0, %v24, 4, 7
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.floor.f32(float %scalar)
+ ret float %res
+}
+
+define float @f10(<4 x float> %val) {
+; CHECK-LABEL: f10:
+; CHECK: wfisb %f0, %v24, 4, 6
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.ceil.f32(float %scalar)
+ ret float %res
+}
+
+define float @f11(<4 x float> %val) {
+; CHECK-LABEL: f11:
+; CHECK: wfisb %f0, %v24, 4, 5
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.trunc.f32(float %scalar)
+ ret float %res
+}
+
+define float @f12(<4 x float> %val) {
+; CHECK-LABEL: f12:
+; CHECK: wfisb %f0, %v24, 4, 1
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %res = call float @llvm.round.f32(float %scalar)
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/vec-sqrt-02.ll b/test/CodeGen/SystemZ/vec-sqrt-02.ll
new file mode 100644
index 000000000000..6970d9db6698
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-sqrt-02.ll
@@ -0,0 +1,23 @@
+; Test f32 and v4f32 square root on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare float @llvm.sqrt.f32(float)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
+
+define <4 x float> @f1(<4 x float> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vfsqsb %v24, %v24
+; CHECK: br %r14
+ %ret = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %val)
+ ret <4 x float> %ret
+}
+
+define float @f2(<4 x float> %val) {
+; CHECK-LABEL: f2:
+; CHECK: wfsqsb %f0, %v24
+; CHECK: br %r14
+ %scalar = extractelement <4 x float> %val, i32 0
+ %ret = call float @llvm.sqrt.f32(float %scalar)
+ ret float %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-sub-02.ll b/test/CodeGen/SystemZ/vec-sub-02.ll
new file mode 100644
index 000000000000..83c76b5d4aa6
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-sub-02.ll
@@ -0,0 +1,31 @@
+; Test vector subtraction on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v4f32 subtraction.
+define <4 x float> @f6(<4 x float> %dummy, <4 x float> %val1,
+ <4 x float> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vfssb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = fsub <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
+; Test an f32 subtraction that uses vector registers.
+define float @f7(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: wfssb %f0, %v24, %v26
+; CHECK: br %r14
+ %scalar1 = extractelement <4 x float> %val1, i32 0
+ %scalar2 = extractelement <4 x float> %val2, i32 0
+ %ret = fsub float %scalar1, %scalar2
+ ret float %ret
+}
+
+; Test a v2f32 subtraction, which gets promoted to v4f32.
+define <2 x float> @f14(<2 x float> %val1, <2 x float> %val2) {
+; No particular output expected, but must compile.
+ %ret = fsub <2 x float> %val1, %val2
+ ret <2 x float> %ret
+}
diff --git a/test/CodeGen/SystemZ/vec-xor-02.ll b/test/CodeGen/SystemZ/vec-xor-02.ll
new file mode 100644
index 000000000000..b4b5a96ba254
--- /dev/null
+++ b/test/CodeGen/SystemZ/vec-xor-02.ll
@@ -0,0 +1,47 @@
+; Test vector NOT-XOR on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+; Test a v16i8 NOT-XOR.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <16 x i8> %val1, %val2
+ %not = xor <16 x i8> %ret, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <16 x i8> %not
+}
+
+; Test a v8i16 NOT-XOR.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <8 x i16> %val1, %val2
+ %not = xor <8 x i16> %ret, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %not
+}
+
+; Test a v4i32 NOT-XOR.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <4 x i32> %val1, %val2
+ %not = xor <4 x i32> %ret, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %not
+}
+
+; Test a v2i64 NOT-XOR.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vnx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <2 x i64> %val1, %val2
+ %not = xor <2 x i64> %ret, <i64 -1, i64 -1>
+ ret <2 x i64> %not
+}