author     Dimitry Andric <dim@FreeBSD.org>    2015-09-06 18:36:24 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2015-09-06 18:36:24 +0000
commit     36c5ade2f4674b544039d78db4c466756cf142b0 (patch)
tree       3d3ed1e1987dbe6444294b1b4e249814b97b97a5 /test/CodeGen
parent     51ece4aae5857052d224ce52277924c74685714e (diff)
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c             146
-rw-r--r--  test/CodeGen/builtins-systemz-zvector-error.c   576
-rw-r--r--  test/CodeGen/builtins-systemz-zvector.c        2967
-rw-r--r--  test/CodeGen/integer-overflow.c                   7
-rw-r--r--  test/CodeGen/le32-regparm.c                       1
-rw-r--r--  test/CodeGen/long_double_fp128.cpp               22
-rw-r--r--  test/CodeGen/palignr.c                            4
-rw-r--r--  test/CodeGen/x86_64-fp128.c                     115
-rw-r--r--  test/CodeGen/zvector.c                         2798
9 files changed, 6633 insertions, 3 deletions
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index 8e8216b10111e..32166b50f9854 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -3307,81 +3307,225 @@ void test6() {
/* vec_sld */
res_vsc = vec_sld(vsc, vsc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_sld(vuc, vuc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_sld(vs, vs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_sld(vus, vus, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vbs = vec_sld(vbs, vbs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
res_vp = vec_sld(vp, vp, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_sld(vi, vi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_sld(vui, vui, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vbi = vec_sld(vbi, vbi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: xor <16 x i8>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
res_vf = vec_sld(vf, vf, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vsc = vec_vsldoi(vsc, vsc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vuc = vec_vsldoi(vuc, vuc, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vs = vec_vsldoi(vs, vs, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vus = vec_vsldoi(vus, vus, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vp = vec_vsldoi(vp, vp, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vi = vec_vsldoi(vi, vi, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_vsldoi(vui, vui, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vf = vec_vsldoi(vf, vf, 0);
-// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_sll */
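
The CHECK lines added in the vec_sld/vec_vsldoi hunk above verify the arithmetic that builds the vperm control vector, not merely the intrinsic call: on big-endian targets the mask bytes for a shift amount c are c, c+1, ..., c+15 (matched by the "add nsw" constants 1 through 15), while on little-endian targets they are 16-c, 17-c, ..., 31-c (matched by the "sub nsw" constants 16 through 31). A minimal sketch of that index computation, using a hypothetical helper name that is not part of altivec.h:

/* Sketch only: one way the 16-byte permute mask for vec_sld(a, b, c)
 * can be derived; the helper name is illustrative, not clang's code. */
static inline void build_vsldoi_mask(unsigned char mask[16], int c,
                                     int little_endian) {
  for (int i = 0; i < 16; ++i)
    mask[i] = little_endian ? (unsigned char)(16 + i - c) /* 16-c .. 31-c */
                            : (unsigned char)(c + i);     /* c .. c+15    */
}
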
diff --git a/test/CodeGen/builtins-systemz-zvector-error.c b/test/CodeGen/builtins-systemz-zvector-error.c
new file mode 100644
index 0000000000000..8d5380dac1612
--- /dev/null
+++ b/test/CodeGen/builtins-systemz-zvector-error.c
@@ -0,0 +1,576 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -fsyntax-only -verify %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ len = __lcbb(cptr, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 200); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+ len = __lcbb(cptr, 8192); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}}
+
+ vsl = vec_permi(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_permi(vsl, vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_permi(vsl, vsl, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vul = vec_permi(vul, vul, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbl = vec_permi(vbl, vbl, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 2 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_permi(vd, vd, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 3 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+
+ vsi = vec_gather_element(vsi, vui, cptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_gather_element(vsi, vui, cptrsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_gather_element(vsi, vui, cptrsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_gather_element(vui, vui, cptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_gather_element(vbi, vui, cptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_gather_element(vsl, vul, cptrsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_gather_element(vul, vul, cptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_gather_element(vbl, vul, cptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_gather_element(vd, vul, cptrd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vec_scatter_element(vsi, vui, ptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsi, vui, ptrsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsi, vui, ptrsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vui, vui, ptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vbi, vui, ptrui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vec_scatter_element(vsl, vul, ptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vsl, vul, ptrsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vsl, vul, ptrsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vul, vul, ptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vbl, vul, ptrul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 5 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vec_scatter_element(vd, vul, ptrd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 6 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vsc = vec_load_bndry(cptrsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 200); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsc = vec_load_bndry(cptrsc, 8192); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vuc = vec_load_bndry(cptruc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vss = vec_load_bndry(cptrss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vus = vec_load_bndry(cptrus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsi = vec_load_bndry(cptrsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vui = vec_load_bndry(cptrui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vsl = vec_load_bndry(cptrsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+ vul = vec_load_bndry(cptrul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}}
+
+ vuc = vec_genmask(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+
+ vuc = vec_genmasks_8(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_genmasks_8(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_genmasks_8(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_genmasks_16(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_genmasks_32(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(0, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(idx, 0); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_genmasks_64(idx, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+
+ vsc = vec_splat(vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_splat(vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_splat(vsc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vuc = vec_splat(vuc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vbc = vec_splat(vbc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}}
+ vss = vec_splat(vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_splat(vss, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_splat(vss, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vus = vec_splat(vus, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vbs = vec_splat(vbs, 8); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}}
+ vsi = vec_splat(vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_splat(vsi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_splat(vsi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vui = vec_splat(vui, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vbi = vec_splat(vbi, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}}
+ vsl = vec_splat(vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_splat(vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vsl = vec_splat(vsl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vul = vec_splat(vul, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vbl = vec_splat(vbl, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 11 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+ vd = vec_splat(vd, 2); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 12 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}}
+
+ vsc = vec_splat_s8(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vuc = vec_splat_u8(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vss = vec_splat_s16(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vus = vec_splat_u16(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vsi = vec_splat_s32(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vui = vec_splat_u32(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vsl = vec_splat_s64(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+ vul = vec_splat_u64(idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* {{must be a constant integer}}
+}
+
+void test_integer(void) {
+ vsc = vec_rl_mask(vsc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vuc = vec_rl_mask(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vss = vec_rl_mask(vss, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vus = vec_rl_mask(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vsi = vec_rl_mask(vsi, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vui = vec_rl_mask(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vsl = vec_rl_mask(vsl, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+ vul = vec_rl_mask(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 7 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer}}
+
+ vsc = vec_sld(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_sld(vsc, vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsc = vec_sld(vsc, vsc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vuc = vec_sld(vuc, vuc, 16); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vss = vec_sld(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vus = vec_sld(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsi = vec_sld(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vui = vec_sld(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vsl = vec_sld(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vul = vec_sld(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+ vd = vec_sld(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}}
+
+ vsc = vec_sldw(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsc = vec_sldw(vsc, vsc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsc = vec_sldw(vsc, vsc, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vuc = vec_sldw(vuc, vuc, 4); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vss = vec_sldw(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vus = vec_sldw(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsi = vec_sldw(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vui = vec_sldw(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vsl = vec_sldw(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vul = vec_sldw(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+ vd = vec_sldw(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 8 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}}
+}
+
+void test_float(void) {
+ vd = vec_ctd(vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vsl, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vsl, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vd = vec_ctd(vul, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+
+ vsl = vec_ctsl(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vsl = vec_ctsl(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vsl = vec_ctsl(vd, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, -1); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+ vul = vec_ctul(vd, 32); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}}
+
+ vbl = vec_fp_test_data_class(vd, idx, &cc); // expected-error {{must be a constant integer}}
+ vbl = vec_fp_test_data_class(vd, -1, &cc); // expected-error {{should be a value from 0 to 4095}}
+ vbl = vec_fp_test_data_class(vd, 4096, &cc); // expected-error {{should be a value from 0 to 4095}}
+}
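
The error test above relies on clang's -verify mode: each call site carries an "// expected-error {{...}}" marker, and the companion "// expected-note@vecintrin.h:* N {{...}}" lines require that note to be emitted exactly N times, on any line of vecintrin.h (the ":*" wildcard). A minimal self-contained example of the same mechanism, unrelated to this commit; the {{...}} text only needs to be a substring of the actual diagnostic:

// RUN: %clang_cc1 -fsyntax-only -verify %s
int demo(void) {
  int *p = 1; // expected-warning {{incompatible integer to pointer conversion}}
  return q;   // expected-error {{use of undeclared identifier 'q'}}
}
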
diff --git a/test/CodeGen/builtins-systemz-zvector.c b/test/CodeGen/builtins-systemz-zvector.c
new file mode 100644
index 0000000000000..6d554af44e93a
--- /dev/null
+++ b/test/CodeGen/builtins-systemz-zvector.c
@@ -0,0 +1,2967 @@
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ len = __lcbb(cptr, 64);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 0)
+ len = __lcbb(cptr, 128);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 1)
+ len = __lcbb(cptr, 256);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 2)
+ len = __lcbb(cptr, 512);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 3)
+ len = __lcbb(cptr, 1024);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 4)
+ len = __lcbb(cptr, 2048);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 5)
+ len = __lcbb(cptr, 4096);
+ // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 6)
+
+ sc = vec_extract(vsc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ uc = vec_extract(vuc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ uc = vec_extract(vbc, idx);
+ // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}}
+ ss = vec_extract(vss, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ us = vec_extract(vus, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ us = vec_extract(vbs, idx);
+ // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}}
+ si = vec_extract(vsi, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ ui = vec_extract(vui, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ ui = vec_extract(vbi, idx);
+ // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}}
+ sl = vec_extract(vsl, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ ul = vec_extract(vul, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ ul = vec_extract(vbl, idx);
+ // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}}
+ d = vec_extract(vd, idx);
+ // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_insert(sc, vsc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_insert(uc, vuc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_insert(uc, vbc, idx);
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}}
+ vss = vec_insert(ss, vss, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_insert(us, vus, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_insert(us, vbs, idx);
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}}
+ vsi = vec_insert(si, vsi, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_insert(ui, vui, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_insert(ui, vbi, idx);
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}
+ vsl = vec_insert(sl, vsl, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_insert(ul, vul, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_insert(ul, vbl, idx);
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}}
+ vd = vec_insert(d, vd, idx);
+ // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_promote(sc, idx);
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}}
+ vuc = vec_promote(uc, idx);
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}}
+ vss = vec_promote(ss, idx);
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}}
+ vus = vec_promote(us, idx);
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}}
+ vsi = vec_promote(si, idx);
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}}
+ vui = vec_promote(ui, idx);
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}}
+ vsl = vec_promote(sl, idx);
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}}
+ vul = vec_promote(ul, idx);
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}}
+ vd = vec_promote(d, idx);
+ // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}}
+
+ vsc = vec_insert_and_zero(cptrsc);
+ // CHECK: insertelement <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %{{.*}}, i32 7
+ vuc = vec_insert_and_zero(cptruc);
+ // CHECK: insertelement <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %{{.*}}, i32 7
+ vss = vec_insert_and_zero(cptrss);
+ // CHECK: insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0>, i16 %{{.*}}, i32 3
+ vus = vec_insert_and_zero(cptrus);
+ // CHECK: insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 undef, i16 0, i16 0, i16 0, i16 0>, i16 %{{.*}}, i32 3
+ vsi = vec_insert_and_zero(cptrsi);
+ // CHECK: insertelement <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, i32 %{{.*}}, i32 1
+ vui = vec_insert_and_zero(cptrui);
+ // CHECK: insertelement <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, i32 %{{.*}}, i32 1
+ vsl = vec_insert_and_zero(cptrsl);
+ // CHECK: insertelement <2 x i64> <i64 undef, i64 0>, i64 %{{.*}}, i32 0
+ vul = vec_insert_and_zero(cptrul);
+ // CHECK: insertelement <2 x i64> <i64 undef, i64 0>, i64 %{{.*}}, i32 0
+ vd = vec_insert_and_zero(cptrd);
+ // CHECK: insertelement <2 x double> <double undef, double 0.000000e+00>, double %{{.*}}, i32 0
+
+ vsc = vec_perm(vsc, vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_perm(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_perm(vbc, vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_perm(vss, vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_perm(vus, vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_perm(vbs, vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_perm(vsi, vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_perm(vui, vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_perm(vbi, vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_perm(vsl, vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_perm(vul, vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_perm(vbl, vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_perm(vd, vd, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsl = vec_permi(vsl, vsl, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vsl = vec_permi(vsl, vsl, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vsl = vec_permi(vsl, vsl, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vsl = vec_permi(vsl, vsl, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vul = vec_permi(vul, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vul = vec_permi(vul, vul, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vul = vec_permi(vul, vul, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vul = vec_permi(vul, vul, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vbl = vec_permi(vbl, vbl, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vbl = vec_permi(vbl, vbl, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vbl = vec_permi(vbl, vbl, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vbl = vec_permi(vbl, vbl, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+ vd = vec_permi(vd, vd, 0);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vd = vec_permi(vd, vd, 1);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1)
+ vd = vec_permi(vd, vd, 2);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4)
+ vd = vec_permi(vd, vd, 3);
+ // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
+
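+  // vec_sel has no CHECK lines: in vecintrin.h it is presumably written with
+  // plain vector &, |, ~ operators, so it lowers to generic and/or/xor IR
+  // rather than to an s390-specific intrinsic.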
+ vsc = vec_sel(vsc, vsc, vuc);
+ vsc = vec_sel(vsc, vsc, vbc);
+ vuc = vec_sel(vuc, vuc, vuc);
+ vuc = vec_sel(vuc, vuc, vbc);
+ vbc = vec_sel(vbc, vbc, vuc);
+ vbc = vec_sel(vbc, vbc, vbc);
+ vss = vec_sel(vss, vss, vus);
+ vss = vec_sel(vss, vss, vbs);
+ vus = vec_sel(vus, vus, vus);
+ vus = vec_sel(vus, vus, vbs);
+ vbs = vec_sel(vbs, vbs, vus);
+ vbs = vec_sel(vbs, vbs, vbs);
+ vsi = vec_sel(vsi, vsi, vui);
+ vsi = vec_sel(vsi, vsi, vbi);
+ vui = vec_sel(vui, vui, vui);
+ vui = vec_sel(vui, vui, vbi);
+ vbi = vec_sel(vbi, vbi, vui);
+ vbi = vec_sel(vbi, vbi, vbi);
+ vsl = vec_sel(vsl, vsl, vul);
+ vsl = vec_sel(vsl, vsl, vbl);
+ vul = vec_sel(vul, vul, vul);
+ vul = vec_sel(vul, vul, vbl);
+ vbl = vec_sel(vbl, vbl, vul);
+ vbl = vec_sel(vbl, vbl, vbl);
+ vd = vec_sel(vd, vd, vul);
+ vd = vec_sel(vd, vd, vbl);
+
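+  // vec_gather_element is also unchecked here; each call is assumed to expand
+  // to an extractelement of the offset vector, scalar address arithmetic, a
+  // scalar load, and an insertelement at the requested element index.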
+ vsi = vec_gather_element(vsi, vui, cptrsi, 0);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 1);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 2);
+ vsi = vec_gather_element(vsi, vui, cptrsi, 3);
+ vui = vec_gather_element(vui, vui, cptrui, 0);
+ vui = vec_gather_element(vui, vui, cptrui, 1);
+ vui = vec_gather_element(vui, vui, cptrui, 2);
+ vui = vec_gather_element(vui, vui, cptrui, 3);
+ vbi = vec_gather_element(vbi, vui, cptrui, 0);
+ vbi = vec_gather_element(vbi, vui, cptrui, 1);
+ vbi = vec_gather_element(vbi, vui, cptrui, 2);
+ vbi = vec_gather_element(vbi, vui, cptrui, 3);
+ vsl = vec_gather_element(vsl, vul, cptrsl, 0);
+ vsl = vec_gather_element(vsl, vul, cptrsl, 1);
+ vul = vec_gather_element(vul, vul, cptrul, 0);
+ vul = vec_gather_element(vul, vul, cptrul, 1);
+ vbl = vec_gather_element(vbl, vul, cptrul, 0);
+ vbl = vec_gather_element(vbl, vul, cptrul, 1);
+ vd = vec_gather_element(vd, vul, cptrd, 0);
+ vd = vec_gather_element(vd, vul, cptrd, 1);
+
+ vec_scatter_element(vsi, vui, ptrsi, 0);
+ vec_scatter_element(vsi, vui, ptrsi, 1);
+ vec_scatter_element(vsi, vui, ptrsi, 2);
+ vec_scatter_element(vsi, vui, ptrsi, 3);
+ vec_scatter_element(vui, vui, ptrui, 0);
+ vec_scatter_element(vui, vui, ptrui, 1);
+ vec_scatter_element(vui, vui, ptrui, 2);
+ vec_scatter_element(vui, vui, ptrui, 3);
+ vec_scatter_element(vbi, vui, ptrui, 0);
+ vec_scatter_element(vbi, vui, ptrui, 1);
+ vec_scatter_element(vbi, vui, ptrui, 2);
+ vec_scatter_element(vbi, vui, ptrui, 3);
+ vec_scatter_element(vsl, vul, ptrsl, 0);
+ vec_scatter_element(vsl, vul, ptrsl, 1);
+ vec_scatter_element(vul, vul, ptrul, 0);
+ vec_scatter_element(vul, vul, ptrul, 1);
+ vec_scatter_element(vbl, vul, ptrul, 0);
+ vec_scatter_element(vbl, vul, ptrul, 1);
+ vec_scatter_element(vd, vul, ptrd, 0);
+ vec_scatter_element(vd, vul, ptrd, 1);
+
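+  // vec_xld2/vec_xlw4 load a whole vector from a byte offset and
+  // vec_xstd2/vec_xstw4 are the matching stores; all of them should reduce to
+  // ordinary IR loads and stores, which is presumably why no CHECK lines
+  // accompany them.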
+ vsc = vec_xld2(idx, cptrsc);
+ vuc = vec_xld2(idx, cptruc);
+ vss = vec_xld2(idx, cptrss);
+ vus = vec_xld2(idx, cptrus);
+ vsi = vec_xld2(idx, cptrsi);
+ vui = vec_xld2(idx, cptrui);
+ vsl = vec_xld2(idx, cptrsl);
+ vul = vec_xld2(idx, cptrul);
+ vd = vec_xld2(idx, cptrd);
+
+ vsc = vec_xlw4(idx, cptrsc);
+ vuc = vec_xlw4(idx, cptruc);
+ vss = vec_xlw4(idx, cptrss);
+ vus = vec_xlw4(idx, cptrus);
+ vsi = vec_xlw4(idx, cptrsi);
+ vui = vec_xlw4(idx, cptrui);
+
+ vec_xstd2(vsc, idx, ptrsc);
+ vec_xstd2(vuc, idx, ptruc);
+ vec_xstd2(vss, idx, ptrss);
+ vec_xstd2(vus, idx, ptrus);
+ vec_xstd2(vsi, idx, ptrsi);
+ vec_xstd2(vui, idx, ptrui);
+ vec_xstd2(vsl, idx, ptrsl);
+ vec_xstd2(vul, idx, ptrul);
+ vec_xstd2(vd, idx, ptrd);
+
+ vec_xstw4(vsc, idx, ptrsc);
+ vec_xstw4(vuc, idx, ptruc);
+ vec_xstw4(vss, idx, ptrss);
+ vec_xstw4(vus, idx, ptrus);
+ vec_xstw4(vsi, idx, ptrsi);
+ vec_xstw4(vui, idx, ptrui);
+
+ vsc = vec_load_bndry(cptrsc, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vuc = vec_load_bndry(cptruc, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vss = vec_load_bndry(cptrss, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vus = vec_load_bndry(cptrus, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsi = vec_load_bndry(cptrsi, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vui = vec_load_bndry(cptrui, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsl = vec_load_bndry(cptrsl, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vul = vec_load_bndry(cptrul, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vd = vec_load_bndry(cptrd, 64);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0)
+ vsc = vec_load_bndry(cptrsc, 128);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 1)
+ vsc = vec_load_bndry(cptrsc, 256);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 2)
+ vsc = vec_load_bndry(cptrsc, 512);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 3)
+ vsc = vec_load_bndry(cptrsc, 1024);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 4)
+ vsc = vec_load_bndry(cptrsc, 2048);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 5)
+ vsc = vec_load_bndry(cptrsc, 4096);
+ // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 6)
+
+ vsc = vec_load_len(cptrsc, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vuc = vec_load_len(cptruc, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vss = vec_load_len(cptrss, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vus = vec_load_len(cptrus, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vsi = vec_load_len(cptrsi, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vui = vec_load_len(cptrui, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vsl = vec_load_len(cptrsl, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vul = vec_load_len(cptrul, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+ vd = vec_load_len(cptrd, idx);
+ // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}})
+
+ vec_store_len(vsc, ptrsc, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vuc, ptruc, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vss, ptrss, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vus, ptrus, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vsi, ptrsi, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vui, ptrui, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vsl, ptrsl, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vul, ptrul, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+ vec_store_len(vd, ptrd, idx);
+ // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}})
+
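+  // vec_load_pair assembles a <2 x i64> from two scalar operands and is
+  // expected to lower to a pair of insertelement instructions.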
+ vsl = vec_load_pair(sl, sl);
+ vul = vec_load_pair(ul, ul);
+
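+  // vec_genmask materializes one mask byte per bit of its 16-bit literal;
+  // vec_genmasks_N set the bit range [first, last] counted from the MSB and
+  // wrap around (complementing the gap) when first > last, which yields the
+  // constants checked below.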
+ vuc = vec_genmask(0);
+ // CHECK: <16 x i8> zeroinitializer
+ vuc = vec_genmask(0x8000);
+ // CHECK: <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ vuc = vec_genmask(0xffff);
+ // CHECK: <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ vuc = vec_genmasks_8(0, 7);
+ // CHECK: <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ vuc = vec_genmasks_8(1, 4);
+ // CHECK: <16 x i8> <i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120, i8 120>
+ vuc = vec_genmasks_8(6, 2);
+ // CHECK: <16 x i8> <i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29, i8 -29>
+ vus = vec_genmasks_16(0, 15);
+ // CHECK: <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ vus = vec_genmasks_16(2, 11);
+ // CHECK: <8 x i16> <i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368, i16 16368>
+ vus = vec_genmasks_16(9, 2);
+ // CHECK: <8 x i16> <i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065, i16 -8065>
+ vui = vec_genmasks_32(0, 31);
+ // CHECK: <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ vui = vec_genmasks_32(7, 20);
+ // CHECK: <4 x i32> <i32 33552384, i32 33552384, i32 33552384, i32 33552384>
+ vui = vec_genmasks_32(25, 4);
+ // CHECK: <4 x i32> <i32 -134217601, i32 -134217601, i32 -134217601, i32 -134217601>
+ vul = vec_genmasks_64(0, 63);
+ // CHECK: <2 x i64> <i64 -1, i64 -1>
+ vul = vec_genmasks_64(3, 40);
+ // CHECK: <2 x i64> <i64 2305843009205305344, i64 2305843009205305344>
+ vul = vec_genmasks_64(30, 11);
+ // CHECK: <2 x i64> <i64 -4503582447501313, i64 -4503582447501313>
+
+ vsc = vec_splat(vsc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vsc = vec_splat(vsc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vuc = vec_splat(vuc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vuc = vec_splat(vuc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vbc = vec_splat(vbc, 0);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vbc = vec_splat(vbc, 15);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+ vss = vec_splat(vss, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vss = vec_splat(vss, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vus = vec_splat(vus, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vus = vec_splat(vus, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vbs = vec_splat(vbs, 0);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vbs = vec_splat(vbs, 7);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ vsi = vec_splat(vsi, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vsi = vec_splat(vsi, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vui = vec_splat(vui, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vui = vec_splat(vui, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vbi = vec_splat(vbi, 0);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vbi = vec_splat(vbi, 3);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ vsl = vec_splat(vsl, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vsl = vec_splat(vsl, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vul = vec_splat(vul, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vul = vec_splat(vul, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vbl = vec_splat(vbl, 0);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vbl = vec_splat(vbl, 1);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ vd = vec_splat(vd, 0);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+ vd = vec_splat(vd, 1);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+
+ vsc = vec_splat_s8(-128);
+ // CHECK: <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
+ vsc = vec_splat_s8(127);
+ // CHECK: <16 x i8> <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>
+ vuc = vec_splat_u8(1);
+ // CHECK: <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ vuc = vec_splat_u8(254);
+ // CHECK: <16 x i8> <i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2>
+ vss = vec_splat_s16(-32768);
+ // CHECK: <8 x i16> <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
+ vss = vec_splat_s16(32767);
+ // CHECK: <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>
+ vus = vec_splat_u16(1);
+ // CHECK: <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ vus = vec_splat_u16(65534);
+ // CHECK: <8 x i16> <i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2, i16 -2>
+ vsi = vec_splat_s32(-32768);
+ // CHECK: <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ vsi = vec_splat_s32(32767);
+ // CHECK: <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+ vui = vec_splat_u32(-32768);
+ // CHECK: <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ vui = vec_splat_u32(32767);
+ // CHECK: <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+ vsl = vec_splat_s64(-32768);
+ // CHECK: <2 x i64> <i64 -32768, i64 -32768>
+ vsl = vec_splat_s64(32767);
+ // CHECK: <2 x i64> <i64 32767, i64 32767>
+ vul = vec_splat_u64(-32768);
+ // CHECK: <2 x i64> <i64 -32768, i64 -32768>
+ vul = vec_splat_u64(32767);
+ // CHECK: <2 x i64> <i64 32767, i64 32767>
+
+ vsc = vec_splats(sc);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vuc = vec_splats(uc);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer
+ vss = vec_splats(ss);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vus = vec_splats(us);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer
+ vsi = vec_splats(si);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vui = vec_splats(ui);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer
+ vsl = vec_splats(sl);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vul = vec_splats(ul);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer
+ vd = vec_splats(d);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer
+
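+  // vec_extend_s64 sign-extends the rightmost element of each doubleword to
+  // 64 bits and presumably maps to the vector sign-extend intrinsics
+  // (vsegb/vsegh/vsegf), so it is left unchecked here.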
+ vsl = vec_extend_s64(vsc);
+ vsl = vec_extend_s64(vss);
+ vsl = vec_extend_s64(vsi);
+
+  vsc = vec_mergeh(vsc, vsc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  vuc = vec_mergeh(vuc, vuc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  vbc = vec_mergeh(vbc, vbc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+  vss = vec_mergeh(vss, vss);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  vus = vec_mergeh(vus, vus);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  vbs = vec_mergeh(vbs, vbs);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+  vsi = vec_mergeh(vsi, vsi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  vui = vec_mergeh(vui, vui);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  vbi = vec_mergeh(vbi, vbi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+  vsl = vec_mergeh(vsl, vsl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  vul = vec_mergeh(vul, vul);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  vbl = vec_mergeh(vbl, vbl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 0, i32 2>
+  vd = vec_mergeh(vd, vd);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
+
+  vsc = vec_mergel(vsc, vsc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  vuc = vec_mergel(vuc, vuc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  vbc = vec_mergel(vbc, vbc);
+  // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  vss = vec_mergel(vss, vss);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  vus = vec_mergel(vus, vus);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  vbs = vec_mergel(vbs, vbs);
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  vsi = vec_mergel(vsi, vsi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+  vui = vec_mergel(vui, vui);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+  vbi = vec_mergel(vbi, vbi);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+  vsl = vec_mergel(vsl, vsl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  vul = vec_mergel(vul, vul);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  vbl = vec_mergel(vbl, vbl);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  vd = vec_mergel(vd, vd);
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+
+ vsc = vec_pack(vss, vss);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vuc = vec_pack(vus, vus);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vbc = vec_pack(vbs, vbs);
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ vss = vec_pack(vsi, vsi);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vus = vec_pack(vui, vui);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vbs = vec_pack(vbi, vbi);
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ vsi = vec_pack(vsl, vsl);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ vui = vec_pack(vul, vul);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ vbi = vec_pack(vbl, vbl);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+
+ vsc = vec_packs(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vpksh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packs(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vss = vec_packs(vsi, vsi);
+ // CHECK: call <8 x i16> @llvm.s390.vpksf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packs(vui, vui);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsi = vec_packs(vsl, vsl);
+ // CHECK: call <4 x i32> @llvm.s390.vpksg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packs(vul, vul);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_packs_cc(vss, vss, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpkshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packs_cc(vus, vus, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vss = vec_packs_cc(vsi, vsi, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpksfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packs_cc(vui, vui, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsi = vec_packs_cc(vsl, vsl, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpksgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packs_cc(vul, vul, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_packsu(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vuc = vec_packsu(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_packsu(vsi, vsi);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vus = vec_packsu(vui, vui);
+ // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_packsu(vsl, vsl);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vui = vec_packsu(vul, vul);
+ // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_packsu_cc(vus, vus, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_packsu_cc(vui, vui, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_packsu_cc(vul, vul, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vss = vec_unpackh(vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}})
+ vus = vec_unpackh(vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplhb(<16 x i8> %{{.*}})
+ vbs = vec_unpackh(vbc);
+ // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}})
+ vsi = vec_unpackh(vss);
+ // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}})
+ vui = vec_unpackh(vus);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhh(<8 x i16> %{{.*}})
+ vbi = vec_unpackh(vbs);
+ // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}})
+ vsl = vec_unpackh(vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}})
+ vul = vec_unpackh(vui);
+ // CHECK: call <2 x i64> @llvm.s390.vuplhf(<4 x i32> %{{.*}})
+ vbl = vec_unpackh(vbi);
+ // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}})
+
+ vss = vec_unpackl(vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}})
+ vus = vec_unpackl(vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vupllb(<16 x i8> %{{.*}})
+ vbs = vec_unpackl(vbc);
+ // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}})
+ vsi = vec_unpackl(vss);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}})
+ vui = vec_unpackl(vus);
+ // CHECK: call <4 x i32> @llvm.s390.vupllh(<8 x i16> %{{.*}})
+ vbi = vec_unpackl(vbs);
+ // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}})
+ vsl = vec_unpackl(vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}})
+ vul = vec_unpackl(vui);
+ // CHECK: call <2 x i64> @llvm.s390.vupllf(<4 x i32> %{{.*}})
+ vbl = vec_unpackl(vbi);
+ // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}})
+}
+
+void test_compare(void) {
+ vbc = vec_cmpeq(vsc, vsc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpeq(vuc, vuc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpeq(vbc, vbc);
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vss, vss);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vus, vus);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpeq(vbs, vbs);
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vsi, vsi);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vui, vui);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpeq(vbi, vbi);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vsl, vsl);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vul, vul);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vbl, vbl);
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpeq(vd, vd);
+ // CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmpge(vsc, vsc);
+ // CHECK: icmp sge <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpge(vuc, vuc);
+ // CHECK: icmp uge <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpge(vss, vss);
+ // CHECK: icmp sge <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpge(vus, vus);
+ // CHECK: icmp uge <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpge(vsi, vsi);
+ // CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpge(vui, vui);
+ // CHECK: icmp uge <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vsl, vsl);
+ // CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vul, vul);
+ // CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpge(vd, vd);
+ // CHECK: fcmp oge <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmpgt(vsc, vsc);
+ // CHECK: icmp sgt <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmpgt(vuc, vuc);
+ // CHECK: icmp ugt <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmpgt(vss, vss);
+ // CHECK: icmp sgt <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmpgt(vus, vus);
+ // CHECK: icmp ugt <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmpgt(vsi, vsi);
+ // CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmpgt(vui, vui);
+ // CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vsl, vsl);
+ // CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vul, vul);
+ // CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmpgt(vd, vd);
+ // CHECK: fcmp ogt <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmple(vsc, vsc);
+ // CHECK: icmp sle <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmple(vuc, vuc);
+ // CHECK: icmp ule <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmple(vss, vss);
+ // CHECK: icmp sle <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmple(vus, vus);
+ // CHECK: icmp ule <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmple(vsi, vsi);
+ // CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmple(vui, vui);
+ // CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vsl, vsl);
+ // CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vul, vul);
+ // CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmple(vd, vd);
+ // CHECK: fcmp ole <2 x double> %{{.*}}, %{{.*}}
+
+ vbc = vec_cmplt(vsc, vsc);
+ // CHECK: icmp slt <16 x i8> %{{.*}}, %{{.*}}
+ vbc = vec_cmplt(vuc, vuc);
+ // CHECK: icmp ult <16 x i8> %{{.*}}, %{{.*}}
+ vbs = vec_cmplt(vss, vss);
+ // CHECK: icmp slt <8 x i16> %{{.*}}, %{{.*}}
+ vbs = vec_cmplt(vus, vus);
+ // CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}}
+ vbi = vec_cmplt(vsi, vsi);
+ // CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}}
+ vbi = vec_cmplt(vui, vui);
+ // CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vsl, vsl);
+ // CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vul, vul);
+ // CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}}
+ vbl = vec_cmplt(vd, vd);
+ // CHECK: fcmp olt <2 x double> %{{.*}}, %{{.*}}
+
+ idx = vec_all_eq(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_eq(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_eq(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_eq(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_eq(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_ne(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ne(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ne(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ne(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ne(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_ge(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_ge(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_ge(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_ge(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_ge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_gt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_gt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_gt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_gt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_gt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_le(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_le(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_le(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_le(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_le(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_lt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_all_lt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_all_lt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_all_lt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_all_lt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_all_nge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_ngt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_nle(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_all_nlt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
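+  // Both vec_all_nan and vec_all_numeric test with VFTCI class mask 15, which
+  // should select the four NaN data classes; the two differ only in how the
+  // resulting condition code is interpreted.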
+ idx = vec_all_nan(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+ idx = vec_all_numeric(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+
+ idx = vec_any_eq(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_eq(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_eq(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_eq(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_eq(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_ne(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ne(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ne(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ne(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ne(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_ge(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_ge(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_ge(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_ge(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_ge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_gt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_gt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_gt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_gt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_gt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_le(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_le(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_le(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_le(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_le(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_lt(vsc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vsc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vsc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vuc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vuc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vuc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vbc, vbc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_any_lt(vss, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vss, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vss);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vus, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vus, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vus);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vbs, vbs);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ idx = vec_any_lt(vsi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vsi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vsi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vui, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vui, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vui);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vbi, vbi);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ idx = vec_any_lt(vsl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vsl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vsl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vul, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vul, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vul);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vbl, vbl);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ idx = vec_any_lt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_nge(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_ngt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_nle(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+ idx = vec_any_nlt(vd, vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+
+ idx = vec_any_nan(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+ idx = vec_any_numeric(vd);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
+}
+
+void test_integer(void) {
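+  /* vec_andc (a & ~b) and vec_nor (~(a | b)) lower to plain IR logic
+     operations rather than target intrinsics, so the calls below exercise
+     only the overload resolution and carry no CHECK lines. */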
+ vsc = vec_andc(vsc, vsc);
+ vsc = vec_andc(vsc, vbc);
+ vsc = vec_andc(vbc, vsc);
+ vuc = vec_andc(vuc, vuc);
+ vuc = vec_andc(vuc, vbc);
+ vuc = vec_andc(vbc, vuc);
+ vbc = vec_andc(vbc, vbc);
+ vss = vec_andc(vss, vss);
+ vss = vec_andc(vss, vbs);
+ vss = vec_andc(vbs, vss);
+ vus = vec_andc(vus, vus);
+ vus = vec_andc(vus, vbs);
+ vus = vec_andc(vbs, vus);
+ vbs = vec_andc(vbs, vbs);
+ vsi = vec_andc(vsi, vsi);
+ vsi = vec_andc(vsi, vbi);
+ vsi = vec_andc(vbi, vsi);
+ vui = vec_andc(vui, vui);
+ vui = vec_andc(vui, vbi);
+ vui = vec_andc(vbi, vui);
+ vbi = vec_andc(vbi, vbi);
+ vsl = vec_andc(vsl, vsl);
+ vsl = vec_andc(vsl, vbl);
+ vsl = vec_andc(vbl, vsl);
+ vul = vec_andc(vul, vul);
+ vul = vec_andc(vul, vbl);
+ vul = vec_andc(vbl, vul);
+ vbl = vec_andc(vbl, vbl);
+ vd = vec_andc(vd, vd);
+ vd = vec_andc(vd, vbl);
+ vd = vec_andc(vbl, vd);
+
+ vsc = vec_nor(vsc, vsc);
+ vsc = vec_nor(vsc, vbc);
+ vsc = vec_nor(vbc, vsc);
+ vuc = vec_nor(vuc, vuc);
+ vuc = vec_nor(vuc, vbc);
+ vuc = vec_nor(vbc, vuc);
+ vbc = vec_nor(vbc, vbc);
+ vss = vec_nor(vss, vss);
+ vss = vec_nor(vss, vbs);
+ vss = vec_nor(vbs, vss);
+ vus = vec_nor(vus, vus);
+ vus = vec_nor(vus, vbs);
+ vus = vec_nor(vbs, vus);
+ vbs = vec_nor(vbs, vbs);
+ vsi = vec_nor(vsi, vsi);
+ vsi = vec_nor(vsi, vbi);
+ vsi = vec_nor(vbi, vsi);
+ vui = vec_nor(vui, vui);
+ vui = vec_nor(vui, vbi);
+ vui = vec_nor(vbi, vui);
+ vbi = vec_nor(vbi, vbi);
+ vsl = vec_nor(vsl, vsl);
+ vsl = vec_nor(vsl, vbl);
+ vsl = vec_nor(vbl, vsl);
+ vul = vec_nor(vul, vul);
+ vul = vec_nor(vul, vbl);
+ vul = vec_nor(vbl, vul);
+ vbl = vec_nor(vbl, vbl);
+ vd = vec_nor(vd, vd);
+ vd = vec_nor(vd, vbl);
+ vd = vec_nor(vbl, vd);
+
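+  /* The bit-counting builtins map onto the generic LLVM ctlz/cttz/ctpop
+     intrinsics; the "i1 false" argument requests a defined result for
+     zero inputs. */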
+ vuc = vec_cntlz(vsc);
+ // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vuc = vec_cntlz(vuc);
+ // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vus = vec_cntlz(vss);
+ // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vus = vec_cntlz(vus);
+ // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vui = vec_cntlz(vsi);
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vui = vec_cntlz(vui);
+ // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vul = vec_cntlz(vsl);
+ // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ vul = vec_cntlz(vul);
+ // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
+
+ vuc = vec_cnttz(vsc);
+ // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vuc = vec_cnttz(vuc);
+ // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false)
+ vus = vec_cnttz(vss);
+ // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vus = vec_cnttz(vus);
+ // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false)
+ vui = vec_cnttz(vsi);
+ // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vui = vec_cnttz(vui);
+ // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false)
+ vul = vec_cnttz(vsl);
+ // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false)
+ vul = vec_cnttz(vul);
+ // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false)
+
+ vuc = vec_popcnt(vsc);
+ // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
+ vuc = vec_popcnt(vuc);
+ // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}})
+ vus = vec_popcnt(vss);
+ // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
+ vus = vec_popcnt(vus);
+ // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}})
+ vui = vec_popcnt(vsi);
+ // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
+ vui = vec_popcnt(vui);
+ // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}})
+ vul = vec_popcnt(vsl);
+ // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
+ vul = vec_popcnt(vul);
+ // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}})
+
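+  /* vec_rl rotates each element left by a per-element count (verllv*),
+     while vec_rli applies a single scalar count to all elements (verll*). */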
+ vsc = vec_rl(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_rl(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_rl(vss, vus);
+ // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_rl(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_rl(vsi, vui);
+ // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_rl(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_rl(vsl, vul);
+ // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_rl(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vsc = vec_rli(vsc, ul);
+ // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}})
+ vuc = vec_rli(vuc, ul);
+ // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}})
+ vss = vec_rli(vss, ul);
+ // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}})
+ vus = vec_rli(vus, ul);
+ // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}})
+ vsi = vec_rli(vsi, ul);
+ // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}})
+ vui = vec_rli(vui, ul);
+ // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}})
+ vsl = vec_rli(vsl, ul);
+ // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}})
+ vul = vec_rli(vul, ul);
+ // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}})
+
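+  /* vec_rl_mask selects the rotate-and-insert-under-mask forms (verim*);
+     the 8-bit immediate is probed at both extremes, 0 and 255. */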
+ vsc = vec_rl_mask(vsc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_rl_mask(vsc, vuc, 255);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255)
+ vuc = vec_rl_mask(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_rl_mask(vuc, vuc, 255);
+ // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255)
+ vss = vec_rl_mask(vss, vus, 0);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vss = vec_rl_mask(vss, vus, 255);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255)
+ vus = vec_rl_mask(vus, vus, 0);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_rl_mask(vus, vus, 255);
+ // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255)
+ vsi = vec_rl_mask(vsi, vui, 0);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vsi = vec_rl_mask(vsi, vui, 255);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255)
+ vui = vec_rl_mask(vui, vui, 0);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_rl_mask(vui, vui, 255);
+ // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255)
+ vsl = vec_rl_mask(vsl, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vsl = vec_rl_mask(vsl, vul, 255);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255)
+ vul = vec_rl_mask(vul, vul, 0);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0)
+ vul = vec_rl_mask(vul, vul, 255);
+ // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255)
+
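+  /* vec_sll shifts the entire 128-bit vector by a bit count.  Only the
+     <16 x i8> form of vsl exists, so every element and count type is
+     funneled through it, as the identical CHECK lines show. */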
+ vsc = vec_sll(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sll(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sll(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sll(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sll(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sll(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sll(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sll(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sll(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sll(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sll(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sll(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sll(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sll(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_slb(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_slb(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_slb(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_slb(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_slb(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_slb(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_slb(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_slb(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_slb(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_slb(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_slb(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_slb(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_slb(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_slb(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_slb(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_slb(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_slb(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_slb(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
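+  /* vec_sld concatenates its two operands and shifts left by 0-15 bytes,
+     passed straight through as the vsldb immediate. */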
+ vsc = vec_sld(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_sld(vsc, vsc, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vuc = vec_sld(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_sld(vuc, vuc, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vss = vec_sld(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_sld(vss, vss, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vus = vec_sld(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_sld(vus, vus, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vsi = vec_sld(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsi = vec_sld(vsi, vsi, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vui = vec_sld(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vui = vec_sld(vui, vui, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vsl = vec_sld(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsl = vec_sld(vsl, vsl, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vul = vec_sld(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vul = vec_sld(vul, vul, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+ vd = vec_sld(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vd = vec_sld(vd, vd, 15);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15)
+
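+  /* vec_sldw counts in words, so a third operand of 3 becomes the byte
+     immediate 12 (3 * 4) in the underlying vsldb call. */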
+ vsc = vec_sldw(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsc = vec_sldw(vsc, vsc, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vuc = vec_sldw(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_sldw(vuc, vuc, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vss = vec_sldw(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_sldw(vss, vss, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vus = vec_sldw(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_sldw(vus, vus, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vsi = vec_sldw(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsi = vec_sldw(vsi, vsi, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vui = vec_sldw(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vui = vec_sldw(vui, vui, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vsl = vec_sldw(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vsl = vec_sldw(vsl, vsl, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vul = vec_sldw(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vul = vec_sldw(vul, vul, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vd = vec_sldw(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vd = vec_sldw(vd, vd, 3);
+ // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+
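+  /* vec_sral is the arithmetic counterpart of vec_sll, again funneled
+     through the single byte form (vsra). */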
+ vsc = vec_sral(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sral(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_sral(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sral(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_sral(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_sral(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_sral(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_sral(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_sral(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sral(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_sral(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_sral(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_sral(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_sral(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srab(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srab(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srab(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srab(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srab(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srab(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srab(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srab(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srab(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srab(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srab(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srab(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srab(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srab(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srab(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srab(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srab(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srab(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_srl(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srl(vsc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srl(vsc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srl(vuc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbc = vec_srl(vbc, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srl(vss, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srl(vus, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbs = vec_srl(vbs, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srl(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srl(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbi = vec_srl(vbi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srl(vsl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srl(vul, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vbl = vec_srl(vbl, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
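+  /* vec_srb is the byte-granular shift right; note that even the signed
+     overloads use the logical vsrlb form. */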
+ vsc = vec_srb(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsc = vec_srb(vsc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srb(vuc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_srb(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srb(vss, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_srb(vss, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srb(vus, vss);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_srb(vus, vus);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srb(vsi, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_srb(vsi, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srb(vui, vsi);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_srb(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srb(vsl, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsl = vec_srb(vsl, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srb(vul, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vul = vec_srb(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srb(vd, vsl);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vd = vec_srb(vd, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
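+  /* vec_abs, vec_max, and vec_min expand to ordinary compare/select IR,
+     so these calls only verify that every overload type-checks. */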
+ vsc = vec_abs(vsc);
+ vss = vec_abs(vss);
+ vsi = vec_abs(vsi);
+ vsl = vec_abs(vsl);
+
+ vsc = vec_max(vsc, vsc);
+ vsc = vec_max(vsc, vbc);
+ vsc = vec_max(vbc, vsc);
+ vuc = vec_max(vuc, vuc);
+ vuc = vec_max(vuc, vbc);
+ vuc = vec_max(vbc, vuc);
+ vss = vec_max(vss, vss);
+ vss = vec_max(vss, vbs);
+ vss = vec_max(vbs, vss);
+ vus = vec_max(vus, vus);
+ vus = vec_max(vus, vbs);
+ vus = vec_max(vbs, vus);
+ vsi = vec_max(vsi, vsi);
+ vsi = vec_max(vsi, vbi);
+ vsi = vec_max(vbi, vsi);
+ vui = vec_max(vui, vui);
+ vui = vec_max(vui, vbi);
+ vui = vec_max(vbi, vui);
+ vsl = vec_max(vsl, vsl);
+ vsl = vec_max(vsl, vbl);
+ vsl = vec_max(vbl, vsl);
+ vul = vec_max(vul, vul);
+ vul = vec_max(vul, vbl);
+ vul = vec_max(vbl, vul);
+ vd = vec_max(vd, vd);
+
+ vsc = vec_min(vsc, vsc);
+ vsc = vec_min(vsc, vbc);
+ vsc = vec_min(vbc, vsc);
+ vuc = vec_min(vuc, vuc);
+ vuc = vec_min(vuc, vbc);
+ vuc = vec_min(vbc, vuc);
+ vss = vec_min(vss, vss);
+ vss = vec_min(vss, vbs);
+ vss = vec_min(vbs, vss);
+ vus = vec_min(vus, vus);
+ vus = vec_min(vus, vbs);
+ vus = vec_min(vbs, vus);
+ vsi = vec_min(vsi, vsi);
+ vsi = vec_min(vsi, vbi);
+ vsi = vec_min(vbi, vsi);
+ vui = vec_min(vui, vui);
+ vui = vec_min(vui, vbi);
+ vui = vec_min(vbi, vui);
+ vsl = vec_min(vsl, vsl);
+ vsl = vec_min(vsl, vbl);
+ vsl = vec_min(vbl, vsl);
+ vul = vec_min(vul, vul);
+ vul = vec_min(vul, vbl);
+ vul = vec_min(vbl, vul);
+ vd = vec_min(vd, vd);
+
+ vuc = vec_addc(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaccb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_addc(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vacch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_addc(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vaccf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_addc(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vaccg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
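+  /* The *_u128 variants treat their <16 x i8> operands as single 128-bit
+     integers: vaq/vaccq for sum and carry-out, vacq/vacccq for the
+     carry-in forms. */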
+ vuc = vec_add_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_addc_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vaccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_adde_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vacq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_addec_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vacccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+
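+  /* vec_avg is the rounded average, with distinct signed (vavg*) and
+     unsigned (vavgl*) intrinsics per element width. */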
+ vsc = vec_avg(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vavgb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_avg(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vavglb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_avg(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vavgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_avg(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vavglh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_avg(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vavgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_avg(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vavglf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_avg(vsl, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vavgg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_avg(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vavglg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vui = vec_checksum(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vcksm(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
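+  /* vec_gfmsum does carry-less (Galois-field) multiplication, folding the
+     products into double-width elements; the _128 variant yields a single
+     128-bit result. */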
+ vus = vec_gfmsum(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vgfmb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_gfmsum(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vgfmh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_gfmsum(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vgfmf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_gfmsum_128(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vgfmg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vus = vec_gfmsum_accum(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vgfmab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_gfmsum_accum(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vgfmah(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_gfmsum_accum(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vgfmaf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vuc = vec_gfmsum_accum_128(vul, vul, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vgfmag(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}})
+
+ vsc = vec_mladd(vsc, vsc, vsc);
+ vsc = vec_mladd(vuc, vsc, vsc);
+ vsc = vec_mladd(vsc, vuc, vuc);
+ vuc = vec_mladd(vuc, vuc, vuc);
+ vss = vec_mladd(vss, vss, vss);
+ vss = vec_mladd(vus, vss, vss);
+ vss = vec_mladd(vss, vus, vus);
+ vus = vec_mladd(vus, vus, vus);
+ vsi = vec_mladd(vsi, vsi, vsi);
+ vsi = vec_mladd(vui, vsi, vsi);
+ vsi = vec_mladd(vsi, vui, vui);
+ vui = vec_mladd(vui, vui, vui);
+
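+  /* vec_mhadd is the multiply-and-add variant that keeps the high half of
+     the result (vmah/vmalh families). */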
+ vsc = vec_mhadd(vsc, vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vmahb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_mhadd(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vmalhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_mhadd(vss, vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmahh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_mhadd(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmalhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_mhadd(vsi, vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmahf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_mhadd(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmalhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
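+  /* vec_meadd (and vec_moadd below) multiply the even/odd elements into
+     double-width products and add the double-width third operand. */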
+ vss = vec_meadd(vsc, vsc, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_meadd(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmaleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_meadd(vss, vss, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_meadd(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmaleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_meadd(vsi, vsi, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vmaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_meadd(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vmalef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+
+ vss = vec_moadd(vsc, vsc, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmaob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_moadd(vuc, vuc, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmalob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_moadd(vss, vss, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmaoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_moadd(vus, vus, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmaloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}})
+ vsl = vec_moadd(vsi, vsi, vsl);
+ // CHECK: call <2 x i64> @llvm.s390.vmaof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+ vul = vec_moadd(vui, vui, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vmalof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}})
+
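+  /* vec_mulh returns the high half of each element product, with signed
+     (vmh) and unsigned (vmlh) families. */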
+ vsc = vec_mulh(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vmhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_mulh(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vmlhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_mulh(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vmhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_mulh(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vmlhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_mulh(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vmhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_mulh(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vmlhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
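+  /* vec_mule/vec_mulo are the widening even/odd multiplies without an
+     accumulator. */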
+ vss = vec_mule(vsc, vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vmeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_mule(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vmleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_mule(vss, vss);
+ // CHECK: call <4 x i32> @llvm.s390.vmeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_mule(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vmleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsl = vec_mule(vsi, vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vmef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_mule(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vmlef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vss = vec_mulo(vsc, vsc);
+ // CHECK: call <8 x i16> @llvm.s390.vmob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_mulo(vuc, vuc);
+ // CHECK: call <8 x i16> @llvm.s390.vmlob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vsi = vec_mulo(vss, vss);
+ // CHECK: call <4 x i32> @llvm.s390.vmoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_mulo(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vmloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsl = vec_mulo(vsi, vsi);
+ // CHECK: call <2 x i64> @llvm.s390.vmof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_mulo(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vmlof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
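+  // vec_mulh returns the high half of each product at the original element
+  // width, while vec_mule/vec_mulo multiply the even-/odd-indexed elements
+  // and return the full double-width products, hence the widened result
+  // types of the intrinsics above.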
+
+ vuc = vec_subc(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vscbib(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vus = vec_subc(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vscbih(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vui = vec_subc(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vscbif(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vul = vec_subc(vul, vul);
+ // CHECK: call <2 x i64> @llvm.s390.vscbig(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ vuc = vec_sub_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_subc_u128(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vscbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_sube_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_subec_u128(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vsbcbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
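+  // The *_u128 variants treat their <16 x i8> operands as single unsigned
+  // 128-bit integers, which is why every operand and result is typed as
+  // vector unsigned char.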
+
+ vui = vec_sum4(vuc, vuc);
+ // CHECK: call <4 x i32> @llvm.s390.vsumb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vui = vec_sum4(vus, vus);
+ // CHECK: call <4 x i32> @llvm.s390.vsumh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_sum2(vus, vus);
+ // CHECK: call <2 x i64> @llvm.s390.vsumgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vul = vec_sum2(vui, vui);
+ // CHECK: call <2 x i64> @llvm.s390.vsumgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_sum_u128(vui, vui);
+ // CHECK: call <16 x i8> @llvm.s390.vsumqf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vuc = vec_sum_u128(vul, vul);
+ // CHECK: call <16 x i8> @llvm.s390.vsumqg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+
+ idx = vec_test_mask(vsc, vuc);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vuc, vuc);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vss, vus);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vus, vus);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vsi, vui);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vui, vui);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vsl, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vul, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ idx = vec_test_mask(vd, vul);
+ // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
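+  // All vec_test_mask overloads reinterpret their operands as <16 x i8> and
+  // funnel into the single byte-level @llvm.s390.vtm intrinsic.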
+}
+
+void test_string(void) {
+ vsc = vec_cp_until_zero(vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vuc = vec_cp_until_zero(vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vbc = vec_cp_until_zero(vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}})
+ vss = vec_cp_until_zero(vss);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vus = vec_cp_until_zero(vus);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vbs = vec_cp_until_zero(vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}})
+ vsi = vec_cp_until_zero(vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+ vui = vec_cp_until_zero(vui);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+ vbi = vec_cp_until_zero(vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}})
+
+ vsc = vec_cp_until_zero_cc(vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vuc = vec_cp_until_zero_cc(vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vbc = vec_cp_until_zero_cc(vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}})
+ vss = vec_cp_until_zero_cc(vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vus = vec_cp_until_zero_cc(vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vbs = vec_cp_until_zero_cc(vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}})
+ vsi = vec_cp_until_zero_cc(vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+ vui = vec_cp_until_zero_cc(vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+ vbi = vec_cp_until_zero_cc(vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpeq_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpeq_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpeq_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpeq_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpeq_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpeq_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vsc = vec_cmpne_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = vec_cmpne_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vss = vec_cmpne_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vus = vec_cmpne_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ vsi = vec_cmpne_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+ vui = vec_cmpne_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+
+ vbc = vec_cmprg(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_cmprg(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_cmprg(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vbc = vec_cmprg_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_cmprg_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_cmprg_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vuc = vec_cmprg_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_or_0_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_or_0_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_or_0_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vuc = vec_cmprg_or_0_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vus = vec_cmprg_or_0_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vui = vec_cmprg_or_0_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vbc = vec_cmpnrg(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_cmpnrg(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_cmpnrg(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vbc = vec_cmpnrg_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_cmpnrg_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_cmpnrg_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vuc = vec_cmpnrg_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_or_0_idx(vuc, vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_or_0_idx(vus, vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_or_0_idx(vui, vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vuc = vec_cmpnrg_or_0_idx_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vus = vec_cmpnrg_or_0_idx_cc(vus, vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vui = vec_cmpnrg_or_0_idx_cc(vui, vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vbc = vec_find_any_eq(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vbc = vec_find_any_eq_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbc = vec_find_any_eq_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbs = vec_find_any_eq_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+ vbi = vec_find_any_eq_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4)
+
+ vsc = vec_find_any_eq_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vsc = vec_find_any_eq_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = vec_find_any_eq_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vss = vec_find_any_eq_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vus = vec_find_any_eq_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0)
+ vsi = vec_find_any_eq_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+ vui = vec_find_any_eq_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0)
+
+ vbc = vec_find_any_ne(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vbc = vec_find_any_ne_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbc = vec_find_any_ne_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbs = vec_find_any_ne_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+ vbi = vec_find_any_ne_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12)
+
+ vsc = vec_find_any_ne_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_or_0_idx(vsc, vsc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx(vuc, vuc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx(vbc, vbc);
+ // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_or_0_idx(vss, vss);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx(vus, vus);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx(vbs, vbs);
+ // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_or_0_idx(vsi, vsi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx(vui, vui);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx(vbi, vbi);
+ // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+
+ vsc = vec_find_any_ne_or_0_idx_cc(vsc, vsc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx_cc(vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vuc = vec_find_any_ne_or_0_idx_cc(vbc, vbc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8)
+ vss = vec_find_any_ne_or_0_idx_cc(vss, vss, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx_cc(vus, vus, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vus = vec_find_any_ne_or_0_idx_cc(vbs, vbs, &cc);
+ // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8)
+ vsi = vec_find_any_ne_or_0_idx_cc(vsi, vsi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx_cc(vui, vui, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+ vui = vec_find_any_ne_or_0_idx_cc(vbi, vbi, &cc);
+ // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8)
+}
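+
+// The _cc variants above additionally return the instruction's condition code
+// (0-3) through their int* argument. A minimal usage sketch (illustrative
+// only, not part of the test; 'haystack' and 'needles' are hypothetical
+// operands):
+//
+//   int cc;
+//   vector unsigned char idx =
+//       vec_find_any_eq_idx_cc(haystack, needles, &cc);
+//   /* branch on cc (set from the underlying vfaebs) before consuming idx */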
+
+void test_float(void) {
+ vd = vec_abs(vd);
+ // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+
+ vd = vec_nabs(vd);
+ // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
+ // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[ABS]]
+
+ vd = vec_madd(vd, vd, vd);
+ // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ vd = vec_msub(vd, vd, vd);
+ // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.*}}
+ // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+ vd = vec_sqrt(vd);
+ // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}})
+
+ vd = vec_ld2f(cptrf);
+ // CHECK: [[VAL:%[^ ]+]] = load <2 x float>, <2 x float>* %{{.*}}
+ // CHECK: fpext <2 x float> [[VAL]] to <2 x double>
+ vec_st2f(vd, ptrf);
+ // CHECK: [[VAL:%[^ ]+]] = fptrunc <2 x double> %{{.*}} to <2 x float>
+ // CHECK: store <2 x float> [[VAL]], <2 x float>* %{{.*}}
+
+ vd = vec_ctd(vsl, 0);
+ // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
+ vd = vec_ctd(vul, 0);
+ // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
+ vd = vec_ctd(vsl, 1);
+ // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 5.000000e-01, double 5.000000e-01>
+ vd = vec_ctd(vul, 1);
+ // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 5.000000e-01, double 5.000000e-01>
+ vd = vec_ctd(vsl, 31);
+ // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 0x3E00000000000000, double 0x3E00000000000000>
+ vd = vec_ctd(vul, 31);
+ // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK: fmul <2 x double> [[VAL]], <double 0x3E00000000000000, double 0x3E00000000000000>
+
+ vsl = vec_ctsl(vd, 0);
+ // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
+ vul = vec_ctul(vd, 0);
+ // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
+ vsl = vec_ctsl(vd, 1);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 2.000000e+00, double 2.000000e+00>
+ // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64>
+ vul = vec_ctul(vd, 1);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 2.000000e+00, double 2.000000e+00>
+ // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64>
+ vsl = vec_ctsl(vd, 31);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 0x41E0000000000000, double 0x41E0000000000000>
+ // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64>
+ vul = vec_ctul(vd, 31);
+ // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, <double 0x41E0000000000000, double 0x41E0000000000000>
+ // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64>
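+  // The second argument of vec_ctd/vec_ctsl/vec_ctul is a fixed-point scale:
+  // vec_ctd(x, n) computes (double)x * 2^-n (0.5 for n == 1, and
+  // 0x3E00000000000000 == 2^-31 above), while the conversions back to
+  // integer first multiply by 2^n (0x41E0000000000000 == 2^31) and then
+  // truncate.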
+
+ vd = vec_roundp(vd);
+ // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
+ vd = vec_ceil(vd);
+ // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundm(vd);
+ // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
+ vd = vec_floor(vd);
+ // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundz(vd);
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
+ vd = vec_trunc(vd);
+ // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}})
+ vd = vec_roundc(vd);
+ // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{.*}})
+ vd = vec_round(vd);
+ // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
+
+ vbl = vec_fp_test_data_class(vd, 0, &cc);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0)
+ vbl = vec_fp_test_data_class(vd, 4095, &cc);
+ // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
+}
diff --git a/test/CodeGen/integer-overflow.c b/test/CodeGen/integer-overflow.c
index de3b53f4b5b36..6a7c3e51ee1bb 100644
--- a/test/CodeGen/integer-overflow.c
+++ b/test/CodeGen/integer-overflow.c
@@ -72,4 +72,11 @@ void test1() {
// TRAPV: add i8 {{.*}}, 1
// CATCH_UB: add i8 {{.*}}, 1
++PR9350;
+
+ // PR24256: don't instrument __builtin_frame_address.
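+  // (The builtin requires an integer constant argument, so the overflow
+  // checkers must fold '0 + 0' away instead of emitting an instrumented add.)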
+ __builtin_frame_address(0 + 0);
+ // DEFAULT: call i8* @llvm.frameaddress(i32 0)
+ // WRAPV: call i8* @llvm.frameaddress(i32 0)
+ // TRAPV: call i8* @llvm.frameaddress(i32 0)
+ // CATCH_UB: call i8* @llvm.frameaddress(i32 0)
}
diff --git a/test/CodeGen/le32-regparm.c b/test/CodeGen/le32-regparm.c
index c8f70694c43d1..ecb1030aa1ff2 100644
--- a/test/CodeGen/le32-regparm.c
+++ b/test/CodeGen/le32-regparm.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple le32-unknown-nacl %s -fsyntax-only -verify
+// RUN: %clang_cc1 -triple aarch64 %s -fsyntax-only -verify
void __attribute__((regparm(2))) fc_f1(int i, int j, int k) {} // expected-error{{'regparm' is not valid on this platform}}
diff --git a/test/CodeGen/long_double_fp128.cpp b/test/CodeGen/long_double_fp128.cpp
new file mode 100644
index 0000000000000..1780255cea97e
--- /dev/null
+++ b/test/CodeGen/long_double_fp128.cpp
@@ -0,0 +1,22 @@
+// RUN: %clang_cc1 -triple x86_64-linux-android -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=A64
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=G64
+// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=P64
+// RUN: %clang_cc1 -triple i686-linux-android -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=A32
+// RUN: %clang_cc1 -triple i686-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=G32
+// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=P32
+
+// Check the mangled name of long double.
+// Android's GCC and LLVM both use fp128 for long double.
+void test(long, float, double, long double, long double _Complex) { }
+// A64: define void @_Z4testlfdgCg(i64, float, double, fp128, { fp128, fp128 }*
+// G64: define void @_Z4testlfdeCe(i64, float, double, x86_fp80, { x86_fp80, x86_fp80 }*
+// P64: define void @_Z4testlfdgCg(i64, float, double, ppc_fp128, ppc_fp128 {{.*}}, ppc_fp128
+// A32: define void @_Z4testlfdeCe(i32, float, double, double, { double, double }*
+// G32: define void @_Z4testlfdeCe(i32, float, double, x86_fp80, { x86_fp80, x86_fp80 }*
+// P32: define void @_Z4testlfdgCg(i32, float, double, ppc_fp128, { ppc_fp128, ppc_fp128 }*
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
index 1712df5256ce9..5a77597c34031 100644
--- a/test/CodeGen/palignr.c
+++ b/test/CodeGen/palignr.c
@@ -4,13 +4,13 @@
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
typedef __attribute__((vector_size(16))) int int4;
-// CHECK: palignr
+// CHECK: palignr $15, %xmm1, %xmm0
int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
// CHECK: ret
// CHECK: ret
// CHECK-NOT: palignr
int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
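// For shifts of 16 bytes or more only the first operand can contribute:
// 16 returns 'a' unchanged (hence only the rets above), 17 becomes a one-byte
// right shift (the psrldq below), and 32 shifts everything out, leaving zero.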
-// CHECK: psrldq
+// CHECK: psrldq $1, %xmm0
int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
// CHECK: xor
int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
diff --git a/test/CodeGen/x86_64-fp128.c b/test/CodeGen/x86_64-fp128.c
new file mode 100644
index 0000000000000..0147721f9b6ae
--- /dev/null
+++ b/test/CodeGen/x86_64-fp128.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -triple x86_64-linux-android -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=ANDROID --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -O -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU --check-prefix=CHECK
+
+// Android uses fp128 for long double but other x86_64 targets use x86_fp80.
+
+long double dataLD = 1.0L;
+// ANDROID: @dataLD = global fp128 0xL00000000000000003FFF000000000000, align 16
+// GNU: @dataLD = global x86_fp80 0xK3FFF8000000000000000, align 16
+
+long double _Complex dataLDC = {1.0L, 1.0L};
+// ANDROID: @dataLDC = global { fp128, fp128 } { fp128 0xL00000000000000003FFF000000000000, fp128 0xL00000000000000003FFF000000000000 }, align 16
+// GNU: @dataLDC = global { x86_fp80, x86_fp80 } { x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000 }, align 16
+
+long double TestLD(long double x) {
+ return x * x;
+// ANDROID: define fp128 @TestLD(fp128 %x)
+// GNU: define x86_fp80 @TestLD(x86_fp80 %x)
+}
+
+long double _Complex TestLDC(long double _Complex x) {
+ return x * x;
+// ANDROID: define void @TestLDC({ fp128, fp128 }* {{.*}}, { fp128, fp128 }* {{.*}} %x)
+// GNU: define { x86_fp80, x86_fp80 } @TestLDC({ x86_fp80, x86_fp80 }* {{.*}} %x)
+}
+
+typedef __builtin_va_list va_list;
+
+int TestGetVarInt(va_list ap) {
+ return __builtin_va_arg(ap, int);
+// Since an int can be passed either in memory or in a register, there are two branches.
+// CHECK: define i32 @TestGetVarInt(
+// CHECK: br label
+// CHECK: br label
+// CHECK: = phi
+// CHECK: ret i32
+}
+
+double TestGetVarDouble(va_list ap) {
+ return __builtin_va_arg(ap, double);
+// Since a double can be passed either in memory or in a register, there are two branches.
+// CHECK: define double @TestGetVarDouble(
+// CHECK: br label
+// CHECK: br label
+// CHECK: = phi
+// CHECK: ret double
+}
+
+long double TestGetVarLD(va_list ap) {
+ return __builtin_va_arg(ap, long double);
+// fp128 can be passed in memory or in a register, but x86_fp80 is always passed in memory.
+// ANDROID: define fp128 @TestGetVarLD(
+// GNU: define x86_fp80 @TestGetVarLD(
+// ANDROID: br label
+// ANDROID: br label
+// ANDROID: = phi
+// GNU-NOT: br
+// GNU-NOT: = phi
+// ANDROID: ret fp128
+// GNU: ret x86_fp80
+}
+
+long double _Complex TestGetVarLDC(va_list ap) {
+ return __builtin_va_arg(ap, long double _Complex);
+// A pair of fp128 or x86_fp80 values is passed as a struct in memory.
+// ANDROID: define void @TestGetVarLDC({ fp128, fp128 }* {{.*}}, %struct.__va_list_tag*
+// GNU: define { x86_fp80, x86_fp80 } @TestGetVarLDC(
+// CHECK-NOT: br
+// CHECK-NOT: phi
+// ANDROID: ret void
+// GNU: ret { x86_fp80, x86_fp80 }
+}
+
+void TestVarArg(const char *s, ...);
+
+void TestPassVarInt(int x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarInt(i32 %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, i32 %x)
+}
+
+void TestPassVarFloat(float x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarFloat(float %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, double %
+}
+
+void TestPassVarDouble(double x) {
+ TestVarArg("A", x);
+// CHECK: define void @TestPassVarDouble(double %x)
+// CHECK: call {{.*}} @TestVarArg(i8* {{.*}}, double %x
+}
+
+void TestPassVarLD(long double x) {
+ TestVarArg("A", x);
+// ANDROID: define void @TestPassVarLD(fp128 %x)
+// ANDROID: call {{.*}} @TestVarArg(i8* {{.*}}, fp128 %x
+// GNU: define void @TestPassVarLD(x86_fp80 %x)
+// GNU: call {{.*}} @TestVarArg(i8* {{.*}}, x86_fp80 %x
+}
+
+void TestPassVarLDC(long double _Complex x) {
+ TestVarArg("A", x);
+// ANDROID: define void @TestPassVarLDC({ fp128, fp128 }* {{.*}} %x)
+// ANDROID: store fp128 %{{.*}}, fp128* %
+// ANDROID-NEXT: store fp128 %{{.*}}, fp128* %
+// ANDROID-NEXT: call {{.*}} @TestVarArg(i8* {{.*}}, { fp128, fp128 }* {{.*}} %
+// GNU: define void @TestPassVarLDC({ x86_fp80, x86_fp80 }* {{.*}} %x)
+// GNU: store x86_fp80 %{{.*}}, x86_fp80* %
+// GNU-NEXT: store x86_fp80 %{{.*}}, x86_fp80* %
+// GNU-NEXT: call {{.*}} @TestVarArg(i8* {{.*}}, { x86_fp80, x86_fp80 }* {{.*}} %
+}
diff --git a/test/CodeGen/zvector.c b/test/CodeGen/zvector.c
new file mode 100644
index 0000000000000..ebe7e415e1db8
--- /dev/null
+++ b/test/CodeGen/zvector.c
@@ -0,0 +1,2798 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \
+// RUN: -O -emit-llvm -o - -W -Wall -Werror %s | FileCheck %s
+
+volatile vector signed char sc, sc2;
+volatile vector unsigned char uc, uc2;
+volatile vector bool char bc, bc2;
+
+volatile vector signed short ss, ss2;
+volatile vector unsigned short us, us2;
+volatile vector bool short bs, bs2;
+
+volatile vector signed int si, si2;
+volatile vector unsigned int ui, ui2;
+volatile vector bool int bi, bi2;
+
+volatile vector signed long long sl, sl2;
+volatile vector unsigned long long ul, ul2;
+volatile vector bool long long bl, bl2;
+
+volatile vector double fd, fd2;
+
+volatile int cnt;
+
+void test_assign (void)
+{
+// CHECK-LABEL: test_assign
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
+ sc = sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
+ uc = uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
+ ss = ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
+ us = us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
+ si = si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
+ ui = ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
+ sl = sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
+ ul = ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
+ fd = fd2;
+}
+
+void test_pos (void)
+{
+// CHECK-LABEL: test_pos
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
+ sc = +sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
+ uc = +uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
+ ss = +ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
+ us = +us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
+ si = +si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
+ ui = +ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
+ sl = +sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
+ ul = +ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
+ fd = +fd2;
+}
+
+void test_neg (void)
+{
+// CHECK-LABEL: test_neg
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> zeroinitializer, [[VAL]]
+ sc = -sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> zeroinitializer, [[VAL]]
+ ss = -ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> zeroinitializer, [[VAL]]
+ si = -si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> zeroinitializer, [[VAL]]
+ sl = -sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[VAL]]
+ fd = -fd2;
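+// Integer negation is a plain 'sub' from zero, but floating-point negation
+// must be an fsub from -0.0 so that negating +0.0 correctly yields -0.0.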
+}
+
+void test_preinc (void)
+{
+// CHECK-LABEL: test_preinc
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ++sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ++uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ++ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ++us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ++si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ++ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ++sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ++ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
+ ++fd2;
+}
+
+void test_postinc (void)
+{
+// CHECK-LABEL: test_postinc
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ sc2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ uc2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ss2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ us2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ si2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
+ ui2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ sl2++;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
+ ul2++;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
+ fd2++;
+}
+
+void test_predec (void)
+{
+// CHECK-LABEL: test_predec
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ --sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ --uc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ --ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ --us2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ --si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ --ui2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ --sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ --ul2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
+ --fd2;
+}
+
+void test_postdec (void)
+{
+// CHECK-LABEL: test_postdec
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ sc2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ uc2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ss2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ us2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ si2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ui2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ sl2--;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ ul2--;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
+ fd2--;
+}
+
+void test_add (void)
+{
+// CHECK-LABEL: test_add
+
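+// Mixing a vector bool operand with a signed or unsigned vector is accepted;
+// the result takes the non-bool operand's type and lowers to a plain
+// elementwise add.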
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc + sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc + bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc + sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc + uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc + bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc + uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss + ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss + bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs + ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = us + us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = us + bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs + us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = si + si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = si + bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi + si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui + ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui + bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi + ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl + sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl + bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl + sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul + ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul + bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl + ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd + fd2;
+}
+
+void test_add_assign (void)
+{
+// CHECK-LABEL: test_add_assign
+
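+// For compound assignment the right-hand side is evaluated first, so each
+// *2 global is loaded before the destination operand.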
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ sc += sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ sc += bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ uc += uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+ uc += bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ ss += ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ ss += bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ us += us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
+ us += bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ si += si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ si += bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ ui += ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
+ ui += bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ sl += sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ sl += bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ ul += ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
+ ul += bl2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fadd <2 x double> [[VAL2]], [[VAL1]]
+ fd += fd2;
+}
+
+void test_sub (void)
+{
+// CHECK-LABEL: test_sub
+
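+// Subtraction is non-commutative, so the operand order is pinned: the
+// left-hand operand is always the minuend.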
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc - sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc - bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc = bc - sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc - uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc - bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc = bc - uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss - ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss - bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss = bs - ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = us - us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = us - bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us = bs - us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = si - si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = si - bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si = bi - si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui - ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui - bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui = bi - ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl - sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl - bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl = bl - sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul - ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul - bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul = bl - ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd - fd2;
+}
+
+void test_sub_assign (void)
+{
+// CHECK-LABEL: test_sub_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc -= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ sc -= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc -= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+ uc -= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss -= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ ss -= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us -= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
+ us -= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si -= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ si -= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui -= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
+ ui -= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl -= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ sl -= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul -= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
+ ul -= bl2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
+ fd -= fd2;
+}
+
+void test_mul (void)
+{
+// CHECK-LABEL: test_mul
+
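+// Note: unlike + and -, no vector bool cases appear here; multiplication is
+// only exercised on arithmetic element types.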
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc * sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc * uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss * ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
+ us = us * us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
+ si = si * si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui * ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl * sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul * ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fmul <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd * fd2;
+}
+
+void test_mul_assign (void)
+{
+// CHECK-LABEL: test_mul_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+ sc *= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+ uc *= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
+ ss *= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
+ us *= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
+ si *= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
+ ui *= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
+ sl *= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
+ ul *= ul2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fmul <2 x double> [[VAL2]], [[VAL1]]
+ fd *= fd2;
+}
+
+void test_div (void)
+{
+// CHECK-LABEL: test_div
+
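+// Division selects sdiv or udiv from the element signedness, and fdiv for
+// vector double.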
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc / sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc / uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss / ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
+ us = us / us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
+ si = si / si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui / ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl / sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul / ul2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
+ fd = fd / fd2;
+}
+
+void test_div_assign (void)
+{
+// CHECK-LABEL: test_div_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+ sc /= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
+ uc /= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
+ ss /= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
+ us /= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
+ si /= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
+ ui /= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
+ sl /= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
+ ul /= ul2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
+ fd /= fd2;
+}
+
+void test_rem (void)
+{
+// CHECK-LABEL: test_rem
+
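+// C defines % for integer operands only, so there is no vector double case;
+// srem/urem again follow the element signedness.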
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc % sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc % uc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss % ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
+ us = us % us2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
+ si = si % si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui % ui2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl % sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul % ul2;
+}
+
+void test_rem_assign (void)
+{
+// CHECK-LABEL: test_rem_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+ sc %= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
+ uc %= uc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
+ ss %= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
+ us %= us2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
+ si %= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
+ ui %= ui2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
+ sl %= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
+ ul %= ul2;
+}
+
+void test_not (void)
+{
+// CHECK-LABEL: test_not
+
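+// ~ lowers to an xor with an all-ones splat; note it is also applied to the
+// vector bool globals below.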
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ sc = ~sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ uc = ~uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ bc = ~bc2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ss = ~ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ us = ~us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ bs = ~bs2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ si = ~si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ui = ~ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ bi = ~bi2;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ sl = ~sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ ul = ~ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
+ bl = ~bl2;
+}
+
+void test_and (void)
+{
+// CHECK-LABEL: test_and
+
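+// The bitwise operators additionally accept two vector bool operands,
+// yielding a vector bool result (see bc = bc & bc2 below).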
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc & sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc & bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc & sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc & uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc & bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc & uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+ bc = bc & bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss & ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss & bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs & ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = us & us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = us & bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs & us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
+ bs = bs & bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = si & si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = si & bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi & si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui & ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui & bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi & ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
+ bi = bi & bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl & sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl & bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl & sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul & ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul & bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl & ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
+ bl = bl & bl2;
+}
+
+void test_and_assign (void)
+{
+// CHECK-LABEL: test_and_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ sc &= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ sc &= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ uc &= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ uc &= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+ bc &= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ ss &= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ ss &= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ us &= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ us &= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
+ bs &= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ si &= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ si &= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ ui &= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ ui &= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
+ bi &= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ sl &= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ sl &= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ ul &= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ ul &= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
+ bl &= bl2;
+}
+
+void test_or (void)
+{
+// CHECK-LABEL: test_or
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc | sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = sc | bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ sc = bc | sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc | uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = uc | bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ uc = bc | uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+ bc = bc | bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss | ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = ss | bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ ss = bs | ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = us | us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = us | bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ us = bs | us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
+ bs = bs | bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = si | si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = si | bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ si = bi | si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui | ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = ui | bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ ui = bi | ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
+ bi = bi | bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl | sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = sl | bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ sl = bl | sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul | ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = ul | bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ ul = bl | ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
+ bl = bl | bl2;
+}
+
+void test_or_assign (void)
+{
+// CHECK-LABEL: test_or_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ sc |= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ sc |= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ uc |= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ uc |= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+ bc |= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ ss |= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ ss |= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ us |= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ us |= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
+ bs |= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ si |= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ si |= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ ui |= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ ui |= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
+ bi |= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ sl |= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ sl |= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ ul |= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ ul |= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
+ bl |= bl2;
+}
+
+void test_xor (void)
+{
+// CHECK-LABEL: test_xor
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc ^ sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = sc ^ bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ sc = bc ^ sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc ^ uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = uc ^ bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ uc = bc ^ uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+ bc = bc ^ bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss ^ ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = ss ^ bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ ss = bs ^ ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = us ^ us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = us ^ bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ us = bs ^ us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
+ bs = bs ^ bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = si ^ si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = si ^ bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ si = bi ^ si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui ^ ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = ui ^ bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ ui = bi ^ ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
+ bi = bi ^ bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl ^ sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = sl ^ bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ sl = bl ^ sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul ^ ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = ul ^ bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ ul = bl ^ ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
+ bl = bl ^ bl2;
+}
+
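+/* Compound assignment: the CHECK lines below expect the right-hand operand
+   to be loaded before the left-hand operand it is combined with. */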
+void test_xor_assign (void)
+{
+// CHECK-LABEL: test_xor_assign
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ sc ^= sc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ sc ^= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ uc ^= uc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ uc ^= bc2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+ bc ^= bc2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ ss ^= ss2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ ss ^= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ us ^= us2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ us ^= bs2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
+ bs ^= bs2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ si ^= si2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ si ^= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ ui ^= ui2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ ui ^= bi2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
+ bi ^= bi2;
+
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ sl ^= sl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ sl ^= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ ul ^= ul2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ ul ^= bl2;
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
+ bl ^= bl2;
+}
+
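+/* Vector shift left.  A vector count is used directly; a scalar count is
+   splatted via insertelement/shufflevector and, where the widths differ,
+   truncated or zero-extended to the element type; a constant count becomes
+   a splat vector constant. */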
+void test_sl (void)
+{
+// CHECK-LABEL: test_sl
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc = sc << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc = sc << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc = uc << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc = uc << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss = ss << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss = ss << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us = us << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us = us << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si = si << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si = si << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui = ui << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui = ui << 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl = sl << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl = sl << 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul = ul << cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul = ul << 5;
+}
+
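+/* As above, but for <<=, where the shift count is evaluated first. */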
+void test_sl_assign (void)
+{
+// CHECK-LABEL: test_sl_assign
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ sc <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+ uc <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ ss <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
+ us <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ si <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
+ ui <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui <<= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ sl <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl <<= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
+ ul <<= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul <<= 5;
+}
+
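+/* Vector shift right: arithmetic (ashr) for signed element types, logical
+   (lshr) for unsigned element types. */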
+void test_sr (void)
+{
+// CHECK-LABEL: test_sr
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc = sc >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc = sc >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> sc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> uc2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc = uc >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc = uc >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss = ss >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss = ss >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> ss2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> us2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us = us >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us = us >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si = si >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si = si >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> si2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> ui2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui = ui >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui = ui >> 5;
+
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl = sl >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl = sl >> 5;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> sl2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> ul2;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul = ul >> cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul = ul >> 5;
+}
+
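+/* >>= keeps the same ashr/lshr split, with the count evaluated first. */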
+void test_sr_assign (void)
+{
+// CHECK-LABEL: test_sr_assign
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+ sc >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ sc >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= sc2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= uc2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
+ uc >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ uc >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
+ ss >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ss >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= ss2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= us2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
+ us >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ us >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
+ si >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ si >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= si2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= ui2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
+ ui >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
+ ui >>= 5;
+
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
+ sl >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
+ sl >>= 5;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= sl2;
+// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= ul2;
+// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
+// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
+// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
+ ul >>= cnt;
+// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
+ ul >>= 5;
+}
+
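+/* Comparisons produce a bool vector: an element-wise icmp (or fcmp for
+   vector double) whose i1 result is sign-extended to the element width. */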
+void test_cmpeq (void)
+{
+// CHECK-LABEL: test_cmpeq
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc == sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc == bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc == uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc == bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc == bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss == ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss == bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us == us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us == bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs == bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si == si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si == bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui == ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui == bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi == bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl == sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl == bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul == ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul == bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl == bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp oeq <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd == fd2;
+}
+
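+/* != maps to icmp ne for integer elements and fcmp une for vector double. */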
+void test_cmpne (void)
+{
+// CHECK-LABEL: test_cmpne
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc != sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc != bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc != uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc != bc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc != bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss != ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss != bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us != us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us != bs2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs != bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si != si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si != bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui != ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui != bi2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi != bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl != sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl != bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul != ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul != bl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl != bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp une <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd != fd2;
+}
+
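+/* >= picks the predicate by operand type: signed sge, unsigned uge (also
+   used for bool vectors), and ordered oge for vector double. */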
+void test_cmpge (void)
+{
+// CHECK-LABEL: test_cmpge
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc >= sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc >= uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc >= bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss >= ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us >= us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs >= bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si >= si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui >= ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi >= bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl >= sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul >= ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl >= bl2;
+
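+// Ordered comparison: >= must be false when an operand is NaN, hence
+// fcmp "oge" rather than "uge".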
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp oge <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd >= fd2;
+}
+
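+// Same pattern for >: icmp sgt/ugt (ugt for bool vectors), or fcmp ogt
+// for double, then a sign extension of the comparison mask.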
+void test_cmpgt (void)
+{
+// CHECK-LABEL: test_cmpgt
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc > sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc > uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc > bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss > ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us > us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs > bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si > si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui > ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi > bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sgt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl > sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul > ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl > bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp ogt <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd > fd2;
+}
+
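+// Same pattern for <=: icmp sle/ule, or fcmp ole for double, then a
+// sign extension of the comparison mask.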
+void test_cmple (void)
+{
+// CHECK-LABEL: test_cmple
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc <= sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc <= uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc <= bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss <= ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us <= us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs <= bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si <= si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui <= ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi <= bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp sle <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl <= sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul <= ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl <= bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp ole <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd <= fd2;
+}
+
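+// Same pattern for <: icmp slt/ult, or fcmp olt for double, then a
+// sign extension of the comparison mask.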
+void test_cmplt (void)
+{
+// CHECK-LABEL: test_cmplt
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = sc < sc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = uc < uc2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+ bc = bc < bc2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = ss < ss2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = us < us2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
+ bs = bs < bs2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = si < si2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = ui < ui2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
+ bi = bi < bi2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
+// CHECK: [[CMP:%[^ ]+]] = icmp slt <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = sl < sl2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = ul < ul2;
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
+// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = bl < bl2;
+
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
+// CHECK: [[CMP:%[^ ]+]] = fcmp olt <2 x double> [[VAL1]], [[VAL2]]
+// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
+ bl = fd < fd2;
+}
+