Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/2006-05-19-SingleEltReturn.c | 11
-rw-r--r--  test/CodeGen/Inputs/debug-info-macro.h | 12
-rw-r--r--  test/CodeGen/Inputs/pgo-sample-thinlto-summary.prof | 4
-rw-r--r--  test/CodeGen/aarch64-neon-intrinsics.c | 2
-rw-r--r--  test/CodeGen/address-space.c | 13
-rw-r--r--  test/CodeGen/alloc-align-attr.c | 101
-rw-r--r--  test/CodeGen/alloc-size.c | 2
-rw-r--r--  test/CodeGen/altivec.c | 2
-rw-r--r--  test/CodeGen/avr-inline-asm-constraints.c | 124
-rw-r--r--  test/CodeGen/avr-unsupported-inline-asm-constraints.c | 8
-rw-r--r--  test/CodeGen/avr/attributes/interrupt.c | 6
-rw-r--r--  test/CodeGen/avr/attributes/signal.c | 6
-rw-r--r--  test/CodeGen/avr/target-cpu-defines/atmega328p.c | 7
-rw-r--r--  test/CodeGen/avr/target-cpu-defines/attiny104.c | 7
-rw-r--r--  test/CodeGen/avr/target-cpu-defines/common.c | 6
-rw-r--r--  test/CodeGen/avx-builtins.c | 28
-rw-r--r--  test/CodeGen/avx2-builtins.c | 36
-rw-r--r--  test/CodeGen/avx512-reduceMinMaxIntrin.c | 10
-rw-r--r--  test/CodeGen/avx512bw-builtins.c | 56
-rw-r--r--  test/CodeGen/avx512dq-builtins.c | 105
-rw-r--r--  test/CodeGen/avx512f-builtins.c | 295
-rw-r--r--  test/CodeGen/avx512pf-builtins.c | 32
-rw-r--r--  test/CodeGen/avx512vl-builtins.c | 62
-rw-r--r--  test/CodeGen/avx512vlbw-builtins.c | 12
-rw-r--r--  test/CodeGen/avx512vldq-builtins.c | 67
-rw-r--r--  test/CodeGen/blocks.c | 34
-rw-r--r--  test/CodeGen/builtin-clflushopt.c | 2
-rw-r--r--  test/CodeGen/builtin-clzero.c | 9
-rw-r--r--  test/CodeGen/builtins-mips-msa-error.c | 14
-rw-r--r--  test/CodeGen/builtins-mips-msa.c | 7
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c | 42
-rw-r--r--  test/CodeGen/builtins-ppc-crypto-disabled.c | 6
-rw-r--r--  test/CodeGen/builtins-ppc-crypto.c | 4
-rw-r--r--  test/CodeGen/builtins-ppc-error.c | 4
-rw-r--r--  test/CodeGen/builtins-ppc-htm.c | 2
-rw-r--r--  test/CodeGen/builtins-ppc-p8vector.c | 6
-rw-r--r--  test/CodeGen/builtins-ppc-p9vector.c | 4
-rw-r--r--  test/CodeGen/builtins-ppc-quadword.c | 6
-rw-r--r--  test/CodeGen/builtins-ppc-vsx.c | 4
-rw-r--r--  test/CodeGen/builtins-wasm.c | 8
-rw-r--r--  test/CodeGen/builtins-x86.c | 5
-rw-r--r--  test/CodeGen/catch-undef-behavior.c | 21
-rw-r--r--  test/CodeGen/cfi-check-fail.c | 5
-rw-r--r--  test/CodeGen/cleanup-destslot-simple.c | 8
-rw-r--r--  test/CodeGen/compound-assign-overflow.c | 4
-rw-r--r--  test/CodeGen/debug-info-macro.c | 57
-rw-r--r--  test/CodeGen/default-address-space.c | 58
-rw-r--r--  test/CodeGen/fentry.c | 11
-rw-r--r--  test/CodeGen/ffp-contract-fast-option.cpp | 29
-rw-r--r--  test/CodeGen/ffp-contract-option.c | 6
-rw-r--r--  test/CodeGen/fp-contract-fast-pragma.cpp | 69
-rw-r--r--  test/CodeGen/fp-contract-on-pragma.cpp | 76
-rw-r--r--  test/CodeGen/function-sections.c | 8
-rw-r--r--  test/CodeGen/libcall-declarations.c | 15
-rw-r--r--  test/CodeGen/lifetime-asan.c | 8
-rw-r--r--  test/CodeGen/lifetime2.c | 52
-rw-r--r--  test/CodeGen/mmx-builtins.c | 87
-rw-r--r--  test/CodeGen/ms-declspecs.c | 2
-rw-r--r--  test/CodeGen/ms-inline-asm-EVEN.c | 16
-rw-r--r--  test/CodeGen/ms-inline-asm.c | 21
-rw-r--r--  test/CodeGen/ms-intrinsics.c | 39
-rw-r--r--  test/CodeGen/ms-x86-intrinsics.c | 65
-rw-r--r--  test/CodeGen/object-size.c | 110
-rw-r--r--  test/CodeGen/object-size.cpp | 14
-rw-r--r--  test/CodeGen/opt-record-MIR.c | 33
-rw-r--r--  test/CodeGen/pass-object-size.c | 57
-rw-r--r--  test/CodeGen/pgo-sample-thinlto-summary.c | 42
-rw-r--r--  test/CodeGen/ppc64-align-struct.c | 2
-rw-r--r--  test/CodeGen/ppc64-complex-parms.c | 2
-rw-r--r--  test/CodeGen/ppc64-vector.c | 2
-rw-r--r--  test/CodeGen/ppc64le-aggregates.c | 2
-rw-r--r--  test/CodeGen/pr3997.c | 13
-rw-r--r--  test/CodeGen/sanitize-init-order.cpp | 4
-rw-r--r--  test/CodeGen/sanitize-recover.c | 15
-rw-r--r--  test/CodeGen/sanitize-thread-no-checking-at-run-time.m | 15
-rw-r--r--  test/CodeGen/sse-builtins.c | 2
-rw-r--r--  test/CodeGen/sse2-builtins.c | 4
-rw-r--r--  test/CodeGen/sse41-builtins.c | 2
-rw-r--r--  test/CodeGen/temporary-lifetime-exceptions.cpp | 12
-rw-r--r--  test/CodeGen/temporary-lifetime.cpp | 48
-rw-r--r--  test/CodeGen/thin_link_bitcode.c | 9
-rw-r--r--  test/CodeGen/thinlto-emit-llvm.c | 10
-rw-r--r--  test/CodeGen/thinlto-multi-module.ll | 22
-rw-r--r--  test/CodeGen/transparent-union.c | 11
-rw-r--r--  test/CodeGen/ubsan-promoted-arith.cpp | 131
-rw-r--r--  test/CodeGen/ubsan-shift.c | 47
-rw-r--r--  test/CodeGen/unaligned-decl.c | 22
-rw-r--r--  test/CodeGen/unaligned-expr.c | 217
-rw-r--r--  test/CodeGen/unaligned-field.c | 17
-rw-r--r--  test/CodeGen/unsigned-promotion.c | 113
-rw-r--r--  test/CodeGen/xop-builtins-cmp.c | 405
-rw-r--r--  test/CodeGen/xop-builtins.c | 10
-rw-r--r--  test/CodeGen/xray-always-instrument.cpp | 15
-rw-r--r--  test/CodeGen/xray-attributes-supported-arm.cpp | 13
-rw-r--r--  test/CodeGen/xray-attributes-supported.cpp | 6
-rw-r--r--  test/CodeGen/xray-instruction-threshold.cpp | 14
-rw-r--r--  test/CodeGen/xray-log-args.cpp | 13
-rw-r--r--  test/CodeGen/zvector.c | 4502
98 files changed, 5200 insertions, 2584 deletions
diff --git a/test/CodeGen/2006-05-19-SingleEltReturn.c b/test/CodeGen/2006-05-19-SingleEltReturn.c
index 819237ce53bef..dfc23f84ab424 100644
--- a/test/CodeGen/2006-05-19-SingleEltReturn.c
+++ b/test/CodeGen/2006-05-19-SingleEltReturn.c
@@ -1,12 +1,13 @@
// Test returning a single element aggregate value containing a double.
+// RUN: %clang_cc1 -triple i686-linux %s -emit-llvm -o - | FileCheck %s --check-prefix=X86_32
// RUN: %clang_cc1 %s -emit-llvm -o -
struct X {
double D;
};
-struct Y {
- struct X x;
+struct Y {
+ struct X x;
};
struct Y bar();
@@ -21,3 +22,9 @@ struct Y bar() {
return a;
}
+
+// X86_32: define void @foo(%struct.Y* %P)
+// X86_32: call void @bar(%struct.Y* sret %{{[^),]*}})
+
+// X86_32: define void @bar(%struct.Y* noalias sret %{{[^,)]*}})
+// X86_32: ret void
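
A hypothetical C-level sketch (not part of this patch, with made-up names) of the sret lowering the X86_32 lines above assert: on i686, a function returning struct Y is rewritten to take a hidden pointer to the caller's result slot.

/* Hypothetical model of the sret ABI rewrite:
   "struct Y bar(void)" becomes "void bar(struct Y *out)". */
struct X_model { double D; };
struct Y_model { struct X_model x; };
static void bar_sret_model(struct Y_model *out) {  /* hidden sret pointer */
  out->x.D = 0.0;                                  /* fill caller's slot  */
}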
diff --git a/test/CodeGen/Inputs/debug-info-macro.h b/test/CodeGen/Inputs/debug-info-macro.h
new file mode 100644
index 0000000000000..f71d5c3343d25
--- /dev/null
+++ b/test/CodeGen/Inputs/debug-info-macro.h
@@ -0,0 +1,12 @@
+
+#ifdef D1
+/*Line 3*/ #define A(x, y, z) (x)
+#endif
+
+#ifdef D2
+/*Line 7*/ #define A(x, y, z) (y)
+#endif
+
+#ifdef A
+/*Line 11*/ #undef A
+#endif
diff --git a/test/CodeGen/Inputs/pgo-sample-thinlto-summary.prof b/test/CodeGen/Inputs/pgo-sample-thinlto-summary.prof
new file mode 100644
index 0000000000000..d3680dc1f724d
--- /dev/null
+++ b/test/CodeGen/Inputs/pgo-sample-thinlto-summary.prof
@@ -0,0 +1,4 @@
+bar:100:100
+ 2: 2000 foo:2000
+icp:100:100
+ 1: 1000 unroll:1000
diff --git a/test/CodeGen/aarch64-neon-intrinsics.c b/test/CodeGen/aarch64-neon-intrinsics.c
index 2ffbcdce372c3..54877e9d8cd97 100644
--- a/test/CodeGen/aarch64-neon-intrinsics.c
+++ b/test/CodeGen/aarch64-neon-intrinsics.c
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
-// RUN: -fallow-half-arguments-and-returns -ffp-contract=fast -S -emit-llvm -o - %s \
+// RUN: -fallow-half-arguments-and-returns -S -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg \
// RUN: | FileCheck %s
diff --git a/test/CodeGen/address-space.c b/test/CodeGen/address-space.c
index 61deb2625336a..5d57d5b6ebe5e 100644
--- a/test/CodeGen/address-space.c
+++ b/test/CodeGen/address-space.c
@@ -1,4 +1,6 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm < %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm < %s | FileCheck -check-prefixes=CHECK,GIZ %s
+// RUN: %clang_cc1 -triple amdgcn -emit-llvm < %s | FileCheck -check-prefixes=CHECK,PIZ %s
+// RUN: %clang_cc1 -triple amdgcn---amdgiz -emit-llvm < %s | FileCheck -check-prefixes=CHECK,GIZ %s
// CHECK: @foo = common addrspace(1) global
int foo __attribute__((address_space(1)));
@@ -6,6 +8,9 @@ int foo __attribute__((address_space(1)));
// CHECK: @ban = common addrspace(1) global
int ban[10] __attribute__((address_space(1)));
+// CHECK: @a = common global
+int a __attribute__((address_space(0)));
+
// CHECK-LABEL: define i32 @test1()
// CHECK: load i32, i32 addrspace(1)* @foo
int test1() { return foo; }
@@ -19,9 +24,11 @@ int test2(int i) { return ban[i]; }
__attribute__((address_space(2))) int *A, *B;
// CHECK-LABEL: define void @test3()
-// CHECK: load i32 addrspace(2)*, i32 addrspace(2)** @B
+// GIZ: load i32 addrspace(2)*, i32 addrspace(2)** @B
+// PIZ: load i32 addrspace(2)*, i32 addrspace(2)* addrspace(4)* @B
// CHECK: load i32, i32 addrspace(2)*
-// CHECK: load i32 addrspace(2)*, i32 addrspace(2)** @A
+// GIZ: load i32 addrspace(2)*, i32 addrspace(2)** @A
+// PIZ: load i32 addrspace(2)*, i32 addrspace(2)* addrspace(4)* @A
// CHECK: store i32 {{.*}}, i32 addrspace(2)*
void test3() {
*A = *B;
diff --git a/test/CodeGen/alloc-align-attr.c b/test/CodeGen/alloc-align-attr.c
new file mode 100644
index 0000000000000..b7cfcf76d4510
--- /dev/null
+++ b/test/CodeGen/alloc-align-attr.c
@@ -0,0 +1,101 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s
+
+__INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));
+
+// Condition where parameter to m1 is not size_t.
+__INT32_TYPE__ test1(__INT32_TYPE__ a) {
+// CHECK: define i32 @test1
+ return *m1(a);
+// CHECK: call i32* @m1(i32 [[PARAM1:%[^\)]+]])
+// CHECK: [[ALIGNCAST1:%.+]] = sext i32 [[PARAM1]] to i64
+// CHECK: [[ISPOS1:%.+]] = icmp sgt i64 [[ALIGNCAST1]], 0
+// CHECK: [[POSMASK1:%.+]] = sub i64 [[ALIGNCAST1]], 1
+// CHECK: [[MASK1:%.+]] = select i1 [[ISPOS1]], i64 [[POSMASK1]], i64 0
+// CHECK: [[PTRINT1:%.+]] = ptrtoint
+// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], [[MASK1]]
+// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
+// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
+}
+// Condition where test2 param needs casting.
+__INT32_TYPE__ test2(__SIZE_TYPE__ a) {
+// CHECK: define i32 @test2
+ return *m1(a);
+// CHECK: [[CONV2:%.+]] = trunc i64 %{{.+}} to i32
+// CHECK: call i32* @m1(i32 [[CONV2]])
+// CHECK: [[ALIGNCAST2:%.+]] = sext i32 [[CONV2]] to i64
+// CHECK: [[ISPOS2:%.+]] = icmp sgt i64 [[ALIGNCAST2]], 0
+// CHECK: [[POSMASK2:%.+]] = sub i64 [[ALIGNCAST2]], 1
+// CHECK: [[MASK2:%.+]] = select i1 [[ISPOS2]], i64 [[POSMASK2]], i64 0
+// CHECK: [[PTRINT2:%.+]] = ptrtoint
+// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], [[MASK2]]
+// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
+// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
+}
+__INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));
+
+// test3 param needs casting, but 'm2' is correct.
+__INT32_TYPE__ test3(__INT32_TYPE__ a) {
+// CHECK: define i32 @test3
+ return *m2(a);
+// CHECK: [[CONV3:%.+]] = sext i32 %{{.+}} to i64
+// CHECK: call i32* @m2(i64 [[CONV3]])
+// CHECK: [[ISPOS3:%.+]] = icmp sgt i64 [[CONV3]], 0
+// CHECK: [[POSMASK3:%.+]] = sub i64 [[CONV3]], 1
+// CHECK: [[MASK3:%.+]] = select i1 [[ISPOS3]], i64 [[POSMASK3]], i64 0
+// CHECK: [[PTRINT3:%.+]] = ptrtoint
+// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], [[MASK3]]
+// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
+// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
+}
+
+// Every type matches, canonical example.
+__INT32_TYPE__ test4(__SIZE_TYPE__ a) {
+// CHECK: define i32 @test4
+ return *m2(a);
+// CHECK: call i32* @m2(i64 [[PARAM4:%[^\)]+]])
+// CHECK: [[ISPOS4:%.+]] = icmp sgt i64 [[PARAM4]], 0
+// CHECK: [[POSMASK4:%.+]] = sub i64 [[PARAM4]], 1
+// CHECK: [[MASK4:%.+]] = select i1 [[ISPOS4]], i64 [[POSMASK4]], i64 0
+// CHECK: [[PTRINT4:%.+]] = ptrtoint
+// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[PTRINT4]], [[MASK4]]
+// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
+// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
+}
+
+
+struct Empty {};
+struct MultiArgs { __INT64_TYPE__ a, b;};
+// Struct parameter doesn't take up an IR parameter, 'i' takes up 2.
+// Truncation to i64 is permissible, since alignments greater than 2^64 are insane.
+__INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
+__INT32_TYPE__ test5(__int128_t a) {
+// CHECK: define i32 @test5
+ struct Empty e;
+ return *m3(e, a);
+// CHECK: call i32* @m3(i64 %{{.*}}, i64 %{{.*}})
+// CHECK: [[ALIGNCAST5:%.+]] = trunc i128 %{{.*}} to i64
+// CHECK: [[ISPOS5:%.+]] = icmp sgt i64 [[ALIGNCAST5]], 0
+// CHECK: [[POSMASK5:%.+]] = sub i64 [[ALIGNCAST5]], 1
+// CHECK: [[MASK5:%.+]] = select i1 [[ISPOS5]], i64 [[POSMASK5]], i64 0
+// CHECK: [[PTRINT5:%.+]] = ptrtoint
+// CHECK: [[MASKEDPTR5:%.+]] = and i64 [[PTRINT5]], [[MASK5]]
+// CHECK: [[MASKCOND5:%.+]] = icmp eq i64 [[MASKEDPTR5]], 0
+// CHECK: call void @llvm.assume(i1 [[MASKCOND5]])
+}
+// Struct parameter takes up 2 parameters, 'i' takes up 2.
+__INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
+__INT32_TYPE__ test6(__int128_t a) {
+// CHECK: define i32 @test6
+ struct MultiArgs e;
+ return *m4(e, a);
+// CHECK: call i32* @m4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+// CHECK: [[ALIGNCAST6:%.+]] = trunc i128 %{{.*}} to i64
+// CHECK: [[ISPOS6:%.+]] = icmp sgt i64 [[ALIGNCAST6]], 0
+// CHECK: [[POSMASK6:%.+]] = sub i64 [[ALIGNCAST6]], 1
+// CHECK: [[MASK6:%.+]] = select i1 [[ISPOS6]], i64 [[POSMASK6]], i64 0
+// CHECK: [[PTRINT6:%.+]] = ptrtoint
+// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[PTRINT6]], [[MASK6]]
+// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
+// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
+}
+
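
A minimal scalar sketch (hypothetical, not part of the patch) of what the repeated ISPOS/POSMASK/MASKCOND CHECK pattern above computes: the alloc_align argument is widened or truncated to i64, a mask of (a > 0 ? a - 1 : 0) is built, and llvm.assume asserts that the returned pointer has no bits set inside that mask.

#include <stdint.h>
/* Hypothetical model of the assume sequence the CHECK lines verify. */
static int assume_aligned_model(void *p, int64_t a) {
  int64_t mask = (a > 0) ? a - 1 : 0;     /* ISPOS / POSMASK / MASK        */
  return (((uintptr_t)p) & mask) == 0;    /* PTRINT / MASKEDPTR / MASKCOND */
}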
diff --git a/test/CodeGen/alloc-size.c b/test/CodeGen/alloc-size.c
index 1e503f0579c99..1c98b6874da29 100644
--- a/test/CodeGen/alloc-size.c
+++ b/test/CodeGen/alloc-size.c
@@ -231,7 +231,7 @@ void test7() {
void test8() {
// Non-const pointers aren't currently supported.
void *buf = my_calloc(100, 5);
- // CHECK: @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(buf, 0);
// CHECK: @llvm.objectsize
gi = __builtin_object_size(buf, 1);
diff --git a/test/CodeGen/altivec.c b/test/CodeGen/altivec.c
index 29823031b56ad..a4d38fa23b75c 100644
--- a/test/CodeGen/altivec.c
+++ b/test/CodeGen/altivec.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// Check initialization
diff --git a/test/CodeGen/avr-inline-asm-constraints.c b/test/CodeGen/avr-inline-asm-constraints.c
new file mode 100644
index 0000000000000..f1bfbac38978e
--- /dev/null
+++ b/test/CodeGen/avr-inline-asm-constraints.c
@@ -0,0 +1,124 @@
+// REQUIRES: avr-registered-target
+// RUN: %clang_cc1 -triple avr-unknown-unknown -emit-llvm -o - %s | FileCheck %s
+
+int data;
+
+void a() {
+ // CHECK: call void asm sideeffect "add r5, $0", "a"(i16 %0)
+ asm("add r5, %0" :: "a"(data));
+}
+
+void b() {
+ // CHECK: call void asm sideeffect "add r5, $0", "b"(i16 %0)
+ asm("add r5, %0" :: "b"(data));
+}
+
+void d() {
+ // CHECK: call void asm sideeffect "add r5, $0", "d"(i16 %0)
+ asm("add r5, %0" :: "d"(data));
+}
+
+void l() {
+ // CHECK: call void asm sideeffect "add r5, $0", "l"(i16 %0)
+ asm("add r5, %0" :: "l"(data));
+}
+
+void e() {
+ // CHECK: call void asm sideeffect "add r5, $0", "e"(i16 %0)
+ asm("add r5, %0" :: "e"(data));
+}
+
+void q() {
+ // CHECK: call void asm sideeffect "add r5, $0", "q"(i16 %0)
+ asm("add r5, %0" :: "q"(data));
+}
+
+void r() {
+ // CHECK: call void asm sideeffect "add r5, $0", "r"(i16 %0)
+ asm("add r5, %0" :: "r"(data));
+}
+
+void w() {
+ // CHECK: call void asm sideeffect "add r5, $0", "w"(i16 %0)
+ asm("add r5, %0" :: "w"(data));
+}
+
+void t() {
+ // CHECK: call void asm sideeffect "add r5, $0", "t"(i16 %0)
+ asm("add r5, %0" :: "t"(data));
+}
+
+void x() {
+ // CHECK: call void asm sideeffect "add r5, $0", "x"(i16 %0)
+ asm("add r5, %0" :: "x"(data));
+}
+
+void y() {
+ // CHECK: call void asm sideeffect "add r5, $0", "y"(i16 %0)
+ asm("add r5, %0" :: "y"(data));
+}
+
+void z() {
+ // CHECK: call void asm sideeffect "add r5, $0", "z"(i16 %0)
+ asm("add r5, %0" :: "z"(data));
+}
+
+void I() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "I"(i16 50)
+ asm("subi r30, %0" :: "I"(50));
+}
+
+void J() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "J"(i16 -50)
+ asm("subi r30, %0" :: "J"(-50));
+}
+
+void K() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "K"(i16 2)
+ asm("subi r30, %0" :: "K"(2));
+}
+
+void L() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "L"(i16 0)
+ asm("subi r30, %0" :: "L"(0));
+}
+
+void M() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "M"(i16 255)
+ asm("subi r30, %0" :: "M"(255));
+}
+
+void O() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "O"(i16 16)
+ asm("subi r30, %0" :: "O"(16));
+}
+
+void P() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "P"(i16 1)
+ asm("subi r30, %0" :: "P"(1));
+}
+
+void R() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "R"(i16 -3)
+ asm("subi r30, %0" :: "R"(-3));
+}
+
+void G() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "G"(i16 50)
+ asm("subi r30, %0" :: "G"(50));
+}
+
+void Q() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "Q"(i16 50)
+ asm("subi r30, %0" :: "Q"(50));
+}
+
+void ra() {
+ // CHECK: call void asm sideeffect "subi r30, $0", "ra"(i16 50)
+ asm("subi r30, %0" :: "ra"(50));
+}
+
+void ora() {
+ // CHECK: call i16 asm "subi r30, $0", "=ra"()
+ asm("subi r30, %0" : "=ra"(data));
+}
diff --git a/test/CodeGen/avr-unsupported-inline-asm-constraints.c b/test/CodeGen/avr-unsupported-inline-asm-constraints.c
new file mode 100644
index 0000000000000..5a875979a9835
--- /dev/null
+++ b/test/CodeGen/avr-unsupported-inline-asm-constraints.c
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -triple avr-unknown-unknown -verify %s
+
+const unsigned char val = 0;
+
+int foo() {
+ __asm__ volatile("foo %0, 1" : : "fo" (val)); // expected-error {{invalid input constraint 'fo' in asm}}
+ __asm__ volatile("foo %0, 1" : : "Nd" (val)); // expected-error {{invalid input constraint 'Nd' in asm}}
+}
diff --git a/test/CodeGen/avr/attributes/interrupt.c b/test/CodeGen/avr/attributes/interrupt.c
new file mode 100644
index 0000000000000..31b7ebb4f03d6
--- /dev/null
+++ b/test/CodeGen/avr/attributes/interrupt.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple avr-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+
+// CHECK: define void @foo() #0
+__attribute__((interrupt)) void foo(void) { }
+
+// CHECK: attributes #0 = {{{.*interrupt.*}}}
diff --git a/test/CodeGen/avr/attributes/signal.c b/test/CodeGen/avr/attributes/signal.c
new file mode 100644
index 0000000000000..82859000060e2
--- /dev/null
+++ b/test/CodeGen/avr/attributes/signal.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple avr-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+
+// CHECK: define void @foo() #0
+__attribute__((signal)) void foo(void) { }
+
+// CHECK: attributes #0 = {{{.*signal.*}}}
diff --git a/test/CodeGen/avr/target-cpu-defines/atmega328p.c b/test/CodeGen/avr/target-cpu-defines/atmega328p.c
new file mode 100644
index 0000000000000..83b0accbb93f1
--- /dev/null
+++ b/test/CodeGen/avr/target-cpu-defines/atmega328p.c
@@ -0,0 +1,7 @@
+// REQUIRES: avr-registered-target
+// RUN: %clang_cc1 -E -dM -triple avr-unknown-unknown -target-cpu atmega328p /dev/null | FileCheck -match-full-lines %s
+
+// CHECK: #define AVR 1
+// CHECK: #define __AVR 1
+// CHECK: #define __AVR_ATmega328P__ 1
+// CHECK: #define __AVR__ 1
diff --git a/test/CodeGen/avr/target-cpu-defines/attiny104.c b/test/CodeGen/avr/target-cpu-defines/attiny104.c
new file mode 100644
index 0000000000000..7de65bf09ba3d
--- /dev/null
+++ b/test/CodeGen/avr/target-cpu-defines/attiny104.c
@@ -0,0 +1,7 @@
+// REQUIRES: avr-registered-target
+// RUN: %clang_cc1 -E -dM -triple avr-unknown-unknown -target-cpu attiny104 /dev/null | FileCheck -match-full-lines %s
+
+// CHECK: #define AVR 1
+// CHECK: #define __AVR 1
+// CHECK: #define __AVR_ATtiny104__ 1
+// CHECK: #define __AVR__ 1
diff --git a/test/CodeGen/avr/target-cpu-defines/common.c b/test/CodeGen/avr/target-cpu-defines/common.c
new file mode 100644
index 0000000000000..0b11f5dbdd596
--- /dev/null
+++ b/test/CodeGen/avr/target-cpu-defines/common.c
@@ -0,0 +1,6 @@
+// REQUIRES: avr-registered-target
+// RUN: %clang_cc1 -E -dM -triple avr-unknown-unknown /dev/null | FileCheck -match-full-lines %s
+
+// CHECK: #define AVR 1
+// CHECK: #define __AVR 1
+// CHECK: #define __AVR__ 1
diff --git a/test/CodeGen/avx-builtins.c b/test/CodeGen/avx-builtins.c
index 6a9f7c050b32e..4832664081796 100644
--- a/test/CodeGen/avx-builtins.c
+++ b/test/CodeGen/avx-builtins.c
@@ -346,19 +346,19 @@ long long test_mm256_extract_epi64(__m256i A) {
__m128d test_mm256_extractf128_pd(__m256d A) {
// CHECK-LABEL: test_mm256_extractf128_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <2 x i32> <i32 2, i32 3>
return _mm256_extractf128_pd(A, 1);
}
__m128 test_mm256_extractf128_ps(__m256 A) {
// CHECK-LABEL: test_mm256_extractf128_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf128_ps(A, 1);
}
__m128i test_mm256_extractf128_si256(__m256i A) {
// CHECK-LABEL: test_mm256_extractf128_si256
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 2, i32 3>
return _mm256_extractf128_si256(A, 1);
}
@@ -647,32 +647,32 @@ __m256 test_mm256_or_ps(__m256 A, __m256 B) {
__m128d test_mm_permute_pd(__m128d A) {
// CHECK-LABEL: test_mm_permute_pd
- // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> zeroinitializer, <2 x i32> <i32 1, i32 0>
return _mm_permute_pd(A, 1);
}
__m256d test_mm256_permute_pd(__m256d A) {
// CHECK-LABEL: test_mm256_permute_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
return _mm256_permute_pd(A, 5);
}
__m128 test_mm_permute_ps(__m128 A) {
// CHECK-LABEL: test_mm_permute_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
return _mm_permute_ps(A, 0x1b);
}
// Test case for PR12401
__m128 test2_mm_permute_ps(__m128 a) {
// CHECK-LABEL: test2_mm_permute_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 2, i32 3>
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 1, i32 2, i32 3>
return _mm_permute_ps(a, 0xe6);
}
__m256 test_mm256_permute_ps(__m256 A) {
// CHECK-LABEL: test_mm256_permute_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
return _mm256_permute_ps(A, 0x1b);
}
@@ -1177,7 +1177,7 @@ void test_mm256_storeu2_m128(float* A, float* B, __m256 C) {
// CHECK-LABEL: test_mm256_storeu2_m128
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128(A, B, C);
}
@@ -1186,7 +1186,7 @@ void test_mm256_storeu2_m128d(double* A, double* B, __m256d C) {
// CHECK-LABEL: test_mm256_storeu2_m128d
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <2 x i32> <i32 2, i32 3>
// CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128d(A, B, C);
}
@@ -1195,7 +1195,7 @@ void test_mm256_storeu2_m128i(__m128i* A, __m128i* B, __m256i C) {
// CHECK-LABEL: test_mm256_storeu2_m128i
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 2, i32 3>
// CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128i(A, B, C);
}
@@ -1322,19 +1322,19 @@ int test_mm256_testz_si256(__m256 A, __m256 B) {
__m256 test_mm256_undefined_ps() {
// CHECK-LABEL: @test_mm256_undefined_ps
- // CHECK: ret <8 x float> undef
+ // CHECK: ret <8 x float> zeroinitializer
return _mm256_undefined_ps();
}
__m256d test_mm256_undefined_pd() {
// CHECK-LABEL: @test_mm256_undefined_pd
- // CHECK: ret <4 x double> undef
+ // CHECK: ret <4 x double> zeroinitializer
return _mm256_undefined_pd();
}
__m256i test_mm256_undefined_si256() {
// CHECK-LABEL: @test_mm256_undefined_si256
- // CHECK: ret <4 x i64> undef
+ // CHECK: ret <4 x i64> zeroinitializer
return _mm256_undefined_si256();
}
diff --git a/test/CodeGen/avx2-builtins.c b/test/CodeGen/avx2-builtins.c
index 4ccf6e6da97be..10f3e715de9b5 100644
--- a/test/CodeGen/avx2-builtins.c
+++ b/test/CodeGen/avx2-builtins.c
@@ -368,20 +368,20 @@ __m256i test_mm256_cvtepu32_epi64(__m128i a) {
__m128i test0_mm256_extracti128_si256_0(__m256i a) {
// CHECK-LABEL: test0_mm256_extracti128_si256
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 0, i32 1>
return _mm256_extracti128_si256(a, 0);
}
__m128i test1_mm256_extracti128_si256_1(__m256i a) {
// CHECK-LABEL: test1_mm256_extracti128_si256
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 2, i32 3>
return _mm256_extracti128_si256(a, 1);
}
// Immediate should be truncated to one bit.
__m128i test2_mm256_extracti128_si256(__m256i a) {
// CHECK-LABEL: test2_mm256_extracti128_si256
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 0, i32 1>
return _mm256_extracti128_si256(a, 2);
}
@@ -447,7 +447,7 @@ __m256i test_mm256_mask_i32gather_epi32(__m256i a, int const *b, __m256i c, __m2
__m128i test_mm_i32gather_epi64(long long const *b, __m128i c) {
// CHECK-LABEL: test_mm_i32gather_epi64
- // CHECK: call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> undef, i8* %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}, i8 2)
+ // CHECK: call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> zeroinitializer, i8* %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}, i8 2)
return _mm_i32gather_epi64(b, c, 2);
}
@@ -459,7 +459,7 @@ __m128i test_mm_mask_i32gather_epi64(__m128i a, long long const *b, __m128i c, _
__m256i test_mm256_i32gather_epi64(long long const *b, __m128i c) {
// CHECK-LABEL: test_mm256_i32gather_epi64
- // CHECK: call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8* %{{.*}}, <4 x i32> %{{.*}}, <4 x i64> %{{.*}}, i8 2)
+ // CHECK: call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> zeroinitializer, i8* %{{.*}}, <4 x i32> %{{.*}}, <4 x i64> %{{.*}}, i8 2)
return _mm256_i32gather_epi64(b, c, 2);
}
@@ -474,7 +474,7 @@ __m128d test_mm_i32gather_pd(double const *b, __m128i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <2 x double>
// CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[CMP]] to <2 x i64>
// CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[SEXT]] to <2 x double>
- // CHECK: call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> undef, i8* %{{.*}}, <4 x i32> %{{.*}}, <2 x double> %{{.*}}, i8 2)
+ // CHECK: call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, i8* %{{.*}}, <4 x i32> %{{.*}}, <2 x double> %{{.*}}, i8 2)
return _mm_i32gather_pd(b, c, 2);
}
@@ -489,7 +489,7 @@ __m256d test_mm256_i32gather_pd(double const *b, __m128i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x double>
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i64> [[SEXT]] to <4 x double>
- // CHECK: call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8* %{{.*}}, <4 x i32> %{{.*}}, <4 x double> %{{.*}}, i8 2)
+ // CHECK: call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> zeroinitializer, i8* %{{.*}}, <4 x i32> %{{.*}}, <4 x double> %{{.*}}, i8 2)
return _mm256_i32gather_pd(b, c, 2);
}
@@ -504,7 +504,7 @@ __m128 test_mm_i32gather_ps(float const *b, __m128i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x float>
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
// CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
- // CHECK: call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef, i8* %{{.*}}, <4 x i32> %{{.*}}, <4 x float> %{{.*}}, i8 2)
+ // CHECK: call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> zeroinitializer, i8* %{{.*}}, <4 x i32> %{{.*}}, <4 x float> %{{.*}}, i8 2)
return _mm_i32gather_ps(b, c, 2);
}
@@ -519,7 +519,7 @@ __m256 test_mm256_i32gather_ps(float const *b, __m256i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <8 x float>
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[BC:%.*]] = bitcast <8 x i32> [[SEXT]] to <8 x float>
- // CHECK: call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %{{.*}}, <8 x i32> %{{.*}}, <8 x float> %{{.*}}, i8 2)
+ // CHECK: call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> zeroinitializer, i8* %{{.*}}, <8 x i32> %{{.*}}, <8 x float> %{{.*}}, i8 2)
return _mm256_i32gather_ps(b, c, 2);
}
@@ -555,7 +555,7 @@ __m128i test_mm256_mask_i64gather_epi32(__m128i a, int const *b, __m256i c, __m1
__m128i test_mm_i64gather_epi64(long long const *b, __m128i c) {
// CHECK-LABEL: test_mm_i64gather_epi64
- // CHECK: call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> undef, i8* %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 2)
+ // CHECK: call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> zeroinitializer, i8* %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 2)
return _mm_i64gather_epi64(b, c, 2);
}
@@ -567,7 +567,7 @@ __m128i test_mm_mask_i64gather_epi64(__m128i a, long long const *b, __m128i c, _
__m256i test_mm256_i64gather_epi64(long long const *b, __m256i c) {
// CHECK-LABEL: test_mm256_i64gather_epi64
- // CHECK: call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8* %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, i8 2)
+ // CHECK: call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> zeroinitializer, i8* %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, i8 2)
return _mm256_i64gather_epi64(b, c, 2);
}
@@ -582,7 +582,7 @@ __m128d test_mm_i64gather_pd(double const *b, __m128i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <2 x double>
// CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[CMP]] to <2 x i64>
// CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[SEXT]] to <2 x double>
- // CHECK: call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> undef, i8* %{{.*}}, <2 x i64> %{{.*}}, <2 x double> %{{.*}}, i8 2)
+ // CHECK: call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> zeroinitializer, i8* %{{.*}}, <2 x i64> %{{.*}}, <2 x double> %{{.*}}, i8 2)
return _mm_i64gather_pd(b, c, 2);
}
@@ -597,7 +597,7 @@ __m256d test_mm256_i64gather_pd(double const *b, __m256i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x double>
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i64> [[SEXT]] to <4 x double>
- // CHECK: call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8* %{{.*}}, <4 x i64> %{{.*}}, <4 x double> %{{.*}}, i8 2)
+ // CHECK: call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> zeroinitializer, i8* %{{.*}}, <4 x i64> %{{.*}}, <4 x double> %{{.*}}, i8 2)
return _mm256_i64gather_pd(b, c, 2);
}
@@ -612,7 +612,7 @@ __m128 test_mm_i64gather_ps(float const *b, __m128i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x float>
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
// CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
- // CHECK: call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> undef, i8* %{{.*}}, <2 x i64> %{{.*}}, <4 x float> %{{.*}}, i8 2)
+ // CHECK: call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> zeroinitializer, i8* %{{.*}}, <2 x i64> %{{.*}}, <4 x float> %{{.*}}, i8 2)
return _mm_i64gather_ps(b, c, 2);
}
@@ -627,7 +627,7 @@ __m128 test_mm256_i64gather_ps(float const *b, __m256i c) {
// CHECK: [[CMP:%.*]] = fcmp oeq <4 x float>
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
// CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
- // CHECK: call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8* %{{.*}}, <4 x i64> %{{.*}}, <4 x float> %{{.*}}, i8 2)
+ // CHECK: call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> zeroinitializer, i8* %{{.*}}, <4 x i64> %{{.*}}, <4 x float> %{{.*}}, i8 2)
return _mm256_i64gather_ps(b, c, 2);
}
@@ -895,13 +895,13 @@ __m256i test_mm256_permute2x128_si256(__m256i a, __m256i b) {
__m256i test_mm256_permute4x64_epi64(__m256i a) {
// CHECK-LABEL: test_mm256_permute4x64_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 0>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <4 x i32> <i32 3, i32 0, i32 2, i32 0>
return _mm256_permute4x64_epi64(a, 35);
}
__m256d test_mm256_permute4x64_pd(__m256d a) {
// CHECK-LABEL: test_mm256_permute4x64_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 1, i32 0>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 1, i32 0>
return _mm256_permute4x64_pd(a, 25);
}
@@ -1117,7 +1117,7 @@ __m256i test_mm256_srlv_epi64(__m256i a, __m256i b) {
__m256i test_mm256_stream_load_si256(__m256i const *a) {
// CHECK-LABEL: test_mm256_stream_load_si256
- // CHECK: call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %{{.*}})
+ // CHECK: load <4 x i64>, <4 x i64>* %{{.*}}, align 32, !nontemporal
return _mm256_stream_load_si256(a);
}
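
A hypothetical scalar model (not from the patch) of the gather semantics these CHECK lines exercise, using llvm.x86.avx2.gather.d.q with scale 2 as the example: a lane loads from base + index * scale when its mask element's sign bit is set, and otherwise keeps the passthrough operand — the operand this change switches from undef to zero.

#include <stdint.h>
/* Hypothetical scalar model of a 2-lane masked gather, scale = 2 bytes. */
static void gather_d_q_model(const char *base, const int32_t idx[2],
                             const int64_t mask[2], const int64_t pass[2],
                             int64_t out[2]) {
  for (int i = 0; i < 2; i++)
    out[i] = (mask[i] < 0)  /* sign bit selects the lane */
                 ? *(const int64_t *)(base + (int64_t)idx[i] * 2)
                 : pass[i]; /* passthrough: zero after this change */
}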
diff --git a/test/CodeGen/avx512-reduceMinMaxIntrin.c b/test/CodeGen/avx512-reduceMinMaxIntrin.c
index 8249b229c8f5f..993e2964a19b5 100644
--- a/test/CodeGen/avx512-reduceMinMaxIntrin.c
+++ b/test/CodeGen/avx512-reduceMinMaxIntrin.c
@@ -1,3 +1,5 @@
+// FIXME: We should not be testing with -O2 (ie, a dependency on the entire IR optimizer).
+
// RUN: %clang_cc1 -ffreestanding %s -O2 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror |opt -instnamer -S |FileCheck %s
#include <immintrin.h>
@@ -202,7 +204,7 @@ double test_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __W){
int test_mm512_reduce_max_epi32(__m512i __W){
// CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
// CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp sgt <16 x i32> %tmp, %shuffle1.i
+ // CHECK: %tmp1 = icmp slt <16 x i32> %shuffle1.i, %tmp
// CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
// CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
// CHECK: %tmp3 = icmp sgt <16 x i32> %tmp2, %shuffle3.i
@@ -223,7 +225,7 @@ int test_mm512_reduce_max_epi32(__m512i __W){
unsigned int test_mm512_reduce_max_epu32(__m512i __W){
// CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
// CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp ugt <16 x i32> %tmp, %shuffle1.i
+ // CHECK: %tmp1 = icmp ult <16 x i32> %shuffle1.i, %tmp
// CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
// CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
// CHECK: %tmp3 = icmp ugt <16 x i32> %tmp2, %shuffle3.i
@@ -258,7 +260,7 @@ float test_mm512_reduce_max_ps(__m512 __W){
int test_mm512_reduce_min_epi32(__m512i __W){
// CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
// CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp slt <16 x i32> %tmp, %shuffle1.i
+ // CHECK: %tmp1 = icmp sgt <16 x i32> %shuffle1.i, %tmp
// CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
// CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
// CHECK: %tmp3 = icmp slt <16 x i32> %tmp2, %shuffle3.i
@@ -279,7 +281,7 @@ int test_mm512_reduce_min_epi32(__m512i __W){
unsigned int test_mm512_reduce_min_epu32(__m512i __W){
// CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
// CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp ult <16 x i32> %tmp, %shuffle1.i
+ // CHECK: %tmp1 = icmp ugt <16 x i32> %shuffle1.i, %tmp
// CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
// CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
// CHECK: %tmp3 = icmp ult <16 x i32> %tmp2, %shuffle3.i
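
A hypothetical scalar sketch (not part of the patch) of the halving reduction these CHECK lines trace: each round shuffles the upper half down, compares it against the lower half, and keeps the winning element per lane until one lane remains. The updated icmp forms (sgt versus operand-swapped slt) are equivalent comparisons.

/* Hypothetical scalar model of the shufflevector/icmp/select chain in
   test_mm512_reduce_max_epi32. */
static int reduce_max_i32_model(int v[16]) {
  for (int w = 8; w >= 1; w /= 2)          /* shuffle1.i, shuffle3.i, ... */
    for (int i = 0; i < w; i++)
      v[i] = (v[i] > v[i + w]) ? v[i] : v[i + w];  /* icmp + select */
  return v[0];
}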
diff --git a/test/CodeGen/avx512bw-builtins.c b/test/CodeGen/avx512bw-builtins.c
index b4dfb5ccb9ece..60d2423e9c345 100644
--- a/test/CodeGen/avx512bw-builtins.c
+++ b/test/CodeGen/avx512bw-builtins.c
@@ -510,62 +510,70 @@ __m512i test_mm512_maskz_abs_epi16(__mmask32 __U, __m512i __A) {
}
__m512i test_mm512_packs_epi32(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_packs_epi32
- // CHECK: @llvm.x86.avx512.mask.packssdw.512
+ // CHECK: @llvm.x86.avx512.packssdw.512
return _mm512_packs_epi32(__A,__B);
}
__m512i test_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_packs_epi32
- // CHECK: @llvm.x86.avx512.mask.packssdw.512
+ // CHECK: @llvm.x86.avx512.packssdw.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_packs_epi32(__M,__A,__B);
}
__m512i test_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_packs_epi32
- // CHECK: @llvm.x86.avx512.mask.packssdw.512
+ // CHECK: @llvm.x86.avx512.packssdw.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_packs_epi32(__W,__M,__A,__B);
}
__m512i test_mm512_packs_epi16(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_packs_epi16
- // CHECK: @llvm.x86.avx512.mask.packsswb.512
+ // CHECK: @llvm.x86.avx512.packsswb.512
return _mm512_packs_epi16(__A,__B);
}
__m512i test_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_packs_epi16
- // CHECK: @llvm.x86.avx512.mask.packsswb.512
+ // CHECK: @llvm.x86.avx512.packsswb.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_packs_epi16(__W,__M,__A,__B);
}
__m512i test_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_packs_epi16
- // CHECK: @llvm.x86.avx512.mask.packsswb.512
+ // CHECK: @llvm.x86.avx512.packsswb.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_packs_epi16(__M,__A,__B);
}
__m512i test_mm512_packus_epi32(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_packus_epi32
- // CHECK: @llvm.x86.avx512.mask.packusdw.512
+ // CHECK: @llvm.x86.avx512.packusdw.512
return _mm512_packus_epi32(__A,__B);
}
__m512i test_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_packus_epi32
- // CHECK: @llvm.x86.avx512.mask.packusdw.512
+ // CHECK: @llvm.x86.avx512.packusdw.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_packus_epi32(__M,__A,__B);
}
__m512i test_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_packus_epi32
- // CHECK: @llvm.x86.avx512.mask.packusdw.512
+ // CHECK: @llvm.x86.avx512.packusdw.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_packus_epi32(__W,__M,__A,__B);
}
__m512i test_mm512_packus_epi16(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_packus_epi16
- // CHECK: @llvm.x86.avx512.mask.packuswb.512
+ // CHECK: @llvm.x86.avx512.packuswb.512
return _mm512_packus_epi16(__A,__B);
}
__m512i test_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_packus_epi16
- // CHECK: @llvm.x86.avx512.mask.packuswb.512
+ // CHECK: @llvm.x86.avx512.packuswb.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_packus_epi16(__W,__M,__A,__B);
}
__m512i test_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_packus_epi16
- // CHECK: @llvm.x86.avx512.mask.packuswb.512
+ // CHECK: @llvm.x86.avx512.packuswb.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_packus_epi16(__M,__A,__B);
}
__m512i test_mm512_adds_epi8(__m512i __A, __m512i __B) {
@@ -1535,52 +1543,54 @@ __mmask64 test_mm512_movepi8_mask(__m512i __A) {
__m512i test_mm512_movm_epi8(__mmask64 __A) {
// CHECK-LABEL: @test_mm512_movm_epi8
- // CHECK: @llvm.x86.avx512.cvtmask2b.512
+ // CHECK: %{{.*}} = bitcast i64 %{{.*}} to <64 x i1>
+ // CHECK: %vpmovm2.i = sext <64 x i1> %{{.*}} to <64 x i8>
return _mm512_movm_epi8(__A);
}
__m512i test_mm512_movm_epi16(__mmask32 __A) {
// CHECK-LABEL: @test_mm512_movm_epi16
- // CHECK: @llvm.x86.avx512.cvtmask2w.512
+ // CHECK: %{{.*}} = bitcast i32 %{{.*}} to <32 x i1>
+ // CHECK: %vpmovm2.i = sext <32 x i1> %{{.*}} to <32 x i16>
return _mm512_movm_epi16(__A);
}
__m512i test_mm512_broadcastb_epi8(__m128i __A) {
// CHECK-LABEL: @test_mm512_broadcastb_epi8
- // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <64 x i32> zeroinitializer
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> zeroinitializer, <64 x i32> zeroinitializer
return _mm512_broadcastb_epi8(__A);
}
__m512i test_mm512_mask_broadcastb_epi8(__m512i __O, __mmask64 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_mask_broadcastb_epi8
- // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <64 x i32> zeroinitializer
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> zeroinitializer, <64 x i32> zeroinitializer
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_broadcastb_epi8(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcastb_epi8(__mmask64 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcastb_epi8
- // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <64 x i32> zeroinitializer
+ // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> zeroinitializer, <64 x i32> zeroinitializer
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_broadcastb_epi8(__M, __A);
}
__m512i test_mm512_broadcastw_epi16(__m128i __A) {
// CHECK-LABEL: @test_mm512_broadcastw_epi16
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <32 x i32> zeroinitializer
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> zeroinitializer, <32 x i32> zeroinitializer
return _mm512_broadcastw_epi16(__A);
}
__m512i test_mm512_mask_broadcastw_epi16(__m512i __O, __mmask32 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_mask_broadcastw_epi16
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <32 x i32> zeroinitializer
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> zeroinitializer, <32 x i32> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_broadcastw_epi16(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcastw_epi16(__mmask32 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcastw_epi16
- // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <32 x i32> zeroinitializer
+ // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> zeroinitializer, <32 x i32> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_broadcastw_epi16(__M, __A);
}
@@ -1669,19 +1679,19 @@ void test_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __
{
// CHECK-LABEL: @test_mm512_mask_cvtepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.mem.512
- __builtin_ia32_pmovwb512mem_mask ( __P, __A, __M);
+ _mm512_mask_cvtepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
// CHECK-LABEL: @test_mm512_mask_cvtsepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovs.wb.mem.512
- __builtin_ia32_pmovswb512mem_mask ( __P, __A, __M);
+ _mm512_mask_cvtsepi16_storeu_epi8 ( __P, __M, __A);
}
void test_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
// CHECK-LABEL: @test_mm512_mask_cvtusepi16_storeu_epi8
// CHECK: @llvm.x86.avx512.mask.pmovus.wb.mem.512
- __builtin_ia32_pmovuswb512mem_mask ( __P, __A, __M);
+ _mm512_mask_cvtusepi16_storeu_epi8 ( __P, __M, __A);
}
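
A hypothetical scalar model (not from the patch) of the mask-to-vector lowering the new _mm512_movm_epi8 checks describe: the 64-bit mask is bitcast to 64 i1 lanes and each lane is sign-extended, so a set bit becomes 0xFF and a clear bit becomes 0x00.

#include <stdint.h>
/* Hypothetical scalar model of the bitcast + sext pattern (vpmovm2.i). */
static void movm_epi8_model(uint64_t mask, int8_t out[64]) {
  for (int i = 0; i < 64; i++)
    out[i] = ((mask >> i) & 1) ? -1 : 0;  /* sext i1 -> i8: all-ones or zero */
}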
diff --git a/test/CodeGen/avx512dq-builtins.c b/test/CodeGen/avx512dq-builtins.c
index f57433a3616be..ca8566c5979a1 100644
--- a/test/CodeGen/avx512dq-builtins.c
+++ b/test/CodeGen/avx512dq-builtins.c
@@ -929,13 +929,15 @@ __mmask16 test_mm512_movepi32_mask(__m512i __A) {
__m512i test_mm512_movm_epi32(__mmask16 __A) {
// CHECK-LABEL: @test_mm512_movm_epi32
- // CHECK: @llvm.x86.avx512.cvtmask2d.512
+ // CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i32>
return _mm512_movm_epi32(__A);
}
__m512i test_mm512_movm_epi64(__mmask8 __A) {
// CHECK-LABEL: @test_mm512_movm_epi64
- // CHECK: @llvm.x86.avx512.cvtmask2q.512
+ // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i64>
return _mm512_movm_epi64(__A);
}
@@ -963,40 +965,44 @@ __m512 test_mm512_maskz_broadcast_f32x2(__mmask16 __M, __m128 __A) {
return _mm512_maskz_broadcast_f32x2(__M, __A);
}
-__m512 test_mm512_broadcast_f32x8(__m256 __A) {
+__m512 test_mm512_broadcast_f32x8(float const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_f32x8
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x8
- return _mm512_broadcast_f32x8(__A);
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm512_broadcast_f32x8(_mm256_loadu_ps(__A));
}
-__m512 test_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A) {
+__m512 test_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, float const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_f32x8
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x8
- return _mm512_mask_broadcast_f32x8(__O, __M, __A);
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+ return _mm512_mask_broadcast_f32x8(__O, __M, _mm256_loadu_ps(__A));
}
-__m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A) {
+__m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_f32x8
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x8
- return _mm512_maskz_broadcast_f32x8(__M, __A);
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+ return _mm512_maskz_broadcast_f32x8(__M, _mm256_loadu_ps(__A));
}
-__m512d test_mm512_broadcast_f64x2(__m128d __A) {
+__m512d test_mm512_broadcast_f64x2(double const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_f64x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x2
- return _mm512_broadcast_f64x2(__A);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ return _mm512_broadcast_f64x2(_mm_loadu_pd(__A));
}
-__m512d test_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A) {
+__m512d test_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, double const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_f64x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x2
- return _mm512_mask_broadcast_f64x2(__O, __M, __A);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+ return _mm512_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
-__m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A) {
+__m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_f64x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x2
- return _mm512_maskz_broadcast_f64x2(__M, __A);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+ return _mm512_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
__m512i test_mm512_broadcast_i32x2(__m128i __A) {
@@ -1017,77 +1023,82 @@ __m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
return _mm512_maskz_broadcast_i32x2(__M, __A);
}
-__m512i test_mm512_broadcast_i32x8(__m256i __A) {
+__m512i test_mm512_broadcast_i32x8(__m256i const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_i32x8
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x8
- return _mm512_broadcast_i32x8(__A);
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ return _mm512_broadcast_i32x8(_mm256_loadu_si256(__A));
}
-__m512i test_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A) {
+__m512i test_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_i32x8
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x8
- return _mm512_mask_broadcast_i32x8(__O, __M, __A);
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
+ return _mm512_mask_broadcast_i32x8(__O, __M, _mm256_loadu_si256(__A));
}
-__m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A) {
+__m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_i32x8
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x8
- return _mm512_maskz_broadcast_i32x8(__M, __A);
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
+ return _mm512_maskz_broadcast_i32x8(__M, _mm256_loadu_si256(__A));
}
-__m512i test_mm512_broadcast_i64x2(__m128i __A) {
+__m512i test_mm512_broadcast_i64x2(__m128i const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_i64x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x2
- return _mm512_broadcast_i64x2(__A);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ return _mm512_broadcast_i64x2(_mm_loadu_si128(__A));
}
-__m512i test_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A) {
+__m512i test_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_i64x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x2
- return _mm512_mask_broadcast_i64x2(__O, __M, __A);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
+ return _mm512_mask_broadcast_i64x2(__O, __M, _mm_loadu_si128(__A));
}
-__m512i test_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
+__m512i test_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_i64x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x2
- return _mm512_maskz_broadcast_i64x2(__M, __A);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
+ return _mm512_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
}
+
__m256 test_mm512_extractf32x8_ps(__m512 __A) {
// CHECK-LABEL: @test_mm512_extractf32x8_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extractf32x8_ps(__A, 1);
}
__m256 test_mm512_mask_extractf32x8_ps(__m256 __W, __mmask8 __U, __m512 __A) {
// CHECK-LABEL: @test_mm512_mask_extractf32x8_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_extractf32x8_ps(__W, __U, __A, 1);
}
__m256 test_mm512_maskz_extractf32x8_ps(__mmask8 __U, __m512 __A) {
// CHECK-LABEL: @test_mm512_maskz_extractf32x8_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_extractf32x8_ps(__U, __A, 1);
}
__m128d test_mm512_extractf64x2_pd(__m512d __A) {
// CHECK-LABEL: @test_mm512_extractf64x2_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <2 x i32> <i32 6, i32 7>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <2 x i32> <i32 6, i32 7>
return _mm512_extractf64x2_pd(__A, 3);
}
__m128d test_mm512_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: @test_mm512_mask_extractf64x2_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <2 x i32> <i32 6, i32 7>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_mask_extractf64x2_pd(__W, __U, __A, 3);
}
__m128d test_mm512_maskz_extractf64x2_pd(__mmask8 __U, __m512d __A) {
// CHECK-LABEL: @test_mm512_maskz_extractf64x2_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <2 x i32> <i32 6, i32 7>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_maskz_extractf64x2_pd(__U, __A, 3);
}
@@ -1114,20 +1125,20 @@ __m256i test_mm512_maskz_extracti32x8_epi32(__mmask8 __U, __m512i __A) {
__m128i test_mm512_extracti64x2_epi64(__m512i __A) {
// CHECK-LABEL: @test_mm512_extracti64x2_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <2 x i32> <i32 6, i32 7>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <2 x i32> <i32 6, i32 7>
return _mm512_extracti64x2_epi64(__A, 3);
}
__m128i test_mm512_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_extracti64x2_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <2 x i32> <i32 6, i32 7>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_mask_extracti64x2_epi64(__W, __U, __A, 3);
}
__m128i test_mm512_maskz_extracti64x2_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_extracti64x2_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <2 x i32> <i32 6, i32 7>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <2 x i32> <i32 6, i32 7>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_maskz_extracti64x2_epi64(__U, __A, 3);
}
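
The broadcast tests above all follow one pattern: load a 128-bit block through a pointer, repeat its lane indices in a shufflevector to widen it, and, for the mask/maskz forms, blend the result with a select. A minimal caller-side sketch of the masked form, assuming AVX512DQ is available; keep_or_broadcast is a hypothetical helper, not part of the test suite:

#include <immintrin.h>

// Broadcasts the 128-bit block at *p across all four 128-bit lanes of the
// result; lanes whose mask bit is clear keep the corresponding lane of src.
__attribute__((target("avx512dq")))
__m512i keep_or_broadcast(__m512i src, __mmask8 m, const __m128i *p) {
  return _mm512_mask_broadcast_i64x2(src, m, _mm_loadu_si128(p));
}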
diff --git a/test/CodeGen/avx512f-builtins.c b/test/CodeGen/avx512f-builtins.c
index 760783af1ce0e..3ae80141b3b98 100644
--- a/test/CodeGen/avx512f-builtins.c
+++ b/test/CodeGen/avx512f-builtins.c
@@ -1241,20 +1241,20 @@ __mmask8 test_mm512_mask_cmpunord_ps_mask(__mmask8 k, __m512 a, __m512 b) {
__m256d test_mm512_extractf64x4_pd(__m512d a)
{
// CHECK-LABEL: @test_mm512_extractf64x4_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm512_extractf64x4_pd(a, 1);
}
__m256d test_mm512_mask_extractf64x4_pd(__m256d __W,__mmask8 __U,__m512d __A){
// CHECK-LABEL:@test_mm512_mask_extractf64x4_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm512_mask_extractf64x4_pd( __W, __U, __A, 1);
}
__m256d test_mm512_maskz_extractf64x4_pd(__mmask8 __U,__m512d __A){
// CHECK-LABEL:@test_mm512_maskz_extractf64x4_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm512_maskz_extractf64x4_pd( __U, __A, 1);
}
@@ -1262,20 +1262,20 @@ __m256d test_mm512_maskz_extractf64x4_pd(__mmask8 __U,__m512d __A){
__m128 test_mm512_extractf32x4_ps(__m512 a)
{
// CHECK-LABEL: @test_mm512_extractf32x4_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm512_extractf32x4_ps(a, 1);
}
__m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,__m512d __A){
// CHECK-LABEL:@test_mm512_mask_extractf32x4_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm512_mask_extractf32x4_ps( __W, __U, __A, 1);
}
__m128 test_mm512_maskz_extractf32x4_ps( __mmask8 __U,__m512d __A){
// CHECK-LABEL:@test_mm512_maskz_extractf32x4_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm512_maskz_extractf32x4_ps( __U, __A, 1);
}
@@ -2458,25 +2458,25 @@ __m128d test_mm_maskz_min_sd(__mmask8 __U, __m128d __A, __m128d __B) {
__m512 test_mm512_undefined() {
// CHECK-LABEL: @test_mm512_undefined
- // CHECK: ret <16 x float> undef
+ // CHECK: ret <16 x float> zeroinitializer
return _mm512_undefined();
}
__m512 test_mm512_undefined_ps() {
// CHECK-LABEL: @test_mm512_undefined_ps
- // CHECK: ret <16 x float> undef
+ // CHECK: ret <16 x float> zeroinitializer
return _mm512_undefined_ps();
}
__m512d test_mm512_undefined_pd() {
// CHECK-LABEL: @test_mm512_undefined_pd
- // CHECK: ret <8 x double> undef
+ // CHECK: ret <8 x double> zeroinitializer
return _mm512_undefined_pd();
}
__m512i test_mm512_undefined_epi32() {
// CHECK-LABEL: @test_mm512_undefined_epi32
- // CHECK: ret <8 x i64> undef
+ // CHECK: ret <8 x i64> zeroinitializer
return _mm512_undefined_epi32();
}
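
The _mm512_undefined family promises nothing about the returned bits, so a test can only pin the return value if the header materializes a concrete constant; zeros are the safe choice here, since an undef value could propagate unpredictably through later masked operations. A short usage sketch (the 0x00FF mask is an arbitrary example):

#include <immintrin.h>

// Only the low 8 lanes of the result carry data from x; the other lanes
// come from a vector whose contents the intrinsic leaves unspecified.
__attribute__((target("avx512f")))
__m512 low_half_only(__m512 x) {
  __m512 acc = _mm512_undefined_ps();
  return _mm512_mask_mov_ps(acc, (__mmask16)0x00FF, x);
}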
@@ -3714,40 +3714,40 @@ __m512i test_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 _
__m512d test_mm512_permute_pd(__m512d __X) {
// CHECK-LABEL: @test_mm512_permute_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
return _mm512_permute_pd(__X, 2);
}
__m512d test_mm512_mask_permute_pd(__m512d __W, __mmask8 __U, __m512d __X) {
// CHECK-LABEL: @test_mm512_mask_permute_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_permute_pd(__W, __U, __X, 2);
}
__m512d test_mm512_maskz_permute_pd(__mmask8 __U, __m512d __X) {
// CHECK-LABEL: @test_mm512_maskz_permute_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_permute_pd(__U, __X, 2);
}
__m512 test_mm512_permute_ps(__m512 __X) {
// CHECK-LABEL: @test_mm512_permute_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
return _mm512_permute_ps(__X, 2);
}
__m512 test_mm512_mask_permute_ps(__m512 __W, __mmask16 __U, __m512 __X) {
// CHECK-LABEL: @test_mm512_mask_permute_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_permute_ps(__W, __U, __X, 2);
}
__m512 test_mm512_maskz_permute_ps(__mmask16 __U, __m512 __X) {
// CHECK-LABEL: @test_mm512_maskz_permute_ps
- // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> zeroinitializer, <16 x i32> <i32 2, i32 0, i32 0, i32 0, i32 6, i32 4, i32 4, i32 4, i32 10, i32 8, i32 8, i32 8, i32 14, i32 12, i32 12, i32 12>
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_permute_ps(__U, __X, 2);
}
@@ -4647,154 +4647,162 @@ __m128 test_mm_maskz_sqrt_round_ss(__mmask8 __U, __m128 __A, __m128 __B){
return _mm_maskz_sqrt_round_ss(__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
-__m512 test_mm512_broadcast_f32x4(__m128 __A) {
+__m512 test_mm512_broadcast_f32x4(float const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_f32x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x4
- return _mm512_broadcast_f32x4(__A);
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ return _mm512_broadcast_f32x4(_mm_loadu_ps(__A));
}
-__m512 test_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A) {
+__m512 test_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, float const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_f32x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x4
- return _mm512_mask_broadcast_f32x4(__O, __M, __A);
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+ return _mm512_mask_broadcast_f32x4(__O, __M, _mm_loadu_ps(__A));
}
-__m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A) {
+__m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, float const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_f32x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x4
- return _mm512_maskz_broadcast_f32x4(__M, __A);
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+ return _mm512_maskz_broadcast_f32x4(__M, _mm_loadu_ps(__A));
}
-__m512d test_mm512_broadcast_f64x4(__m256d __A) {
+__m512d test_mm512_broadcast_f64x4(double const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_f64x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x4
- return _mm512_broadcast_f64x4(__A);
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ return _mm512_broadcast_f64x4(_mm256_loadu_pd(__A));
}
-__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A) {
+__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, double const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_f64x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x4
- return _mm512_mask_broadcast_f64x4(__O, __M, __A);
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+ return _mm512_mask_broadcast_f64x4(__O, __M, _mm256_loadu_pd(__A));
}
-__m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A) {
+__m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, double const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_f64x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x4
- return _mm512_maskz_broadcast_f64x4(__M, __A);
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+ return _mm512_maskz_broadcast_f64x4(__M, _mm256_loadu_pd(__A));
}
-__m512i test_mm512_broadcast_i32x4(__m128i __A) {
+__m512i test_mm512_broadcast_i32x4(__m128i const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_i32x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x4
- return _mm512_broadcast_i32x4(__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ return _mm512_broadcast_i32x4(_mm_loadu_si128(__A));
}
-__m512i test_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A) {
+__m512i test_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_i32x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x4
- return _mm512_mask_broadcast_i32x4(__O, __M, __A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
+ return _mm512_mask_broadcast_i32x4(__O, __M, _mm_loadu_si128(__A));
}
-__m512i test_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A) {
+__m512i test_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_i32x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x4
- return _mm512_maskz_broadcast_i32x4(__M, __A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
+ return _mm512_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
}
-__m512i test_mm512_broadcast_i64x4(__m256i __A) {
+__m512i test_mm512_broadcast_i64x4(__m256i const* __A) {
// CHECK-LABEL: @test_mm512_broadcast_i64x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x4
- return _mm512_broadcast_i64x4(__A);
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ return _mm512_broadcast_i64x4(_mm256_loadu_si256(__A));
}
-__m512i test_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A) {
+__m512i test_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i const* __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_i64x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x4
- return _mm512_mask_broadcast_i64x4(__O, __M, __A);
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
+ return _mm512_mask_broadcast_i64x4(__O, __M, _mm256_loadu_si256(__A));
}
-__m512i test_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A) {
+__m512i test_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i const* __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_i64x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x4
- return _mm512_maskz_broadcast_i64x4(__M, __A);
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
+ return _mm512_maskz_broadcast_i64x4(__M, _mm256_loadu_si256(__A));
}
__m512d test_mm512_broadcastsd_pd(__m128d __A) {
// CHECK-LABEL: @test_mm512_broadcastsd_pd
- // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <8 x i32> zeroinitializer
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> zeroinitializer, <8 x i32> zeroinitializer
return _mm512_broadcastsd_pd(__A);
}
__m512d test_mm512_mask_broadcastsd_pd(__m512d __O, __mmask8 __M, __m128d __A) {
// CHECK-LABEL: @test_mm512_mask_broadcastsd_pd
- // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <8 x i32> zeroinitializer
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> zeroinitializer, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_broadcastsd_pd(__O, __M, __A);
}
__m512d test_mm512_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcastsd_pd
- // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <8 x i32> zeroinitializer
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> zeroinitializer, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_broadcastsd_pd(__M, __A);
}
__m512 test_mm512_broadcastss_ps(__m128 __A) {
// CHECK-LABEL: @test_mm512_broadcastss_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <16 x i32> zeroinitializer
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <16 x i32> zeroinitializer
return _mm512_broadcastss_ps(__A);
}
__m512 test_mm512_mask_broadcastss_ps(__m512 __O, __mmask16 __M, __m128 __A) {
// CHECK-LABEL: @test_mm512_mask_broadcastss_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <16 x i32> zeroinitializer
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcastss_ps(__O, __M, __A);
}
__m512 test_mm512_maskz_broadcastss_ps(__mmask16 __M, __m128 __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcastss_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <16 x i32> zeroinitializer
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcastss_ps(__M, __A);
}
__m512i test_mm512_broadcastd_epi32(__m128i __A) {
// CHECK-LABEL: @test_mm512_broadcastd_epi32
- // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <16 x i32> zeroinitializer
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <16 x i32> zeroinitializer
return _mm512_broadcastd_epi32(__A);
}
__m512i test_mm512_mask_broadcastd_epi32(__m512i __O, __mmask16 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_mask_broadcastd_epi32
- // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <16 x i32> zeroinitializer
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcastd_epi32(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcastd_epi32(__mmask16 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcastd_epi32
- // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <16 x i32> zeroinitializer
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <16 x i32> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_broadcastd_epi32(__M, __A);
}
__m512i test_mm512_broadcastq_epi64(__m128i __A) {
// CHECK-LABEL: @test_mm512_broadcastq_epi64
- // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <8 x i32> zeroinitializer
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> zeroinitializer, <8 x i32> zeroinitializer
return _mm512_broadcastq_epi64(__A);
}
__m512i test_mm512_mask_broadcastq_epi64(__m512i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_mask_broadcastq_epi64
- // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <8 x i32> zeroinitializer
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> zeroinitializer, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_broadcastq_epi64(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcastq_epi64
- // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <8 x i32> zeroinitializer
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> zeroinitializer, <8 x i32> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_broadcastq_epi64(__M, __A);
}
@@ -5181,20 +5189,20 @@ __m128i test_mm512_maskz_extracti32x4_epi32(__mmask8 __U, __m512i __A) {
__m256i test_mm512_extracti64x4_epi64(__m512i __A) {
// CHECK-LABEL: @test_mm512_extracti64x4_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm512_extracti64x4_epi64(__A, 1);
}
__m256i test_mm512_mask_extracti64x4_epi64(__m256i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_extracti64x4_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm512_mask_extracti64x4_epi64(__W, __U, __A, 1);
}
__m256i test_mm512_maskz_extracti64x4_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_extracti64x4_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm512_maskz_extracti64x4_epi64(__U, __A, 1);
}
@@ -6077,40 +6085,40 @@ __m128d test_mm_mask3_fnmsub_round_sd(__m128d __W, __m128d __X, __m128d __Y, __m
__m512d test_mm512_permutex_pd(__m512d __X) {
// CHECK-LABEL: @test_mm512_permutex_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
return _mm512_permutex_pd(__X, 0);
}
__m512d test_mm512_mask_permutex_pd(__m512d __W, __mmask8 __U, __m512d __X) {
// CHECK-LABEL: @test_mm512_mask_permutex_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_permutex_pd(__W, __U, __X, 0);
}
__m512d test_mm512_maskz_permutex_pd(__mmask8 __U, __m512d __X) {
// CHECK-LABEL: @test_mm512_maskz_permutex_pd
- // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> zeroinitializer, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_permutex_pd(__U, __X, 0);
}
__m512i test_mm512_permutex_epi64(__m512i __X) {
// CHECK-LABEL: @test_mm512_permutex_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
return _mm512_permutex_epi64(__X, 0);
}
__m512i test_mm512_mask_permutex_epi64(__m512i __W, __mmask8 __M, __m512i __X) {
// CHECK-LABEL: @test_mm512_mask_permutex_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_permutex_epi64(__W, __M, __X, 0);
}
__m512i test_mm512_maskz_permutex_epi64(__mmask8 __M, __m512i __X) {
// CHECK-LABEL: @test_mm512_maskz_permutex_epi64
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_permutex_epi64(__M, __X, 0);
}
@@ -6243,7 +6251,7 @@ void test_mm512_stream_si512(__m512i * __P, __m512i __A) {
__m512i test_mm512_stream_load_si512(void *__P) {
// CHECK-LABEL: @test_mm512_stream_load_si512
- // CHECK: @llvm.x86.avx512.movntdqa
+ // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 64, !nontemporal
return _mm512_stream_load_si512(__P);
}
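
_mm512_stream_load_si512 is a non-temporal load: the data is fetched with a hint to bypass the caches, which the IR expresses as an ordinary 64-byte-aligned load carrying !nontemporal metadata rather than a target intrinsic call. A minimal sketch of typical streaming use, assuming both pointers are 64-byte aligned:

#include <immintrin.h>

// Copies one 64-byte block with non-temporal semantics on both sides,
// reducing cache pollution for data that will not be reused soon.
__attribute__((target("avx512f")))
void copy_block_nt(void *dst, void *src) {
  __m512i v = _mm512_stream_load_si512(src);
  _mm512_stream_si512((__m512i *)dst, v);
}

In real code a store fence (_mm_sfence) usually follows a run of streaming stores before the data is handed to another thread.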
@@ -7251,6 +7259,18 @@ __m512i test_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
return _mm512_maskz_cvtps_epu32( __U, __A);
}
+double test_mm512_cvtsd_f64(__m512d A) {
+ // CHECK-LABEL: test_mm512_cvtsd_f64
+ // CHECK: extractelement <8 x double> %{{.*}}, i32 0
+ return _mm512_cvtsd_f64(A);
+}
+
+float test_mm512_cvtss_f32(__m512 A) {
+ // CHECK-LABEL: test_mm512_cvtss_f32
+ // CHECK: extractelement <16 x float> %{{.*}}, i32 0
+ return _mm512_cvtss_f32(A);
+}
+
__m512d test_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
// CHECK-LABEL: @test_mm512_mask_max_pd
@@ -7704,6 +7724,133 @@ __m512i test_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
return _mm512_mask_set1_epi32 ( __O, __M, __A);
}
+__m512i test_mm512_set_epi8(char e63, char e62, char e61, char e60, char e59,
+ char e58, char e57, char e56, char e55, char e54, char e53, char e52,
+ char e51, char e50, char e49, char e48, char e47, char e46, char e45,
+ char e44, char e43, char e42, char e41, char e40, char e39, char e38,
+ char e37, char e36, char e35, char e34, char e33, char e32, char e31,
+ char e30, char e29, char e28, char e27, char e26, char e25, char e24,
+ char e23, char e22, char e21, char e20, char e19, char e18, char e17,
+ char e16, char e15, char e14, char e13, char e12, char e11, char e10,
+ char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2,
+ char e1, char e0) {
+
+ //CHECK-LABEL: @test_mm512_set_epi8
+ //CHECK: load i8, i8* %e63.addr, align 1
+ //CHECK: load i8, i8* %e62.addr, align 1
+ //CHECK: load i8, i8* %e61.addr, align 1
+ //CHECK: load i8, i8* %e60.addr, align 1
+ //CHECK: load i8, i8* %e59.addr, align 1
+ //CHECK: load i8, i8* %e58.addr, align 1
+ //CHECK: load i8, i8* %e57.addr, align 1
+ //CHECK: load i8, i8* %e56.addr, align 1
+ //CHECK: load i8, i8* %e55.addr, align 1
+ //CHECK: load i8, i8* %e54.addr, align 1
+ //CHECK: load i8, i8* %e53.addr, align 1
+ //CHECK: load i8, i8* %e52.addr, align 1
+ //CHECK: load i8, i8* %e51.addr, align 1
+ //CHECK: load i8, i8* %e50.addr, align 1
+ //CHECK: load i8, i8* %e49.addr, align 1
+ //CHECK: load i8, i8* %e48.addr, align 1
+ //CHECK: load i8, i8* %e47.addr, align 1
+ //CHECK: load i8, i8* %e46.addr, align 1
+ //CHECK: load i8, i8* %e45.addr, align 1
+ //CHECK: load i8, i8* %e44.addr, align 1
+ //CHECK: load i8, i8* %e43.addr, align 1
+ //CHECK: load i8, i8* %e42.addr, align 1
+ //CHECK: load i8, i8* %e41.addr, align 1
+ //CHECK: load i8, i8* %e40.addr, align 1
+ //CHECK: load i8, i8* %e39.addr, align 1
+ //CHECK: load i8, i8* %e38.addr, align 1
+ //CHECK: load i8, i8* %e37.addr, align 1
+ //CHECK: load i8, i8* %e36.addr, align 1
+ //CHECK: load i8, i8* %e35.addr, align 1
+ //CHECK: load i8, i8* %e34.addr, align 1
+ //CHECK: load i8, i8* %e33.addr, align 1
+ //CHECK: load i8, i8* %e32.addr, align 1
+ //CHECK: load i8, i8* %e31.addr, align 1
+ //CHECK: load i8, i8* %e30.addr, align 1
+ //CHECK: load i8, i8* %e29.addr, align 1
+ //CHECK: load i8, i8* %e28.addr, align 1
+ //CHECK: load i8, i8* %e27.addr, align 1
+ //CHECK: load i8, i8* %e26.addr, align 1
+ //CHECK: load i8, i8* %e25.addr, align 1
+ //CHECK: load i8, i8* %e24.addr, align 1
+ //CHECK: load i8, i8* %e23.addr, align 1
+ //CHECK: load i8, i8* %e22.addr, align 1
+ //CHECK: load i8, i8* %e21.addr, align 1
+ //CHECK: load i8, i8* %e20.addr, align 1
+ //CHECK: load i8, i8* %e19.addr, align 1
+ //CHECK: load i8, i8* %e18.addr, align 1
+ //CHECK: load i8, i8* %e17.addr, align 1
+ //CHECK: load i8, i8* %e16.addr, align 1
+ //CHECK: load i8, i8* %e15.addr, align 1
+ //CHECK: load i8, i8* %e14.addr, align 1
+ //CHECK: load i8, i8* %e13.addr, align 1
+ //CHECK: load i8, i8* %e12.addr, align 1
+ //CHECK: load i8, i8* %e11.addr, align 1
+ //CHECK: load i8, i8* %e10.addr, align 1
+ //CHECK: load i8, i8* %e9.addr, align 1
+ //CHECK: load i8, i8* %e8.addr, align 1
+ //CHECK: load i8, i8* %e7.addr, align 1
+ //CHECK: load i8, i8* %e6.addr, align 1
+ //CHECK: load i8, i8* %e5.addr, align 1
+ //CHECK: load i8, i8* %e4.addr, align 1
+ //CHECK: load i8, i8* %e3.addr, align 1
+ //CHECK: load i8, i8* %e2.addr, align 1
+ //CHECK: load i8, i8* %e1.addr, align 1
+ //CHECK: load i8, i8* %e0.addr, align 1
+ return _mm512_set_epi8(e63, e62, e61, e60, e59, e58, e57, e56, e55, e54,
+ e53, e52, e51, e50, e49, e48, e47, e46, e45, e44, e43, e42, e41, e40,
+ e39, e38, e37, e36, e35, e34, e33, e32, e31, e30, e29, e28, e27, e26,
+ e25, e24, e23, e22, e21, e20, e19, e18, e17, e16, e15, e14, e13, e12,
+ e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0);
+}
+
+__m512i test_mm512_set_epi16(short e31, short e30, short e29, short e28,
+ short e27, short e26, short e25, short e24, short e23, short e22,
+ short e21, short e20, short e19, short e18, short e17,
+ short e16, short e15, short e14, short e13, short e12,
+ short e11, short e10, short e9, short e8, short e7,
+ short e6, short e5, short e4, short e3, short e2, short e1, short e0) {
+ //CHECK-LABEL: @test_mm512_set_epi16
+ //CHECK: insertelement{{.*}}i32 0
+ //CHECK: insertelement{{.*}}i32 1
+ //CHECK: insertelement{{.*}}i32 2
+ //CHECK: insertelement{{.*}}i32 3
+ //CHECK: insertelement{{.*}}i32 4
+ //CHECK: insertelement{{.*}}i32 5
+ //CHECK: insertelement{{.*}}i32 6
+ //CHECK: insertelement{{.*}}i32 7
+ //CHECK: insertelement{{.*}}i32 8
+ //CHECK: insertelement{{.*}}i32 9
+ //CHECK: insertelement{{.*}}i32 10
+ //CHECK: insertelement{{.*}}i32 11
+ //CHECK: insertelement{{.*}}i32 12
+ //CHECK: insertelement{{.*}}i32 13
+ //CHECK: insertelement{{.*}}i32 14
+ //CHECK: insertelement{{.*}}i32 15
+ //CHECK: insertelement{{.*}}i32 16
+ //CHECK: insertelement{{.*}}i32 17
+ //CHECK: insertelement{{.*}}i32 18
+ //CHECK: insertelement{{.*}}i32 19
+ //CHECK: insertelement{{.*}}i32 20
+ //CHECK: insertelement{{.*}}i32 21
+ //CHECK: insertelement{{.*}}i32 22
+ //CHECK: insertelement{{.*}}i32 23
+ //CHECK: insertelement{{.*}}i32 24
+ //CHECK: insertelement{{.*}}i32 25
+ //CHECK: insertelement{{.*}}i32 26
+ //CHECK: insertelement{{.*}}i32 27
+ //CHECK: insertelement{{.*}}i32 28
+ //CHECK: insertelement{{.*}}i32 29
+ //CHECK: insertelement{{.*}}i32 30
+ //CHECK: insertelement{{.*}}i32 31
+ return _mm512_set_epi16(e31, e30, e29, e28, e27, e26, e25, e24, e23, e22,
+ e21, e20, e19, e18, e17, e16, e15, e14, e13, e12, e11, e10, e9, e8, e7,
+ e6, e5, e4, e3, e2, e1, e0);
+
+}
__m512i test_mm512_set_epi32 (int __A, int __B, int __C, int __D,
int __E, int __F, int __G, int __H,
int __I, int __J, int __K, int __L,
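
Note the argument order in the set intrinsics above: _mm512_set_epi8/16/32 list elements highest first, so e0 lands in the lowest lane. Code that wants memory order usually uses the _mm512_setr_* variants instead; both sketches below build the same vector:

#include <immintrin.h>

__attribute__((target("avx512f")))
__m512i iota_hi_first(void) {
  return _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                          7, 6, 5, 4, 3, 2, 1, 0);
}

__attribute__((target("avx512f")))
__m512i iota_lo_first(void) {  // identical result, lanes listed low-to-high
  return _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                           8, 9, 10, 11, 12, 13, 14, 15);
}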
diff --git a/test/CodeGen/avx512pf-builtins.c b/test/CodeGen/avx512pf-builtins.c
index 19ee083eae2df..b35d90ca63096 100644
--- a/test/CodeGen/avx512pf-builtins.c
+++ b/test/CodeGen/avx512pf-builtins.c
@@ -6,95 +6,95 @@
void test_mm512_mask_prefetch_i32gather_pd(__m256i index, __mmask8 mask, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i32gather_pd
// CHECK: @llvm.x86.avx512.gatherpf.dpd
- return _mm512_mask_prefetch_i32gather_pd(index, mask, addr, 2, 1);
+ return _mm512_mask_prefetch_i32gather_pd(index, mask, addr, 2, _MM_HINT_T0);
}
void test_mm512_prefetch_i32gather_pd(__m256i index, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_prefetch_i32gather_pd
// CHECK: @llvm.x86.avx512.gatherpf.dpd
- return _mm512_prefetch_i32gather_pd(index, addr, 2, 1);
+ return _mm512_prefetch_i32gather_pd(index, addr, 2, _MM_HINT_T0);
}
void test_mm512_mask_prefetch_i32gather_ps(__m512i index, __mmask16 mask, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i32gather_ps
// CHECK: @llvm.x86.avx512.gatherpf.dps
- return _mm512_mask_prefetch_i32gather_ps(index, mask, addr, 2, 1);
+ return _mm512_mask_prefetch_i32gather_ps(index, mask, addr, 2, _MM_HINT_T0);
}
void test_mm512_prefetch_i32gather_ps(__m512i index, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_prefetch_i32gather_ps
// CHECK: @llvm.x86.avx512.gatherpf.dps
- return _mm512_prefetch_i32gather_ps(index, addr, 2, 1);
+ return _mm512_prefetch_i32gather_ps(index, addr, 2, _MM_HINT_T0);
}
void test_mm512_mask_prefetch_i64gather_pd(__m512i index, __mmask8 mask, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i64gather_pd
// CHECK: @llvm.x86.avx512.gatherpf.qpd
- return _mm512_mask_prefetch_i64gather_pd(index, mask, addr, 2, 1);
+ return _mm512_mask_prefetch_i64gather_pd(index, mask, addr, 2, _MM_HINT_T0);
}
void test_mm512_prefetch_i64gather_pd(__m512i index, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_prefetch_i64gather_pd
// CHECK: @llvm.x86.avx512.gatherpf.qpd
- return _mm512_prefetch_i64gather_pd(index, addr, 2, 1);
+ return _mm512_prefetch_i64gather_pd(index, addr, 2, _MM_HINT_T0);
}
void test_mm512_mask_prefetch_i64gather_ps(__m512i index, __mmask8 mask, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i64gather_ps
// CHECK: @llvm.x86.avx512.gatherpf.qps
- return _mm512_mask_prefetch_i64gather_ps(index, mask, addr, 2, 1);
+ return _mm512_mask_prefetch_i64gather_ps(index, mask, addr, 2, _MM_HINT_T0);
}
void test_mm512_prefetch_i64gather_ps(__m512i index, void const *addr, int hint) {
// CHECK-LABEL: @test_mm512_prefetch_i64gather_ps
// CHECK: @llvm.x86.avx512.gatherpf.qps
- return _mm512_prefetch_i64gather_ps(index, addr, 2, 1);
+ return _mm512_prefetch_i64gather_ps(index, addr, 2, _MM_HINT_T0);
}
void test_mm512_prefetch_i32scatter_pd(void *addr, __m256i index) {
// CHECK-LABEL: @test_mm512_prefetch_i32scatter_pd
// CHECK: @llvm.x86.avx512.scatterpf.dpd.512
- return _mm512_prefetch_i32scatter_pd(addr, index, 1, 2);
+ return _mm512_prefetch_i32scatter_pd(addr, index, 1, _MM_HINT_T1);
}
void test_mm512_mask_prefetch_i32scatter_pd(void *addr, __mmask8 mask, __m256i index) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i32scatter_pd
// CHECK: @llvm.x86.avx512.scatterpf.dpd.512
- return _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, 1, 2);
+ return _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, 1, _MM_HINT_T1);
}
void test_mm512_prefetch_i32scatter_ps(void *addr, __m512i index) {
// CHECK-LABEL: @test_mm512_prefetch_i32scatter_ps
// CHECK: @llvm.x86.avx512.scatterpf.dps.512
- return _mm512_prefetch_i32scatter_ps(addr, index, 1, 2);
+ return _mm512_prefetch_i32scatter_ps(addr, index, 1, _MM_HINT_T1);
}
void test_mm512_mask_prefetch_i32scatter_ps(void *addr, __mmask16 mask, __m512i index) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i32scatter_ps
// CHECK: @llvm.x86.avx512.scatterpf.dps.512
- return _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, 1, 2);
+ return _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, 1, _MM_HINT_T1);
}
void test_mm512_prefetch_i64scatter_pd(void *addr, __m512i index) {
// CHECK-LABEL: @test_mm512_prefetch_i64scatter_pd
// CHECK: @llvm.x86.avx512.scatterpf.qpd.512
- return _mm512_prefetch_i64scatter_pd(addr, index, 1, 2);
+ return _mm512_prefetch_i64scatter_pd(addr, index, 1, _MM_HINT_T1);
}
void test_mm512_mask_prefetch_i64scatter_pd(void *addr, __mmask16 mask, __m512i index) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i64scatter_pd
// CHECK: @llvm.x86.avx512.scatterpf.qpd.512
- return _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, 1, 2);
+ return _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, 1, _MM_HINT_T1);
}
void test_mm512_prefetch_i64scatter_ps(void *addr, __m512i index) {
// CHECK-LABEL: @test_mm512_prefetch_i64scatter_ps
// CHECK: @llvm.x86.avx512.scatterpf.qps.512
- return _mm512_prefetch_i64scatter_ps(addr, index, 1, 2);
+ return _mm512_prefetch_i64scatter_ps(addr, index, 1, _MM_HINT_T1);
}
void test_mm512_mask_prefetch_i64scatter_ps(void *addr, __mmask16 mask, __m512i index) {
// CHECK-LABEL: @test_mm512_mask_prefetch_i64scatter_ps
// CHECK: @llvm.x86.avx512.scatterpf.qps.512
- return _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, 1, 2);
+ return _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, 1, _MM_HINT_T1);
}
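
The final argument of these prefetch intrinsics selects the target cache level, and the headers name the encodings _MM_HINT_T0 (all cache levels, including L1) and _MM_HINT_T1 (L2 and beyond), so callers need not hard-code raw integers. For reference, the plain scalar prefetch takes the same constants:

#include <immintrin.h>

// Scalar prefetches using the same locality hints the gather/scatter
// prefetch tests above pass as their final argument.
void warm(const char *p) {
  _mm_prefetch(p, _MM_HINT_T0);       // into all levels, including L1
  _mm_prefetch(p + 64, _MM_HINT_T1);  // into L2 and beyond
}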
diff --git a/test/CodeGen/avx512vl-builtins.c b/test/CodeGen/avx512vl-builtins.c
index fe4ebe12ddd49..c64b7bcec23e1 100644
--- a/test/CodeGen/avx512vl-builtins.c
+++ b/test/CodeGen/avx512vl-builtins.c
@@ -5008,56 +5008,56 @@ __m256 test_mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A) {
__m128d test_mm_mask_permute_pd(__m128d __W, __mmask8 __U, __m128d __X) {
// CHECK-LABEL: @test_mm_mask_permute_pd
- // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> zeroinitializer, <2 x i32> <i32 1, i32 0>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_permute_pd(__W, __U, __X, 1);
}
__m128d test_mm_maskz_permute_pd(__mmask8 __U, __m128d __X) {
// CHECK-LABEL: @test_mm_maskz_permute_pd
- // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> zeroinitializer, <2 x i32> <i32 1, i32 0>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_permute_pd(__U, __X, 1);
}
__m256d test_mm256_mask_permute_pd(__m256d __W, __mmask8 __U, __m256d __X) {
// CHECK-LABEL: @test_mm256_mask_permute_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permute_pd(__W, __U, __X, 5);
}
__m256d test_mm256_maskz_permute_pd(__mmask8 __U, __m256d __X) {
// CHECK-LABEL: @test_mm256_maskz_permute_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permute_pd(__U, __X, 5);
}
__m128 test_mm_mask_permute_ps(__m128 __W, __mmask8 __U, __m128 __X) {
// CHECK-LABEL: @test_mm_mask_permute_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_permute_ps(__W, __U, __X, 0x1b);
}
__m128 test_mm_maskz_permute_ps(__mmask8 __U, __m128 __X) {
// CHECK-LABEL: @test_mm_maskz_permute_ps
- // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_permute_ps(__U, __X, 0x1b);
}
__m256 test_mm256_mask_permute_ps(__m256 __W, __mmask8 __U, __m256 __X) {
// CHECK-LABEL: @test_mm256_mask_permute_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_permute_ps(__W, __U, __X, 0x1b);
}
__m256 test_mm256_maskz_permute_ps(__mmask8 __U, __m256 __X) {
// CHECK-LABEL: @test_mm256_maskz_permute_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_permute_ps(__U, __X, 0x1b);
}
@@ -5734,38 +5734,42 @@ __m256 test_mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A) {
__m256 test_mm256_broadcast_f32x4(__m128 __A) {
// CHECK-LABEL: @test_mm256_broadcast_f32x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x4
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
return _mm256_broadcast_f32x4(__A);
}
__m256 test_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A) {
// CHECK-LABEL: @test_mm256_mask_broadcast_f32x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x4
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcast_f32x4(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A) {
// CHECK-LABEL: @test_mm256_maskz_broadcast_f32x4
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x4
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcast_f32x4(__M, __A);
}
-__m256i test_mm256_broadcast_i32x4(__m128i __A) {
+__m256i test_mm256_broadcast_i32x4(__m128i const* __A) {
// CHECK-LABEL: @test_mm256_broadcast_i32x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x4
- return _mm256_broadcast_i32x4(__A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ return _mm256_broadcast_i32x4(_mm_loadu_si128(__A));
}
-__m256i test_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A) {
+__m256i test_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm256_mask_broadcast_i32x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x4
- return _mm256_mask_broadcast_i32x4(__O, __M, __A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
+ return _mm256_mask_broadcast_i32x4(__O, __M, _mm_loadu_si128(__A));
}
-__m256i test_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A) {
+__m256i test_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm256_maskz_broadcast_i32x4
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x4
- return _mm256_maskz_broadcast_i32x4(__M, __A);
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
+ return _mm256_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
}
__m256d test_mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A) {
@@ -6588,20 +6592,20 @@ void test_mm256_mask_cvtepi64_storeu_epi16(void * __P, __mmask8 __M, __m256i __A
__m128 test_mm256_extractf32x4_ps(__m256 __A) {
// CHECK-LABEL: @test_mm256_extractf32x4_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf32x4_ps(__A, 1);
}
__m128 test_mm256_mask_extractf32x4_ps(__m128 __W, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_mask_extractf32x4_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_mask_extractf32x4_ps(__W, __U, __A, 1);
}
__m128 test_mm256_maskz_extractf32x4_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_maskz_extractf32x4_ps
- // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_maskz_extractf32x4_ps(__U, __A, 1);
}
@@ -6836,40 +6840,40 @@ __m256i test_mm256_mask_i32gather_epi32(__m256i __v1_old, __mmask8 __mask, __m25
__m256d test_mm256_permutex_pd(__m256d __X) {
// CHECK-LABEL: @test_mm256_permutex_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
return _mm256_permutex_pd(__X, 3);
}
__m256d test_mm256_mask_permutex_pd(__m256d __W, __mmask8 __U, __m256d __X) {
// CHECK-LABEL: @test_mm256_mask_permutex_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_permutex_pd(__W, __U, __X, 1);
}
__m256d test_mm256_maskz_permutex_pd(__mmask8 __U, __m256d __X) {
// CHECK-LABEL: @test_mm256_maskz_permutex_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_permutex_pd(__U, __X, 1);
}
__m256i test_mm256_permutex_epi64(__m256i __X) {
// CHECK-LABEL: @test_mm256_permutex_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
return _mm256_permutex_epi64(__X, 3);
}
__m256i test_mm256_mask_permutex_epi64(__m256i __W, __mmask8 __M, __m256i __X) {
// CHECK-LABEL: @test_mm256_mask_permutex_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_permutex_epi64(__W, __M, __X, 3);
}
__m256i test_mm256_maskz_permutex_epi64(__mmask8 __M, __m256i __X) {
// CHECK-LABEL: @test_mm256_maskz_permutex_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_permutex_epi64(__M, __X, 3);
}
diff --git a/test/CodeGen/avx512vlbw-builtins.c b/test/CodeGen/avx512vlbw-builtins.c
index fe363c9ffde5e..5a7283608bc32 100644
--- a/test/CodeGen/avx512vlbw-builtins.c
+++ b/test/CodeGen/avx512vlbw-builtins.c
@@ -2521,25 +2521,29 @@ __mmask32 test_mm256_movepi8_mask(__m256i __A) {
__m128i test_mm_movm_epi8(__mmask16 __A) {
// CHECK-LABEL: @test_mm_movm_epi8
- // CHECK: @llvm.x86.avx512.cvtmask2b.128
+ // CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i8>
return _mm_movm_epi8(__A);
}
__m256i test_mm256_movm_epi8(__mmask32 __A) {
// CHECK-LABEL: @test_mm256_movm_epi8
- // CHECK: @llvm.x86.avx512.cvtmask2b.256
+ // CHECK: %{{.*}} = bitcast i32 %{{.*}} to <32 x i1>
+ // CHECK: %vpmovm2.i = sext <32 x i1> %{{.*}} to <32 x i8>
return _mm256_movm_epi8(__A);
}
__m128i test_mm_movm_epi16(__mmask8 __A) {
// CHECK-LABEL: @test_mm_movm_epi16
- // CHECK: @llvm.x86.avx512.cvtmask2w.128
+ // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i16>
return _mm_movm_epi16(__A);
}
__m256i test_mm256_movm_epi16(__mmask16 __A) {
// CHECK-LABEL: @test_mm256_movm_epi16
- // CHECK: @llvm.x86.avx512.cvtmask2w.256
+ // CHECK: %{{.*}} = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: %vpmovm2.i = sext <16 x i1> %{{.*}} to <16 x i16>
return _mm256_movm_epi16(__A);
}
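
The movm intrinsics expand a k-register mask into a vector whose elements are all-ones where the mask bit is set and zero where it is clear; the generic lowering is a bitcast of the integer mask to <N x i1> followed by a sign extension, which is what the bitcast-plus-sext CHECK pairs match. A scalar model of one lane, in plain C:

#include <stdint.h>

// Models _mm_movm_epi8 for lane i: take mask bit i and sign-extend it,
// yielding 0xFF for a set bit and 0x00 for a clear one.
int8_t movm_lane(uint16_t mask, int i) {
  return (int8_t)(((mask >> i) & 1) ? -1 : 0);
}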
diff --git a/test/CodeGen/avx512vldq-builtins.c b/test/CodeGen/avx512vldq-builtins.c
index 6834b6f46b288..b18c811f845b4 100644
--- a/test/CodeGen/avx512vldq-builtins.c
+++ b/test/CodeGen/avx512vldq-builtins.c
@@ -865,25 +865,32 @@ __mmask8 test_mm256_movepi32_mask(__m256i __A) {
__m128i test_mm_movm_epi32(__mmask8 __A) {
// CHECK-LABEL: @test_mm_movm_epi32
- // CHECK: @llvm.x86.avx512.cvtmask2d.128
+ // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: %extract.i = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: %vpmovm2.i = sext <4 x i1> %extract.i to <4 x i32>
return _mm_movm_epi32(__A);
}
__m256i test_mm256_movm_epi32(__mmask8 __A) {
// CHECK-LABEL: @test_mm256_movm_epi32
- // CHECK: @llvm.x86.avx512.cvtmask2d.256
+ // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: %vpmovm2.i = sext <8 x i1> %{{.*}} to <8 x i32>
return _mm256_movm_epi32(__A);
}
__m128i test_mm_movm_epi64(__mmask8 __A) {
// CHECK-LABEL: @test_mm_movm_epi64
- // CHECK: @llvm.x86.avx512.cvtmask2q.128
+ // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: %extract.i = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: %vpmovm2.i = sext <2 x i1> %extract.i to <2 x i64>
return _mm_movm_epi64(__A);
}
__m256i test_mm256_movm_epi64(__mmask8 __A) {
// CHECK-LABEL: @test_mm256_movm_epi64
- // CHECK: @llvm.x86.avx512.cvtmask2q.256
+ // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: %extract.i = shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: %vpmovm2.i = sext <4 x i1> %extract.i to <4 x i64>
return _mm256_movm_epi64(__A);
}
@@ -918,22 +925,24 @@ __m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
return _mm256_maskz_broadcast_f32x2(__M, __A);
}
-__m256d test_mm256_broadcast_f64x2(__m128d __A) {
+__m256d test_mm256_broadcast_f64x2(double const* __A) {
// CHECK-LABEL: @test_mm256_broadcast_f64x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x2
- return _mm256_broadcast_f64x2(__A);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ return _mm256_broadcast_f64x2(_mm_loadu_pd(__A));
}
-__m256d test_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, __m128d __A) {
+__m256d test_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, double const* __A) {
// CHECK-LABEL: @test_mm256_mask_broadcast_f64x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x2
- return _mm256_mask_broadcast_f64x2(__O, __M, __A);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
+ return _mm256_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
-__m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A) {
+__m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
// CHECK-LABEL: @test_mm256_maskz_broadcast_f64x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf64x2
- return _mm256_maskz_broadcast_f64x2(__M, __A);
+ // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
+ return _mm256_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
__m128i test_mm_broadcast_i32x2(__m128i __A) {
@@ -972,60 +981,62 @@ __m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
return _mm256_maskz_broadcast_i32x2(__M, __A);
}
-__m256i test_mm256_broadcast_i64x2(__m128i __A) {
+__m256i test_mm256_broadcast_i64x2(__m128i const* __A) {
// CHECK-LABEL: @test_mm256_broadcast_i64x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x2
- return _mm256_broadcast_i64x2(__A);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ return _mm256_broadcast_i64x2(_mm_loadu_si128(__A));
}
-__m256i test_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i __A) {
+__m256i test_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm256_mask_broadcast_i64x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x2
- return _mm256_mask_broadcast_i64x2(__O, __M, __A);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
+ return _mm256_mask_broadcast_i64x2(__O, __M, _mm_loadu_si128(__A));
}
-__m256i test_mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
+__m256i test_mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
// CHECK-LABEL: @test_mm256_maskz_broadcast_i64x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti64x2
- return _mm256_maskz_broadcast_i64x2(__M, __A);
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
+ return _mm256_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
}
__m128d test_mm256_extractf64x2_pd(__m256d __A) {
// CHECK-LABEL: @test_mm256_extractf64x2_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <2 x i32> <i32 2, i32 3>
return _mm256_extractf64x2_pd(__A, 1);
}
__m128d test_mm256_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_mask_extractf64x2_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm256_mask_extractf64x2_pd(__W, __U, __A, 1);
}
__m128d test_mm256_maskz_extractf64x2_pd(__mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_maskz_extractf64x2_pd
- // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> zeroinitializer, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm256_maskz_extractf64x2_pd(__U, __A, 1);
}
__m128i test_mm256_extracti64x2_epi64(__m256i __A) {
// CHECK-LABEL: @test_mm256_extracti64x2_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 2, i32 3>
return _mm256_extracti64x2_epi64(__A, 1);
}
__m128i test_mm256_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_extracti64x2_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm256_mask_extracti64x2_epi64(__W, __U, __A, 1);
}
__m128i test_mm256_maskz_extracti64x2_epi64(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_extracti64x2_epi64
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> zeroinitializer, <2 x i32> <i32 2, i32 3>
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm256_maskz_extracti64x2_epi64(__U, __A, 1);
}
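
Note: the hunk above switches the i64x2 broadcast tests from matching the target-specific @llvm.x86.avx512.mask.broadcasti64x2 call to matching the generic shufflevector (plus a select for the masked forms), and changes the test signatures to take a pointer so the 128-bit source comes from an unaligned load. A minimal usage sketch, assuming AVX512DQ+VL and immintrin.h (the function and parameter names are illustrative):

    #include <immintrin.h>
    /* Broadcasts the two i64 lanes of *src into both 128-bit halves of the
       result. Build with e.g. -mavx512dq -mavx512vl. */
    __m256i broadcast_pair(const __m128i *src) {
      return _mm256_broadcast_i64x2(_mm_loadu_si128(src));
    }
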
diff --git a/test/CodeGen/blocks.c b/test/CodeGen/blocks.c
index 2a8182691156b..911c63e41d347 100644
--- a/test/CodeGen/blocks.c
+++ b/test/CodeGen/blocks.c
@@ -78,3 +78,37 @@ int main() {
// CHECK: [[ONE:%.*]] = bitcast void (...)* [[ZERO]] to void ()*
// CHECK-NEXT: br label [[CE:%.*]]
+// Ensure that we don't emit helper code in copy/dispose routines for variables
+// that are const-captured.
+void testConstCaptureInCopyAndDestroyHelpers() {
+ const int x = 0;
+ __block int i;
+ (^ { i = x; })();
+}
+// CHECK-LABEL: testConstCaptureInCopyAndDestroyHelpers_block_invoke
+
+// CHECK: @__copy_helper_block
+// CHECK: alloca
+// CHECK-NEXT: alloca
+// CHECK-NEXT: store
+// CHECK-NEXT: store
+// CHECK-NEXT: load
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: load
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: getelementptr
+// CHECK-NEXT: getelementptr
+// CHECK-NEXT: load
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: call void @_Block_object_assign
+// CHECK-NEXT: ret
+
+// CHECK: @__destroy_helper_block
+// CHECK: alloca
+// CHECK-NEXT: store
+// CHECK-NEXT: load
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: getelementptr
+// CHECK-NEXT: load
+// CHECK-NEXT: call void @_Block_object_dispose
+// CHECK-NEXT: ret
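
Note: the new blocks.c test pins down that only __block captures produce _Block_object_assign/_Block_object_dispose calls in the generated helpers; plain const captures are copied by value and need no helper code. A sketch of the distinction, assuming -fblocks and a Blocks runtime (names are illustrative):

    #include <Block.h>
    typedef int (^Counter)(void);
    Counter make_counter(void) {
      const int base = 10;   /* const-captured: copied into the block by value */
      __block int calls = 0; /* __block: lives in a byref slot, needs helpers  */
      return Block_copy(^{ calls++; return base + calls; });
    }
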
diff --git a/test/CodeGen/builtin-clflushopt.c b/test/CodeGen/builtin-clflushopt.c
index e98c2aaba5732..93861164c4a89 100644
--- a/test/CodeGen/builtin-clflushopt.c
+++ b/test/CodeGen/builtin-clflushopt.c
@@ -3,7 +3,7 @@
#include <immintrin.h>
void test_mm_clflushopt(char * __m) {
- //CHECK-LABLE: @test_mm_clflushopt
+ //CHECK-LABEL: @test_mm_clflushopt
//CHECK: @llvm.x86.clflushopt
_mm_clflushopt(__m);
}
diff --git a/test/CodeGen/builtin-clzero.c b/test/CodeGen/builtin-clzero.c
new file mode 100644
index 0000000000000..c9960ced12ecb
--- /dev/null
+++ b/test/CodeGen/builtin-clzero.c
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin -target-feature +clzero -emit-llvm -o - -Wall -Werror | FileCheck %s
+#define __MM_MALLOC_H
+
+#include <x86intrin.h>
+void test_mm_clzero(void * __m) {
+ //CHECK-LABEL: @test_mm_clzero
+ //CHECK: @llvm.x86.clzero
+ _mm_clzero(__m);
+}
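
Note: _mm_clzero zeroes the cache line containing its pointer argument. A hedged usage sketch, assuming 64-byte cache lines and a 64-byte-aligned buffer (which matches current AMD implementations; buf/bytes are illustrative names):

    #include <x86intrin.h>
    /* Zero an aligned buffer one cache line at a time; build with -mclzero. */
    void zero_cache_lines(void *buf, unsigned long bytes) {
      for (unsigned long i = 0; i < bytes; i += 64)
        _mm_clzero((char *)buf + i);
    }
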
diff --git a/test/CodeGen/builtins-mips-msa-error.c b/test/CodeGen/builtins-mips-msa-error.c
index fcdf6f0c48c84..11750922bb4fb 100644
--- a/test/CodeGen/builtins-mips-msa-error.c
+++ b/test/CodeGen/builtins-mips-msa-error.c
@@ -119,7 +119,7 @@ void test(void) {
v4i32_r = __msa_ld_w(&v4i32_a, 512); // expected-error {{argument should be a value from -512 to 511}}
v2i64_r = __msa_ld_d(&v2i64_a, 512); // expected-error {{argument should be a value from -512 to 511}}
- v16i8_r = __msa_ldi_b(512); // expected-error {{argument should be a value from -512 to 511}}
+ v16i8_r = __msa_ldi_b(256); // expected-error {{argument should be a value from -128 to 255}}
v8i16_r = __msa_ldi_h(512); // expected-error {{argument should be a value from -512 to 511}}
v4i32_r = __msa_ldi_w(512); // expected-error {{argument should be a value from -512 to 511}}
v2i64_r = __msa_ldi_d(512); // expected-error {{argument should be a value from -512 to 511}}
@@ -162,11 +162,6 @@ void test(void) {
v8i16_r = __msa_shf_h(v8i16_a, 256); // CHECK: warning: argument should be a value from 0 to 255}}
v4i32_r = __msa_shf_w(v4i32_a, 256); // CHECK: warning: argument should be a value from 0 to 255}}
- v16i8_r = __msa_sld_b(v16i8_r, v16i8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
- v8i16_r = __msa_sld_h(v8i16_r, v8i16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
- v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
- v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, 2); // expected-error {{argument should be a value from 0 to 1}}
-
v16i8_r = __msa_sldi_b(v16i8_r, v16i8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
v8i16_r = __msa_sldi_h(v8i16_r, v8i16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
v4i32_r = __msa_sldi_w(v4i32_r, v4i32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
@@ -315,7 +310,7 @@ void test(void) {
v4i32_r = __msa_ld_w(&v4i32_a, -513); // expected-error {{argument should be a value from -512 to 511}}
v2i64_r = __msa_ld_d(&v2i64_a, -513); // expected-error {{argument should be a value from -512 to 511}}
- v16i8_r = __msa_ldi_b(-513); // expected-error {{argument should be a value from -512 to 511}}
+ v16i8_r = __msa_ldi_b(-129); // expected-error {{argument should be a value from -128 to 255}}
v8i16_r = __msa_ldi_h(-513); // expected-error {{argument should be a value from -512 to 511}}
v4i32_r = __msa_ldi_w(-513); // expected-error {{argument should be a value from -512 to 511}}
v2i64_r = __msa_ldi_d(-513); // expected-error {{argument should be a value from -512 to 511}}
@@ -358,11 +353,6 @@ void test(void) {
v8i16_r = __msa_shf_h(v8i16_a, -1); // CHECK: warning: argument should be a value from 0 to 255}}
v4i32_r = __msa_shf_w(v4i32_a, -1); // CHECK: warning: argument should be a value from 0 to 255}}
- v16i8_r = __msa_sld_b(v16i8_r, v16i8_a, -17); // expected-error {{argument should be a value from 0 to 15}}
- v8i16_r = __msa_sld_h(v8i16_r, v8i16_a, -8); // expected-error {{argument should be a value from 0 to 7}}
- v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, -4); // expected-error {{argument should be a value from 0 to 3}}
- v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, -2); // expected-error {{argument should be a value from 0 to 1}}
-
v16i8_r = __msa_sldi_b(v16i8_r, v16i8_a, -17); // expected-error {{argument should be a value from 0 to 15}}
v8i16_r = __msa_sldi_h(v8i16_r, v8i16_a, -8); // expected-error {{argument should be a value from 0 to 7}}
v4i32_r = __msa_sldi_w(v4i32_r, v4i32_a, -4); // expected-error {{argument should be a value from 0 to 3}}
diff --git a/test/CodeGen/builtins-mips-msa.c b/test/CodeGen/builtins-mips-msa.c
index 125679545601f..9d09a42090563 100644
--- a/test/CodeGen/builtins-mips-msa.c
+++ b/test/CodeGen/builtins-mips-msa.c
@@ -526,6 +526,8 @@ void test(void) {
v2i64_r = __msa_ld_d(&v2i64_a, 96); // CHECK: call <2 x i64> @llvm.mips.ld.d(
v16i8_r = __msa_ldi_b(3); // CHECK: call <16 x i8> @llvm.mips.ldi.b(
+ v16i8_r = __msa_ldi_b(-128); // CHECK: call <16 x i8> @llvm.mips.ldi.b(
+ v16i8_r = __msa_ldi_b(255); // CHECK: call <16 x i8> @llvm.mips.ldi.b(
v8i16_r = __msa_ldi_h(3); // CHECK: call <8 x i16> @llvm.mips.ldi.h(
v4i32_r = __msa_ldi_w(3); // CHECK: call <4 x i32> @llvm.mips.ldi.w(
v2i64_r = __msa_ldi_d(3); // CHECK: call <2 x i64> @llvm.mips.ldi.d(
@@ -699,6 +701,11 @@ void test(void) {
v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sld.w(
v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, 1); // CHECK: call <2 x i64> @llvm.mips.sld.d(
+ v16i8_r = __msa_sld_b(v16i8_r, v16i8_a, 16); // CHECK: call <16 x i8> @llvm.mips.sld.b(
+ v8i16_r = __msa_sld_h(v8i16_r, v8i16_a, 8); // CHECK: call <8 x i16> @llvm.mips.sld.h(
+ v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, 4); // CHECK: call <4 x i32> @llvm.mips.sld.w(
+ v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, 2); // CHECK: call <2 x i64> @llvm.mips.sld.d(
+
v16i8_r = __msa_sldi_b(v16i8_r, v16i8_a, 7); // CHECK: call <16 x i8> @llvm.mips.sldi.b(
v8i16_r = __msa_sldi_h(v8i16_r, v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sldi.h(
v4i32_r = __msa_sldi_w(v4i32_r, v4i32_a, 2); // CHECK: call <4 x i32> @llvm.mips.sldi.w(
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index 3b75cb49c3fe6..99cf3c2538792 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -1,9 +1,9 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s \
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-unknown -emit-llvm %s \
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -triple powerpc64le-unknown-unknown -emit-llvm %s \
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s -check-prefix=CHECK-LE
// RUN: not %clang_cc1 -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN: -ferror-limit 0 -DNO_ALTIVEC -o - 2>&1 \
@@ -3419,28 +3419,40 @@ void test6() {
/* vec_sl */
res_vsc = vec_sl(vsc, vuc);
-// CHECK: shl <16 x i8>
-// CHECK-LE: shl <16 x i8>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK: shl <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK-LE: shl <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
res_vuc = vec_sl(vuc, vuc);
-// CHECK: shl <16 x i8>
-// CHECK-LE: shl <16 x i8>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK: shl <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK-LE: shl <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
res_vs = vec_sl(vs, vus);
-// CHECK: shl <8 x i16>
-// CHECK-LE: shl <8 x i16>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK: shl <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK-LE: shl <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
res_vus = vec_sl(vus, vus);
-// CHECK: shl <8 x i16>
-// CHECK-LE: shl <8 x i16>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK: shl <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK-LE: shl <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
res_vi = vec_sl(vi, vui);
-// CHECK: shl <4 x i32>
-// CHECK-LE: shl <4 x i32>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK: shl <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK-LE: shl <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
res_vui = vec_sl(vui, vui);
-// CHECK: shl <4 x i32>
-// CHECK-LE: shl <4 x i32>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK: shl <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK-LE: shl <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
res_vsc = vec_vslb(vsc, vuc);
// CHECK: shl <16 x i8>
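
Note: the updated checks reflect that vec_sl now reduces the shift amount modulo the element bit width (the urem feeding the shl), matching the hardware behavior of vslb/vslh/vslw, where only the low-order bits of each shift count are used. A scalar model of the new lowering for 8-bit elements:

    /* Each lane computes v << (s % 8); an unreduced shift of 8 or more would
       be undefined behavior in C, so the urem makes the semantics explicit. */
    unsigned char vec_sl_lane8(unsigned char v, unsigned char s) {
      return (unsigned char)(v << (s % 8));
    }
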
diff --git a/test/CodeGen/builtins-ppc-crypto-disabled.c b/test/CodeGen/builtins-ppc-crypto-disabled.c
index e6a8a9337aafc..e0b9da0647c09 100644
--- a/test/CodeGen/builtins-ppc-crypto-disabled.c
+++ b/test/CodeGen/builtins-ppc-crypto-disabled.c
@@ -1,13 +1,13 @@
// REQUIRES: powerpc-registered-target
-// RUN: not %clang_cc1 -faltivec -triple powerpc64le-unknown-unknown \
+// RUN: not %clang_cc1 -target-feature +altivec -triple powerpc64le-unknown-unknown \
// RUN: -target-cpu pwr8 -target-feature -crypto -emit-llvm %s -o - 2>&1 \
// RUN: | FileCheck %s
-// RUN: not %clang_cc1 -faltivec -triple powerpc64-unknown-unknown \
+// RUN: not %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-unknown \
// RUN: -target-cpu pwr8 -target-feature -crypto -emit-llvm %s -o - 2>&1 \
// RUN: | FileCheck %s
-// RUN: not %clang_cc1 -faltivec -triple powerpc64-unknown-unknown \
+// RUN: not %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-unknown \
// RUN: -target-cpu pwr8 -target-feature -power8-vector \
// RUN: -target-feature -crypto -emit-llvm %s -o - 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-P8V
diff --git a/test/CodeGen/builtins-ppc-crypto.c b/test/CodeGen/builtins-ppc-crypto.c
index eaf568b09fb97..04f06f4d67db3 100644
--- a/test/CodeGen/builtins-ppc-crypto.c
+++ b/test/CodeGen/builtins-ppc-crypto.c
@@ -1,9 +1,9 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -triple powerpc64le-unknown-unknown \
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64le-unknown-unknown \
// RUN: -target-feature +crypto -target-feature +power8-vector \
// RUN: -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-unknown \
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-unknown \
// RUN: -target-feature +crypto -target-feature +power8-vector \
// RUN: -emit-llvm %s -o - | FileCheck %s
#include <altivec.h>
diff --git a/test/CodeGen/builtins-ppc-error.c b/test/CodeGen/builtins-ppc-error.c
index 5860c4f9e77eb..e8d2a37a21eda 100644
--- a/test/CodeGen/builtins-ppc-error.c
+++ b/test/CodeGen/builtins-ppc-error.c
@@ -1,10 +1,10 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -target-feature +power9-vector \
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power9-vector \
// RUN: -triple powerpc64-unknown-unknown -fsyntax-only \
// RUN: -Wall -Werror -verify %s
-// RUN: %clang_cc1 -faltivec -target-feature +power9-vector \
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power9-vector \
// RUN: -triple powerpc64le-unknown-unknown -fsyntax-only \
// RUN: -Wall -Werror -verify %s
diff --git a/test/CodeGen/builtins-ppc-htm.c b/test/CodeGen/builtins-ppc-htm.c
index 87baa77af81fc..d7e7a9148e0f3 100644
--- a/test/CodeGen/builtins-ppc-htm.c
+++ b/test/CodeGen/builtins-ppc-htm.c
@@ -1,5 +1,5 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -target-feature +htm -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +htm -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
void test1(long int *r, int code, long int *a, long int *b) {
// CHECK-LABEL: define void @test1
diff --git a/test/CodeGen/builtins-ppc-p8vector.c b/test/CodeGen/builtins-ppc-p8vector.c
index 97a663c2f28f7..9f2913847e567 100644
--- a/test/CodeGen/builtins-ppc-p8vector.c
+++ b/test/CodeGen/builtins-ppc-p8vector.c
@@ -1,7 +1,7 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -target-feature +power8-vector -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -target-feature +power8-vector -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
-// RUN: not %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
+// RUN: not %clang_cc1 -target-feature +altivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC
// Added -target-feature +vsx above to avoid errors about "vector double" and to
// generate the correct errors for functions that are only overloaded with VSX
// (vec_cmpge, vec_cmple). Without this option, there is only one overload so
diff --git a/test/CodeGen/builtins-ppc-p9vector.c b/test/CodeGen/builtins-ppc-p9vector.c
index 42316970d8da5..f92df86561ada 100644
--- a/test/CodeGen/builtins-ppc-p9vector.c
+++ b/test/CodeGen/builtins-ppc-p9vector.c
@@ -1,9 +1,9 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -target-feature +power9-vector \
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power9-vector \
// RUN: -triple powerpc64-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s -check-prefix=CHECK-BE
-// RUN: %clang_cc1 -faltivec -target-feature +power9-vector \
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power9-vector \
// RUN: -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s
diff --git a/test/CodeGen/builtins-ppc-quadword.c b/test/CodeGen/builtins-ppc-quadword.c
index 3e168c8b1be60..7d014db61323a 100644
--- a/test/CodeGen/builtins-ppc-quadword.c
+++ b/test/CodeGen/builtins-ppc-quadword.c
@@ -1,12 +1,12 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -target-feature +power8-vector \
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector \
// RUN: -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -target-feature +power8-vector \
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector \
// RUN: -triple powerpc64le-unknown-unknown -emit-llvm %s -o - \
// RUN: | FileCheck %s -check-prefix=CHECK-LE
-// RUN: not %clang_cc1 -faltivec -triple powerpc-unknown-unknown \
+// RUN: not %clang_cc1 -target-feature +altivec -triple powerpc-unknown-unknown \
// RUN: -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC
#include <altivec.h>
diff --git a/test/CodeGen/builtins-ppc-vsx.c b/test/CodeGen/builtins-ppc-vsx.c
index 16c72c404d9ce..9e0052630ef7f 100644
--- a/test/CodeGen/builtins-ppc-vsx.c
+++ b/test/CodeGen/builtins-ppc-vsx.c
@@ -1,6 +1,6 @@
// REQUIRES: powerpc-registered-target
-// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
#include <altivec.h>
vector bool char vbc = { 0, 1, 0, 1, 0, 1, 0, 1,
diff --git a/test/CodeGen/builtins-wasm.c b/test/CodeGen/builtins-wasm.c
index 135e32976b7dc..0c0b87945d426 100644
--- a/test/CodeGen/builtins-wasm.c
+++ b/test/CodeGen/builtins-wasm.c
@@ -9,8 +9,8 @@ __SIZE_TYPE__ f1(void) {
// WEBASSEMBLY64: call {{i.*}} @llvm.wasm.current.memory.i64()
}
-void f2(long delta) {
- __builtin_wasm_grow_memory(delta);
-// WEBASSEMBLY32: call void @llvm.wasm.grow.memory.i32(i32 %{{.*}})
-// WEBASSEMBLY64: call void @llvm.wasm.grow.memory.i64(i64 %{{.*}})
+__SIZE_TYPE__ f2(__SIZE_TYPE__ delta) {
+ return __builtin_wasm_grow_memory(delta);
+// WEBASSEMBLY32: call i32 @llvm.wasm.grow.memory.i32(i32 %{{.*}})
+// WEBASSEMBLY64: call i64 @llvm.wasm.grow.memory.i64(i64 %{{.*}})
}
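
Note: __builtin_wasm_grow_memory now takes and returns __SIZE_TYPE__, yielding the previous linear-memory size in 64KiB wasm pages, or -1 on failure, per the WebAssembly semantics. A small usage sketch (the wrapper name is illustrative):

    /* Returns 1 on success; the builtin yields the previous size in wasm
       pages, or (__SIZE_TYPE__)-1 when the engine refuses to grow memory. */
    int try_grow(__SIZE_TYPE__ pages) {
      return __builtin_wasm_grow_memory(pages) != (__SIZE_TYPE__)-1;
    }
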
diff --git a/test/CodeGen/builtins-x86.c b/test/CodeGen/builtins-x86.c
index ec8a8bf868c52..0086f7079dd97 100644
--- a/test/CodeGen/builtins-x86.c
+++ b/test/CodeGen/builtins-x86.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -DUSE_64 -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -emit-llvm -o %t %s
-// RUN: %clang_cc1 -DUSE_ALL -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -fsyntax-only -o %t %s
+// RUN: %clang_cc1 -DUSE_64 -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -target-feature +clzero -emit-llvm -o %t %s
+// RUN: %clang_cc1 -DUSE_ALL -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -target-feature +clzero -fsyntax-only -o %t %s
#ifdef USE_ALL
#define USE_3DNOW
@@ -285,6 +285,7 @@ void f0() {
(void) __builtin_ia32_monitorx(tmp_vp, tmp_Ui, tmp_Ui);
(void) __builtin_ia32_mwaitx(tmp_Ui, tmp_Ui, tmp_Ui);
+ (void) __builtin_ia32_clzero(tmp_vp);
tmp_V4f = __builtin_ia32_cvtpi2ps(tmp_V4f, tmp_V2i);
tmp_V2i = __builtin_ia32_cvtps2pi(tmp_V4f);
diff --git a/test/CodeGen/catch-undef-behavior.c b/test/CodeGen/catch-undef-behavior.c
index d7a26f8a7d4be..c5f3a79429a01 100644
--- a/test/CodeGen/catch-undef-behavior.c
+++ b/test/CodeGen/catch-undef-behavior.c
@@ -1,6 +1,5 @@
// RUN: %clang_cc1 -fsanitize=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -emit-llvm %s -o - -triple x86_64-linux-gnu | opt -instnamer -S | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-UBSAN
// RUN: %clang_cc1 -fsanitize-trap=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -emit-llvm %s -o - -triple x86_64-linux-gnu | opt -instnamer -S | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-TRAP
-// RUN: %clang_cc1 -fsanitize=null -fsanitize-recover=null -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-NULL
// RUN: %clang_cc1 -fsanitize=signed-integer-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-OVERFLOW
// CHECK-UBSAN: @[[INT:.*]] = private unnamed_addr constant { i16, i16, [6 x i8] } { i16 0, i16 11, [6 x i8] c"'int'\00" }
@@ -30,25 +29,20 @@
// CHECK-UBSAN: @[[LINE_1500:.*]] = {{.*}}, i32 1500, i32 10 {{.*}} @[[FP16]], {{.*}} }
// CHECK-UBSAN: @[[LINE_1600:.*]] = {{.*}}, i32 1600, i32 10 {{.*}} @{{.*}} }
-// CHECK-NULL: @[[LINE_100:.*]] = private unnamed_addr global {{.*}}, i32 100, i32 5 {{.*}}
-
// PR6805
// CHECK-COMMON-LABEL: @foo
-// CHECK-NULL-LABEL: @foo
void foo() {
union { int i; } u;
- // CHECK-COMMON: %[[CHECK0:.*]] = icmp ne {{.*}}* %[[PTR:.*]], null
- // CHECK-COMMON: %[[I8PTR:.*]] = bitcast i32* %[[PTR]] to i8*
- // CHECK-COMMON-NEXT: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* %[[I8PTR]], i1 false)
- // CHECK-COMMON-NEXT: %[[CHECK1:.*]] = icmp uge i64 %[[SIZE]], 4
+ // CHECK-COMMON: %[[I8PTR:.*]] = bitcast i32* %[[PTR:.*]] to i8*
+ // CHECK-COMMON-NEXT: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* %[[I8PTR]], i1 false, i1 false)
+ // CHECK-COMMON-NEXT: %[[CHECK0:.*]] = icmp uge i64 %[[SIZE]], 4
// CHECK-COMMON: %[[PTRTOINT:.*]] = ptrtoint {{.*}}* %[[PTR]] to i64
// CHECK-COMMON-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRTOINT]], 3
- // CHECK-COMMON-NEXT: %[[CHECK2:.*]] = icmp eq i64 %[[MISALIGN]], 0
+ // CHECK-COMMON-NEXT: %[[CHECK1:.*]] = icmp eq i64 %[[MISALIGN]], 0
- // CHECK-COMMON: %[[CHECK01:.*]] = and i1 %[[CHECK0]], %[[CHECK1]]
- // CHECK-COMMON-NEXT: %[[OK:.*]] = and i1 %[[CHECK01]], %[[CHECK2]]
+ // CHECK-COMMON: %[[OK:.*]] = and i1 %[[CHECK0]], %[[CHECK1]]
// CHECK-UBSAN: br i1 %[[OK]], {{.*}} !prof ![[WEIGHT_MD:.*]], !nosanitize
// CHECK-TRAP: br i1 %[[OK]], {{.*}}
@@ -58,11 +52,6 @@ void foo() {
// CHECK-TRAP: call void @llvm.trap() [[NR_NUW:#[0-9]+]]
// CHECK-TRAP-NEXT: unreachable
-
- // With -fsanitize=null, only perform the null check.
- // CHECK-NULL: %[[NULL:.*]] = icmp ne {{.*}}, null
- // CHECK-NULL: br i1 %[[NULL]]
- // CHECK-NULL: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_100]] to i8*), i64 %{{.*}})
#line 100
u.i=1;
}
diff --git a/test/CodeGen/cfi-check-fail.c b/test/CodeGen/cfi-check-fail.c
index b850193b54acb..0eb786ab963b4 100644
--- a/test/CodeGen/cfi-check-fail.c
+++ b/test/CodeGen/cfi-check-fail.c
@@ -72,3 +72,8 @@ void caller(void (*f)()) {
// CHECK: [[CONT5]]:
// CHECK: ret void
+
+// CHECK: define weak void @__cfi_check(i64, i8*, i8*)
+// CHECK-NOT: }
+// CHECK: call void @llvm.trap()
+// CHECK-NEXT: ret void
diff --git a/test/CodeGen/cleanup-destslot-simple.c b/test/CodeGen/cleanup-destslot-simple.c
index 848a8dc847a96..e7067e7b04853 100644
--- a/test/CodeGen/cleanup-destslot-simple.c
+++ b/test/CodeGen/cleanup-destslot-simple.c
@@ -13,9 +13,9 @@ int test() {
return *p;
// CHECK: [[X:%.*]] = alloca i32
// CHECK: [[P:%.*]] = alloca i32*
-// LIFETIME: call void @llvm.lifetime.start(i64 4, i8* nonnull %{{.*}}){{( #[0-9]+)?}}, !dbg
-// LIFETIME: call void @llvm.lifetime.start(i64 8, i8* nonnull %{{.*}}){{( #[0-9]+)?}}, !dbg
+// LIFETIME: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %{{.*}}){{( #[0-9]+)?}}, !dbg
+// LIFETIME: call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %{{.*}}){{( #[0-9]+)?}}, !dbg
// CHECK-NOT: store i32 %{{.*}}, i32* %cleanup.dest.slot
-// LIFETIME: call void @llvm.lifetime.end(i64 8, {{.*}}){{( #[0-9]+)?}}, !dbg
-// LIFETIME: call void @llvm.lifetime.end(i64 4, {{.*}}){{( #[0-9]+)?}}, !dbg
+// LIFETIME: call void @llvm.lifetime.end.p0i8(i64 8, {{.*}}){{( #[0-9]+)?}}, !dbg
+// LIFETIME: call void @llvm.lifetime.end.p0i8(i64 4, {{.*}}){{( #[0-9]+)?}}, !dbg
}
diff --git a/test/CodeGen/compound-assign-overflow.c b/test/CodeGen/compound-assign-overflow.c
index f126bb05d53c1..92ae249eb9ff4 100644
--- a/test/CodeGen/compound-assign-overflow.c
+++ b/test/CodeGen/compound-assign-overflow.c
@@ -25,11 +25,9 @@ void compaddunsigned() {
// CHECK: @__ubsan_handle_add_overflow(i8* bitcast ({{.*}} @[[LINE_200]] to i8*), {{.*}})
}
-int8_t a, b;
-
// CHECK: @compdiv
void compdiv() {
#line 300
- a /= b;
+ x /= x;
// CHECK: @__ubsan_handle_divrem_overflow(i8* bitcast ({{.*}} @[[LINE_300]] to i8*), {{.*}})
}
diff --git a/test/CodeGen/debug-info-macro.c b/test/CodeGen/debug-info-macro.c
new file mode 100644
index 0000000000000..889c7ffdbac34
--- /dev/null
+++ b/test/CodeGen/debug-info-macro.c
@@ -0,0 +1,57 @@
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=line-tables-only -debug-info-macro %s -o - "-DC1(x)=( x + 5 )" -DA -include %S/Inputs/debug-info-macro.h -UC1 | FileCheck -check-prefixes=CHECK,NO_PCH %s
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -debug-info-macro %s -o - "-DC1(x)=( x + 5 )" -DA -include %S/Inputs/debug-info-macro.h -UC1 | FileCheck -check-prefixes=CHECK,NO_PCH %s
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=standalone -debug-info-macro %s -o - "-DC1(x)=( x + 5 )" -DA -include %S/Inputs/debug-info-macro.h -UC1 | FileCheck -check-prefixes=CHECK,NO_PCH %s
+// RUN: %clang_cc1 -emit-llvm -debug-info-macro %s -o - "-DC1(x)=( x + 5 )" -DA -include %S/Inputs/debug-info-macro.h -UC1 | FileCheck -check-prefixes=NO_MACRO %s
+
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -debug-info-macro %S/Inputs/debug-info-macro.h -emit-pch -o %t.pch -DC3
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -debug-info-macro %s -o - -include-pch %t.pch "-DC1(x)=( x + 5 )" -DA -include %S/Inputs/debug-info-macro.h -UC1 | FileCheck -check-prefixes=CHECK,PCH %s
+
+// This test checks that macro debug info is correctly generated.
+
+// TODO: Check for the following entry once we support macros defined in PCH files.
+// -PCH: !DIMacro(type: DW_MACINFO_define, name: "C3", value: "1")
REPLACED-SPAN
+
+#line 15
+/*Line 15*/ #define D1 1
+/*Line 16*/ #include "Inputs/debug-info-macro.h"
+/*Line 17*/ #undef D1
+/*Line 18*/ #define D2 2
+/*Line 19*/ #include "Inputs/debug-info-macro.h"
+/*Line 20*/ #undef D2
+
+// NO_MACRO-NOT: macros
+// NO_MACRO-NOT: DIMacro
+// NO_MACRO-NOT: DIMacroFile
+
+// CHECK: !DICompileUnit({{.*}} macros: [[Macros:![0-9]+]])
+// CHECK: [[EmptyMD:![0-9]+]] = !{}
+
+// NO_PCH: [[Macros]] = !{[[MainMacroFile:![0-9]+]], [[BuiltinMacro:![0-9]+]], {{.*}}, [[DefineC1:![0-9]+]], [[DefineA:![0-9]+]], [[UndefC1:![0-9]+]]}
+// PCH: [[Macros]] = !{[[MainMacroFile:![0-9]+]], [[DefineC1:![0-9]+]], [[DefineA:![0-9]+]], [[UndefC1:![0-9]+]]}
+
+// CHECK: [[MainMacroFile]] = !DIMacroFile(file: [[MainFile:![0-9]+]], nodes: [[N1:![0-9]+]])
+// CHECK: [[MainFile]] = !DIFile(filename: "{{.*}}debug-info-macro.c"
+// CHECK: [[N1]] = !{[[CommandLineInclude:![0-9]+]], [[DefineD1:![0-9]+]], [[FileInclude1:![0-9]+]], [[UndefD1:![0-9]+]], [[DefineD2:![0-9]+]], [[FileInclude2:![0-9]+]], [[UndefD2:![0-9]+]]}
+
+// CHECK: [[CommandLineInclude]] = !DIMacroFile(file: [[HeaderFile:![0-9]+]], nodes: [[N2:![0-9]+]])
+// CHECK: [[HeaderFile]] = !DIFile(filename: "{{.*}}debug-info-macro.h"
+// CHECK: [[N2]] = !{[[UndefA:![0-9]+]]}
+// CHECK: [[UndefA]] = !DIMacro(type: DW_MACINFO_undef, line: 11, name: "A")
+
+// CHECK: [[DefineD1]] = !DIMacro(type: DW_MACINFO_define, line: 15, name: "D1", value: "1")
+// CHECK: [[FileInclude1]] = !DIMacroFile(line: 16, file: [[HeaderFile]], nodes: [[N3:![0-9]+]])
+// CHECK: [[N3]] = !{[[DefineAx:![0-9]+]], [[UndefA]]}
+// CHECK: [[DefineAx]] = !DIMacro(type: DW_MACINFO_define, line: 3, name: "A(x,y,z)", value: "(x)")
+// CHECK: [[UndefD1]] = !DIMacro(type: DW_MACINFO_undef, line: 17, name: "D1")
+
+// CHECK: [[DefineD2]] = !DIMacro(type: DW_MACINFO_define, line: 18, name: "D2", value: "2")
+// CHECK: [[FileInclude2]] = !DIMacroFile(line: 19, file: [[HeaderFile]], nodes: [[N4:![0-9]+]])
+// CHECK: [[N4]] = !{[[DefineAy:![0-9]+]], [[UndefA]]}
+// CHECK: [[DefineAy]] = !DIMacro(type: DW_MACINFO_define, line: 7, name: "A(x,y,z)", value: "(y)")
+// CHECK: [[UndefD2]] = !DIMacro(type: DW_MACINFO_undef, line: 20, name: "D2")
+
+// NO_PCH: [[BuiltinMacro]] = !DIMacro(type: DW_MACINFO_define, name: "__llvm__", value: "1")
+
+// CHECK: [[DefineC1]] = !DIMacro(type: DW_MACINFO_define, name: "C1(x)", value: "( x + 5 )")
+// CHECK: [[DefineA]] = !DIMacro(type: DW_MACINFO_define, name: "A", value: "1")
+// CHECK: [[UndefC1]] = !DIMacro(type: DW_MACINFO_undef, name: "C1")
diff --git a/test/CodeGen/default-address-space.c b/test/CodeGen/default-address-space.c
new file mode 100644
index 0000000000000..07ddf48fac2fa
--- /dev/null
+++ b/test/CodeGen/default-address-space.c
@@ -0,0 +1,58 @@
+// RUN: %clang_cc1 -triple amdgcn -emit-llvm < %s | FileCheck -check-prefixes=PIZ,COM %s
+// RUN: %clang_cc1 -triple amdgcn---amdgiz -emit-llvm < %s | FileCheck -check-prefixes=CHECK,COM %s
+
+// PIZ-DAG: @foo = common addrspace(4) global i32 0
+// CHECK-DAG: @foo = common global i32 0
+int foo;
+
+// PIZ-DAG: @ban = common addrspace(4) global [10 x i32] zeroinitializer
+// CHECK-DAG: @ban = common global [10 x i32] zeroinitializer
+int ban[10];
+
+// PIZ-DAG: @A = common addrspace(4) global i32 addrspace(4)* null
+// PIZ-DAG: @B = common addrspace(4) global i32 addrspace(4)* null
+// CHECK-DAG: @A = common global i32* null
+// CHECK-DAG: @B = common global i32* null
+int *A;
+int *B;
+
+// COM-LABEL: define i32 @test1()
+// PIZ: load i32, i32 addrspace(4)* @foo
+// CHECK: load i32, i32* @foo
+int test1() { return foo; }
+
+// COM-LABEL: define i32 @test2(i32 %i)
+// PIZ: load i32, i32 addrspace(4)*
+// PIZ-NEXT: ret i32
+// CHECK: load i32, i32*
+// CHECK-NEXT: ret i32
+int test2(int i) { return ban[i]; }
+
+// COM-LABEL: define void @test3()
+// PIZ: load i32 addrspace(4)*, i32 addrspace(4)* addrspace(4)* @B
+// PIZ: load i32, i32 addrspace(4)*
+// PIZ: load i32 addrspace(4)*, i32 addrspace(4)* addrspace(4)* @A
+// PIZ: store i32 {{.*}}, i32 addrspace(4)*
+// CHECK: load i32*, i32** @B
+// CHECK: load i32, i32*
+// CHECK: load i32*, i32** @A
+// CHECK: store i32 {{.*}}, i32*
+void test3() {
+ *A = *B;
+}
+
+// PIZ-LABEL: define void @test4(i32 addrspace(4)* %a)
+// PIZ: %[[a_addr:.*]] = alloca i32 addrspace(4)*
+// PIZ: store i32 addrspace(4)* %a, i32 addrspace(4)** %[[a_addr]]
+// PIZ: %[[r0:.*]] = load i32 addrspace(4)*, i32 addrspace(4)** %[[a_addr]]
+// PIZ: %[[arrayidx:.*]] = getelementptr inbounds i32, i32 addrspace(4)* %[[r0]]
+// PIZ: store i32 0, i32 addrspace(4)* %[[arrayidx]]
+// CHECK-LABEL: define void @test4(i32* %a)
+// CHECK: %[[a_addr:.*]] = alloca i32*, align 4, addrspace(5)
+// CHECK: store i32* %a, i32* addrspace(5)* %[[a_addr]]
+// CHECK: %[[r0:.*]] = load i32*, i32* addrspace(5)* %[[a_addr]]
+// CHECK: %[[arrayidx:.*]] = getelementptr inbounds i32, i32* %[[r0]]
+// CHECK: store i32 0, i32* %[[arrayidx]]
+void test4(int *a) {
+ a[0] = 0;
+}
diff --git a/test/CodeGen/fentry.c b/test/CodeGen/fentry.c
new file mode 100644
index 0000000000000..b9133184e4d46
--- /dev/null
+++ b/test/CodeGen/fentry.c
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -pg -mfentry -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -pg -mfentry -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -mfentry -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck -check-prefix=NOPG %s
+// RUN: %clang_cc1 -mfentry -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck -check-prefix=NOPG %s
+
+int foo(void) {
+ return 0;
+}
+
+//CHECK: attributes #{{[0-9]+}} = { {{.*}}"fentry-call"="true"{{.*}} }
+//NOPG-NOT: attributes #{{[0-9]+}} = { {{.*}}"fentry-call"{{.*}} }
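
Note: with -pg -mfentry, functions carry the "fentry-call"="true" attribute and the backend calls __fentry__ at the very top of the function, before the prologue (unlike mcount, which runs after it). A stand-in for the hook a profiler would supply (illustrative only; real hooks are usually written in assembly so they can preserve the argument registers, and must themselves be built without -pg -mfentry to avoid recursion):

    void __fentry__(void) { /* record the caller here */ }
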
diff --git a/test/CodeGen/ffp-contract-fast-option.cpp b/test/CodeGen/ffp-contract-fast-option.cpp
new file mode 100644
index 0000000000000..3db93de107cde
--- /dev/null
+++ b/test/CodeGen/ffp-contract-fast-option.cpp
@@ -0,0 +1,29 @@
+// RUN: %clang_cc1 -O3 -ffp-contract=fast -triple %itanium_abi_triple -emit-llvm -o - %s | FileCheck %s
+
+float fp_contract_1(float a, float b, float c) {
+ // CHECK-LABEL: fp_contract_1fff(
+ // CHECK: fmul contract float
+ // CHECK: fadd contract float
+ return a * b + c;
+}
+
+float fp_contract_2(float a, float b, float c) {
+ // CHECK-LABEL: fp_contract_2fff(
+ // CHECK: fmul contract float
+ // CHECK: fsub contract float
+ return a * b - c;
+}
+
+void fp_contract_3(float *a, float b, float c) {
+ // CHECK-LABEL: fp_contract_3Pfff(
+ // CHECK: fmul contract float
+ // CHECK: fadd contract float
+ a[0] += b * c;
+}
+
+void fp_contract_4(float *a, float b, float c) {
+ // CHECK-LABEL: fp_contract_4Pfff(
+ // CHECK: fmul contract float
+ // CHECK: fsub contract float
+ a[0] -= b * c;
+}
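
Note: with -ffp-contract=fast, the fmul/fadd pair is emitted with the `contract` fast-math flag rather than being fused in the frontend, leaving the backend free to form an fma. The practical effect, sketched (names illustrative):

    /* May compile to a single fma: the intermediate a*b is then not rounded
       separately, so the result can differ in the last bit from the unfused
       form. */
    float muladd(float a, float b, float c) {
      return a * b + c;
    }
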
diff --git a/test/CodeGen/ffp-contract-option.c b/test/CodeGen/ffp-contract-option.c
index 61913b0aa333a..52b7507959402 100644
--- a/test/CodeGen/ffp-contract-option.c
+++ b/test/CodeGen/ffp-contract-option.c
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -O3 -ffp-contract=fast -triple=powerpc-apple-darwin10 -S -o - %s | FileCheck %s
-// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -O3 -ffp-contract=fast -triple=aarch64-apple-darwin -S -o - %s | FileCheck %s
+// REQUIRES: aarch64-registered-target
float fma_test1(float a, float b, float c) {
-// CHECK: fmadds
+// CHECK: fmadd
float x = a * b;
float y = x + c;
return y;
diff --git a/test/CodeGen/fp-contract-fast-pragma.cpp b/test/CodeGen/fp-contract-fast-pragma.cpp
new file mode 100644
index 0000000000000..c2e52f070e93b
--- /dev/null
+++ b/test/CodeGen/fp-contract-fast-pragma.cpp
@@ -0,0 +1,69 @@
+// RUN: %clang_cc1 -O3 -triple %itanium_abi_triple -emit-llvm -o - %s | FileCheck %s
+
+// Is FP_CONTRACT honored in a simple case?
+float fp_contract_1(float a, float b, float c) {
+// CHECK: _Z13fp_contract_1fff
+// CHECK: %[[M:.+]] = fmul contract float %a, %b
+// CHECK-NEXT: fadd contract float %[[M]], %c
+#pragma clang fp contract(fast)
+ return a * b + c;
+}
+
+// Is FP_CONTRACT state cleared on exiting compound statements?
+float fp_contract_2(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_2fff
+ // CHECK: %[[M:.+]] = fmul float %a, %b
+ // CHECK-NEXT: fadd float %[[M]], %c
+ {
+#pragma clang fp contract(fast)
+ }
+ return a * b + c;
+}
+
+// Does FP_CONTRACT survive template instantiation?
+class Foo {};
+Foo operator+(Foo, Foo);
+
+template <typename T>
+T template_muladd(T a, T b, T c) {
+#pragma clang fp contract(fast)
+ return a * b + c;
+}
+
+float fp_contract_3(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_3fff
+ // CHECK: %[[M:.+]] = fmul contract float %a, %b
+ // CHECK-NEXT: fadd contract float %[[M]], %c
+ return template_muladd<float>(a, b, c);
+}
+
+template <typename T>
+class fp_contract_4 {
+ float method(float a, float b, float c) {
+#pragma clang fp contract(fast)
+ return a * b + c;
+ }
+};
+
+template class fp_contract_4<int>;
+// CHECK: _ZN13fp_contract_4IiE6methodEfff
+// CHECK: %[[M:.+]] = fmul contract float %a, %b
+// CHECK-NEXT: fadd contract float %[[M]], %c
+
+// Check file-scoped FP_CONTRACT
+#pragma clang fp contract(fast)
+float fp_contract_5(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_5fff
+ // CHECK: %[[M:.+]] = fmul contract float %a, %b
+ // CHECK-NEXT: fadd contract float %[[M]], %c
+ return a * b + c;
+}
+
+// Verify that we can handle multiple flags on the same pragma
+#pragma clang fp contract(fast) contract(off)
+float fp_contract_6(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_6fff
+ // CHECK: %[[M:.+]] = fmul float %a, %b
+ // CHECK-NEXT: fadd float %[[M]], %c
+ return a * b + c;
+}
diff --git a/test/CodeGen/fp-contract-on-pragma.cpp b/test/CodeGen/fp-contract-on-pragma.cpp
new file mode 100644
index 0000000000000..812a7176b515c
--- /dev/null
+++ b/test/CodeGen/fp-contract-on-pragma.cpp
@@ -0,0 +1,76 @@
+// RUN: %clang_cc1 -O3 -triple %itanium_abi_triple -emit-llvm -o - %s | FileCheck %s
+
+// Is FP_CONTRACT honored in a simple case?
+float fp_contract_1(float a, float b, float c) {
+// CHECK: _Z13fp_contract_1fff
+// CHECK: tail call float @llvm.fmuladd
+#pragma clang fp contract(on)
+ return a * b + c;
+}
+
+// Is FP_CONTRACT state cleared on exiting compound statements?
+float fp_contract_2(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_2fff
+ // CHECK: %[[M:.+]] = fmul float %a, %b
+ // CHECK-NEXT: fadd float %[[M]], %c
+ {
+#pragma clang fp contract(on)
+ }
+ return a * b + c;
+}
+
+// Does FP_CONTRACT survive template instantiation?
+class Foo {};
+Foo operator+(Foo, Foo);
+
+template <typename T>
+T template_muladd(T a, T b, T c) {
+#pragma clang fp contract(on)
+ return a * b + c;
+}
+
+float fp_contract_3(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_3fff
+ // CHECK: tail call float @llvm.fmuladd
+ return template_muladd<float>(a, b, c);
+}
+
+template <typename T>
+class fp_contract_4 {
+ float method(float a, float b, float c) {
+#pragma clang fp contract(on)
+ return a * b + c;
+ }
+};
+
+template class fp_contract_4<int>;
+// CHECK: _ZN13fp_contract_4IiE6methodEfff
+// CHECK: tail call float @llvm.fmuladd
+
+// Check file-scoped FP_CONTRACT
+#pragma clang fp contract(on)
+float fp_contract_5(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_5fff
+ // CHECK: tail call float @llvm.fmuladd
+ return a * b + c;
+}
+
+#pragma clang fp contract(off)
+float fp_contract_6(float a, float b, float c) {
+ // CHECK: _Z13fp_contract_6fff
+ // CHECK: %[[M:.+]] = fmul float %a, %b
+ // CHECK-NEXT: fadd float %[[M]], %c
+ return a * b + c;
+}
+
+// If the multiply has multiple uses, don't produce fmuladd.
+// This used to assert (PR25719):
+// https://llvm.org/bugs/show_bug.cgi?id=25719
+
+float fp_contract_7(float a, float b, float c) {
+// CHECK: _Z13fp_contract_7fff
+// CHECK: %[[M:.+]] = fmul float %b, 2.000000e+00
+// CHECK-NEXT: fsub float %[[M]], %c
+#pragma clang fp contract(on)
+ return (a = 2 * b) - c;
+}
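
Note the difference between the two pragma tests: contract(on) models C's FP_CONTRACT and is emitted as a call to llvm.fmuladd (so only the literal a*b+c pattern may fuse), while contract(fast) uses the `contract` instruction flag as in the -ffp-contract=fast tests above. The pragma is also lexically scoped, as this sketch illustrates (grounded in the fp_contract_2 tests):

    float scoped(float a, float b, float c) {
      {
    #pragma clang fp contract(on) /* active only inside this compound statement */
        c = a * b + c;            /* may become llvm.fmuladd */
      }
      return a * b + c;           /* outside the scope: separate fmul/fadd */
    }
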
diff --git a/test/CodeGen/function-sections.c b/test/CodeGen/function-sections.c
index 7994acf4dcac1..c34216dec6c7c 100644
--- a/test/CodeGen/function-sections.c
+++ b/test/CodeGen/function-sections.c
@@ -9,6 +9,12 @@
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -S -fdata-sections -o - < %s | FileCheck %s --check-prefix=DATA_SECT
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -S -fno-data-sections -fdata-sections -o - < %s | FileCheck %s --check-prefix=DATA_SECT
+// Try again through a clang invocation of the ThinLTO backend.
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -O2 %s -flto=thin -emit-llvm-bc -o %t.o
+// RUN: llvm-lto -thinlto -o %t %t.o
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -O2 -x ir %t.o -fthinlto-index=%t.thinlto.bc -S -ffunction-sections -o - | FileCheck %s --check-prefix=FUNC_SECT
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -O2 -x ir %t.o -fthinlto-index=%t.thinlto.bc -S -fdata-sections -o - | FileCheck %s --check-prefix=DATA_SECT
+
const int hello = 123;
void world() {}
@@ -22,7 +28,7 @@ void world() {}
// FUNC_SECT: section .rodata,
// FUNC_SECT: hello:
-// DATA_SECT-NOT: section
+// DATA_SECT-NOT: .section
// DATA_SECT: world:
// DATA_SECT: .section .rodata.hello,
// DATA_SECT: hello:
diff --git a/test/CodeGen/libcall-declarations.c b/test/CodeGen/libcall-declarations.c
index 345b74fe97db2..5a0b2ba0e6365 100644
--- a/test/CodeGen/libcall-declarations.c
+++ b/test/CodeGen/libcall-declarations.c
@@ -402,9 +402,9 @@ void *use[] = {
// CHECK-NOERRNO: declare i32 @ilogb(double) [[NUW]]
// CHECK-NOERRNO: declare i32 @ilogbf(float) [[NUW]]
// CHECK-NOERRNO: declare i32 @ilogbl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @lgamma(double) [[NUW]]
-// CHECK-NOERRNO: declare float @lgammaf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NUW]]
+// CHECK-NOERRNO: declare double @lgamma(double) [[NONCONST:#[0-9]+]]
+// CHECK-NOERRNO: declare float @lgammaf(float) [[NONCONST]]
+// CHECK-NOERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NONCONST]]
// CHECK-NOERRNO: declare i64 @llrint(double) [[NUW]]
// CHECK-NOERRNO: declare i64 @llrintf(float) [[NUW]]
// CHECK-NOERRNO: declare i64 @llrintl(x86_fp80) [[NUW]]
@@ -554,6 +554,9 @@ void *use[] = {
// CHECK-ERRNO: declare double @fmin(double, double) [[NUW]]
// CHECK-ERRNO: declare float @fminf(float, float) [[NUW]]
// CHECK-ERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUW]]
+// CHECK-ERRNO: declare double @lgamma(double) [[NONCONST:#[0-9]+]]
+// CHECK-ERRNO: declare float @lgammaf(float) [[NONCONST]]
+// CHECK-ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NONCONST]]
// CHECK-ERRNO: declare double @nearbyint(double) [[NUW]]
// CHECK-ERRNO: declare float @nearbyintf(float) [[NUW]]
// CHECK-ERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUW]]
@@ -612,5 +615,11 @@ void *use[] = {
// CHECK-ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NUW]]
// CHECK-NOERRNO: attributes [[NUW]] = { nounwind readnone{{.*}} }
+// CHECK-NOERRNO: attributes [[NONCONST]] = {
+// CHECK-NOERRNO-NOT: readnone
+// CHECK-NOERRNO-SAME: nounwind{{.*}} }
+// CHECK-ERRNO: attributes [[NONCONST]] = {
+// CHECK-ERRNO-NOT: readnone
+// CHECK-ERRNO-SAME: nounwind{{.*}} }
// CHECK-ERRNO: attributes [[NUW]] = { nounwind readnone{{.*}} }
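
Note: the lgamma family loses `readnone` because these functions have a side effect even when errno is ignored: they store the sign of Gamma(x) into the global signgam. A hedged illustration, assuming a POSIX math.h that declares signgam (e.g. under _XOPEN_SOURCE):

    #define _XOPEN_SOURCE 700
    #include <math.h>
    /* Returns log|Gamma(x)| and reports the sign via *sign; the write to the
       global signgam is why lgamma cannot be treated as a pure function. */
    double log_abs_gamma(double x, int *sign) {
      double r = lgamma(x);
      *sign = signgam;
      return r;
    }
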
diff --git a/test/CodeGen/lifetime-asan.c b/test/CodeGen/lifetime-asan.c
index 5f0c66d513de6..c5f25a2eaf757 100644
--- a/test/CodeGen/lifetime-asan.c
+++ b/test/CodeGen/lifetime-asan.c
@@ -8,14 +8,14 @@ extern int bar(char *A, int n);
// CHECK-O0-NOT: @llvm.lifetime.start
int foo(int n) {
if (n) {
- // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.start(i64 10, i8* {{.*}})
+ // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.start.p0i8(i64 10, i8* {{.*}})
char A[10];
return bar(A, 1);
- // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.end(i64 10, i8* {{.*}})
+ // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.end.p0i8(i64 10, i8* {{.*}})
} else {
- // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.start(i64 20, i8* {{.*}})
+ // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.start.p0i8(i64 20, i8* {{.*}})
char A[20];
return bar(A, 2);
- // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.end(i64 20, i8* {{.*}})
+ // CHECK-ASAN-USE-AFTER-SCOPE: @llvm.lifetime.end.p0i8(i64 20, i8* {{.*}})
}
}
diff --git a/test/CodeGen/lifetime2.c b/test/CodeGen/lifetime2.c
index 4374b3c279c70..fcdcff36017e8 100644
--- a/test/CodeGen/lifetime2.c
+++ b/test/CodeGen/lifetime2.c
@@ -1,7 +1,7 @@
-// RUN: %clang -S -emit-llvm -o - -O2 %s | FileCheck %s -check-prefixes=CHECK,O2
-// RUN: %clang -S -emit-llvm -o - -O2 -Xclang -disable-lifetime-markers %s \
+// RUN: %clang_cc1 -S -emit-llvm -o - -O2 -disable-llvm-passes %s | FileCheck %s -check-prefixes=CHECK,O2
+// RUN: %clang_cc1 -S -emit-llvm -o - -O2 -disable-lifetime-markers %s \
// RUN: | FileCheck %s -check-prefixes=CHECK,O0
-// RUN: %clang -S -emit-llvm -o - -O0 %s | FileCheck %s -check-prefixes=CHECK,O0
+// RUN: %clang_cc1 -S -emit-llvm -o - -O0 %s | FileCheck %s -check-prefixes=CHECK,O0
extern int bar(char *A, int n);
@@ -21,24 +21,22 @@ int foo (int n) {
// CHECK-LABEL: @no_goto_bypass
void no_goto_bypass() {
- // O2: @llvm.lifetime.start(i64 1
+ // O2: @llvm.lifetime.start.p0i8(i64 1
char x;
l1:
bar(&x, 1);
- // O2: @llvm.lifetime.start(i64 5
- // O2: @llvm.lifetime.end(i64 5
char y[5];
bar(y, 5);
goto l1;
// Infinite loop
- // O2-NOT: @llvm.lifetime.end(i64 1
+ // O2-NOT: @llvm.lifetime.end.p0i8(
}
// CHECK-LABEL: @goto_bypass
void goto_bypass() {
{
- // O2-NOT: @llvm.lifetime.start(i64 1
- // O2-NOT: @llvm.lifetime.end(i64 1
+ // O2-NOT: @llvm.lifetime.start.p0i8(i64 1
+ // O2-NOT: @llvm.lifetime.end.p0i8(i64 1
char x;
l1:
bar(&x, 1);
@@ -50,16 +48,16 @@ void goto_bypass() {
void no_switch_bypass(int n) {
switch (n) {
case 1: {
- // O2: @llvm.lifetime.start(i64 1
- // O2: @llvm.lifetime.end(i64 1
+ // O2: @llvm.lifetime.start.p0i8(i64 1
+ // O2: @llvm.lifetime.end.p0i8(i64 1
char x;
bar(&x, 1);
break;
}
case 2:
n = n;
- // O2: @llvm.lifetime.start(i64 5
- // O2: @llvm.lifetime.end(i64 5
+ // O2: @llvm.lifetime.start.p0i8(i64 5
+ // O2: @llvm.lifetime.end.p0i8(i64 5
char y[5];
bar(y, 5);
break;
@@ -71,8 +69,8 @@ void switch_bypass(int n) {
switch (n) {
case 1:
n = n;
- // O2-NOT: @llvm.lifetime.start(i64 1
- // O2-NOT: @llvm.lifetime.end(i64 1
+ // O2-NOT: @llvm.lifetime.start.p0i8(i64 1
+ // O2-NOT: @llvm.lifetime.end.p0i8(i64 1
char x;
bar(&x, 1);
break;
@@ -91,3 +89,27 @@ void indirect_jump(int n) {
L:
bar(&x, 1);
}
+
+// O2-LABEL: @jump_backward_over_declaration(
+// O2: %[[p:.*]] = alloca i32*
+// O2: %[[v0:.*]] = bitcast i32** %[[p]] to i8*
+// O2: call void @llvm.lifetime.start.p0i8(i64 {{.*}}, i8* %[[v0]])
+// O2-NOT: call void @llvm.lifetime.start.p0i8(
+
+extern void foo2(int p);
+
+int jump_backward_over_declaration(int a) {
+ int *p = 0;
+label1:
+ if (p) {
+ foo2(*p);
+ return 0;
+ }
+
+ int i = 999;
+ if (a != 2) {
+ p = &i;
+ goto label1;
+ }
+ return -1;
+}
diff --git a/test/CodeGen/mmx-builtins.c b/test/CodeGen/mmx-builtins.c
index ddc6f66548aea..cd725e22b83c8 100644
--- a/test/CodeGen/mmx-builtins.c
+++ b/test/CodeGen/mmx-builtins.c
@@ -383,6 +383,93 @@ __m64 test_mm_sad_pu8(__m64 a, __m64 b) {
return _mm_sad_pu8(a, b);
}
+__m64 test_mm_set_pi8(char a, char b, char c, char d, char e, char f, char g, char h) {
+ // CHECK-LABEL: test_mm_set_pi8
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ return _mm_set_pi8(a, b, c, d, e, f, g, h);
+}
+
+__m64 test_mm_set_pi16(short a, short b, short c, short d) {
+ // CHECK-LABEL: test_mm_set_pi16
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ return _mm_set_pi16(a, b, c, d);
+}
+
+__m64 test_mm_set_pi32(int a, int b) {
+ // CHECK-LABEL: test_mm_set_pi32
+ // CHECK: insertelement <2 x i32>
+ // CHECK: insertelement <2 x i32>
+ return _mm_set_pi32(a, b);
+}
+
+__m64 test_mm_setr_pi8(char a, char b, char c, char d, char e, char f, char g, char h) {
+ // CHECK-LABEL: test_mm_setr_pi8
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ return _mm_setr_pi8(a, b, c, d, e, f, g, h);
+}
+
+__m64 test_mm_setr_pi16(short a, short b, short c, short d) {
+ // CHECK-LABEL: test_mm_setr_pi16
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ return _mm_setr_pi16(a, b, c, d);
+}
+
+__m64 test_mm_setr_pi32(int a, int b) {
+ // CHECK-LABEL: test_mm_setr_pi32
+ // CHECK: insertelement <2 x i32>
+ // CHECK: insertelement <2 x i32>
+ return _mm_setr_pi32(a, b);
+}
+
+__m64 test_mm_set1_pi8(char a) {
+ // CHECK-LABEL: test_mm_set1_pi8
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ // CHECK: insertelement <8 x i8>
+ return _mm_set1_pi8(a);
+}
+
+__m64 test_mm_set1_pi16(short a) {
+ // CHECK-LABEL: test_mm_set1_pi16
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ // CHECK: insertelement <4 x i16>
+ return _mm_set1_pi16(a);
+}
+
+__m64 test_mm_set1_pi32(int a) {
+ // CHECK-LABEL: test_mm_set1_pi32
+ // CHECK: insertelement <2 x i32>
+ // CHECK: insertelement <2 x i32>
+ return _mm_set1_pi32(a);
+}
+
__m64 test_mm_shuffle_pi8(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_shuffle_pi8
// CHECK: call x86_mmx @llvm.x86.ssse3.pshuf.b
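
Note: the new _mm_set*/_mm_setr* tests check the insertelement-based lowering. Worth remembering that _mm_set_* takes its arguments from the high lane down to the low lane, while _mm_setr_* is the reverse; a quick sanity check (assumes the MMX header; names illustrative):

    #include <mmintrin.h>
    int low_lane_demo(void) {
      __m64 v = _mm_set_pi32(1, 2); /* lane 1 = 1, lane 0 = 2 */
      int lo = _mm_cvtsi64_si32(v); /* extracts lane 0 */
      _mm_empty();                  /* leave MMX state before using x87 */
      return lo;                    /* yields 2 */
    }
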
diff --git a/test/CodeGen/ms-declspecs.c b/test/CodeGen/ms-declspecs.c
index 4842050e5be54..05810bb4b71c1 100644
--- a/test/CodeGen/ms-declspecs.c
+++ b/test/CodeGen/ms-declspecs.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple i386-pc-win32 %s -emit-llvm -fms-compatibility -O2 -disable-llvm-optzns -o - | FileCheck %s
+// RUN: %clang_cc1 -triple i386-pc-win32 %s -emit-llvm -fms-compatibility -O2 -disable-llvm-passes -o - | FileCheck %s
__declspec(selectany) int x1 = 1;
const __declspec(selectany) int x2 = 2;
diff --git a/test/CodeGen/ms-inline-asm-EVEN.c b/test/CodeGen/ms-inline-asm-EVEN.c
new file mode 100644
index 0000000000000..a188af30910ad
--- /dev/null
+++ b/test/CodeGen/ms-inline-asm-EVEN.c
@@ -0,0 +1,16 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 %s -triple i386-apple-darwin10 -fasm-blocks -emit-llvm -o - | FileCheck %s
+
+// CHECK: .byte 64
+// CHECK: .byte 64
+// CHECK: .byte 64
+// CHECK: .even
+void t1() {
+ __asm {
+ .byte 64
+ .byte 64
+ .byte 64
+ EVEN
+ mov eax, ebx
+ }
+}
diff --git a/test/CodeGen/ms-inline-asm.c b/test/CodeGen/ms-inline-asm.c
index 6efc09aec51da..c4fe08a3e13b6 100644
--- a/test/CodeGen/ms-inline-asm.c
+++ b/test/CodeGen/ms-inline-asm.c
@@ -55,9 +55,15 @@ void t7() {
}
}
__asm {}
+ __asm {
+ ;
+ ; label
+ mov eax, ebx
+ }
// CHECK: t7
// CHECK: call void asm sideeffect inteldialect "int $$0x2cU", "~{dirflag},~{fpsr},~{flags}"()
// CHECK: call void asm sideeffect inteldialect "", "~{dirflag},~{fpsr},~{flags}"()
+// CHECK: call void asm sideeffect inteldialect "mov eax, ebx", "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
int t8() {
@@ -195,6 +201,8 @@ void t20() {
// CHECK: mov eax, $$4
__asm mov eax, LENGTH _bar
// CHECK: mov eax, $$2
+ __asm mov eax, [eax + LENGTH foo * 4]
+// CHECK: mov eax, [eax + $$1 * $$4]
__asm mov eax, TYPE foo
// CHECK: mov eax, $$4
@@ -204,6 +212,8 @@ void t20() {
// CHECK: mov eax, $$4
__asm mov eax, TYPE _bar
// CHECK: mov eax, $$1
+ __asm mov eax, [eax + TYPE foo * 4]
+// CHECK: mov eax, [eax + $$4 * $$4]
__asm mov eax, SIZE foo
// CHECK: mov eax, $$4
@@ -211,9 +221,12 @@ void t20() {
// CHECK: mov eax, $$1
__asm mov eax, SIZE _foo
// CHECK: mov eax, $$16
+ __asm mov eax, [eax + SIZE _foo * 4]
+// CHECK: mov eax, [eax + $$16 * $$4]
__asm mov eax, SIZE _bar
// CHECK: mov eax, $$2
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
+
}
void t21() {
@@ -643,6 +656,14 @@ void label6(){
// CHECK: call void asm sideeffect inteldialect "jmp {{.*}}__MSASMLABEL_.${:uid}__label\0A\09{{.*}}__MSASMLABEL_.${:uid}__label:", "~{dirflag},~{fpsr},~{flags}"()
}
+// Don't include mxcsr in the clobber list.
+void mxcsr() {
+ char buf[4096];
+ __asm fxrstor buf
+}
+// CHECK-LABEL: define void @mxcsr
+// CHECK: call void asm sideeffect inteldialect "fxrstor byte ptr $0", "=*m,~{dirflag},~{fpsr},~{flags}"
+
typedef union _LARGE_INTEGER {
struct {
unsigned int LowPart;
diff --git a/test/CodeGen/ms-intrinsics.c b/test/CodeGen/ms-intrinsics.c
index 25eae44e02720..818be7fd7ffd8 100644
--- a/test/CodeGen/ms-intrinsics.c
+++ b/test/CodeGen/ms-intrinsics.c
@@ -3,7 +3,7 @@
// RUN: | FileCheck %s -check-prefixes CHECK,CHECK-I386,CHECK-INTEL
// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN: -triple thumbv7--windows -Oz -emit-llvm %s -o - \
-// RUN: | FileCheck %s --check-prefixes CHECK,CHECK-ARM-X64
+// RUN: | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-X64
// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN: -triple x86_64--windows -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL
@@ -28,6 +28,20 @@ void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) {
// CHECK-X64: tail call void @llvm.memset.p0i8.i64(i8* %Dest, i8 %Data, i64 %Count, i32 1, i1 true)
// CHECK-X64: ret void
// CHECK-X64: }
+
+void test__ud2(void) {
+ __ud2();
+}
+// CHECK-INTEL-LABEL: define{{.*}} void @test__ud2()
+// CHECK-INTEL: call void @llvm.trap()
+
+void test__int2c(void) {
+ __int2c();
+}
+// CHECK-INTEL-LABEL: define{{.*}} void @test__int2c()
+// CHECK-INTEL: call void asm sideeffect "int $$0x2c", ""() #[[NORETURN:[0-9]+]]
+
+
#endif
void *test_ReturnAddress() {
@@ -419,3 +433,26 @@ __int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
// CHECK-ARM-X64: }
#endif
+
+unsigned char test_interlockedbittestandset(volatile long *ptr, long bit) {
+ return _interlockedbittestandset(ptr, bit);
+}
+// CHECK-LABEL: define{{.*}} i8 @test_interlockedbittestandset
+// CHECK: [[MASKBIT:%[0-9]+]] = shl i32 1, %bit
+// CHECK: [[OLD:%[0-9]+]] = atomicrmw or i32* %ptr, i32 [[MASKBIT]] seq_cst
+// CHECK: [[SHIFT:%[0-9]+]] = lshr i32 [[OLD]], %bit
+// CHECK: [[TRUNC:%[0-9]+]] = trunc i32 [[SHIFT]] to i8
+// CHECK: [[AND:%[0-9]+]] = and i8 [[TRUNC]], 1
+// CHECK: ret i8 [[AND]]
+
+void test__fastfail() {
+ __fastfail(42);
+}
+// CHECK-LABEL: define{{.*}} void @test__fastfail()
+// CHECK-ARM: call void asm sideeffect "udf #251", "{r0}"(i32 42) #[[NORETURN:[0-9]+]]
+// CHECK-INTEL: call void asm sideeffect "int $$0x29", "{cx}"(i32 42) #[[NORETURN]]
+
+// Attributes come last.
+
+// CHECK: attributes #[[NORETURN]] = { noreturn{{.*}} }
+
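
Note: the expected IR shows _interlockedbittestandset lowered to an `atomicrmw or` followed by extraction of the original bit, rather than a call or inline asm. A scalar model using the GNU atomic builtins (illustrative only, not the header's actual implementation):

    unsigned char bts_model(volatile long *p, long bit) {
      long mask = 1L << bit;
      /* Atomically set the bit, then report its previous value. */
      long old = __atomic_fetch_or(p, mask, __ATOMIC_SEQ_CST);
      return (unsigned char)((old >> bit) & 1);
    }
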
diff --git a/test/CodeGen/ms-x86-intrinsics.c b/test/CodeGen/ms-x86-intrinsics.c
index e635220e8c13f..51520d1f658b4 100644
--- a/test/CodeGen/ms-x86-intrinsics.c
+++ b/test/CodeGen/ms-x86-intrinsics.c
@@ -6,15 +6,37 @@
// RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-X64
#if defined(__i386__)
+char test__readfsbyte(unsigned long Offset) {
+ return __readfsbyte(Offset);
+}
+// CHECK-I386-LABEL: define signext i8 @test__readfsbyte(i32 %Offset)
+// CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %Offset to i8 addrspace(257)*
+// CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i8, i8 addrspace(257)* [[PTR]], align 1
+// CHECK-I386: ret i8 [[VALUE:%[0-9]+]]
+
+short test__readfsword(unsigned long Offset) {
+ return __readfsword(Offset);
+}
+// CHECK-I386-LABEL: define signext i16 @test__readfsword(i32 %Offset)
+// CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %Offset to i16 addrspace(257)*
+// CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i16, i16 addrspace(257)* [[PTR]], align 2
+// CHECK-I386: ret i16 [[VALUE]]
+
long test__readfsdword(unsigned long Offset) {
return __readfsdword(Offset);
}
-
-// CHECK-I386-LABEL: define i32 @test__readfsdword(i32 %Offset){{.*}}{
+// CHECK-I386-LABEL: define i32 @test__readfsdword(i32 %Offset)
// CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %Offset to i32 addrspace(257)*
// CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i32, i32 addrspace(257)* [[PTR]], align 4
// CHECK-I386: ret i32 [[VALUE:%[0-9]+]]
-// CHECK-I386: }
+
+long long test__readfsqword(unsigned long Offset) {
+ return __readfsqword(Offset);
+}
+// CHECK-I386-LABEL: define i64 @test__readfsqword(i32 %Offset)
+// CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %Offset to i64 addrspace(257)*
+// CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i64, i64 addrspace(257)* [[PTR]], align 8
+// CHECK-I386: ret i64 [[VALUE]]
#endif
__int64 test__emul(int a, int b) {
@@ -36,6 +58,43 @@ unsigned __int64 test__emulu(unsigned int a, unsigned int b) {
// CHECK: ret i64 [[RES]]
#if defined(__x86_64__)
+
+char test__readgsbyte(unsigned long Offset) {
+ return __readgsbyte(Offset);
+}
+// CHECK-X64-LABEL: define i8 @test__readgsbyte(i32 %Offset)
+// CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %Offset to i64
+// CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i8 addrspace(256)*
+// CHECK-X64: [[VALUE:%[0-9]+]] = load volatile i8, i8 addrspace(256)* [[PTR]], align 1
+// CHECK-X64: ret i8 [[VALUE]]
+
+short test__readgsword(unsigned long Offset) {
+ return __readgsword(Offset);
+}
+// CHECK-X64-LABEL: define i16 @test__readgsword(i32 %Offset)
+// CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %Offset to i64
+// CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i16 addrspace(256)*
+// CHECK-X64: [[VALUE:%[0-9]+]] = load volatile i16, i16 addrspace(256)* [[PTR]], align 2
+// CHECK-X64: ret i16 [[VALUE]]
+
+long test__readgsdword(unsigned long Offset) {
+ return __readgsdword(Offset);
+}
+// CHECK-X64-LABEL: define i32 @test__readgsdword(i32 %Offset)
+// CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %Offset to i64
+// CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i32 addrspace(256)*
+// CHECK-X64: [[VALUE:%[0-9]+]] = load volatile i32, i32 addrspace(256)* [[PTR]], align 4
+// CHECK-X64: ret i32 [[VALUE]]
+
+long long test__readgsqword(unsigned long Offset) {
+ return __readgsqword(Offset);
+}
+// CHECK-X64-LABEL: define i64 @test__readgsqword(i32 %Offset)
+// CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %Offset to i64
+// CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i64 addrspace(256)*
+// CHECK-X64: [[VALUE:%[0-9]+]] = load volatile i64, i64 addrspace(256)* [[PTR]], align 8
+// CHECK-X64: ret i64 [[VALUE]]
+
__int64 test__mulh(__int64 a, __int64 b) {
return __mulh(a, b);
}
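
The FS/GS readers above rely on Clang modelling the x86 segment bases as LLVM
address spaces (257 for FS on i386, 256 for GS on x86-64), as the CHECK lines
show. A hedged sketch of spelling such a load directly in C with Clang's
address_space extension (the typedef name is an assumption):

    typedef unsigned long long u64;

    static inline u64 readgs_u64_sketch(unsigned long Offset) {
      const volatile __attribute__((address_space(256))) u64 *p =
          (const volatile __attribute__((address_space(256))) u64 *)(u64)Offset;
      return *p; /* emits: load volatile i64, i64 addrspace(256)* */
    }
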
diff --git a/test/CodeGen/object-size.c b/test/CodeGen/object-size.c
index a824f554b5f43..a1095798c16b3 100644
--- a/test/CodeGen/object-size.c
+++ b/test/CodeGen/object-size.c
@@ -40,7 +40,7 @@ void test4() {
// CHECK-LABEL: define void @test5
void test5() {
// CHECK: = load i8*, i8** @gp
- // CHECK-NEXT:= call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK-NEXT:= call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
strcpy(gp, "Hi there");
}
@@ -254,31 +254,31 @@ struct Test23Ty { int a; int t[10]; };
// CHECK-LABEL: @test23
void test23(struct Test23Ty *p) {
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(p, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(p, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(p, 2);
// Note: this is currently fixed at 0 because LLVM doesn't have sufficient
// data to correctly handle type=3
// CHECK: store i32 0
gi = __builtin_object_size(p, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&p->a, 0);
// CHECK: store i32 4
gi = __builtin_object_size(&p->a, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(&p->a, 2);
// CHECK: store i32 4
gi = __builtin_object_size(&p->a, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&p->t[5], 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&p->t[5], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(&p->t[5], 2);
// CHECK: store i32 20
gi = __builtin_object_size(&p->t[5], 3);
@@ -287,11 +287,11 @@ void test23(struct Test23Ty *p) {
// PR24493 -- ICE if __builtin_object_size called with NULL and (Type & 1) != 0
// CHECK-LABEL: @test24
void test24() {
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size((void*)0, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size((void*)0, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true, i1 true)
gi = __builtin_object_size((void*)0, 2);
// Note: Currently fixed at zero because LLVM can't handle type=3 correctly.
// Hopefully will be lowered properly in the future.
@@ -301,22 +301,22 @@ void test24() {
// CHECK-LABEL: @test25
void test25() {
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size((void*)0x1000, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size((void*)0x1000, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true, i1 true)
gi = __builtin_object_size((void*)0x1000, 2);
// Note: Currently fixed at zero because LLVM can't handle type=3 correctly.
// Hopefully will be lowered properly in the future.
// CHECK: store i32 0
gi = __builtin_object_size((void*)0x1000, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size((void*)0 + 0x1000, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size((void*)0 + 0x1000, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true, i1 true)
gi = __builtin_object_size((void*)0 + 0x1000, 2);
// Note: Currently fixed at zero because LLVM can't handle type=3 correctly.
// Hopefully will be lowered properly in the future.
@@ -342,22 +342,22 @@ struct Test27IncompleteTy;
// CHECK-LABEL: @test27
void test27(struct Test27IncompleteTy *t) {
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(t, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(t, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(t, 2);
// Note: this is currently fixed at 0 because LLVM doesn't have sufficient
// data to correctly handle type=3
// CHECK: store i32 0
gi = __builtin_object_size(t, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&test27, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&test27, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 true, i1 true)
gi = __builtin_object_size(&test27, 2);
// Note: this is currently fixed at 0 because LLVM doesn't have sufficient
// data to correctly handle type=3
@@ -415,38 +415,38 @@ struct StaticStruct {
// CHECK-LABEL: @test29
void test29(struct DynStructVar *dv, struct DynStruct0 *d0,
struct DynStruct1 *d1, struct StaticStruct *ss) {
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(dv->snd, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(dv->snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(dv->snd, 2);
// CHECK: store i32 0
gi = __builtin_object_size(dv->snd, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(d0->snd, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(d0->snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(d0->snd, 2);
// CHECK: store i32 0
gi = __builtin_object_size(d0->snd, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(d1->snd, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(d1->snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(d1->snd, 2);
// CHECK: store i32 1
gi = __builtin_object_size(d1->snd, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(ss->snd, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(ss->snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(ss->snd, 2);
// CHECK: store i32 2
gi = __builtin_object_size(ss->snd, 3);
@@ -456,39 +456,39 @@ void test29(struct DynStructVar *dv, struct DynStruct0 *d0,
void test30() {
struct { struct DynStruct1 fst, snd; } *nested;
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(nested->fst.snd, 0);
// CHECK: store i32 1
gi = __builtin_object_size(nested->fst.snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(nested->fst.snd, 2);
// CHECK: store i32 1
gi = __builtin_object_size(nested->fst.snd, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(nested->snd.snd, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(nested->snd.snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(nested->snd.snd, 2);
// CHECK: store i32 1
gi = __builtin_object_size(nested->snd.snd, 3);
union { struct DynStruct1 d1; char c[1]; } *u;
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(u->c, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(u->c, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(u->c, 2);
// CHECK: store i32 1
gi = __builtin_object_size(u->c, 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(u->d1.snd, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(u->d1.snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(u->d1.snd, 2);
// CHECK: store i32 1
gi = __builtin_object_size(u->d1.snd, 3);
@@ -502,19 +502,19 @@ void test31() {
struct DynStruct1 *ds1;
struct StaticStruct *ss;
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(ds1[9].snd, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&ss[9].snd[0], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&ds1[9].snd[0], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&ds0[9].snd[0], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&dsv[9].snd[0], 1);
}
@@ -527,11 +527,11 @@ void PR30346() {
};
struct sockaddr *sa;
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(sa->sa_data, 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(sa->sa_data, 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(sa->sa_data, 2);
// CHECK: store i32 14
gi = __builtin_object_size(sa->sa_data, 3);
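
All of the updates in this file follow from @llvm.objectsize growing a third
i1 operand: the second operand still selects min (true) vs. max (false), and
the new third operand apparently states whether a null pointer should be
treated as having unknown size (true here). A minimal C reproducer, assuming
only that the builtin lowers to the intrinsic when the size is not a
compile-time constant:

    unsigned long min_size_sketch(void *p) {
      /* lowers to: call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true) */
      return __builtin_object_size(p, 2); /* type 2: minimum; 0 if unknown */
    }
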
diff --git a/test/CodeGen/object-size.cpp b/test/CodeGen/object-size.cpp
index 81b44a55a4e30..725c49214dd5c 100644
--- a/test/CodeGen/object-size.cpp
+++ b/test/CodeGen/object-size.cpp
@@ -35,29 +35,29 @@ void test2() {
struct B : A {};
struct C { int i; B bs[1]; } *c;
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&c->bs[0], 0);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&c->bs[0], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(&c->bs[0], 2);
// CHECK: store i32 16
gi = __builtin_object_size(&c->bs[0], 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size((A*)&c->bs[0], 0);
// CHECK: store i32 16
gi = __builtin_object_size((A*)&c->bs[0], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size((A*)&c->bs[0], 2);
// CHECK: store i32 16
gi = __builtin_object_size((A*)&c->bs[0], 3);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false, i1 true)
gi = __builtin_object_size(&c->bs[0].buf[0], 0);
// CHECK: store i32 16
gi = __builtin_object_size(&c->bs[0].buf[0], 1);
- // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true)
+ // CHECK: call i64 @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 true, i1 true)
gi = __builtin_object_size(&c->bs[0].buf[0], 2);
// CHECK: store i32 16
gi = __builtin_object_size(&c->bs[0].buf[0], 3);
diff --git a/test/CodeGen/opt-record-MIR.c b/test/CodeGen/opt-record-MIR.c
new file mode 100644
index 0000000000000..00b91ffdf3cba
--- /dev/null
+++ b/test/CodeGen/opt-record-MIR.c
@@ -0,0 +1,33 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple arm64-apple-ios -S -o /dev/null %s -O2 -dwarf-column-info -Rpass-missed=regalloc 2>&1 | FileCheck -check-prefix=REMARK %s
+// RUN: %clang_cc1 -triple arm64-apple-ios -S -o /dev/null %s -O2 -dwarf-column-info 2>&1 | FileCheck -allow-empty -check-prefix=NO_REMARK %s
+// RUN: %clang_cc1 -triple arm64-apple-ios -S -o /dev/null %s -O2 -dwarf-column-info -opt-record-file %t.yaml
+// RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
+
+void bar(float);
+
+void foo(float *p, int i) {
+ while (i--) {
+ float f = *p;
+ asm("" ::
+ : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp", "memory");
+ bar(f);
+ }
+}
+
+// REMARK: opt-record-MIR.c:10:11: remark: {{.}} spills {{.}} reloads generated in loop
+// NO_REMARK-NOT: remark:
+
+// YAML: --- !Missed
+// YAML: Pass: regalloc
+// YAML: Name: LoopSpillReload
+// YAML: DebugLoc: { File: {{.*}},
+// YAML: Line: 10, Column: 11 }
+// YAML: Function: foo
+// YAML: Args:
+// YAML: - NumSpills: '{{.}}'
+// YAML: - String: ' spills '
+// YAML: - NumReloads: '{{.}}'
+// YAML: - String: ' reloads '
+// YAML: - String: generated
+// YAML: ...
diff --git a/test/CodeGen/pass-object-size.c b/test/CodeGen/pass-object-size.c
index 6f5827befa714..f5c12317ec323 100644
--- a/test/CodeGen/pass-object-size.c
+++ b/test/CodeGen/pass-object-size.c
@@ -343,16 +343,26 @@ void test12(void *const p __attribute__((pass_object_size(3)))) {
// CHECK-LABEL: define void @test13
void test13() {
- // Ensuring that we don't lower objectsize if the expression has side-effects
char c[10];
+ unsigned i = 0;
char *p = c;
// CHECK: @llvm.objectsize
ObjectSize0(p);
- // CHECK-NOT: @llvm.objectsize
- ObjectSize0(++p);
- ObjectSize0(p++);
+ // Allow side-effects, since they always need to happen anyway. Just make sure
+ // we don't perform them twice.
+ // CHECK: = add
+ // CHECK-NOT: = add
+ // CHECK: @llvm.objectsize
+ // CHECK: call i32 @ObjectSize0
+ ObjectSize0(p + ++i);
+
+ // CHECK: = add
+ // CHECK: @llvm.objectsize
+ // CHECK-NOT: = add
+ // CHECK: call i32 @ObjectSize0
+ ObjectSize0(p + i++);
}
// There was a bug where variadic functions with pass_object_size would cause
@@ -369,3 +379,42 @@ void test14(char *c) {
// CHECK: call void (i8*, i64, ...) @my_sprintf
my_sprintf(c, 1, 2, 3);
}
+
+void pass_size_unsigned(unsigned *const PS(0));
+
+// Bug: we weren't lowering to the proper @llvm.objectsize for pointers that
+// don't turn into i8*s, which caused crashes.
+// CHECK-LABEL: define void @test15
+void test15(unsigned *I) {
+ // CHECK: @llvm.objectsize.i64.p0i32
+ // CHECK: call void @pass_size_unsigned
+ pass_size_unsigned(I);
+}
+
+void pass_size_as1(__attribute__((address_space(1))) void *const PS(0));
+
+void pass_size_unsigned_as1(
+ __attribute__((address_space(1))) unsigned *const PS(0));
+
+// CHECK-LABEL: define void @test16
+void test16(__attribute__((address_space(1))) unsigned *I) {
+ // CHECK: call i64 @llvm.objectsize.i64.p1i8
+ // CHECK: call void @pass_size_as1
+ pass_size_as1(I);
+ // CHECK: call i64 @llvm.objectsize.i64.p1i32
+ // CHECK: call void @pass_size_unsigned_as1
+ pass_size_unsigned_as1(I);
+}
+
+// This used to cause assertion failures, since we'd try to emit the statement
+// expression (and definitions for `a`) twice.
+// CHECK-LABEL: define void @test17
+void test17(char *C) {
+ // Check for 65535 to see if we're emitting this pointer twice.
+ // CHECK: 65535
+ // CHECK-NOT: 65535
+ // CHECK: @llvm.objectsize.i64.p0i8(i8* [[PTR:%[^,]+]],
+ // CHECK-NOT: 65535
+ // CHECK: call i32 @ObjectSize0(i8* [[PTR]]
+ ObjectSize0(C + ({ int a = 65535; a; }));
+}
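
PS(N) is not defined in this hunk; earlier in this test it presumably
abbreviates the pass_object_size attribute, roughly:

    #define PS(N) __attribute__((pass_object_size(N)))

    /* the callee receives the caller-computed object size as a hidden argument */
    int ObjectSize0(void *const p PS(0));
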
diff --git a/test/CodeGen/pgo-sample-thinlto-summary.c b/test/CodeGen/pgo-sample-thinlto-summary.c
new file mode 100644
index 0000000000000..a284af3c80877
--- /dev/null
+++ b/test/CodeGen/pgo-sample-thinlto-summary.c
@@ -0,0 +1,42 @@
+// RUN: %clang_cc1 -O2 -fprofile-sample-use=%S/Inputs/pgo-sample-thinlto-summary.prof %s -emit-llvm -o - 2>&1 | FileCheck %s -check-prefix=O2
+// RUN: %clang_cc1 -O2 -fprofile-sample-use=%S/Inputs/pgo-sample-thinlto-summary.prof %s -emit-llvm -flto=thin -o - 2>&1 | FileCheck %s -check-prefix=THINLTO
+// Checks that the hot call is inlined by the normal compile, but not by the
+// ThinLTO compile.
+
+int baz(int);
+int g;
+
+void foo(int n) {
+ for (int i = 0; i < n; i++)
+ g += baz(i);
+}
+
+// O2-LABEL: define void @bar
+// THINLTO-LABEL: define void @bar
+// O2-NOT: call{{.*}}foo
+// THINLTO: call{{.*}}foo
+void bar(int n) {
+ for (int i = 0; i < n; i++)
+ foo(i);
+}
+
+// Checks that loop unrolling happens in the normal compile, but not in the
+// ThinLTO compile.
+// O2-LABEL: define void @unroll
+// THINLTO-LABEL: define void @unroll
+// O2: call{{.*}}baz
+// O2: call{{.*}}baz
+// THINLTO: call{{.*}}baz
+// THINLTO-NOT: call{{.*}}baz
+void unroll() {
+ for (int i = 0; i < 2; i++)
+ baz(i);
+}
+
+// Checks that indirect call promotion (icp) happens in the normal compile,
+// but not in the ThinLTO compile.
+// O2-LABEL: define void @icp
+// THINLTO-LABEL: define void @icp
+// O2: if.true.direct_targ
+// THINLTO-NOT: if.true.direct_targ
+void icp(void (*p)()) {
+ p();
+}
diff --git a/test/CodeGen/ppc64-align-struct.c b/test/CodeGen/ppc64-align-struct.c
index 6a04d0cd84f37..5894a6aeb3796 100644
--- a/test/CodeGen/ppc64-align-struct.c
+++ b/test/CodeGen/ppc64-align-struct.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
#include <stdarg.h>
diff --git a/test/CodeGen/ppc64-complex-parms.c b/test/CodeGen/ppc64-complex-parms.c
index 32163fa4a9957..c0e1794bf47c6 100644
--- a/test/CodeGen/ppc64-complex-parms.c
+++ b/test/CodeGen/ppc64-complex-parms.c
@@ -93,7 +93,7 @@ void bar_long_double(void) {
// CHECK: %[[VAR22:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 0
// CHECK: %[[VAR23:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 1
// CHECK: store ppc_fp128 0xM40000000000000000000000000000000, ppc_fp128* %[[VAR22]]
-// CHECK: store ppc_fp128 0xMC0040000000000000000000000000000, ppc_fp128* %[[VAR23]]
+// CHECK: store ppc_fp128 0xMC0040000000000008000000000000000, ppc_fp128* %[[VAR23]]
// CHECK: %[[VAR24:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 0
// CHECK: %[[VAR25:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR24]], align 16
// CHECK: %[[VAR26:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 1
diff --git a/test/CodeGen/ppc64-vector.c b/test/CodeGen/ppc64-vector.c
index f0211f0ec197b..87deb0f585ffa 100644
--- a/test/CodeGen/ppc64-vector.c
+++ b/test/CodeGen/ppc64-vector.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
typedef short v2i16 __attribute__((vector_size (4)));
typedef short v3i16 __attribute__((vector_size (6)));
diff --git a/test/CodeGen/ppc64le-aggregates.c b/test/CodeGen/ppc64le-aggregates.c
index 04d2fb4766ea2..f78f26a592851 100644
--- a/test/CodeGen/ppc64le-aggregates.c
+++ b/test/CodeGen/ppc64le-aggregates.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -faltivec -triple powerpc64le-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64le-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
// Test homogeneous float aggregate passing and returning.
diff --git a/test/CodeGen/pr3997.c b/test/CodeGen/pr3997.c
new file mode 100644
index 0000000000000..814144cd14d13
--- /dev/null
+++ b/test/CodeGen/pr3997.c
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 %s -triple i386-unknown-linux-gnu -mregparm 3 -emit-llvm -o - | FileCheck %s
+
+void *memcpy(void *dest, const void *src, unsigned int n);
+
+void use_builtin_memcpy(void *dest, const void *src, unsigned int n) {
+ __builtin_memcpy(dest, src, n);
+}
+
+void use_memcpy(void *dest, const void *src, unsigned int n) {
+ memcpy(dest, src, n);
+}
+
+//CHECK: !{i32 1, !"NumRegisterParameters", i32 3}
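
The NumRegisterParameters module flag records -mregparm 3 so that LLVM can
apply the same convention to calls it synthesizes itself, such as the memcpy
lowering of __builtin_memcpy (the subject of PR3997). As a sketch, the
per-function equivalent of what the flag applies globally would be:

    /* illustrative per-function spelling on i386: */
    void *memcpy_regparm(void *dest, const void *src, unsigned int n)
        __attribute__((regparm(3)));
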
diff --git a/test/CodeGen/sanitize-init-order.cpp b/test/CodeGen/sanitize-init-order.cpp
index 894f75e967391..f6a4140847914 100644
--- a/test/CodeGen/sanitize-init-order.cpp
+++ b/test/CodeGen/sanitize-init-order.cpp
@@ -36,13 +36,13 @@ const volatile PODWithCtor array[5][5];
// Check that ASan init-order checking ignores structs with trivial default
// constructor.
-// CHECK: !llvm.asan.globals = !{![[GLOB_1:[0-9]+]], ![[GLOB_2:[0-9]+]], ![[GLOB_3:[0-9]]], ![[GLOB_4:[0-9]]]}
+// CHECK: !llvm.asan.globals = !{![[GLOB_1:[0-9]+]], ![[GLOB_2:[0-9]+]], ![[GLOB_3:[0-9]+]], ![[GLOB_4:[0-9]+]]}
// CHECK: ![[GLOB_1]] = !{%struct.PODStruct* {{.*}}, i1 false, i1 false}
// CHECK: ![[GLOB_2]] = !{%struct.PODWithDtor* {{.*}}, i1 false, i1 false}
// CHECK: ![[GLOB_3]] = !{%struct.PODWithCtorAndDtor* {{.*}}, i1 true, i1 false}
// CHECK: ![[GLOB_4]] = !{{{.*}}class.NS::PODWithCtor{{.*}}, i1 true, i1 false}
-// BLACKLIST: !llvm.asan.globals = !{![[GLOB_1:[0-9]+]], ![[GLOB_2:[0-9]+]], ![[GLOB_3:[0-9]]], ![[GLOB_4:[0-9]]]}
+// BLACKLIST: !llvm.asan.globals = !{![[GLOB_1:[0-9]+]], ![[GLOB_2:[0-9]+]], ![[GLOB_3:[0-9]+]], ![[GLOB_4:[0-9]+]]}
// BLACKLIST: ![[GLOB_1]] = !{%struct.PODStruct* {{.*}}, i1 false, i1 false}
// BLACKLIST: ![[GLOB_2]] = !{%struct.PODWithDtor* {{.*}}, i1 false, i1 false}
// BLACKLIST: ![[GLOB_3]] = !{%struct.PODWithCtorAndDtor* {{.*}}, i1 false, i1 false}
diff --git a/test/CodeGen/sanitize-recover.c b/test/CodeGen/sanitize-recover.c
index dd8734e971eba..99eff8518ce84 100644
--- a/test/CodeGen/sanitize-recover.c
+++ b/test/CodeGen/sanitize-recover.c
@@ -19,20 +19,17 @@ void test() {
void foo() {
union { int i; } u;
u.i=1;
- // PARTIAL: %[[CHECK0:.*]] = icmp ne {{.*}}* %[[PTR:.*]], null
-
- // PARTIAL: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false)
- // PARTIAL-NEXT: %[[CHECK1:.*]] = icmp uge i64 %[[SIZE]], 4
+ // PARTIAL: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 false)
+ // PARTIAL-NEXT: %[[CHECK0:.*]] = icmp uge i64 %[[SIZE]], 4
// PARTIAL: %[[MISALIGN:.*]] = and i64 {{.*}}, 3
- // PARTIAL-NEXT: %[[CHECK2:.*]] = icmp eq i64 %[[MISALIGN]], 0
+ // PARTIAL-NEXT: %[[CHECK1:.*]] = icmp eq i64 %[[MISALIGN]], 0
- // PARTIAL: %[[CHECK02:.*]] = and i1 %[[CHECK0]], %[[CHECK2]]
- // PARTIAL-NEXT: %[[CHECK012:.*]] = and i1 %[[CHECK02]], %[[CHECK1]]
+ // PARTIAL: %[[CHECK01:.*]] = and i1 %[[CHECK1]], %[[CHECK0]]
- // PARTIAL: br i1 %[[CHECK012]], {{.*}} !prof ![[WEIGHT_MD:.*]], !nosanitize
+ // PARTIAL: br i1 %[[CHECK01]], {{.*}} !nosanitize
+ // PARTIAL: br i1 %[[CHECK1]], {{.*}} !nosanitize
- // PARTIAL: br i1 %[[CHECK02]], {{.*}}
// PARTIAL: call void @__ubsan_handle_type_mismatch_v1_abort(
// PARTIAL-NEXT: unreachable
// PARTIAL: call void @__ubsan_handle_type_mismatch_v1(
diff --git a/test/CodeGen/sanitize-thread-no-checking-at-run-time.m b/test/CodeGen/sanitize-thread-no-checking-at-run-time.m
index 098b7cf72ffd4..3d862c0bf7ed3 100644
--- a/test/CodeGen/sanitize-thread-no-checking-at-run-time.m
+++ b/test/CodeGen/sanitize-thread-no-checking-at-run-time.m
@@ -1,5 +1,7 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin -x objective-c++ -emit-llvm -o - %s | FileCheck -check-prefix=WITHOUT %s
-// RUN: %clang_cc1 -triple x86_64-apple-darwin -x objective-c++ -emit-llvm -o - %s -fsanitize=thread | FileCheck -check-prefix=TSAN %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -x objective-c++ -fblocks -emit-llvm -o - %s | FileCheck -check-prefix=WITHOUT %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -x objective-c++ -fblocks -emit-llvm -o - %s -fsanitize=thread | FileCheck -check-prefix=TSAN %s
+
+// WITHOUT-NOT: "sanitize_thread_no_checking_at_run_time"
__attribute__((objc_root_class))
@interface NSObject
@@ -26,9 +28,14 @@ public:
}
@end
-// WITHOUT-NOT: "sanitize_thread_no_checking_at_run_time"
-
// TSAN: initialize{{.*}}) [[ATTR:#[0-9]+]]
// TSAN: dealloc{{.*}}) [[ATTR:#[0-9]+]]
// TSAN: cxx_destruct{{.*}}) [[ATTR:#[0-9]+]]
+
+void test2(id x) {
+ extern void test2_helper(id (^)(void));
+ test2_helper(^{ return x; });
+// TSAN: define internal void @__destroy_helper_block_(i8*) [[ATTR:#[0-9]+]]
+}
+
// TSAN: attributes [[ATTR]] = { noinline nounwind {{.*}} "sanitize_thread_no_checking_at_run_time" {{.*}} }
diff --git a/test/CodeGen/sse-builtins.c b/test/CodeGen/sse-builtins.c
index 27b016f66517b..28b4f2cae1f06 100644
--- a/test/CodeGen/sse-builtins.c
+++ b/test/CodeGen/sse-builtins.c
@@ -802,7 +802,7 @@ int test_mm_ucomineq_ss(__m128 A, __m128 B) {
__m128 test_mm_undefined_ps() {
// CHECK-LABEL: @test_mm_undefined_ps
- // CHECK: ret <4 x float> undef
+ // CHECK: ret <4 x float> zeroinitializer
return _mm_undefined_ps();
}
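
This change (and the matching SSE2 ones below) stops the _mm_undefined_*
intrinsics from returning IR undef; returning zeroinitializer instead
presumably avoids letting undef propagate out of code that only wants a
scratch register. Usage is unchanged:

    #include <xmmintrin.h>

    __m128 make_scratch(void) {
      return _mm_undefined_ps(); /* contents unspecified; now lowered as zeros */
    }
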
diff --git a/test/CodeGen/sse2-builtins.c b/test/CodeGen/sse2-builtins.c
index 48c703685479f..a140a6ce55b51 100644
--- a/test/CodeGen/sse2-builtins.c
+++ b/test/CodeGen/sse2-builtins.c
@@ -1455,13 +1455,13 @@ int test_mm_ucomineq_sd(__m128d A, __m128d B) {
__m128d test_mm_undefined_pd() {
// CHECK-LABEL: @test_mm_undefined_pd
- // CHECK: ret <2 x double> undef
+ // CHECK: ret <2 x double> zeroinitializer
return _mm_undefined_pd();
}
__m128i test_mm_undefined_si128() {
// CHECK-LABEL: @test_mm_undefined_si128
- // CHECK: ret <2 x i64> undef
+ // CHECK: ret <2 x i64> zeroinitializer
return _mm_undefined_si128();
}
diff --git a/test/CodeGen/sse41-builtins.c b/test/CodeGen/sse41-builtins.c
index adf9609b68f9f..b48b73ec18d9e 100644
--- a/test/CodeGen/sse41-builtins.c
+++ b/test/CodeGen/sse41-builtins.c
@@ -354,7 +354,7 @@ __m128 test_mm_round_ss(__m128 x, __m128 y) {
__m128i test_mm_stream_load_si128(__m128i const *a) {
// CHECK-LABEL: test_mm_stream_load_si128
- // CHECK: call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %{{.*}})
+ // CHECK: load <2 x i64>, <2 x i64>* %{{.*}}, align 16, !nontemporal
return _mm_stream_load_si128(a);
}
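
Similarly, _mm_stream_load_si128 no longer goes through the target-specific
@llvm.x86.sse41.movntdqa intrinsic; it is now an ordinary 16-byte-aligned load
tagged with !nontemporal metadata, which the backend can still select to
MOVNTDQA. A usage sketch:

    #include <smmintrin.h>

    __m128i stream_in(__m128i const *p) {
      /* load <2 x i64>, align 16, !nontemporal */
      return _mm_stream_load_si128((__m128i *)p);
    }
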
diff --git a/test/CodeGen/temporary-lifetime-exceptions.cpp b/test/CodeGen/temporary-lifetime-exceptions.cpp
index f435560c97598..1dc1c40dc6ba8 100644
--- a/test/CodeGen/temporary-lifetime-exceptions.cpp
+++ b/test/CodeGen/temporary-lifetime-exceptions.cpp
@@ -9,16 +9,16 @@ A Baz(const A&);
void Test1() {
// CHECK-LABEL: @_Z5Test1v(
// CHECK: getelementptr
- // CHECK-NEXT: call void @llvm.lifetime.start(i64 1, i8* nonnull [[TMP:[^ ]+]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull [[TMP:[^ ]+]])
// CHECK-NEXT: getelementptr
- // CHECK-NEXT: call void @llvm.lifetime.start(i64 1, i8* nonnull [[TMP1:[^ ]+]])
+ // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull [[TMP1:[^ ]+]])
// Normal exit
- // CHECK: call void @llvm.lifetime.end(i64 1, i8* nonnull [[TMP1]])
- // CHECK-NEXT: call void @llvm.lifetime.end(i64 1, i8* nonnull [[TMP]])
+ // CHECK: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP1]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP]])
// Exception exit
- // CHECK: call void @llvm.lifetime.end(i64 1, i8* nonnull [[TMP1]])
- // CHECK-NEXT: call void @llvm.lifetime.end(i64 1, i8* nonnull [[TMP]])
+ // CHECK: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP1]])
+ // CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP]])
Baz(Baz(A()));
}
diff --git a/test/CodeGen/temporary-lifetime.cpp b/test/CodeGen/temporary-lifetime.cpp
index f105a441a3679..309b4e52da52a 100644
--- a/test/CodeGen/temporary-lifetime.cpp
+++ b/test/CodeGen/temporary-lifetime.cpp
@@ -21,27 +21,27 @@ T Baz();
void Test1() {
// CHECK-DTOR-LABEL: Test1
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(%struct.A* nonnull %[[VAR]])
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(%struct.A* nonnull %[[VAR]])
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR]])
// CHECK-DTOR: }
// CHECK-NO-DTOR-LABEL: Test1
- // CHECK-NO-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR]])
- // CHECK-NO-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR]])
// CHECK-NO-DTOR: }
{
const A &a = A{};
@@ -55,27 +55,27 @@ void Test1() {
void Test2() {
// CHECK-DTOR-LABEL: Test2
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR1:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR1:[0-9]+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR1:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR2:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR2:[0-9]+]])
// CHECK-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR2:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooIRK1AEvOT_
// CHECK-DTOR: call void @_ZN1AD1Ev(%struct.A* nonnull %[[VAR2]])
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR2]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR2]])
// CHECK-DTOR: call void @_ZN1AD1Ev(%struct.A* nonnull %[[VAR1]])
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR1]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR1]])
// CHECK-DTOR: }
// CHECK-NO-DTOR-LABEL: Test2
- // CHECK-NO-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR1:[0-9]+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR1:[0-9]+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR1:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR2:[0-9]+]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR2:[0-9]+]])
// CHECK-NO-DTOR: call void @_ZN1AC1Ev(%struct.A* nonnull %[[VAR2:[^ ]+]])
// CHECK-NO-DTOR: call void @_Z3FooIRK1AEvOT_
- // CHECK-NO-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR2]])
- // CHECK-NO-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR1]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR2]])
+ // CHECK-NO-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR1]])
// CHECK-NO-DTOR: }
const A &a = A{};
Foo(a);
@@ -135,16 +135,16 @@ int Test5() {
void Test6() {
// CHECK-DTOR-LABEL: Test6
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 {{[0-9]+}}, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 {{[0-9]+}}, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-DTOR: call i32 @_Z3BazIiET_v()
// CHECK-DTOR: store
// CHECK-DTOR: call void @_Z3FooIiEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 {{[0-9]+}}, i8* nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 {{[0-9]+}}, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 {{[0-9]+}}, i8* nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 {{[0-9]+}}, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-DTOR: call i32 @_Z3BazIiET_v()
// CHECK-DTOR: store
// CHECK-DTOR: call void @_Z3FooIiEvOT_
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 {{[0-9]+}}, i8* nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 {{[0-9]+}}, i8* nonnull %[[ADDR]])
// CHECK-DTOR: }
Foo(Baz<int>());
Foo(Baz<int>());
@@ -152,16 +152,16 @@ void Test6() {
void Test7() {
// CHECK-DTOR-LABEL: Test7
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-DTOR: call void @_Z3BazI1AET_v({{.*}} %[[SLOT:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooI1AEvOT_({{.*}} %[[SLOT]])
// CHECK-DTOR: call void @_ZN1AD1Ev(%struct.A* nonnull %[[SLOT]])
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR]])
- // CHECK-DTOR: call void @llvm.lifetime.start(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.start.p0i8(i64 1024, i8* nonnull %[[ADDR:[0-9]+]])
// CHECK-DTOR: call void @_Z3BazI1AET_v({{.*}} %[[SLOT:[^ ]+]])
// CHECK-DTOR: call void @_Z3FooI1AEvOT_({{.*}} %[[SLOT]])
// CHECK-DTOR: call void @_ZN1AD1Ev(%struct.A* nonnull %[[SLOT]])
- // CHECK-DTOR: call void @llvm.lifetime.end(i64 1024, i8* nonnull %[[ADDR]])
+ // CHECK-DTOR: call void @llvm.lifetime.end.p0i8(i64 1024, i8* nonnull %[[ADDR]])
// CHECK-DTOR: }
Foo(Baz<A>());
Foo(Baz<A>());
diff --git a/test/CodeGen/thin_link_bitcode.c b/test/CodeGen/thin_link_bitcode.c
new file mode 100644
index 0000000000000..4cb5f798a5685
--- /dev/null
+++ b/test/CodeGen/thin_link_bitcode.c
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -o %t -flto=thin -fthin-link-bitcode=%t.nodebug -triple x86_64-unknown-linux-gnu -emit-llvm-bc -debug-info-kind=limited %s
+// RUN: llvm-bcanalyzer -dump %t | FileCheck %s
+// RUN: llvm-bcanalyzer -dump %t.nodebug | FileCheck %s --check-prefix=NO_DEBUG
+int main (void) {
+ return 0;
+}
+
+// CHECK: COMPILE_UNIT
+// NO_DEBUG-NOT: COMPILE_UNIT
diff --git a/test/CodeGen/thinlto-emit-llvm.c b/test/CodeGen/thinlto-emit-llvm.c
new file mode 100644
index 0000000000000..f611162d1999d
--- /dev/null
+++ b/test/CodeGen/thinlto-emit-llvm.c
@@ -0,0 +1,10 @@
+// Test to ensure -emit-llvm and -emit-llvm-bc work when invoking the
+// ThinLTO backend path.
+// RUN: %clang -O2 %s -flto=thin -c -o %t.o
+// RUN: llvm-lto -thinlto -o %t %t.o
+// RUN: %clang_cc1 -O2 -x ir %t.o -fthinlto-index=%t.thinlto.bc -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -O2 -x ir %t.o -fthinlto-index=%t.thinlto.bc -emit-llvm-bc -o - | llvm-dis -o - | FileCheck %s
+
+// CHECK: define void @foo()
+void foo() {
+}
diff --git a/test/CodeGen/thinlto-multi-module.ll b/test/CodeGen/thinlto-multi-module.ll
new file mode 100644
index 0000000000000..21d28cf44da25
--- /dev/null
+++ b/test/CodeGen/thinlto-multi-module.ll
@@ -0,0 +1,22 @@
+; REQUIRES: x86-registered-target
+
+; RUN: opt -module-summary -o %t1.o %s
+; RUN: llvm-lto -thinlto -o %t %t1.o
+
+; RUN: opt -o %t2.o %S/Inputs/thinlto_backend.ll
+; RUN: llvm-cat -b -o %t1cat.o %t1.o %t2.o
+; RUN: cp %t1cat.o %t1.o
+; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t3.o -x ir %t1.o -c -fthinlto-index=%t.thinlto.bc
+; RUN: llvm-nm %t3.o | FileCheck --check-prefix=CHECK-OBJ %s
+; CHECK-OBJ: T f1
+; CHECK-OBJ: U f2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @f2()
+
+define void @f1() {
+ call void @f2()
+ ret void
+}
diff --git a/test/CodeGen/transparent-union.c b/test/CodeGen/transparent-union.c
index 2f00c2d21a05f..efaef1bae9873 100644
--- a/test/CodeGen/transparent-union.c
+++ b/test/CodeGen/transparent-union.c
@@ -3,10 +3,21 @@
// RUN: %clang_cc1 -Werror -triple armv7-linux -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM
// RUN: %clang_cc1 -Werror -triple powerpc64le-linux -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -Werror -triple aarch64-linux -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DINFRONT -Werror -triple x86_64-linux -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DINFRONT -Werror -triple i386-linux -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DINFRONT -Werror -triple armv7-linux -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM
+// RUN: %clang_cc1 -DINFRONT -Werror -triple powerpc64le-linux -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -DINFRONT -Werror -triple aarch64-linux -emit-llvm -o - %s | FileCheck %s
+#ifdef INFRONT
+typedef union __attribute__((transparent_union)) {
+ void *f0;
+} transp_t0;
+#else
typedef union {
void *f0;
} transp_t0 __attribute__((transparent_union));
+#endif
void f0(transp_t0 obj);
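
Whichever spelling is used, the transparent union lets f0 be called with a
bare pointer, passed exactly as the union's first member would be. A caller
sketch (not part of the test):

    void caller(void *p) {
      f0(p); /* accepted: p initializes the union's f0 member directly */
    }
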
diff --git a/test/CodeGen/ubsan-promoted-arith.cpp b/test/CodeGen/ubsan-promoted-arith.cpp
new file mode 100644
index 0000000000000..5a2898b5423bc
--- /dev/null
+++ b/test/CodeGen/ubsan-promoted-arith.cpp
@@ -0,0 +1,131 @@
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-apple-darwin10 -emit-llvm -o - %s -fsanitize=signed-integer-overflow,unsigned-integer-overflow | FileCheck %s
+
+typedef unsigned char uchar;
+typedef unsigned short ushort;
+typedef int int4 __attribute__((ext_vector_type(4)));
+
+enum E1 : int {
+ a
+};
+
+enum E2 : char {
+ b
+};
+
+// CHECK-LABEL: define signext i8 @_Z4add1
+// CHECK-NOT: sadd.with.overflow
+char add1(char c) { return c + c; }
+
+// CHECK-LABEL: define zeroext i8 @_Z4add2
+// CHECK-NOT: uadd.with.overflow
+uchar add2(uchar uc) { return uc + uc; }
+
+// CHECK-LABEL: define i32 @_Z4add3
+// CHECK: sadd.with.overflow
+int add3(E1 e) { return e + a; }
+
+// CHECK-LABEL: define signext i8 @_Z4add4
+// CHECK-NOT: sadd.with.overflow
+char add4(E2 e) { return e + b; }
+
+// CHECK-LABEL: define signext i8 @_Z4sub1
+// CHECK-NOT: ssub.with.overflow
+char sub1(char c) { return c - c; }
+
+// CHECK-LABEL: define zeroext i8 @_Z4sub2
+// CHECK-NOT: usub.with.overflow
+uchar sub2(uchar uc) { return uc - uc; }
+
+// CHECK-LABEL: define signext i8 @_Z4sub3
+// CHECK-NOT: ssub.with.overflow
+char sub3(char c) { return -c; }
+
+// Note: -INT_MIN can overflow.
+//
+// CHECK-LABEL: define i32 @_Z4sub4
+// CHECK: ssub.with.overflow
+int sub4(int i) { return -i; }
+
+// CHECK-LABEL: define signext i8 @_Z4mul1
+// CHECK-NOT: smul.with.overflow
+char mul1(char c) { return c * c; }
+
+// CHECK-LABEL: define zeroext i8 @_Z4mul2
+// CHECK-NOT: smul.with.overflow
+uchar mul2(uchar uc) { return uc * uc; }
+
+// Note: USHRT_MAX * USHRT_MAX can overflow.
+//
+// CHECK-LABEL: define zeroext i16 @_Z4mul3
+// CHECK: smul.with.overflow
+ushort mul3(ushort us) { return us * us; }
+
+// CHECK-LABEL: define i32 @_Z4mul4
+// CHECK: smul.with.overflow
+int mul4(int i, char c) { return i * c; }
+
+// CHECK-LABEL: define i32 @_Z4mul5
+// CHECK: smul.with.overflow
+int mul5(int i, char c) { return c * i; }
+
+// CHECK-LABEL: define signext i16 @_Z4mul6
+// CHECK-NOT: smul.with.overflow
+short mul6(short s) { return s * s; }
+
+// CHECK-LABEL: define signext i8 @_Z4div1
+// CHECK-NOT: ubsan_handle_divrem_overflow
+char div1(char c) { return c / c; }
+
+// CHECK-LABEL: define zeroext i8 @_Z4div2
+// CHECK-NOT: ubsan_handle_divrem_overflow
+uchar div2(uchar uc) { return uc / uc; }
+
+// CHECK-LABEL: define signext i8 @_Z4div3
+// CHECK-NOT: ubsan_handle_divrem_overflow
+char div3(char c, int i) { return c / i; }
+
+// CHECK-LABEL: define signext i8 @_Z4div4
+// CHECK: ubsan_handle_divrem_overflow
+char div4(int i, char c) { return i / c; }
+
+// Note: INT_MIN / -1 can overflow.
+//
+// CHECK-LABEL: define signext i8 @_Z4div5
+// CHECK: ubsan_handle_divrem_overflow
+char div5(int i, char c) { return i / c; }
+
+// CHECK-LABEL: define signext i8 @_Z4rem1
+// CHECK-NOT: ubsan_handle_divrem_overflow
+char rem1(char c) { return c % c; }
+
+// CHECK-LABEL: define zeroext i8 @_Z4rem2
+// CHECK-NOT: ubsan_handle_divrem_overflow
+uchar rem2(uchar uc) { return uc % uc; }
+
+// CHECK-LABEL: define signext i8 @_Z4rem3
+// CHECK: ubsan_handle_divrem_overflow
+char rem3(int i, char c) { return i % c; }
+
+// CHECK-LABEL: define signext i8 @_Z4rem4
+// CHECK-NOT: ubsan_handle_divrem_overflow
+char rem4(char c, int i) { return c % i; }
+
+// CHECK-LABEL: define signext i8 @_Z4inc1
+// CHECK-NOT: sadd.with.overflow
+char inc1(char c) { return c++ + (char)0; }
+
+// CHECK-LABEL: define zeroext i8 @_Z4inc2
+// CHECK-NOT: uadd.with.overflow
+uchar inc2(uchar uc) { return uc++ + (uchar)0; }
+
+// CHECK-LABEL: define void @_Z4inc3
+// CHECK-NOT: sadd.with.overflow
+void inc3(char c) { c++; }
+
+// CHECK-LABEL: define void @_Z4inc4
+// CHECK-NOT: uadd.with.overflow
+void inc4(uchar uc) { uc++; }
+
+// CHECK-LABEL: define <4 x i32> @_Z4vremDv4_iS_
+// CHECK-NOT: ubsan_handle_divrem_overflow
+int4 vrem(int4 a, int4 b) { return a % b; }
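
The dividing line throughout this test is integer promotion: operands narrower
than int are widened to int first, so instrumentation is emitted only when the
promoted operation can actually overflow. Worked out for the multiplications:

    /* after promotion to 32-bit int (INT_MAX = 2147483647):               */
    /*   char   : 127 * 127     =      16129 -> cannot overflow, no check  */
    /*   uchar  : 255 * 255     =      65025 -> cannot overflow, no check  */
    /*   short  : 32767 * 32767 = 1073676289 -> cannot overflow, no check  */
    /*   ushort : 65535 * 65535 = 4294836225 -> overflows; check emitted   */
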
diff --git a/test/CodeGen/ubsan-shift.c b/test/CodeGen/ubsan-shift.c
new file mode 100644
index 0000000000000..90c15d8c086ff
--- /dev/null
+++ b/test/CodeGen/ubsan-shift.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -triple=x86_64-apple-darwin -fsanitize=shift-exponent,shift-base -emit-llvm %s -o - | FileCheck %s
+
+// CHECK-LABEL: define i32 @f1
+int f1(int c, int shamt) {
+// CHECK: icmp ule i32 %{{.*}}, 31, !nosanitize
+// CHECK: icmp ule i32 %{{.*}}, 31, !nosanitize
+ return 1 << (c << shamt);
+}
+
+// CHECK-LABEL: define i32 @f2
+int f2(long c, int shamt) {
+// CHECK: icmp ule i32 %{{.*}}, 63, !nosanitize
+// CHECK: icmp ule i64 %{{.*}}, 31, !nosanitize
+ return 1 << (c << shamt);
+}
+
+// CHECK-LABEL: define i32 @f3
+unsigned f3(unsigned c, int shamt) {
+// CHECK: icmp ule i32 %{{.*}}, 31, !nosanitize
+// CHECK: icmp ule i32 %{{.*}}, 31, !nosanitize
+ return 1U << (c << shamt);
+}
+
+// CHECK-LABEL: define i32 @f4
+unsigned f4(unsigned long c, int shamt) {
+// CHECK: icmp ule i32 %{{.*}}, 63, !nosanitize
+// CHECK: icmp ule i64 %{{.*}}, 31, !nosanitize
+ return 1U << (c << shamt);
+}
+
+// CHECK-LABEL: define i32 @f5
+int f5(int c, long long shamt) {
+// CHECK: icmp ule i64 %{{[0-9]+}}, 31, !nosanitize
+//
+// CHECK: sub nuw nsw i32 31, %sh_prom, !nosanitize
+// CHECK: lshr i32 %{{.*}}, %shl.zeros, !nosanitize
+ return c << shamt;
+}
+
+// CHECK-LABEL: define i32 @f6
+int f6(int c, int shamt) {
+// CHECK: icmp ule i32 %[[WIDTH:.*]], 31, !nosanitize
+//
+// CHECK: sub nuw nsw i32 31, %[[WIDTH]], !nosanitize
+// CHECK: lshr i32 %{{.*}}, %shl.zeros, !nosanitize
+ return c << shamt;
+}
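
Taken together, the checks encode two conditions for c << s on a 32-bit
promoted type: the exponent must be in range, and no meaningful bits of the
base may be shifted out. A rough C restatement of the instrumented test (an
approximation of the emitted sequence, not a literal transcription):

    int shl_ok_sketch(int c, int s) {
      if ((unsigned)s > 31)
        return 0;                                    /* shift-exponent check */
      /* shift-base check: everything above bit 31-s must be empty; a result
         of 1 is tolerated so the sign bit may be shifted into place */
      return ((unsigned)c >> (31 - (unsigned)s)) <= 1;
    }
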
diff --git a/test/CodeGen/unaligned-decl.c b/test/CodeGen/unaligned-decl.c
new file mode 100644
index 0000000000000..d5d32bd830fa3
--- /dev/null
+++ b/test/CodeGen/unaligned-decl.c
@@ -0,0 +1,22 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fms-extensions -emit-llvm < %s | FileCheck %s
+
+// CHECK: @a1 = global i32 1, align 1
+__unaligned int a1 = 1;
+
+// CHECK: @a2 = global i32 1, align 1
+int __unaligned a2 = 1;
+
+// CHECK: @a3 = {{.*}} align 1
+__unaligned int a3[10];
+
+// CHECK: @a4 = {{.*}} align 1
+int __unaligned a4[10];
+
+// CHECK: @p1 = {{.*}} align 1
+int *__unaligned p1;
+
+// CHECK: @p2 = {{.*}} align 8
+int __unaligned *p2;
+
+// CHECK: @p3 = {{.*}} align 1
+int __unaligned *__unaligned p3;
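
As the checks show, __unaligned binds in declarators the same way const and
volatile do: in p1 it qualifies the pointer object itself (align 1), in p2 the
pointee (the pointer keeps its natural align 8), and in p3 both. An access
sketch grounded in those checks:

    void touch(void) {
      *p2 = 0; /* store i32 0, i32* {{.*}}, align 1: the pointee is unaligned */
    }
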
diff --git a/test/CodeGen/unaligned-expr.c b/test/CodeGen/unaligned-expr.c
new file mode 100644
index 0000000000000..6e23cbc729fbd
--- /dev/null
+++ b/test/CodeGen/unaligned-expr.c
@@ -0,0 +1,217 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fms-extensions -emit-llvm < %s | FileCheck %s
+
+// -------------
+// Scalar integer
+// -------------
+__unaligned int x;
+void test1(void) {
+ // CHECK: {{%.*}} = load i32, i32* @x, align 1
+ // CHECK: store i32 {{%.*}}, i32* @x, align 1
+ x++;
+}
+
+void test2(void) {
+ // CHECK: %y = alloca i32, align 1
+ // CHECK: {{%.*}} = load i32, i32* %y, align 1
+ // CHECK: store i32 {{%.*}}, i32* %y, align 1
+ __unaligned int y;
+ y++;
+}
+
+void test2_1(void) {
+ // CHECK: %y = alloca i32, align 1
+ // CHECK: store i32 1, i32* %y, align 1
+ __unaligned int y = 1;
+}
+
+// -------------
+// Global pointer
+// -------------
+int *__unaligned p1;
+void test3(void) {
+
+ // CHECK: {{%.*}} = load i32*, i32** @p1, align 1
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 4
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 4
+ (*p1)++;
+}
+
+int __unaligned *p2;
+void test4(void) {
+ // CHECK: {{%.*}} = load i32*, i32** @p2, align 8
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ (*p2)++;
+}
+
+int __unaligned *__unaligned p3;
+void test5(void) {
+ // CHECK: {{%.*}} = load i32*, i32** @p3, align 1
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ (*p3)++;
+}
+
+// -------------
+// Local pointer
+// -------------
+void test6(void) {
+ // CHECK: %lp1 = alloca i32*, align 1
+ // CHECK: {{%.*}} = load i32*, i32** %lp1, align 1
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 4
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 4
+ int *__unaligned lp1;
+ (*lp1)++;
+}
+
+void test7(void) {
+ // CHECK: %lp2 = alloca i32*, align 8
+ // CHECK: {{%.*}} = load i32*, i32** %lp2, align 8
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ int __unaligned *lp2;
+ (*lp2)++;
+}
+
+void test8(void) {
+ // CHECK: %lp3 = alloca i32*, align 1
+ // CHECK: {{%.*}} = load i32*, i32** %lp3, align 1
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ int __unaligned *__unaligned lp3;
+ (*lp3)++;
+}
+
+// -------------
+// Global array
+// -------------
+__unaligned int a[10];
+void test9(void) {
+ // CHECK: {{%.*}} = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @a, i64 0, i64 3), align 1
+ // CHECK: store i32 {{%.*}}, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @a, i64 0, i64 3), align 1
+ (a[3])++;
+}
+
+// -------------
+// Local array
+// -------------
+void test10(void) {
+ // CHECK: %la = alloca [10 x i32], align 1
+ // CHECK: {{%.*}} = getelementptr inbounds [10 x i32], [10 x i32]* %la, i64 0, i64 3
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ __unaligned int la[10];
+ (la[3])++;
+}
+
+// --------
+// Typedefs
+// --------
+
+typedef __unaligned int UnalignedInt;
+void test13() {
+ // CHECK: %i = alloca i32, align 1
+ // CHECK: {{%.*}} = load i32, i32* %i, align 1
+ // CHECK: store i32 {{%.*}}, i32* %i, align 1
+ UnalignedInt i;
+ i++;
+}
+
+typedef int Aligned;
+typedef __unaligned Aligned UnalignedInt2;
+void test14() {
+ // CHECK: %i = alloca i32, align 1
+ // CHECK: {{%.*}} = load i32, i32* %i, align 1
+ // CHECK: store i32 {{%.*}}, i32* %i, align 1
+ UnalignedInt2 i;
+ i++;
+}
+
+typedef UnalignedInt UnalignedInt3;
+void test15() {
+ // CHECK: %i = alloca i32, align 1
+ // CHECK: {{%.*}} = load i32, i32* %i, align 1
+ // CHECK: store i32 {{%.*}}, i32* %i, align 1
+ UnalignedInt3 i;
+ i++;
+}
+
+// -------------
+// Decayed types
+// -------------
+void test16(__unaligned int c[10]) {
+ // CHECK: {{%.*}} = alloca i32*, align 8
+ // CHECK: store i32* %c, i32** {{%.*}}, align 8
+ // CHECK: {{%.*}} = load i32*, i32** {{%.*}}, align 8
+ // CHECK: {{%.*}} = getelementptr inbounds i32, i32* {{%.*}}, i64 3
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ c[3]++;
+}
+
+// -----------
+// __alignof__
+// -----------
+int test17(void) {
+ // CHECK: ret i32 1
+ return __alignof__(__unaligned int);
+}
+
+int test18(void) {
+ // CHECK: ret i32 1
+ __unaligned int a;
+ return __alignof__(a);
+}
+
+int test19(void) {
+ // CHECK: ret i32 1
+ __unaligned int a[10];
+ return __alignof__(a);
+}
+
+// -----------
+// structs
+// -----------
+typedef
+struct S1 {
+ char c;
+ int x;
+} S1;
+
+__unaligned S1 s1;
+void test20(void) {
+ // CHECK: {{%.*}} = load i32, i32* getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 1), align 1
+ // CHECK: store i32 {{%.*}}, i32* getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 1), align 1
+ s1.x++;
+}
+
+void test21(void) {
+ // CHECK: {{%.*}} = alloca %struct.S1, align 1
+ // CHECK: {{%.*}} = getelementptr inbounds %struct.S1, %struct.S1* {{%.*}}, i32 0, i32 1
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ __unaligned S1 s1_2;
+ s1_2.x++;
+}
+
+typedef
+struct __attribute__((packed)) S2 {
+ char c;
+ int x;
+} S2;
+
+__unaligned S2 s2;
+void test22(void) {
+ // CHECK: {{%.*}} = load i32, i32* getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1), align 1
+ // CHECK: store i32 {{%.*}}, i32* getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1), align 1
+ s2.x++;
+}
+
+void test23(void) {
+ // CHECK: {{%.*}} = alloca %struct.S2, align 1
+ // CHECK: {{%.*}} = getelementptr inbounds %struct.S2, %struct.S2* {{%.*}}, i32 0, i32 1
+ // CHECK: {{%.*}} = load i32, i32* {{%.*}}, align 1
+ // CHECK: store i32 {{%.*}}, i32* {{%.*}}, align 1
+ __unaligned S2 s2_2;
+ s2_2.x++;
+}
diff --git a/test/CodeGen/unaligned-field.c b/test/CodeGen/unaligned-field.c
new file mode 100644
index 0000000000000..5aa59c2799177
--- /dev/null
+++ b/test/CodeGen/unaligned-field.c
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fms-extensions -emit-llvm < %s | FileCheck %s
+// Test that __unaligned does not impact the layout of the fields.
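+// __unaligned only relaxes the alignment assumed for accesses to a field;
+// field offsets still follow the normal ABI rules, so no packing occurs.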
+
+struct A
+{
+ char a;
+ __unaligned int b;
+} a;
+// CHECK: %struct.A = type { i8, i32 }
+
+struct A2
+{
+ int b;
+ char a;
+ __unaligned int c;
+} a2;
+// CHECK: %struct.A2 = type { i32, i8, i32 }
diff --git a/test/CodeGen/unsigned-promotion.c b/test/CodeGen/unsigned-promotion.c
index 4e7a4426a03e2..4b13f68781b90 100644
--- a/test/CodeGen/unsigned-promotion.c
+++ b/test/CodeGen/unsigned-promotion.c
@@ -7,53 +7,6 @@
// RUN: -fsanitize=unsigned-integer-overflow | FileCheck %s --check-prefix=CHECKU
unsigned short si, sj, sk;
-unsigned char ci, cj, ck;
-
-extern void opaqueshort(unsigned short);
-extern void opaquechar(unsigned char);
-
-// CHECKS-LABEL: define void @testshortadd()
-// CHECKU-LABEL: define void @testshortadd()
-void testshortadd() {
- // CHECKS: load i16, i16* @sj
- // CHECKS: load i16, i16* @sk
- // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]])
- // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0
- // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1
- // CHECKS: call void @__ubsan_handle_add_overflow
- //
- // CHECKU: [[T1:%.*]] = load i16, i16* @sj
- // CHECKU: [[T2:%.*]] = zext i16 [[T1]]
- // CHECKU: [[T3:%.*]] = load i16, i16* @sk
- // CHECKU: [[T4:%.*]] = zext i16 [[T3]]
- // CHECKU-NOT: llvm.sadd
- // CHECKU-NOT: llvm.uadd
- // CHECKU: [[T5:%.*]] = add nsw i32 [[T2]], [[T4]]
-
- si = sj + sk;
-}
-
-// CHECKS-LABEL: define void @testshortsub()
-// CHECKU-LABEL: define void @testshortsub()
-void testshortsub() {
-
- // CHECKS: load i16, i16* @sj
- // CHECKS: load i16, i16* @sk
- // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]])
- // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0
- // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1
- // CHECKS: call void @__ubsan_handle_sub_overflow
- //
- // CHECKU: [[T1:%.*]] = load i16, i16* @sj
- // CHECKU: [[T2:%.*]] = zext i16 [[T1]]
- // CHECKU: [[T3:%.*]] = load i16, i16* @sk
- // CHECKU: [[T4:%.*]] = zext i16 [[T3]]
- // CHECKU-NOT: llvm.ssub
- // CHECKU-NOT: llvm.usub
- // CHECKU: [[T5:%.*]] = sub nsw i32 [[T2]], [[T4]]
-
- si = sj - sk;
-}
// CHECKS-LABEL: define void @testshortmul()
// CHECKU-LABEL: define void @testshortmul()
@@ -75,69 +28,3 @@ void testshortmul() {
// CHECKU: [[T5:%.*]] = mul nsw i32 [[T2]], [[T4]]
si = sj * sk;
}
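+
+// The unsigned short operands are promoted to (signed) int before the
+// multiply, so -fsanitize=unsigned-integer-overflow has nothing to
+// instrument here and CHECKU only matches a plain "mul nsw".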
-
-// CHECKS-LABEL: define void @testcharadd()
-// CHECKU-LABEL: define void @testcharadd()
-void testcharadd() {
-
- // CHECKS: load i8, i8* @cj
- // CHECKS: load i8, i8* @ck
- // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]])
- // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0
- // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1
- // CHECKS: call void @__ubsan_handle_add_overflow
- //
- // CHECKU: [[T1:%.*]] = load i8, i8* @cj
- // CHECKU: [[T2:%.*]] = zext i8 [[T1]]
- // CHECKU: [[T3:%.*]] = load i8, i8* @ck
- // CHECKU: [[T4:%.*]] = zext i8 [[T3]]
- // CHECKU-NOT: llvm.sadd
- // CHECKU-NOT: llvm.uadd
- // CHECKU: [[T5:%.*]] = add nsw i32 [[T2]], [[T4]]
-
- ci = cj + ck;
-}
-
-// CHECKS-LABEL: define void @testcharsub()
-// CHECKU-LABEL: define void @testcharsub()
-void testcharsub() {
-
- // CHECKS: load i8, i8* @cj
- // CHECKS: load i8, i8* @ck
- // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]])
- // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0
- // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1
- // CHECKS: call void @__ubsan_handle_sub_overflow
- //
- // CHECKU: [[T1:%.*]] = load i8, i8* @cj
- // CHECKU: [[T2:%.*]] = zext i8 [[T1]]
- // CHECKU: [[T3:%.*]] = load i8, i8* @ck
- // CHECKU: [[T4:%.*]] = zext i8 [[T3]]
- // CHECKU-NOT: llvm.ssub
- // CHECKU-NOT: llvm.usub
- // CHECKU: [[T5:%.*]] = sub nsw i32 [[T2]], [[T4]]
-
- ci = cj - ck;
-}
-
-// CHECKS-LABEL: define void @testcharmul()
-// CHECKU-LABEL: define void @testcharmul()
-void testcharmul() {
-
- // CHECKS: load i8, i8* @cj
- // CHECKS: load i8, i8* @ck
- // CHECKS: [[T1:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[T2:%.*]], i32 [[T3:%.*]])
- // CHECKS-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T1]], 0
- // CHECKS-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T1]], 1
- // CHECKS: call void @__ubsan_handle_mul_overflow
- //
- // CHECKU: [[T1:%.*]] = load i8, i8* @cj
- // CHECKU: [[T2:%.*]] = zext i8 [[T1]]
- // CHECKU: [[T3:%.*]] = load i8, i8* @ck
- // CHECKU: [[T4:%.*]] = zext i8 [[T3]]
- // CHECKU-NOT: llvm.smul
- // CHECKU-NOT: llvm.umul
- // CHECKU: [[T5:%.*]] = mul nsw i32 [[T2]], [[T4]]
-
- ci = cj * ck;
-}
diff --git a/test/CodeGen/xop-builtins-cmp.c b/test/CodeGen/xop-builtins-cmp.c
new file mode 100644
index 0000000000000..a805352ad3b93
--- /dev/null
+++ b/test/CodeGen/xop-builtins-cmp.c
@@ -0,0 +1,405 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +xop -fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <x86intrin.h>
+
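+// Each _mm_com*_ep* intrinsic lowers to the matching @llvm.x86.xop.vpcom*
+// call, with the comparison predicate encoded in the trailing i8 immediate:
+// 0=LT, 1=LE, 2=GT, 3=GE, 4=EQ, 5=NEQ, 6=FALSE, 7=TRUE.
+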
+// _MM_PCOMCTRL_LT
+
+__m128i test_mm_comlt_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 0)
+ return _mm_comlt_epu8(a, b);
+}
+
+__m128i test_mm_comlt_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 0)
+ return _mm_comlt_epu16(a, b);
+}
+
+__m128i test_mm_comlt_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 0)
+ return _mm_comlt_epu32(a, b);
+}
+
+__m128i test_mm_comlt_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 0)
+ return _mm_comlt_epu64(a, b);
+}
+
+__m128i test_mm_comlt_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 0)
+ return _mm_comlt_epi8(a, b);
+}
+
+__m128i test_mm_comlt_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 0)
+ return _mm_comlt_epi16(a, b);
+}
+
+__m128i test_mm_comlt_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 0)
+ return _mm_comlt_epi32(a, b);
+}
+
+__m128i test_mm_comlt_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comlt_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 0)
+ return _mm_comlt_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_LE
+
+__m128i test_mm_comle_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 1)
+ return _mm_comle_epu8(a, b);
+}
+
+__m128i test_mm_comle_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 1)
+ return _mm_comle_epu16(a, b);
+}
+
+__m128i test_mm_comle_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 1)
+ return _mm_comle_epu32(a, b);
+}
+
+__m128i test_mm_comle_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 1)
+ return _mm_comle_epu64(a, b);
+}
+
+__m128i test_mm_comle_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 1)
+ return _mm_comle_epi8(a, b);
+}
+
+__m128i test_mm_comle_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 1)
+ return _mm_comle_epi16(a, b);
+}
+
+__m128i test_mm_comle_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 1)
+ return _mm_comle_epi32(a, b);
+}
+
+__m128i test_mm_comle_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comle_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 1)
+ return _mm_comle_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_GT
+
+__m128i test_mm_comgt_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 2)
+ return _mm_comgt_epu8(a, b);
+}
+
+__m128i test_mm_comgt_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 2)
+ return _mm_comgt_epu16(a, b);
+}
+
+__m128i test_mm_comgt_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 2)
+ return _mm_comgt_epu32(a, b);
+}
+
+__m128i test_mm_comgt_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 2)
+ return _mm_comgt_epu64(a, b);
+}
+
+__m128i test_mm_comgt_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 2)
+ return _mm_comgt_epi8(a, b);
+}
+
+__m128i test_mm_comgt_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 2)
+ return _mm_comgt_epi16(a, b);
+}
+
+__m128i test_mm_comgt_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 2)
+ return _mm_comgt_epi32(a, b);
+}
+
+__m128i test_mm_comgt_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comgt_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 2)
+ return _mm_comgt_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_GE
+
+__m128i test_mm_comge_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 3)
+ return _mm_comge_epu8(a, b);
+}
+
+__m128i test_mm_comge_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 3)
+ return _mm_comge_epu16(a, b);
+}
+
+__m128i test_mm_comge_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 3)
+ return _mm_comge_epu32(a, b);
+}
+
+__m128i test_mm_comge_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 3)
+ return _mm_comge_epu64(a, b);
+}
+
+__m128i test_mm_comge_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 3)
+ return _mm_comge_epi8(a, b);
+}
+
+__m128i test_mm_comge_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 3)
+ return _mm_comge_epi16(a, b);
+}
+
+__m128i test_mm_comge_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 3)
+ return _mm_comge_epi32(a, b);
+}
+
+__m128i test_mm_comge_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comge_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 3)
+ return _mm_comge_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_EQ
+
+__m128i test_mm_comeq_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 4)
+ return _mm_comeq_epu8(a, b);
+}
+
+__m128i test_mm_comeq_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 4)
+ return _mm_comeq_epu16(a, b);
+}
+
+__m128i test_mm_comeq_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 4)
+ return _mm_comeq_epu32(a, b);
+}
+
+__m128i test_mm_comeq_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 4)
+ return _mm_comeq_epu64(a, b);
+}
+
+__m128i test_mm_comeq_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 4)
+ return _mm_comeq_epi8(a, b);
+}
+
+__m128i test_mm_comeq_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 4)
+ return _mm_comeq_epi16(a, b);
+}
+
+__m128i test_mm_comeq_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 4)
+ return _mm_comeq_epi32(a, b);
+}
+
+__m128i test_mm_comeq_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comeq_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 4)
+ return _mm_comeq_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_NEQ
+
+__m128i test_mm_comneq_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 5)
+ return _mm_comneq_epu8(a, b);
+}
+
+__m128i test_mm_comneq_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 5)
+ return _mm_comneq_epu16(a, b);
+}
+
+__m128i test_mm_comneq_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 5)
+ return _mm_comneq_epu32(a, b);
+}
+
+__m128i test_mm_comneq_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 5)
+ return _mm_comneq_epu64(a, b);
+}
+
+__m128i test_mm_comneq_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 5)
+ return _mm_comneq_epi8(a, b);
+}
+
+__m128i test_mm_comneq_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 5)
+ return _mm_comneq_epi16(a, b);
+}
+
+__m128i test_mm_comneq_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 5)
+ return _mm_comneq_epi32(a, b);
+}
+
+__m128i test_mm_comneq_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comneq_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 5)
+ return _mm_comneq_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_FALSE
+
+__m128i test_mm_comfalse_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 6)
+ return _mm_comfalse_epu8(a, b);
+}
+
+__m128i test_mm_comfalse_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 6)
+ return _mm_comfalse_epu16(a, b);
+}
+
+__m128i test_mm_comfalse_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 6)
+ return _mm_comfalse_epu32(a, b);
+}
+
+__m128i test_mm_comfalse_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 6)
+ return _mm_comfalse_epu64(a, b);
+}
+
+__m128i test_mm_comfalse_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 6)
+ return _mm_comfalse_epi8(a, b);
+}
+
+__m128i test_mm_comfalse_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 6)
+ return _mm_comfalse_epi16(a, b);
+}
+
+__m128i test_mm_comfalse_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 6)
+ return _mm_comfalse_epi32(a, b);
+}
+
+__m128i test_mm_comfalse_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comfalse_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 6)
+ return _mm_comfalse_epi64(a, b);
+}
+
+// _MM_PCOMCTRL_TRUE
+
+__m128i test_mm_comtrue_epu8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epu8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 7)
+ return _mm_comtrue_epu8(a, b);
+}
+
+__m128i test_mm_comtrue_epu16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epu16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 7)
+ return _mm_comtrue_epu16(a, b);
+}
+
+__m128i test_mm_comtrue_epu32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epu32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 7)
+ return _mm_comtrue_epu32(a, b);
+}
+
+__m128i test_mm_comtrue_epu64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epu64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 7)
+ return _mm_comtrue_epu64(a, b);
+}
+
+__m128i test_mm_comtrue_epi8(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epi8
+ // CHECK: call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i8 7)
+ return _mm_comtrue_epi8(a, b);
+}
+
+__m128i test_mm_comtrue_epi16(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epi16
+ // CHECK: call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i8 7)
+ return _mm_comtrue_epi16(a, b);
+}
+
+__m128i test_mm_comtrue_epi32(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epi32
+ // CHECK: call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i8 7)
+ return _mm_comtrue_epi32(a, b);
+}
+
+__m128i test_mm_comtrue_epi64(__m128i a, __m128i b) {
+ // CHECK-LABEL: test_mm_comtrue_epi64
+ // CHECK: call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i8 7)
+ return _mm_comtrue_epi64(a, b);
+}
diff --git a/test/CodeGen/xop-builtins.c b/test/CodeGen/xop-builtins.c
index da9a3b925de20..5302b9ab8f2fc 100644
--- a/test/CodeGen/xop-builtins.c
+++ b/test/CodeGen/xop-builtins.c
@@ -170,13 +170,19 @@ __m128i test_mm_hsubq_epi32(__m128i a) {
__m128i test_mm_cmov_si128(__m128i a, __m128i b, __m128i c) {
// CHECK-LABEL: test_mm_cmov_si128
- // CHECK: call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: [[AND:%.*]] = and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: [[NEG:%.*]] = xor <2 x i64> %{{.*}}, <i64 -1, i64 -1>
+ // CHECK-NEXT: [[ANDN:%.*]] = and <2 x i64> %{{.*}}, [[NEG]]
+ // CHECK-NEXT: %{{.*}} = or <2 x i64> [[AND]], [[ANDN]]
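+ // This is the bitwise select (a & c) | (b & ~c), now emitted as generic
+ // and/xor/or IR rather than a call to the vpcmov intrinsic.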
return _mm_cmov_si128(a, b, c);
}
__m256i test_mm256_cmov_si256(__m256i a, __m256i b, __m256i c) {
// CHECK-LABEL: test_mm256_cmov_si256
- // CHECK: call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: [[AND:%.*]] = and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: [[NEG:%.*]] = xor <4 x i64> %{{.*}}, <i64 -1, i64 -1, i64 -1, i64 -1>
+ // CHECK-NEXT: [[ANDN:%.*]] = and <4 x i64> %{{.*}}, [[NEG]]
+ // CHECK-NEXT: %{{.*}} = or <4 x i64> [[AND]], [[ANDN]]
return _mm256_cmov_si256(a, b, c);
}
diff --git a/test/CodeGen/xray-always-instrument.cpp b/test/CodeGen/xray-always-instrument.cpp
new file mode 100644
index 0000000000000..60d8595699584
--- /dev/null
+++ b/test/CodeGen/xray-always-instrument.cpp
@@ -0,0 +1,15 @@
+// RUN: echo "fun:*foo*" > %t.always-instrument
+// RUN: echo "src:*xray-always-instrument.cpp" >> %t.always-instrument
+// RUN: %clang_cc1 -fxray-instrument -x c++ -std=c++11 -fxray-always-instrument=%t.always-instrument -emit-llvm -o - %s -triple x86_64-unknown-linux-gnu | FileCheck %s
+
+void foo() {}
+
+[[clang::xray_never_instrument]] void bar() {}
+
+void baz() {}
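+// foo matches the "fun:*foo*" entry and baz matches the "src:" entry for
+// this file, so both receive the xray-always attribute; bar's explicit
+// xray_never_instrument attribute overrides the src: match.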
+
+// CHECK: define void @_Z3foov() #[[ALWAYSATTR:[0-9]+]] {
+// CHECK: define void @_Z3barv() #[[NEVERATTR:[0-9]+]] {
+// CHECK: define void @_Z3bazv() #[[ALWAYSATTR:[0-9]+]] {
+// CHECK: attributes #[[ALWAYSATTR]] = {{.*}} "function-instrument"="xray-always" {{.*}}
+// CHECK: attributes #[[NEVERATTR]] = {{.*}} "function-instrument"="xray-never" {{.*}}
diff --git a/test/CodeGen/xray-attributes-supported-arm.cpp b/test/CodeGen/xray-attributes-supported-arm.cpp
deleted file mode 100644
index 3104f285bfb31..0000000000000
--- a/test/CodeGen/xray-attributes-supported-arm.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple arm-unknown-linux-gnu | FileCheck %s
-
-// Make sure that the LLVM attribute for XRay-annotated functions do show up.
-[[clang::xray_always_instrument]] void foo() {
-// CHECK: define void @_Z3foov() #0
-};
-
-[[clang::xray_never_instrument]] void bar() {
-// CHECK: define void @_Z3barv() #1
-};
-
-// CHECK: #0 = {{.*}}"function-instrument"="xray-always"
-// CHECK: #1 = {{.*}}"function-instrument"="xray-never"
diff --git a/test/CodeGen/xray-attributes-supported.cpp b/test/CodeGen/xray-attributes-supported.cpp
index d70b3aa260132..860efb276f699 100644
--- a/test/CodeGen/xray-attributes-supported.cpp
+++ b/test/CodeGen/xray-attributes-supported.cpp
@@ -1,4 +1,10 @@
// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple x86_64-unknown-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple arm-unknown-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple mips-unknown-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple mipsel-unknown-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple mips64-unknown-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple mips64el-unknown-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple powerpc64le-unknown-linux-gnu | FileCheck %s
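+// The attribute checks below are target-independent; these RUN lines cover
+// the architectures that currently support XRay instrumentation.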
// Make sure that the LLVM attribute for XRay-annotated functions do show up.
[[clang::xray_always_instrument]] void foo() {
diff --git a/test/CodeGen/xray-instruction-threshold.cpp b/test/CodeGen/xray-instruction-threshold.cpp
new file mode 100644
index 0000000000000..b5f4489bd97ee
--- /dev/null
+++ b/test/CodeGen/xray-instruction-threshold.cpp
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -fxray-instrument -fxray-instruction-threshold=1 -x c++ -std=c++11 -emit-llvm -o - %s -triple x86_64-unknown-linux-gnu | FileCheck %s
+
+int foo() {
+ return 1;
+}
+
+[[clang::xray_never_instrument]] int bar() {
+ return 2;
+}
+
+// CHECK: define i32 @_Z3foov() #[[THRESHOLD:[0-9]+]] {
+// CHECK: define i32 @_Z3barv() #[[NEVERATTR:[0-9]+]] {
+// CHECK-DAG: attributes #[[THRESHOLD]] = {{.*}} "xray-instruction-threshold"="1" {{.*}}
+// CHECK-DAG: attributes #[[NEVERATTR]] = {{.*}} "function-instrument"="xray-never" {{.*}}
diff --git a/test/CodeGen/xray-log-args.cpp b/test/CodeGen/xray-log-args.cpp
new file mode 100644
index 0000000000000..d4f4a1ba851b3
--- /dev/null
+++ b/test/CodeGen/xray-log-args.cpp
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 %s -fxray-instrument -std=c++11 -x c++ -emit-llvm -o - -triple x86_64-unknown-linux-gnu | FileCheck %s
+
+// Make sure that the LLVM attributes for XRay-annotated functions do show up.
+[[clang::xray_always_instrument,clang::xray_log_args(1)]] void foo(int a) {
+// CHECK: define void @_Z3fooi(i32 %a) #0
+};
+
+[[clang::xray_log_args(1)]] void bar(int a) {
+// CHECK: define void @_Z3bari(i32 %a) #1
+};
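+// bar has xray_log_args but no instrumentation attribute, so no
+// "xray-log-args" function attribute is emitted for it (CHECK-NOT below).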
+
+// CHECK: #0 = {{.*}}"function-instrument"="xray-always"{{.*}}"xray-log-args"="1"
+// CHECK-NOT: #1 = {{.*}}"xray-log-args"="1"
diff --git a/test/CodeGen/zvector.c b/test/CodeGen/zvector.c
index ebe7e415e1db8..a8405a78e92b7 100644
--- a/test/CodeGen/zvector.c
+++ b/test/CodeGen/zvector.c
@@ -1,5 +1,4 @@
-// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \
-// RUN: -O -emit-llvm -o - -W -Wall -Werror %s | FileCheck %s
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -emit-llvm -o - -W -Wall -Werror %s | opt -S -mem2reg | FileCheck %s
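+// The checks below match the plain front-end output after mem2reg only, so
+// the test no longer depends on what the -O pipeline happens to produce.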
volatile vector signed char sc, sc2;
volatile vector unsigned char uc, uc2;
@@ -21,2778 +20,3349 @@ volatile vector double fd, fd2;
volatile int cnt;
-void test_assign (void)
-{
-// CHECK-LABEL: test_assign
+// CHECK-LABEL: define void @test_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: store volatile <16 x i8> [[TMP0]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: store volatile <16 x i8> [[TMP1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: store volatile <8 x i16> [[TMP2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: store volatile <8 x i16> [[TMP3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: store volatile <4 x i32> [[TMP4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: store volatile <4 x i32> [[TMP5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: store volatile <2 x i64> [[TMP6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: store volatile <2 x i64> [[TMP7]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: store volatile <2 x double> [[TMP8]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_assign(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
sc = sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
uc = uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
ss = ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
us = us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
si = si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
ui = ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
sl = sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
ul = ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
fd = fd2;
}
-void test_pos (void)
-{
-// CHECK-LABEL: test_pos
+// CHECK-LABEL: define void @test_pos() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: store volatile <16 x i8> [[TMP0]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: store volatile <16 x i8> [[TMP1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: store volatile <8 x i16> [[TMP2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: store volatile <8 x i16> [[TMP3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: store volatile <4 x i32> [[TMP4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: store volatile <4 x i32> [[TMP5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: store volatile <2 x i64> [[TMP6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: store volatile <2 x i64> [[TMP7]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: store volatile <2 x double> [[TMP8]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_pos(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
sc = +sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
uc = +uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
ss = +ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
us = +us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
si = +si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
ui = +ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
sl = +sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
ul = +ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
fd = +fd2;
}
-void test_neg (void)
-{
-// CHECK-LABEL: test_neg
+// CHECK-LABEL: define void @test_neg() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[TMP0]]
+// CHECK: store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SUB1:%.*]] = sub <8 x i16> zeroinitializer, [[TMP1]]
+// CHECK: store volatile <8 x i16> [[SUB1]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SUB2:%.*]] = sub <4 x i32> zeroinitializer, [[TMP2]]
+// CHECK: store volatile <4 x i32> [[SUB2]], <4 x i32>* @si, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SUB3:%.*]] = sub <2 x i64> zeroinitializer, [[TMP3]]
+// CHECK: store volatile <2 x i64> [[SUB3]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[SUB4:%.*]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP4]]
+// CHECK: store volatile <2 x double> [[SUB4]], <2 x double>* @fd, align 8
+// CHECK: ret void
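+// (Negation of a floating-point vector is emitted as an fsub from -0.0,
+// which preserves the sign of zero.)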
+void test_neg(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sub <16 x i8> zeroinitializer, [[VAL]]
sc = -sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sub <8 x i16> zeroinitializer, [[VAL]]
ss = -ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sub <4 x i32> zeroinitializer, [[VAL]]
si = -si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sub <2 x i64> zeroinitializer, [[VAL]]
sl = -sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[VAL]]
fd = -fd2;
}
-void test_preinc (void)
-{
-// CHECK-LABEL: test_preinc
+// CHECK-LABEL: define void @test_preinc() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK: store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK: store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK: store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK: store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK: store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK: store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1>
+// CHECK: store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1>
+// CHECK: store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[INC8:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00>
+// CHECK: store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8
+// CHECK: ret void
+void test_preinc(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
++sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
++uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
++ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
++us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
++si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
++ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
++sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
++ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
++fd2;
}
-void test_postinc (void)
-{
-// CHECK-LABEL: test_postinc
+// CHECK-LABEL: define void @test_postinc() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK: store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK: store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK: store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK: store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK: store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK: store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1>
+// CHECK: store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1>
+// CHECK: store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[INC8:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00>
+// CHECK: store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8
+// CHECK: ret void
+void test_postinc(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
sc2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
uc2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
ss2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
us2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
si2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
ui2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
sl2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
ul2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
fd2++;
}
-void test_predec (void)
-{
-// CHECK-LABEL: test_predec
+// CHECK-LABEL: define void @test_predec() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[DEC8:%.*]] = fadd <2 x double> [[TMP8]], <double -1.000000e+00, double -1.000000e+00>
+// CHECK: store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8
+// CHECK: ret void
+void test_predec(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
--sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
--uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
--ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
--us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
--si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
--ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
--sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
--ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
--fd2;
}
-void test_postdec (void)
-{
-// CHECK-LABEL: test_postdec
+// CHECK-LABEL: define void @test_postdec() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[DEC8:%.*]] = fadd <2 x double> [[TMP8]], <double -1.000000e+00, double -1.000000e+00>
+// CHECK: store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8
+// CHECK: ret void
+void test_postdec(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
sc2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
uc2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
ss2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
us2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
si2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
ui2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
sl2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
ul2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
fd2--;
}
-void test_add (void)
-{
-// CHECK-LABEL: test_add
+// CHECK-LABEL: define void @test_add() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[ADD:%.*]] = add <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[ADD]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[ADD1:%.*]] = add <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[ADD1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[ADD2:%.*]] = add <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <16 x i8> [[ADD2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[ADD3:%.*]] = add <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[ADD3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[ADD4:%.*]] = add <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[ADD4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[ADD5:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <16 x i8> [[ADD5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[ADD6:%.*]] = add <8 x i16> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <8 x i16> [[ADD6]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[ADD7:%.*]] = add <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[ADD7]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[ADD8:%.*]] = add <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[ADD8]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[ADD9:%.*]] = add <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: store volatile <8 x i16> [[ADD9]], <8 x i16>* @us, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[ADD10:%.*]] = add <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[ADD10]], <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[ADD11:%.*]] = add <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[ADD11]], <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[ADD12:%.*]] = add <4 x i32> [[TMP24]], [[TMP25]]
+// CHECK: store volatile <4 x i32> [[ADD12]], <4 x i32>* @si, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[ADD13:%.*]] = add <4 x i32> [[TMP26]], [[TMP27]]
+// CHECK: store volatile <4 x i32> [[ADD13]], <4 x i32>* @si, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[ADD14:%.*]] = add <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[ADD14]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[ADD15:%.*]] = add <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[ADD15]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[ADD16:%.*]] = add <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: store volatile <4 x i32> [[ADD16]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[ADD17:%.*]] = add <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[ADD17]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[ADD18:%.*]] = add <2 x i64> [[TMP36]], [[TMP37]]
+// CHECK: store volatile <2 x i64> [[ADD18]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[ADD19:%.*]] = add <2 x i64> [[TMP38]], [[TMP39]]
+// CHECK: store volatile <2 x i64> [[ADD19]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[ADD20:%.*]] = add <2 x i64> [[TMP40]], [[TMP41]]
+// CHECK: store volatile <2 x i64> [[ADD20]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[ADD21:%.*]] = add <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[ADD21]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[ADD22:%.*]] = add <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[ADD22]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[ADD23:%.*]] = add <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: store volatile <2 x i64> [[ADD23]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[ADD24:%.*]] = fadd <2 x double> [[TMP48]], [[TMP49]]
+// CHECK: store volatile <2 x double> [[ADD24]], <2 x double>* @fd, align 8
+// CHECK: ret void
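+// ("vector bool" operands such as @bc mix with both signed and unsigned
+// vectors; the result takes the type of the non-bool operand.)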
+void test_add(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
sc = sc + sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
sc = sc + bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
sc = bc + sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
uc = uc + uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
uc = uc + bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
uc = bc + uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
ss = ss + ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
ss = ss + bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
ss = bs + ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
us = us + us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
us = us + bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
us = bs + us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
si = si + si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
si = si + bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
si = bi + si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
ui = ui + ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
ui = ui + bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
ui = bi + ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
sl = sl + sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
sl = sl + bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
sl = bl + sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
ul = ul + ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
ul = ul + bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
ul = bl + ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL1]], [[VAL2]]
fd = fd + fd2;
}
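
/* A note on the hunk above: the regenerated test_add checks are tighter
   than the lines they replace. The old patterns matched only the two
   volatile loads and the add, with a loose %{{.*}} result name; the new
   ones also pin the operand order, the volatile store back to the
   left-hand side, and the align 8 on every vector access. Below is a
   hypothetical, self-contained sketch of the pattern under test; the
   names are invented, and it assumes the z vector language extension
   (compile with -fzvector on an s390x target): */
volatile vector signed char add_lhs, add_rhs;
volatile vector bool char add_bool;
void add_demo(void) {
  add_lhs = add_lhs + add_rhs;  /* load, load, add <16 x i8>, volatile store */
  add_lhs = add_lhs + add_bool; /* a bool operand still lowers to a plain add */
}
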
-void test_add_assign (void)
-{
-// CHECK-LABEL: test_add_assign
+// CHECK-LABEL: define void @test_add_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[ADD:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[ADD]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[ADD1:%.*]] = add <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[ADD1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[ADD2:%.*]] = add <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <16 x i8> [[ADD2]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[ADD3:%.*]] = add <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <16 x i8> [[ADD3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[ADD4:%.*]] = add <8 x i16> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <8 x i16> [[ADD4]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[ADD5:%.*]] = add <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <8 x i16> [[ADD5]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[ADD6:%.*]] = add <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <8 x i16> [[ADD6]], <8 x i16>* @us, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[ADD7:%.*]] = add <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[ADD7]], <8 x i16>* @us, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[ADD8:%.*]] = add <4 x i32> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <4 x i32> [[ADD8]], <4 x i32>* @si, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[ADD9:%.*]] = add <4 x i32> [[TMP19]], [[TMP18]]
+// CHECK: store volatile <4 x i32> [[ADD9]], <4 x i32>* @si, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[ADD10:%.*]] = add <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK: store volatile <4 x i32> [[ADD10]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[ADD11:%.*]] = add <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK: store volatile <4 x i32> [[ADD11]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[ADD12:%.*]] = add <2 x i64> [[TMP25]], [[TMP24]]
+// CHECK: store volatile <2 x i64> [[ADD12]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[ADD13:%.*]] = add <2 x i64> [[TMP27]], [[TMP26]]
+// CHECK: store volatile <2 x i64> [[ADD13]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[ADD14:%.*]] = add <2 x i64> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <2 x i64> [[ADD14]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[ADD15:%.*]] = add <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <2 x i64> [[ADD15]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[ADD16:%.*]] = fadd <2 x double> [[TMP33]], [[TMP32]]
+// CHECK: store volatile <2 x double> [[ADD16]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_add_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
sc += sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
sc += bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
uc += uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
uc += bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
ss += ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
ss += bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
us += us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
us += bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
si += si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
si += bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
ui += ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
ui += bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
sl += sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
sl += bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
ul += ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
ul += bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL2]], [[VAL1]]
fd += fd2;
}
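
/* The compound assignments evaluate the right-hand side first, which is
   why the RHS load takes the lower TMP number here and the add keeps the
   left-hand side as its first operand: add [[TMP1]], [[TMP0]]. The plain
   form in test_add loads in source order instead. A hypothetical
   side-by-side (same invented names and -fzvector assumption as above): */
volatile vector signed char aa_lhs, aa_rhs;
void add_assign_demo(void) {
  aa_lhs = aa_lhs + aa_rhs; /* loads @aa_lhs, then @aa_rhs: add [[TMP0]], [[TMP1]] */
  aa_lhs += aa_rhs;         /* loads @aa_rhs, then @aa_lhs: add [[TMP1]], [[TMP0]] */
}
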
-void test_sub (void)
-{
-// CHECK-LABEL: test_sub
+// CHECK-LABEL: define void @test_sub() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SUB:%.*]] = sub <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[SUB1:%.*]] = sub <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[SUB1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SUB2:%.*]] = sub <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <16 x i8> [[SUB2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[SUB3:%.*]] = sub <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[SUB3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[SUB4:%.*]] = sub <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[SUB4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[SUB5:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <16 x i8> [[SUB5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SUB6:%.*]] = sub <8 x i16> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <8 x i16> [[SUB6]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[SUB7:%.*]] = sub <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[SUB7]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SUB8:%.*]] = sub <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[SUB8]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[SUB9:%.*]] = sub <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: store volatile <8 x i16> [[SUB9]], <8 x i16>* @us, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[SUB10:%.*]] = sub <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[SUB10]], <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[SUB11:%.*]] = sub <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[SUB11]], <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SUB12:%.*]] = sub <4 x i32> [[TMP24]], [[TMP25]]
+// CHECK: store volatile <4 x i32> [[SUB12]], <4 x i32>* @si, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[SUB13:%.*]] = sub <4 x i32> [[TMP26]], [[TMP27]]
+// CHECK: store volatile <4 x i32> [[SUB13]], <4 x i32>* @si, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SUB14:%.*]] = sub <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[SUB14]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[SUB15:%.*]] = sub <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[SUB15]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[SUB16:%.*]] = sub <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: store volatile <4 x i32> [[SUB16]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[SUB17:%.*]] = sub <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[SUB17]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SUB18:%.*]] = sub <2 x i64> [[TMP36]], [[TMP37]]
+// CHECK: store volatile <2 x i64> [[SUB18]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[SUB19:%.*]] = sub <2 x i64> [[TMP38]], [[TMP39]]
+// CHECK: store volatile <2 x i64> [[SUB19]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SUB20:%.*]] = sub <2 x i64> [[TMP40]], [[TMP41]]
+// CHECK: store volatile <2 x i64> [[SUB20]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[SUB21:%.*]] = sub <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[SUB21]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[SUB22:%.*]] = sub <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[SUB22]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[SUB23:%.*]] = sub <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: store volatile <2 x i64> [[SUB23]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[SUB24:%.*]] = fsub <2 x double> [[TMP48]], [[TMP49]]
+// CHECK: store volatile <2 x double> [[SUB24]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_sub(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
sc = sc - sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
sc = sc - bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
sc = bc - sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
uc = uc - uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
uc = uc - bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
uc = bc - uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
ss = ss - ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
ss = ss - bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
ss = bs - ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
us = us - us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
us = us - bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
us = bs - us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
si = si - si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
si = si - bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
si = bi - si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
ui = ui - ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
ui = ui - bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
ui = bi - ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
sl = sl - sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
sl = sl - bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
sl = bl - sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
ul = ul - ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
ul = ul - bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
ul = bl - ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
fd = fd - fd2;
}
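
/* Unlike add, sub is not commutative, so operand order is semantically
   significant: every new check reads sub/fsub [[TMPeven]], [[TMPodd]]
   with the minuend loaded first. The old checks reflected this too; they
   kept [[VAL1]], [[VAL2]] for sub while accepting the commuted
   [[VAL2]], [[VAL1]] for add. A hypothetical one-liner (invented names,
   -fzvector assumed): */
volatile vector signed int sub_min, sub_sub;
void sub_demo(void) {
  sub_min = sub_min - sub_sub; /* sub <4 x i32> [[minuend]], [[subtrahend]] */
}
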
-void test_sub_assign (void)
-{
-// CHECK-LABEL: test_sub_assign
+// CHECK-LABEL: define void @test_sub_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SUB:%.*]] = sub <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SUB1:%.*]] = sub <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[SUB1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SUB2:%.*]] = sub <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <16 x i8> [[SUB2]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SUB3:%.*]] = sub <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <16 x i8> [[SUB3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SUB4:%.*]] = sub <8 x i16> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <8 x i16> [[SUB4]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SUB5:%.*]] = sub <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <8 x i16> [[SUB5]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SUB6:%.*]] = sub <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <8 x i16> [[SUB6]], <8 x i16>* @us, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SUB7:%.*]] = sub <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[SUB7]], <8 x i16>* @us, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SUB8:%.*]] = sub <4 x i32> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <4 x i32> [[SUB8]], <4 x i32>* @si, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SUB9:%.*]] = sub <4 x i32> [[TMP19]], [[TMP18]]
+// CHECK: store volatile <4 x i32> [[SUB9]], <4 x i32>* @si, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SUB10:%.*]] = sub <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK: store volatile <4 x i32> [[SUB10]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SUB11:%.*]] = sub <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK: store volatile <4 x i32> [[SUB11]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SUB12:%.*]] = sub <2 x i64> [[TMP25]], [[TMP24]]
+// CHECK: store volatile <2 x i64> [[SUB12]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SUB13:%.*]] = sub <2 x i64> [[TMP27]], [[TMP26]]
+// CHECK: store volatile <2 x i64> [[SUB13]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SUB14:%.*]] = sub <2 x i64> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <2 x i64> [[SUB14]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SUB15:%.*]] = sub <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <2 x i64> [[SUB15]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[SUB16:%.*]] = fsub <2 x double> [[TMP33]], [[TMP32]]
+// CHECK: store volatile <2 x double> [[SUB16]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_sub_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
sc -= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
sc -= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
uc -= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
uc -= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
ss -= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
ss -= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
us -= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
us -= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
si -= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
si -= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
ui -= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
ui -= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
sl -= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
sl -= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
ul -= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
ul -= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
fd -= fd2;
}
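
/* In the compound subtractions the TMP numbering flips, because the RHS
   loads first, but the opcode order does not: every check stays
   sub [[TMP1]], [[TMP0]], i.e. left-hand side minus right-hand side.
   Hypothetical sketch (invented names, -fzvector assumed): */
volatile vector signed char sa_lhs, sa_rhs;
void sub_assign_demo(void) {
  sa_lhs -= sa_rhs; /* load @sa_rhs, load @sa_lhs, sub [[TMP1]], [[TMP0]], store @sa_lhs */
}
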
-void test_mul (void)
-{
-// CHECK-LABEL: test_mul
+// CHECK-LABEL: define void @test_mul() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[MUL:%.*]] = mul <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[MUL]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[MUL1:%.*]] = mul <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[MUL1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[MUL2:%.*]] = mul <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <8 x i16> [[MUL2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[MUL3:%.*]] = mul <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <8 x i16> [[MUL3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[MUL4:%.*]] = mul <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <4 x i32> [[MUL4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[MUL5:%.*]] = mul <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <4 x i32> [[MUL5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[MUL6:%.*]] = mul <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <2 x i64> [[MUL6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[MUL7:%.*]] = mul <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <2 x i64> [[MUL7]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[MUL8:%.*]] = fmul <2 x double> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <2 x double> [[MUL8]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_mul(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
sc = sc * sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
uc = uc * uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
ss = ss * ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
us = us * us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
si = si * si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
ui = ui * ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
sl = sl * sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
ul = ul * ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fmul <2 x double> [[VAL1]], [[VAL2]]
fd = fd * fd2;
}
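
/* Multiplication drops the mixed bool-vector cases that add and sub
   exercise: only matching signed/unsigned pairs and the double case
   appear, presumably because the vector extension does not define * for
   bool vectors. Integer elements lower to mul, double to fmul.
   Hypothetical sketch (invented names, -fzvector assumed): */
volatile vector unsigned short mul_a, mul_b;
volatile vector double mul_fa, mul_fb;
void mul_demo(void) {
  mul_a = mul_a * mul_b;    /* mul <8 x i16> */
  mul_fa = mul_fa * mul_fb; /* fmul <2 x double> */
}
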
-void test_mul_assign (void)
-{
-// CHECK-LABEL: test_mul_assign
+// CHECK-LABEL: define void @test_mul_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[MUL:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[MUL]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[MUL1:%.*]] = mul <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[MUL1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[MUL2:%.*]] = mul <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <8 x i16> [[MUL2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[MUL3:%.*]] = mul <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <8 x i16> [[MUL3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[MUL4:%.*]] = mul <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <4 x i32> [[MUL4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[MUL5:%.*]] = mul <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <4 x i32> [[MUL5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[MUL6:%.*]] = mul <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <2 x i64> [[MUL6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[MUL7:%.*]] = mul <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <2 x i64> [[MUL7]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[MUL8:%.*]] = fmul <2 x double> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <2 x double> [[MUL8]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_mul_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
sc *= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
uc *= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
ss *= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
us *= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
si *= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
ui *= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
sl *= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
ul *= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fmul <2 x double> [[VAL2]], [[VAL1]]
fd *= fd2;
}
-void test_div (void)
-{
-// CHECK-LABEL: test_div
+// CHECK-LABEL: define void @test_div() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[DIV:%.*]] = sdiv <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[DIV1:%.*]] = udiv <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[DIV2:%.*]] = sdiv <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[DIV3:%.*]] = udiv <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[DIV4:%.*]] = sdiv <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[DIV5:%.*]] = udiv <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[DIV6:%.*]] = sdiv <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[DIV7:%.*]] = udiv <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[DIV8:%.*]] = fdiv <2 x double> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <2 x double> [[DIV8]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_div(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
sc = sc / sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
uc = uc / uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
ss = ss / ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
us = us / us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
si = si / si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
ui = ui / ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
sl = sl / sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
ul = ul / ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
fd = fd / fd2;
}
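
/* Division is the first operator here where element signedness selects
   the opcode: signed element types lower to sdiv, unsigned ones to udiv,
   and vector double to fdiv; add, sub and mul are sign-agnostic.
   Hypothetical sketch (invented names, -fzvector assumed): */
volatile vector signed int div_s, div_s2;
volatile vector unsigned int div_u, div_u2;
void div_demo(void) {
  div_s = div_s / div_s2; /* sdiv <4 x i32> */
  div_u = div_u / div_u2; /* udiv <4 x i32> */
}
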
-void test_div_assign (void)
-{
-// CHECK-LABEL: test_div_assign
+// CHECK-LABEL: define void @test_div_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[DIV:%.*]] = sdiv <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[DIV1:%.*]] = udiv <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[DIV2:%.*]] = sdiv <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[DIV3:%.*]] = udiv <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[DIV4:%.*]] = sdiv <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[DIV5:%.*]] = udiv <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[DIV6:%.*]] = sdiv <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[DIV7:%.*]] = udiv <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[DIV8:%.*]] = fdiv <2 x double> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <2 x double> [[DIV8]], <2 x double>* @fd, align 8
+// CHECK: ret void
+void test_div_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
sc /= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
uc /= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
ss /= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
us /= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
si /= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
ui /= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
sl /= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
ul /= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
fd /= fd2;
}
-void test_rem (void)
-{
-// CHECK-LABEL: test_rem
+// CHECK-LABEL: define void @test_rem() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[REM:%.*]] = srem <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[REM]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[REM1:%.*]] = urem <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[REM1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[REM2:%.*]] = srem <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <8 x i16> [[REM2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[REM3:%.*]] = urem <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <8 x i16> [[REM3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[REM4:%.*]] = srem <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <4 x i32> [[REM4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[REM5:%.*]] = urem <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <4 x i32> [[REM5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[REM6:%.*]] = srem <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <2 x i64> [[REM6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[REM7:%.*]] = urem <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <2 x i64> [[REM7]], <2 x i64>* @ul, align 8
+// CHECK: ret void
+void test_rem(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
sc = sc % sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
uc = uc % uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
ss = ss % ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
us = us % us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
si = si % si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
ui = ui % ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
sl = sl % sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
ul = ul % ul2;
}
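
/* Remainder mirrors division (srem for signed elements, urem for
   unsigned) but has no floating-point case: C defines % only for integer
   operands, so there is no fd % fd2 statement and no frem check.
   Hypothetical sketch (invented names, -fzvector assumed): */
volatile vector signed long long rem_s, rem_s2;
volatile vector unsigned long long rem_u, rem_u2;
void rem_demo(void) {
  rem_s = rem_s % rem_s2; /* srem <2 x i64> */
  rem_u = rem_u % rem_u2; /* urem <2 x i64> */
}
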
-void test_rem_assign (void)
-{
-// CHECK-LABEL: test_rem_assign
+// CHECK-LABEL: define void @test_rem_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[REM:%.*]] = srem <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[REM]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[REM1:%.*]] = urem <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[REM1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[REM2:%.*]] = srem <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <8 x i16> [[REM2]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[REM3:%.*]] = urem <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <8 x i16> [[REM3]], <8 x i16>* @us, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[REM4:%.*]] = srem <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <4 x i32> [[REM4]], <4 x i32>* @si, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[REM5:%.*]] = urem <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <4 x i32> [[REM5]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[REM6:%.*]] = srem <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <2 x i64> [[REM6]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[REM7:%.*]] = urem <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <2 x i64> [[REM7]], <2 x i64>* @ul, align 8
+// CHECK: ret void
+void test_rem_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
sc %= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
uc %= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
ss %= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
us %= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
si %= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
ui %= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
sl %= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
ul %= ul2;
}
-void test_not (void)
-{
-// CHECK-LABEL: test_not
+// CHECK-LABEL: define void @test_not() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[NEG:%.*]] = xor <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[NEG]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[NEG1:%.*]] = xor <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[NEG1]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[NEG2:%.*]] = xor <16 x i8> [[TMP2]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: store volatile <16 x i8> [[NEG2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[NEG3:%.*]] = xor <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[NEG3]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[NEG4:%.*]] = xor <8 x i16> [[TMP4]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[NEG4]], <8 x i16>* @us, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[NEG5:%.*]] = xor <8 x i16> [[TMP5]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: store volatile <8 x i16> [[NEG5]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[NEG6:%.*]] = xor <4 x i32> [[TMP6]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[NEG6]], <4 x i32>* @si, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[NEG7:%.*]] = xor <4 x i32> [[TMP7]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[NEG7]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[NEG8:%.*]] = xor <4 x i32> [[TMP8]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: store volatile <4 x i32> [[NEG8]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[NEG9:%.*]] = xor <2 x i64> [[TMP9]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[NEG9]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[NEG10:%.*]] = xor <2 x i64> [[TMP10]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[NEG10]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[NEG11:%.*]] = xor <2 x i64> [[TMP11]], <i64 -1, i64 -1>
+// CHECK: store volatile <2 x i64> [[NEG11]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_not(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
sc = ~sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
uc = ~uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
bc = ~bc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
ss = ~ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
us = ~us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
bs = ~bs2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
si = ~si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
ui = ~ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
bi = ~bi2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
sl = ~sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
ul = ~ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
bl = ~bl2;
}
-void test_and (void)
-{
-// CHECK-LABEL: test_and
+// CHECK-LABEL: define void @test_and() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[AND:%.*]] = and <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[AND]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[AND1:%.*]] = and <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[AND1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[AND2:%.*]] = and <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <16 x i8> [[AND2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[AND3:%.*]] = and <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[AND3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[AND4:%.*]] = and <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[AND4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[AND5:%.*]] = and <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <16 x i8> [[AND5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[AND6:%.*]] = and <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <16 x i8> [[AND6]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[AND7:%.*]] = and <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[AND7]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[AND8:%.*]] = and <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[AND8]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[AND9:%.*]] = and <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: store volatile <8 x i16> [[AND9]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[AND10:%.*]] = and <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[AND10]], <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[AND11:%.*]] = and <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[AND11]], <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[AND12:%.*]] = and <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK: store volatile <8 x i16> [[AND12]], <8 x i16>* @us, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[AND13:%.*]] = and <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK: store volatile <8 x i16> [[AND13]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[AND14:%.*]] = and <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[AND14]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[AND15:%.*]] = and <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[AND15]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[AND16:%.*]] = and <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: store volatile <4 x i32> [[AND16]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[AND17:%.*]] = and <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[AND17]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[AND18:%.*]] = and <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK: store volatile <4 x i32> [[AND18]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[AND19:%.*]] = and <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK: store volatile <4 x i32> [[AND19]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[AND20:%.*]] = and <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK: store volatile <4 x i32> [[AND20]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[AND21:%.*]] = and <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[AND21]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[AND22:%.*]] = and <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[AND22]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[AND23:%.*]] = and <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: store volatile <2 x i64> [[AND23]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[AND24:%.*]] = and <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK: store volatile <2 x i64> [[AND24]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[AND25:%.*]] = and <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK: store volatile <2 x i64> [[AND25]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[AND26:%.*]] = and <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK: store volatile <2 x i64> [[AND26]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[AND27:%.*]] = and <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK: store volatile <2 x i64> [[AND27]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_and(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
sc = sc & sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
sc = sc & bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
sc = bc & sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
uc = uc & uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
uc = uc & bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
uc = bc & uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
bc = bc & bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
ss = ss & ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
ss = ss & bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
ss = bs & ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
us = us & us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
us = us & bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
us = bs & us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
bs = bs & bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
si = si & si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
si = si & bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
si = bi & si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
ui = ui & ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
ui = ui & bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
ui = bi & ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
bi = bi & bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
sl = sl & sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
sl = sl & bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
sl = bl & sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
ul = ul & ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
ul = ul & bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
ul = bl & ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
bl = bl & bl2;
}
-void test_and_assign (void)
-{
-// CHECK-LABEL: test_and_assign
+// CHECK-LABEL: define void @test_and_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[AND:%.*]] = and <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[AND]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[AND1:%.*]] = and <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[AND1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[AND2:%.*]] = and <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <16 x i8> [[AND2]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[AND3:%.*]] = and <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <16 x i8> [[AND3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[AND4:%.*]] = and <16 x i8> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <16 x i8> [[AND4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[AND5:%.*]] = and <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <8 x i16> [[AND5]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[AND6:%.*]] = and <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <8 x i16> [[AND6]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[AND7:%.*]] = and <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[AND7]], <8 x i16>* @us, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[AND8:%.*]] = and <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <8 x i16> [[AND8]], <8 x i16>* @us, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[AND9:%.*]] = and <8 x i16> [[TMP19]], [[TMP18]]
+// CHECK: store volatile <8 x i16> [[AND9]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[AND10:%.*]] = and <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK: store volatile <4 x i32> [[AND10]], <4 x i32>* @si, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[AND11:%.*]] = and <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK: store volatile <4 x i32> [[AND11]], <4 x i32>* @si, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[AND12:%.*]] = and <4 x i32> [[TMP25]], [[TMP24]]
+// CHECK: store volatile <4 x i32> [[AND12]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[AND13:%.*]] = and <4 x i32> [[TMP27]], [[TMP26]]
+// CHECK: store volatile <4 x i32> [[AND13]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[AND14:%.*]] = and <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <4 x i32> [[AND14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[AND15:%.*]] = and <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <2 x i64> [[AND15]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[AND16:%.*]] = and <2 x i64> [[TMP33]], [[TMP32]]
+// CHECK: store volatile <2 x i64> [[AND16]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[AND17:%.*]] = and <2 x i64> [[TMP35]], [[TMP34]]
+// CHECK: store volatile <2 x i64> [[AND17]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[AND18:%.*]] = and <2 x i64> [[TMP37]], [[TMP36]]
+// CHECK: store volatile <2 x i64> [[AND18]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[AND19:%.*]] = and <2 x i64> [[TMP39]], [[TMP38]]
+// CHECK: store volatile <2 x i64> [[AND19]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_and_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
sc &= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
sc &= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
uc &= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
uc &= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
bc &= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
ss &= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
ss &= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
us &= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
us &= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
bs &= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
si &= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
si &= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
ui &= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
ui &= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
bi &= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
sl &= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
sl &= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
ul &= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
ul &= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
bl &= bl2;
}
-void test_or (void)
-{
-// CHECK-LABEL: test_or
+// CHECK-LABEL: define void @test_or() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[OR:%.*]] = or <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[OR]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[OR1:%.*]] = or <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[OR1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[OR2:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <16 x i8> [[OR2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[OR3:%.*]] = or <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[OR3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[OR4:%.*]] = or <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[OR4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[OR5:%.*]] = or <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <16 x i8> [[OR5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[OR6:%.*]] = or <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <16 x i8> [[OR6]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[OR7:%.*]] = or <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[OR7]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[OR8:%.*]] = or <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[OR8]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[OR9:%.*]] = or <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: store volatile <8 x i16> [[OR9]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[OR10:%.*]] = or <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[OR10]], <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[OR11:%.*]] = or <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[OR11]], <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[OR12:%.*]] = or <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK: store volatile <8 x i16> [[OR12]], <8 x i16>* @us, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[OR13:%.*]] = or <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK: store volatile <8 x i16> [[OR13]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[OR14:%.*]] = or <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[OR14]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[OR15:%.*]] = or <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[OR15]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[OR16:%.*]] = or <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: store volatile <4 x i32> [[OR16]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[OR17:%.*]] = or <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[OR17]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[OR18:%.*]] = or <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK: store volatile <4 x i32> [[OR18]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[OR19:%.*]] = or <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK: store volatile <4 x i32> [[OR19]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[OR20:%.*]] = or <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK: store volatile <4 x i32> [[OR20]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[OR21:%.*]] = or <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[OR21]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[OR22:%.*]] = or <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[OR22]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[OR23:%.*]] = or <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: store volatile <2 x i64> [[OR23]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[OR24:%.*]] = or <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK: store volatile <2 x i64> [[OR24]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[OR25:%.*]] = or <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK: store volatile <2 x i64> [[OR25]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[OR26:%.*]] = or <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK: store volatile <2 x i64> [[OR26]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[OR27:%.*]] = or <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK: store volatile <2 x i64> [[OR27]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_or(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
sc = sc | sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
sc = sc | bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
sc = bc | sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
uc = uc | uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
uc = uc | bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
uc = bc | uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
bc = bc | bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
ss = ss | ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
ss = ss | bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
ss = bs | ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
us = us | us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
us = us | bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
us = bs | us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
bs = bs | bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
si = si | si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
si = si | bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
si = bi | si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
ui = ui | ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
ui = ui | bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
ui = bi | ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
bi = bi | bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
sl = sl | sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
sl = sl | bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
sl = bl | sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
ul = ul | ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
ul = ul | bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
ul = bl | ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
bl = bl | bl2;
}
-void test_or_assign (void)
-{
-// CHECK-LABEL: test_or_assign
+// CHECK-LABEL: define void @test_or_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[OR:%.*]] = or <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[OR]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[OR1:%.*]] = or <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[OR1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[OR2:%.*]] = or <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <16 x i8> [[OR2]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[OR3:%.*]] = or <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <16 x i8> [[OR3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[OR4:%.*]] = or <16 x i8> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <16 x i8> [[OR4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[OR5:%.*]] = or <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <8 x i16> [[OR5]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[OR6:%.*]] = or <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <8 x i16> [[OR6]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[OR7:%.*]] = or <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[OR7]], <8 x i16>* @us, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[OR8:%.*]] = or <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <8 x i16> [[OR8]], <8 x i16>* @us, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[OR9:%.*]] = or <8 x i16> [[TMP19]], [[TMP18]]
+// CHECK: store volatile <8 x i16> [[OR9]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[OR10:%.*]] = or <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK: store volatile <4 x i32> [[OR10]], <4 x i32>* @si, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[OR11:%.*]] = or <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK: store volatile <4 x i32> [[OR11]], <4 x i32>* @si, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[OR12:%.*]] = or <4 x i32> [[TMP25]], [[TMP24]]
+// CHECK: store volatile <4 x i32> [[OR12]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[OR13:%.*]] = or <4 x i32> [[TMP27]], [[TMP26]]
+// CHECK: store volatile <4 x i32> [[OR13]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[OR14:%.*]] = or <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <4 x i32> [[OR14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[OR15:%.*]] = or <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <2 x i64> [[OR15]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[OR16:%.*]] = or <2 x i64> [[TMP33]], [[TMP32]]
+// CHECK: store volatile <2 x i64> [[OR16]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[OR17:%.*]] = or <2 x i64> [[TMP35]], [[TMP34]]
+// CHECK: store volatile <2 x i64> [[OR17]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[OR18:%.*]] = or <2 x i64> [[TMP37]], [[TMP36]]
+// CHECK: store volatile <2 x i64> [[OR18]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[OR19:%.*]] = or <2 x i64> [[TMP39]], [[TMP38]]
+// CHECK: store volatile <2 x i64> [[OR19]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_or_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
sc |= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
sc |= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
uc |= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
uc |= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
bc |= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
ss |= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
ss |= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
us |= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
us |= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
bs |= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
si |= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
si |= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
ui |= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
ui |= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
bi |= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
sl |= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
sl |= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
ul |= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
ul |= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
bl |= bl2;
}
-void test_xor (void)
-{
-// CHECK-LABEL: test_xor
+// CHECK-LABEL: define void @test_xor() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[XOR:%.*]] = xor <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[XOR]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[XOR1:%.*]] = xor <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[XOR1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[XOR2:%.*]] = xor <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: store volatile <16 x i8> [[XOR2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[XOR3:%.*]] = xor <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[XOR3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[XOR4:%.*]] = xor <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[XOR4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[XOR5:%.*]] = xor <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: store volatile <16 x i8> [[XOR5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[XOR6:%.*]] = xor <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK: store volatile <16 x i8> [[XOR6]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[XOR7:%.*]] = xor <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[XOR7]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[XOR8:%.*]] = xor <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[XOR8]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[XOR9:%.*]] = xor <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: store volatile <8 x i16> [[XOR9]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[XOR10:%.*]] = xor <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[XOR10]], <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[XOR11:%.*]] = xor <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[XOR11]], <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[XOR12:%.*]] = xor <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK: store volatile <8 x i16> [[XOR12]], <8 x i16>* @us, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[XOR13:%.*]] = xor <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK: store volatile <8 x i16> [[XOR13]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[XOR14:%.*]] = xor <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[XOR14]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[XOR15:%.*]] = xor <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[XOR15]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[XOR16:%.*]] = xor <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: store volatile <4 x i32> [[XOR16]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[XOR17:%.*]] = xor <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[XOR17]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[XOR18:%.*]] = xor <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK: store volatile <4 x i32> [[XOR18]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[XOR19:%.*]] = xor <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK: store volatile <4 x i32> [[XOR19]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[XOR20:%.*]] = xor <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK: store volatile <4 x i32> [[XOR20]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[XOR21:%.*]] = xor <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[XOR21]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[XOR22:%.*]] = xor <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[XOR22]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[XOR23:%.*]] = xor <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: store volatile <2 x i64> [[XOR23]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[XOR24:%.*]] = xor <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK: store volatile <2 x i64> [[XOR24]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[XOR25:%.*]] = xor <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK: store volatile <2 x i64> [[XOR25]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[XOR26:%.*]] = xor <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK: store volatile <2 x i64> [[XOR26]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[XOR27:%.*]] = xor <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK: store volatile <2 x i64> [[XOR27]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_xor(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
sc = sc ^ sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
sc = sc ^ bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
sc = bc ^ sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
uc = uc ^ uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
uc = uc ^ bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
uc = bc ^ uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
bc = bc ^ bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
ss = ss ^ ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
ss = ss ^ bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
ss = bs ^ ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
us = us ^ us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
us = us ^ bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
us = bs ^ us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
bs = bs ^ bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
si = si ^ si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
si = si ^ bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
si = bi ^ si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
ui = ui ^ ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
ui = ui ^ bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
ui = bi ^ ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
bi = bi ^ bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
sl = sl ^ sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
sl = sl ^ bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
sl = bl ^ sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
ul = ul ^ ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
ul = ul ^ bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
ul = bl ^ ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
bl = bl ^ bl2;
}
-void test_xor_assign (void)
-{
-// CHECK-LABEL: test_xor_assign
+// CHECK-LABEL: define void @test_xor_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[XOR:%.*]] = xor <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[XOR]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[XOR1:%.*]] = xor <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[XOR1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[XOR2:%.*]] = xor <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK: store volatile <16 x i8> [[XOR2]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[XOR3:%.*]] = xor <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK: store volatile <16 x i8> [[XOR3]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[XOR4:%.*]] = xor <16 x i8> [[TMP9]], [[TMP8]]
+// CHECK: store volatile <16 x i8> [[XOR4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[XOR5:%.*]] = xor <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK: store volatile <8 x i16> [[XOR5]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[XOR6:%.*]] = xor <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK: store volatile <8 x i16> [[XOR6]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[XOR7:%.*]] = xor <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[XOR7]], <8 x i16>* @us, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[XOR8:%.*]] = xor <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <8 x i16> [[XOR8]], <8 x i16>* @us, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[XOR9:%.*]] = xor <8 x i16> [[TMP19]], [[TMP18]]
+// CHECK: store volatile <8 x i16> [[XOR9]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[XOR10:%.*]] = xor <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK: store volatile <4 x i32> [[XOR10]], <4 x i32>* @si, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[XOR11:%.*]] = xor <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK: store volatile <4 x i32> [[XOR11]], <4 x i32>* @si, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[XOR12:%.*]] = xor <4 x i32> [[TMP25]], [[TMP24]]
+// CHECK: store volatile <4 x i32> [[XOR12]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[XOR13:%.*]] = xor <4 x i32> [[TMP27]], [[TMP26]]
+// CHECK: store volatile <4 x i32> [[XOR13]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[XOR14:%.*]] = xor <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <4 x i32> [[XOR14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[XOR15:%.*]] = xor <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <2 x i64> [[XOR15]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[XOR16:%.*]] = xor <2 x i64> [[TMP33]], [[TMP32]]
+// CHECK: store volatile <2 x i64> [[XOR16]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[XOR17:%.*]] = xor <2 x i64> [[TMP35]], [[TMP34]]
+// CHECK: store volatile <2 x i64> [[XOR17]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[XOR18:%.*]] = xor <2 x i64> [[TMP37]], [[TMP36]]
+// CHECK: store volatile <2 x i64> [[XOR18]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[XOR19:%.*]] = xor <2 x i64> [[TMP39]], [[TMP38]]
+// CHECK: store volatile <2 x i64> [[XOR19]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_xor_assign(void) {
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
sc ^= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
sc ^= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
uc ^= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
uc ^= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
bc ^= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
ss ^= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
ss ^= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
us ^= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
us ^= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
bs ^= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
si ^= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
si ^= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
ui ^= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
ui ^= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
bi ^= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
sl ^= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
sl ^= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
ul ^= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
ul ^= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
bl ^= bl2;
}
-void test_sl (void)
-{
-// CHECK-LABEL: test_sl
+// CHECK-LABEL: define void @test_sl() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SHL:%.*]] = shl <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[SHL]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[SHL1:%.*]] = shl <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[SHL1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK: [[SHL2:%.*]] = shl <16 x i8> [[TMP4]], [[SH_PROM]]
+// CHECK: store volatile <16 x i8> [[SHL2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHL3]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SHL4:%.*]] = shl <16 x i8> [[TMP7]], [[TMP8]]
+// CHECK: store volatile <16 x i8> [[SHL4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[SHL5:%.*]] = shl <16 x i8> [[TMP9]], [[TMP10]]
+// CHECK: store volatile <16 x i8> [[SHL5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP12]], i32 0
+// CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK: [[SHL9:%.*]] = shl <16 x i8> [[TMP11]], [[SH_PROM8]]
+// CHECK: store volatile <16 x i8> [[SHL9]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHL10]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SHL11:%.*]] = shl <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[SHL11]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[SHL12:%.*]] = shl <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[SHL12]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP19:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0
+// CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK: [[SHL16:%.*]] = shl <8 x i16> [[TMP18]], [[SH_PROM15]]
+// CHECK: store volatile <8 x i16> [[SHL16]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHL17]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SHL18:%.*]] = shl <8 x i16> [[TMP21]], [[TMP22]]
+// CHECK: store volatile <8 x i16> [[SHL18]], <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[SHL19:%.*]] = shl <8 x i16> [[TMP23]], [[TMP24]]
+// CHECK: store volatile <8 x i16> [[SHL19]], <8 x i16>* @us, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP26:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP26]], i32 0
+// CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK: [[SHL23:%.*]] = shl <8 x i16> [[TMP25]], [[SH_PROM22]]
+// CHECK: store volatile <8 x i16> [[SHL23]], <8 x i16>* @us, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHL24]], <8 x i16>* @us, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SHL25:%.*]] = shl <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[SHL25]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[SHL26:%.*]] = shl <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[SHL26]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP33:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP33]], i32 0
+// CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[SHL29:%.*]] = shl <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]]
+// CHECK: store volatile <4 x i32> [[SHL29]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHL30]], <4 x i32>* @si, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SHL31:%.*]] = shl <4 x i32> [[TMP35]], [[TMP36]]
+// CHECK: store volatile <4 x i32> [[SHL31]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[SHL32:%.*]] = shl <4 x i32> [[TMP37]], [[TMP38]]
+// CHECK: store volatile <4 x i32> [[SHL32]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP40:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i32 0
+// CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[SHL35:%.*]] = shl <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]]
+// CHECK: store volatile <4 x i32> [[SHL35]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHL36]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SHL37:%.*]] = shl <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[SHL37]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[SHL38:%.*]] = shl <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[SHL38]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP47]], i32 0
+// CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK: [[SHL42:%.*]] = shl <2 x i64> [[TMP46]], [[SH_PROM41]]
+// CHECK: store volatile <2 x i64> [[SHL42]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHL43]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SHL44:%.*]] = shl <2 x i64> [[TMP49]], [[TMP50]]
+// CHECK: store volatile <2 x i64> [[SHL44]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[SHL45:%.*]] = shl <2 x i64> [[TMP51]], [[TMP52]]
+// CHECK: store volatile <2 x i64> [[SHL45]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP54:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP54]], i32 0
+// CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK: [[SHL49:%.*]] = shl <2 x i64> [[TMP53]], [[SH_PROM48]]
+// CHECK: store volatile <2 x i64> [[SHL49]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHL50]], <2 x i64>* @ul, align 8
+// CHECK: ret void
+void test_sl(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
sc = sc << sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
sc = sc << uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
sc = sc << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
sc = sc << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
uc = uc << sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
uc = uc << uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
uc = uc << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
uc = uc << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
ss = ss << ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
ss = ss << us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
ss = ss << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ss = ss << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
us = us << ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
us = us << us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
us = us << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
us = us << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
si = si << si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
si = si << ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
si = si << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
si = si << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
ui = ui << si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
ui = ui << ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
ui = ui << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
ui = ui << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
sl = sl << sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
sl = sl << ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
sl = sl << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
sl = sl << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
ul = ul << sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
ul = ul << ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
ul = ul << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
ul = ul << 5;
}
-void test_sl_assign (void)
-{
-// CHECK-LABEL: test_sl_assign
+// CHECK-LABEL: define void @test_sl_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHL:%.*]] = shl <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[SHL]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHL1:%.*]] = shl <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[SHL1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP4]], i32 0
+// CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK: [[SHL2:%.*]] = shl <16 x i8> [[TMP5]], [[SH_PROM]]
+// CHECK: store volatile <16 x i8> [[SHL2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHL3]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHL4:%.*]] = shl <16 x i8> [[TMP8]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[SHL4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHL5:%.*]] = shl <16 x i8> [[TMP10]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[SHL5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP11]], i32 0
+// CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK: [[SHL9:%.*]] = shl <16 x i8> [[TMP12]], [[SH_PROM8]]
+// CHECK: store volatile <16 x i8> [[SHL9]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHL10]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHL11:%.*]] = shl <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[SHL11]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHL12:%.*]] = shl <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <8 x i16> [[SHL12]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP18]], i32 0
+// CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK: [[SHL16:%.*]] = shl <8 x i16> [[TMP19]], [[SH_PROM15]]
+// CHECK: store volatile <8 x i16> [[SHL16]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHL17]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHL18:%.*]] = shl <8 x i16> [[TMP22]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[SHL18]], <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHL19:%.*]] = shl <8 x i16> [[TMP24]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[SHL19]], <8 x i16>* @us, align 8
+// CHECK: [[TMP25:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP25]], i32 0
+// CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK: [[SHL23:%.*]] = shl <8 x i16> [[TMP26]], [[SH_PROM22]]
+// CHECK: store volatile <8 x i16> [[SHL23]], <8 x i16>* @us, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHL24]], <8 x i16>* @us, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHL25:%.*]] = shl <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <4 x i32> [[SHL25]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHL26:%.*]] = shl <4 x i32> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <4 x i32> [[SHL26]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP32]], i32 0
+// CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHL29:%.*]] = shl <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]]
+// CHECK: store volatile <4 x i32> [[SHL29]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHL30]], <4 x i32>* @si, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHL31:%.*]] = shl <4 x i32> [[TMP36]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[SHL31]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHL32:%.*]] = shl <4 x i32> [[TMP38]], [[TMP37]]
+// CHECK: store volatile <4 x i32> [[SHL32]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP39:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP39]], i32 0
+// CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHL35:%.*]] = shl <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]]
+// CHECK: store volatile <4 x i32> [[SHL35]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHL36]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHL37:%.*]] = shl <2 x i64> [[TMP43]], [[TMP42]]
+// CHECK: store volatile <2 x i64> [[SHL37]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHL38:%.*]] = shl <2 x i64> [[TMP45]], [[TMP44]]
+// CHECK: store volatile <2 x i64> [[SHL38]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP46]], i32 0
+// CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK: [[SHL42:%.*]] = shl <2 x i64> [[TMP47]], [[SH_PROM41]]
+// CHECK: store volatile <2 x i64> [[SHL42]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHL43]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHL44:%.*]] = shl <2 x i64> [[TMP50]], [[TMP49]]
+// CHECK: store volatile <2 x i64> [[SHL44]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHL45:%.*]] = shl <2 x i64> [[TMP52]], [[TMP51]]
+// CHECK: store volatile <2 x i64> [[SHL45]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP53:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP53]], i32 0
+// CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK: [[SHL49:%.*]] = shl <2 x i64> [[TMP54]], [[SH_PROM48]]
+// CHECK: store volatile <2 x i64> [[SHL49]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHL50]], <2 x i64>* @ul, align 8
+// CHECK: ret void
+void test_sl_assign(void) {
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
sc <<= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
sc <<= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
sc <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
sc <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
uc <<= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
uc <<= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
uc <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
uc <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
ss <<= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
ss <<= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
ss <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ss <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
us <<= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
us <<= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
us <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
us <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
si <<= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
si <<= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
si <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
si <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
ui <<= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
ui <<= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
ui <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
ui <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
sl <<= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
sl <<= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
sl <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
sl <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
ul <<= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
ul <<= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
ul <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
ul <<= 5;
}
-void test_sr (void)
-{
-// CHECK-LABEL: test_sr
+// CHECK-LABEL: define void @test_sr() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SHR:%.*]] = ashr <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: store volatile <16 x i8> [[SHR]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[SHR1:%.*]] = ashr <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: store volatile <16 x i8> [[SHR1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK: [[SHR2:%.*]] = ashr <16 x i8> [[TMP4]], [[SH_PROM]]
+// CHECK: store volatile <16 x i8> [[SHR2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHR3]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[SHR4:%.*]] = lshr <16 x i8> [[TMP7]], [[TMP8]]
+// CHECK: store volatile <16 x i8> [[SHR4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[SHR5:%.*]] = lshr <16 x i8> [[TMP9]], [[TMP10]]
+// CHECK: store volatile <16 x i8> [[SHR5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP12]], i32 0
+// CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK: [[SHR9:%.*]] = lshr <16 x i8> [[TMP11]], [[SH_PROM8]]
+// CHECK: store volatile <16 x i8> [[SHR9]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHR10]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SHR11:%.*]] = ashr <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: store volatile <8 x i16> [[SHR11]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[SHR12:%.*]] = ashr <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: store volatile <8 x i16> [[SHR12]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP19:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0
+// CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK: [[SHR16:%.*]] = ashr <8 x i16> [[TMP18]], [[SH_PROM15]]
+// CHECK: store volatile <8 x i16> [[SHR16]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHR17]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[SHR18:%.*]] = lshr <8 x i16> [[TMP21]], [[TMP22]]
+// CHECK: store volatile <8 x i16> [[SHR18]], <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[SHR19:%.*]] = lshr <8 x i16> [[TMP23]], [[TMP24]]
+// CHECK: store volatile <8 x i16> [[SHR19]], <8 x i16>* @us, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP26:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP26]], i32 0
+// CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK: [[SHR23:%.*]] = lshr <8 x i16> [[TMP25]], [[SH_PROM22]]
+// CHECK: store volatile <8 x i16> [[SHR23]], <8 x i16>* @us, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHR24]], <8 x i16>* @us, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SHR25:%.*]] = ashr <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: store volatile <4 x i32> [[SHR25]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[SHR26:%.*]] = ashr <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: store volatile <4 x i32> [[SHR26]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP33:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP33]], i32 0
+// CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[SHR29:%.*]] = ashr <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]]
+// CHECK: store volatile <4 x i32> [[SHR29]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHR30]], <4 x i32>* @si, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[SHR31:%.*]] = lshr <4 x i32> [[TMP35]], [[TMP36]]
+// CHECK: store volatile <4 x i32> [[SHR31]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[SHR32:%.*]] = lshr <4 x i32> [[TMP37]], [[TMP38]]
+// CHECK: store volatile <4 x i32> [[SHR32]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP40:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i32 0
+// CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[SHR35:%.*]] = lshr <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]]
+// CHECK: store volatile <4 x i32> [[SHR35]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHR36]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SHR37:%.*]] = ashr <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: store volatile <2 x i64> [[SHR37]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[SHR38:%.*]] = ashr <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: store volatile <2 x i64> [[SHR38]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP47]], i32 0
+// CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK: [[SHR42:%.*]] = ashr <2 x i64> [[TMP46]], [[SH_PROM41]]
+// CHECK: store volatile <2 x i64> [[SHR42]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHR43]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[SHR44:%.*]] = lshr <2 x i64> [[TMP49]], [[TMP50]]
+// CHECK: store volatile <2 x i64> [[SHR44]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[SHR45:%.*]] = lshr <2 x i64> [[TMP51]], [[TMP52]]
+// CHECK: store volatile <2 x i64> [[SHR45]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP54:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP54]], i32 0
+// CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK: [[SHR49:%.*]] = lshr <2 x i64> [[TMP53]], [[SH_PROM48]]
+// CHECK: store volatile <2 x i64> [[SHR49]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHR50]], <2 x i64>* @ul, align 8
+// CHECK: ret void
+void test_sr(void) {
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
sc = sc >> sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
sc = sc >> uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
sc = sc >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
sc = sc >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
uc = uc >> sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
uc = uc >> uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
uc = uc >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
uc = uc >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
ss = ss >> ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
ss = ss >> us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
ss = ss >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ss = ss >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
us = us >> ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
us = us >> us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
us = us >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
us = us >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
si = si >> si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
si = si >> ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
si = si >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
si = si >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
ui = ui >> si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
ui = ui >> ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
ui = ui >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
ui = ui >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
sl = sl >> sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
sl = sl >> ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
sl = sl >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
sl = sl >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
ul = ul >> sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
ul = ul >> ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
ul = ul >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
ul = ul >> 5;
}
-void test_sr_assign (void)
-{
-// CHECK-LABEL: test_sr_assign
+// CHECK-LABEL: define void @test_sr_assign() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHR:%.*]] = ashr <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK: store volatile <16 x i8> [[SHR]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHR1:%.*]] = ashr <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK: store volatile <16 x i8> [[SHR1]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP4]], i32 0
+// CHECK: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK: [[SHR2:%.*]] = ashr <16 x i8> [[TMP5]], [[SH_PROM]]
+// CHECK: store volatile <16 x i8> [[SHR2]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHR3]], <16 x i8>* @sc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHR4:%.*]] = lshr <16 x i8> [[TMP8]], [[TMP7]]
+// CHECK: store volatile <16 x i8> [[SHR4]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHR5:%.*]] = lshr <16 x i8> [[TMP10]], [[TMP9]]
+// CHECK: store volatile <16 x i8> [[SHR5]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP11]], i32 0
+// CHECK: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK: [[SHR9:%.*]] = lshr <16 x i8> [[TMP12]], [[SH_PROM8]]
+// CHECK: store volatile <16 x i8> [[SHR9]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK: store volatile <16 x i8> [[SHR10]], <16 x i8>* @uc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHR11:%.*]] = ashr <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK: store volatile <8 x i16> [[SHR11]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHR12:%.*]] = ashr <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK: store volatile <8 x i16> [[SHR12]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP18:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP18]], i32 0
+// CHECK: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK: [[SHR16:%.*]] = ashr <8 x i16> [[TMP19]], [[SH_PROM15]]
+// CHECK: store volatile <8 x i16> [[SHR16]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHR17]], <8 x i16>* @ss, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHR18:%.*]] = lshr <8 x i16> [[TMP22]], [[TMP21]]
+// CHECK: store volatile <8 x i16> [[SHR18]], <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHR19:%.*]] = lshr <8 x i16> [[TMP24]], [[TMP23]]
+// CHECK: store volatile <8 x i16> [[SHR19]], <8 x i16>* @us, align 8
+// CHECK: [[TMP25:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP25]], i32 0
+// CHECK: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK: [[SHR23:%.*]] = lshr <8 x i16> [[TMP26]], [[SH_PROM22]]
+// CHECK: store volatile <8 x i16> [[SHR23]], <8 x i16>* @us, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK: store volatile <8 x i16> [[SHR24]], <8 x i16>* @us, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHR25:%.*]] = ashr <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK: store volatile <4 x i32> [[SHR25]], <4 x i32>* @si, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHR26:%.*]] = ashr <4 x i32> [[TMP31]], [[TMP30]]
+// CHECK: store volatile <4 x i32> [[SHR26]], <4 x i32>* @si, align 8
+// CHECK: [[TMP32:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP32]], i32 0
+// CHECK: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHR29:%.*]] = ashr <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]]
+// CHECK: store volatile <4 x i32> [[SHR29]], <4 x i32>* @si, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHR30]], <4 x i32>* @si, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHR31:%.*]] = lshr <4 x i32> [[TMP36]], [[TMP35]]
+// CHECK: store volatile <4 x i32> [[SHR31]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHR32:%.*]] = lshr <4 x i32> [[TMP38]], [[TMP37]]
+// CHECK: store volatile <4 x i32> [[SHR32]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP39:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP39]], i32 0
+// CHECK: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHR35:%.*]] = lshr <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]]
+// CHECK: store volatile <4 x i32> [[SHR35]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK: store volatile <4 x i32> [[SHR36]], <4 x i32>* @ui, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHR37:%.*]] = ashr <2 x i64> [[TMP43]], [[TMP42]]
+// CHECK: store volatile <2 x i64> [[SHR37]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHR38:%.*]] = ashr <2 x i64> [[TMP45]], [[TMP44]]
+// CHECK: store volatile <2 x i64> [[SHR38]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP46]], i32 0
+// CHECK: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK: [[SHR42:%.*]] = ashr <2 x i64> [[TMP47]], [[SH_PROM41]]
+// CHECK: store volatile <2 x i64> [[SHR42]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHR43]], <2 x i64>* @sl, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHR44:%.*]] = lshr <2 x i64> [[TMP50]], [[TMP49]]
+// CHECK: store volatile <2 x i64> [[SHR44]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHR45:%.*]] = lshr <2 x i64> [[TMP52]], [[TMP51]]
+// CHECK: store volatile <2 x i64> [[SHR45]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP53:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP53]], i32 0
+// CHECK: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK: [[SHR49:%.*]] = lshr <2 x i64> [[TMP54]], [[SH_PROM48]]
+// CHECK: store volatile <2 x i64> [[SHR49]], <2 x i64>* @ul, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK: store volatile <2 x i64> [[SHR50]], <2 x i64>* @ul, align 8
+// CHECK: ret void
+void test_sr_assign(void) {
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
sc >>= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
sc >>= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
sc >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
sc >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
uc >>= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
uc >>= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
uc >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
uc >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
ss >>= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
ss >>= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
ss >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
ss >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
us >>= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
us >>= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
us >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
us >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
si >>= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
si >>= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
si >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
si >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
ui >>= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
ui >>= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
ui >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
ui >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
sl >>= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
sl >>= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
sl >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
sl >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
ul >>= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
ul >>= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
ul >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
ul >>= 5;
}
-void test_cmpeq (void)
-{
-// CHECK-LABEL: test_cmpeq
+// CHECK-LABEL: define void @test_cmpeq() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP:%.*]] = icmp eq <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP1:%.*]] = icmp eq <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP3:%.*]] = icmp eq <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP5:%.*]] = icmp eq <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT6]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP7:%.*]] = icmp eq <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT8]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP9:%.*]] = icmp eq <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT10]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP11:%.*]] = icmp eq <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK: [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT12]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP13:%.*]] = icmp eq <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT14]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP15:%.*]] = icmp eq <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT16]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP17:%.*]] = icmp eq <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT18]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP19:%.*]] = icmp eq <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT20]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP21:%.*]] = icmp eq <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT22]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP23:%.*]] = icmp eq <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK: [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT24]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP25:%.*]] = icmp eq <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK: [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT26]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP27:%.*]] = icmp eq <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT28]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP29:%.*]] = icmp eq <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT30]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP31:%.*]] = icmp eq <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT32]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP33:%.*]] = icmp eq <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT34]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP35:%.*]] = icmp eq <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK: [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT36]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP37:%.*]] = icmp eq <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK: [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT38]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP39:%.*]] = icmp eq <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK: [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT40]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP41:%.*]] = icmp eq <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT42]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP43:%.*]] = icmp eq <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT44]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP45:%.*]] = icmp eq <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT46]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP47:%.*]] = icmp eq <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK: [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT48]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP49:%.*]] = icmp eq <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK: [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT50]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP51:%.*]] = icmp eq <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK: [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT52]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP53:%.*]] = icmp eq <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK: [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT54]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP56:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP57:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[CMP55:%.*]] = fcmp oeq <2 x double> [[TMP56]], [[TMP57]]
+// CHECK: [[SEXT56:%.*]] = sext <2 x i1> [[CMP55]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT56]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_cmpeq(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc == sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc == bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc == sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc == uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc == bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc == uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc == bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss == ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss == bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs == ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us == us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us == bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs == us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs == bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si == si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si == bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi == si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui == ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui == bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi == ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi == bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl == sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl == bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl == sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul == ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul == bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl == ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl == bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp oeq <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = fd == fd2;
}
-void test_cmpne (void)
-{
-// CHECK-LABEL: test_cmpne
+// CHECK-LABEL: define void @test_cmpne() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP:%.*]] = icmp ne <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP1:%.*]] = icmp ne <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP3:%.*]] = icmp ne <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP5:%.*]] = icmp ne <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK: [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT6]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP7:%.*]] = icmp ne <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK: [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT8]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP9:%.*]] = icmp ne <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK: [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT10]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP11:%.*]] = icmp ne <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK: [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT12]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP13:%.*]] = icmp ne <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK: [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT14]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP15:%.*]] = icmp ne <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK: [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT16]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP17:%.*]] = icmp ne <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK: [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT18]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP19:%.*]] = icmp ne <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK: [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT20]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP21:%.*]] = icmp ne <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK: [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT22]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP23:%.*]] = icmp ne <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK: [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT24]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP25:%.*]] = icmp ne <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK: [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT26]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP27:%.*]] = icmp ne <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK: [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT28]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP29:%.*]] = icmp ne <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK: [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT30]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP31:%.*]] = icmp ne <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK: [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT32]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP33:%.*]] = icmp ne <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK: [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT34]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP35:%.*]] = icmp ne <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK: [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT36]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP37:%.*]] = icmp ne <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK: [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT38]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP39:%.*]] = icmp ne <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK: [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT40]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP41:%.*]] = icmp ne <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK: [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT42]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP43:%.*]] = icmp ne <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK: [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT44]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP45:%.*]] = icmp ne <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK: [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT46]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP47:%.*]] = icmp ne <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK: [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT48]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP49:%.*]] = icmp ne <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK: [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT50]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP51:%.*]] = icmp ne <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK: [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT52]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP53:%.*]] = icmp ne <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK: [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT54]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP56:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP57:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[CMP55:%.*]] = fcmp une <2 x double> [[TMP56]], [[TMP57]]
+// CHECK: [[SEXT56:%.*]] = sext <2 x i1> [[CMP55]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT56]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_cmpne(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc != sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc != bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc != sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc != uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc != bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc != uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc != bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss != ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss != bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs != ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us != us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us != bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs != us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs != bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si != si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si != bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi != si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui != ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui != bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi != ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi != bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl != sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl != bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl != sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul != ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul != bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl != ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl != bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp une <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = fd != fd2;
}
-void test_cmpge (void)
-{
-// CHECK-LABEL: test_cmpge
+// CHECK-LABEL: define void @test_cmpge() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP:%.*]] = icmp sge <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP1:%.*]] = icmp uge <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP3:%.*]] = icmp uge <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP5:%.*]] = icmp sge <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP7:%.*]] = icmp uge <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP9:%.*]] = icmp uge <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP11:%.*]] = icmp sge <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP13:%.*]] = icmp uge <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP15:%.*]] = icmp uge <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP17:%.*]] = icmp sge <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP19:%.*]] = icmp uge <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP21:%.*]] = icmp uge <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[CMP23:%.*]] = fcmp oge <2 x double> [[TMP24]], [[TMP25]]
+// CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_cmpge(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc >= sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc >= uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc >= bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss >= ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us >= us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs >= bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si >= si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui >= ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi >= bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl >= sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul >= ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl >= bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp oge <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = fd >= fd2;
}
-void test_cmpgt (void)
-{
-// CHECK-LABEL: test_cmpgt
+// CHECK-LABEL: define void @test_cmpgt() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP:%.*]] = icmp sgt <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP1:%.*]] = icmp ugt <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP3:%.*]] = icmp ugt <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP5:%.*]] = icmp sgt <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP7:%.*]] = icmp ugt <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP9:%.*]] = icmp ugt <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP11:%.*]] = icmp sgt <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP13:%.*]] = icmp ugt <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP15:%.*]] = icmp ugt <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP17:%.*]] = icmp sgt <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP19:%.*]] = icmp ugt <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP21:%.*]] = icmp ugt <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[CMP23:%.*]] = fcmp ogt <2 x double> [[TMP24]], [[TMP25]]
+// CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_cmpgt(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc > sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc > uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc > bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss > ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us > us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs > bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si > si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui > ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi > bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl > sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul > ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl > bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp ogt <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = fd > fd2;
}
-void test_cmple (void)
-{
-// CHECK-LABEL: test_cmple
+// CHECK-LABEL: define void @test_cmple() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP:%.*]] = icmp sle <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP1:%.*]] = icmp ule <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP3:%.*]] = icmp ule <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP5:%.*]] = icmp sle <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP7:%.*]] = icmp ule <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP9:%.*]] = icmp ule <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP11:%.*]] = icmp sle <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP13:%.*]] = icmp ule <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP15:%.*]] = icmp ule <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP17:%.*]] = icmp sle <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP19:%.*]] = icmp ule <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP21:%.*]] = icmp ule <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[CMP23:%.*]] = fcmp ole <2 x double> [[TMP24]], [[TMP25]]
+// CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_cmple(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc <= sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc <= uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc <= bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss <= ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us <= us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs <= bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si <= si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui <= ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi <= bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl <= sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul <= ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl <= bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp ole <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = fd <= fd2;
}
-void test_cmplt (void)
-{
-// CHECK-LABEL: test_cmplt
+// CHECK-LABEL: define void @test_cmplt() #0 {
+// CHECK: [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK: [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK: [[CMP:%.*]] = icmp slt <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK: [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK: [[CMP1:%.*]] = icmp ult <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK: [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK: [[CMP3:%.*]] = icmp ult <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK: store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK: [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK: [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK: [[CMP5:%.*]] = icmp slt <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK: [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK: [[CMP7:%.*]] = icmp ult <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK: [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK: [[CMP9:%.*]] = icmp ult <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK: store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK: [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK: [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK: [[CMP11:%.*]] = icmp slt <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK: [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK: [[CMP13:%.*]] = icmp ult <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK: [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK: [[CMP15:%.*]] = icmp ult <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK: store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK: [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK: [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK: [[CMP17:%.*]] = icmp slt <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK: [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK: [[CMP19:%.*]] = icmp ult <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK: [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK: [[CMP21:%.*]] = icmp ult <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK: [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK: [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK: [[CMP23:%.*]] = fcmp olt <2 x double> [[TMP24]], [[TMP25]]
+// CHECK: [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK: store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK: ret void
+void test_cmplt(void) {
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = sc < sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = uc < uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
bc = bc < bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = ss < ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = us < us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
bs = bs < bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = si < si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = ui < ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
bi = bi < bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = sl < sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = ul < ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = bl < bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp olt <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
bl = fd < fd2;
}
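
The hunks above all exercise one codegen pattern: each vector comparison is lowered to an icmp (sge/sgt/sle/slt for signed element types, uge/ugt/ule/ult for unsigned and for vector bool operands) or an ordered fcmp for double elements, followed by a sext of the <N x i1> result back to the operand's element width, stored through the volatile bool-vector global. A minimal standalone sketch of that behavior follows, using the generic GCC/Clang vector extension as a stand-in for the test's z/vector types; the typedef and identifier names here are illustrative assumptions, not taken from the test file.

/* Hypothetical stand-ins for the test's vector globals
   (@sc, @sc2, @uc, @uc2, @bc in the IR above). */
typedef signed char vec_schar __attribute__((vector_size(16)));
typedef unsigned char vec_uchar __attribute__((vector_size(16)));

volatile vec_schar sc, sc2;
volatile vec_uchar uc, uc2;
volatile vec_schar bc; /* result: each lane becomes -1 (all ones) or 0 */

void example_cmplt(void) {
  /* Signed operands pick the signed predicate; Clang emits roughly:
       icmp slt <16 x i8> %sc, %sc2
       sext <16 x i1> %cmp to <16 x i8>            */
  bc = sc < sc2;
  /* Unsigned operands pick the unsigned predicate:
       icmp ult <16 x i8> %uc, %uc2
       sext <16 x i1> %cmp to <16 x i8>            */
  bc = uc < uc2;
}

Because the operands are volatile, each statement forces the two loads checked above; the comparison result is a signed integer vector of the same lane count and width, which is why every CHECK pair ends in a sext to the original element type.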