Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/align-systemz.c                   16
-rw-r--r--  test/CodeGen/arm-long-calls.c                   7
-rw-r--r--  test/CodeGen/arm64-abi-vector.c                10
-rw-r--r--  test/CodeGen/arm64-arguments.c                 68
-rw-r--r--  test/CodeGen/arm64-be-bitfield.c                2
-rw-r--r--  test/CodeGen/asm-attrs.c                       33
-rw-r--r--  test/CodeGen/atomic-ops.c                       1
-rw-r--r--  test/CodeGen/attr-target.c                      3
-rw-r--r--  test/CodeGen/available-externally-hidden.cpp   32
-rw-r--r--  test/CodeGen/available-externally-suppress.c   10
-rw-r--r--  test/CodeGen/avx512vlbw-builtins.c            585
-rw-r--r--  test/CodeGen/big-atomic-ops.c                   2
-rw-r--r--  test/CodeGen/bitfield-2.c                      12
-rw-r--r--  test/CodeGen/builtins-ppc-altivec.c            88
-rw-r--r--  test/CodeGen/builtins-ppc-p8vector.c          663
-rw-r--r--  test/CodeGen/builtins-ppc-vsx.c               476
-rw-r--r--  test/CodeGen/debug-info-block.c                27
-rw-r--r--  test/CodeGen/exceptions-seh-finally.c          76
-rw-r--r--  test/CodeGen/exceptions-seh-leave.c            30
-rw-r--r--  test/CodeGen/exceptions-seh.c                 139
-rw-r--r--  test/CodeGen/inline.c                           4
-rw-r--r--  test/CodeGen/packed-nest-unpacked.c            31
-rw-r--r--  test/CodeGen/ppc64-struct-onefloat.c           16
-rw-r--r--  test/CodeGen/ppc64le-aggregates.c              18
24 files changed, 2148 insertions, 201 deletions
diff --git a/test/CodeGen/align-systemz.c b/test/CodeGen/align-systemz.c
index 68a21e39ab36..eaa1de64d344 100644
--- a/test/CodeGen/align-systemz.c
+++ b/test/CodeGen/align-systemz.c
@@ -25,3 +25,19 @@ void func (void)
s = es;
}
+
+// Alignment should be respected for coerced argument loads
+
+struct arg { long y __attribute__((packed, aligned(4))); };
+
+extern struct arg x;
+void f(struct arg);
+
+void test (void)
+{
+ f(x);
+}
+
+// CHECK-LABEL: @test
+// CHECK: load i64, i64* getelementptr inbounds (%struct.arg, %struct.arg* @x, i32 0, i32 0), align 4
+
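A quick aside on why the new CHECK expects "align 4" (a minimal sketch of mine, not part of the commit): packed together with aligned(4) drops the struct's alignment below the natural 8-byte alignment of long on SystemZ, so the coerced i64 load has to carry the lower figure.

#include <stdio.h>

/* Same shape as the struct added above; compiles with any C11 compiler. */
struct arg { long y __attribute__((packed, aligned(4))); };

int main(void) {
  /* Expected to print 4 on LP64 targets, where a plain long aligns to 8. */
  printf("alignof(struct arg) = %zu\n", _Alignof(struct arg));
  return 0;
}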
diff --git a/test/CodeGen/arm-long-calls.c b/test/CodeGen/arm-long-calls.c
new file mode 100644
index 000000000000..fdd7babe9fe6
--- /dev/null
+++ b/test/CodeGen/arm-long-calls.c
@@ -0,0 +1,7 @@
+// RUN: %clang_cc1 -triple thumbv7-apple-ios5 -target-feature +long-calls -emit-llvm -o - %s | FileCheck -check-prefix=LONGCALL %s
+// RUN: %clang_cc1 -triple thumbv7-apple-ios5 -emit-llvm -o - %s | FileCheck -check-prefix=NOLONGCALL %s
+
+// LONGCALL: attributes #0 = { {{.*}} "target-features"="+long-calls"
+// NOLONGCALL-NOT: attributes #0 = { {{.*}} "target-features"="+long-calls"
+
+int foo1(int a) { return a; }
diff --git a/test/CodeGen/arm64-abi-vector.c b/test/CodeGen/arm64-abi-vector.c
index 4566c417909b..ebf7f5112659 100644
--- a/test/CodeGen/arm64-abi-vector.c
+++ b/test/CodeGen/arm64-abi-vector.c
@@ -309,7 +309,7 @@ __attribute__((noinline)) double args_vec_5c(int fixed, __char5 c5) {
// CHECK: args_vec_5c
// CHECK: [[C5:%.*]] = alloca <5 x i8>, align 8
// CHECK: [[TMP:%.*]] = bitcast <5 x i8>* [[C5]] to <2 x i32>*
-// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 1
+// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 8
double sum = fixed;
sum = sum + c5.x + c5.y;
return sum;
@@ -325,7 +325,7 @@ __attribute__((noinline)) double args_vec_9c(int fixed, __char9 c9) {
// CHECK: args_vec_9c
// CHECK: [[C9:%.*]] = alloca <9 x i8>, align 16
// CHECK: [[TMP:%.*]] = bitcast <9 x i8>* [[C9]] to <4 x i32>*
-// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
+// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16
double sum = fixed;
sum = sum + c9.x + c9.y;
return sum;
@@ -355,7 +355,7 @@ __attribute__((noinline)) double args_vec_3s(int fixed, __short3 c3) {
// CHECK: args_vec_3s
// CHECK: [[C3:%.*]] = alloca <3 x i16>, align 8
// CHECK: [[TMP:%.*]] = bitcast <3 x i16>* [[C3]] to <2 x i32>*
-// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 1
+// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 8
double sum = fixed;
sum = sum + c3.x + c3.y;
return sum;
@@ -371,7 +371,7 @@ __attribute__((noinline)) double args_vec_5s(int fixed, __short5 c5) {
// CHECK: args_vec_5s
// CHECK: [[C5:%.*]] = alloca <5 x i16>, align 16
// CHECK: [[TMP:%.*]] = bitcast <5 x i16>* [[C5]] to <4 x i32>*
-// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
+// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16
double sum = fixed;
sum = sum + c5.x + c5.y;
return sum;
@@ -387,7 +387,7 @@ __attribute__((noinline)) double args_vec_3i(int fixed, __int3 c3) {
// CHECK: args_vec_3i
// CHECK: [[C3:%.*]] = alloca <3 x i32>, align 16
// CHECK: [[TMP:%.*]] = bitcast <3 x i32>* [[C3]] to <4 x i32>*
-// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
+// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 16
double sum = fixed;
sum = sum + c3.x + c3.y;
return sum;
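Context for the alignment values above (an illustrative sketch, assuming Clang's documented handling of odd-sized vectors): a vector like <5 x i8> is padded out to the next power-of-two size, and the alloca's alignment follows that padded size, which is the number the coerced stores now carry instead of align 1.

#include <stdio.h>

/* Typedef names c5/c9 are mine; the test's __char5/__char9 have the
   same shape. */
typedef char c5 __attribute__((ext_vector_type(5)));  /* padded to 8 bytes */
typedef char c9 __attribute__((ext_vector_type(9)));  /* padded to 16 bytes */

int main(void) {
  /* Expected to print 8 and 16, matching the allocas in the checks above. */
  printf("%zu %zu\n", _Alignof(c5), _Alignof(c9));
  return 0;
}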
diff --git a/test/CodeGen/arm64-arguments.c b/test/CodeGen/arm64-arguments.c
index 4486bb4b18e2..8b551c4af470 100644
--- a/test/CodeGen/arm64-arguments.c
+++ b/test/CodeGen/arm64-arguments.c
@@ -219,8 +219,8 @@ int32x4_t f36(int i, s36_with_align s1, s36_with_align s2) {
// CHECK: define <4 x i32> @f36(i32 %i, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s36, align 16
// CHECK: %s2 = alloca %struct.s36, align 16
-// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
-// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
+// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16
+// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16
// CHECK: %[[a:.*]] = bitcast %struct.s36* %s1 to <4 x i32>*
// CHECK: load <4 x i32>, <4 x i32>* %[[a]], align 16
// CHECK: %[[b:.*]] = bitcast %struct.s36* %s2 to <4 x i32>*
@@ -275,8 +275,8 @@ int f38(int i, s38_no_align s1, s38_no_align s2) {
// CHECK: define i32 @f38(i32 %i, i64 %s1.coerce, i64 %s2.coerce)
// CHECK: %s1 = alloca %struct.s38, align 8
// CHECK: %s2 = alloca %struct.s38, align 8
-// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 1
-// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 1
+// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 8
+// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 8
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 1
@@ -287,8 +287,8 @@ s38_no_align g38;
s38_no_align g38_2;
int caller38() {
// CHECK: define i32 @caller38()
-// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 1
-// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 1
+// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4
+// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
// CHECK: call i32 @f38(i32 3, i64 %[[a]], i64 %[[b]])
return f38(3, g38, g38_2);
}
@@ -299,8 +299,8 @@ int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
// CHECK: define i32 @f38_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i64 %s1.coerce, i64 %s2.coerce)
// CHECK: %s1 = alloca %struct.s38, align 8
// CHECK: %s2 = alloca %struct.s38, align 8
-// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 1
-// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 1
+// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 8
+// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 8
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 1
@@ -309,8 +309,8 @@ int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
}
int caller38_stack() {
// CHECK: define i32 @caller38_stack()
-// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 1
-// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 1
+// CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4
+// CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
// CHECK: call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 %[[a]], i64 %[[b]])
return f38_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g38, g38_2);
}
@@ -328,8 +328,8 @@ int f39(int i, s39_with_align s1, s39_with_align s2) {
// CHECK: define i32 @f39(i32 %i, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s39, align 16
// CHECK: %s2 = alloca %struct.s39, align 16
-// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
-// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
+// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16
+// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16
// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 1
@@ -340,8 +340,8 @@ s39_with_align g39;
s39_with_align g39_2;
int caller39() {
// CHECK: define i32 @caller39()
-// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 1
-// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 1
+// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16
+// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
// CHECK: call i32 @f39(i32 3, i128 %[[a]], i128 %[[b]])
return f39(3, g39, g39_2);
}
@@ -352,8 +352,8 @@ int f39_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
// CHECK: define i32 @f39_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s39, align 16
// CHECK: %s2 = alloca %struct.s39, align 16
-// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
-// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
+// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16
+// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16
// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s39, %struct.s39* %s1, i32 0, i32 1
@@ -362,8 +362,8 @@ int f39_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
}
int caller39_stack() {
// CHECK: define i32 @caller39_stack()
-// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 1
-// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 1
+// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16
+// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
// CHECK: call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]])
return f39_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g39, g39_2);
}
@@ -383,8 +383,8 @@ int f40(int i, s40_no_align s1, s40_no_align s2) {
// CHECK: define i32 @f40(i32 %i, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
// CHECK: %s1 = alloca %struct.s40, align 8
// CHECK: %s2 = alloca %struct.s40, align 8
-// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 1
-// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 1
+// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 8
+// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 8
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 1
@@ -395,8 +395,8 @@ s40_no_align g40;
s40_no_align g40_2;
int caller40() {
// CHECK: define i32 @caller40()
-// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1
-// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1
+// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
+// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
// CHECK: call i32 @f40(i32 3, [2 x i64] %[[a]], [2 x i64] %[[b]])
return f40(3, g40, g40_2);
}
@@ -407,8 +407,8 @@ int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
// CHECK: define i32 @f40_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
// CHECK: %s1 = alloca %struct.s40, align 8
// CHECK: %s2 = alloca %struct.s40, align 8
-// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 1
-// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 1
+// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 8
+// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 8
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 1
@@ -417,8 +417,8 @@ int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
}
int caller40_stack() {
// CHECK: define i32 @caller40_stack()
-// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 1
-// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 1
+// CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
+// CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
// CHECK: call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] %[[a]], [2 x i64] %[[b]])
return f40_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g40, g40_2);
}
@@ -438,8 +438,8 @@ int f41(int i, s41_with_align s1, s41_with_align s2) {
// CHECK: define i32 @f41(i32 %i, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s41, align 16
// CHECK: %s2 = alloca %struct.s41, align 16
-// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
-// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
+// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16
+// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16
// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 1
@@ -450,8 +450,8 @@ s41_with_align g41;
s41_with_align g41_2;
int caller41() {
// CHECK: define i32 @caller41()
-// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 1
-// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 1
+// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
+// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
// CHECK: call i32 @f41(i32 3, i128 %[[a]], i128 %[[b]])
return f41(3, g41, g41_2);
}
@@ -462,8 +462,8 @@ int f41_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
// CHECK: define i32 @f41_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce)
// CHECK: %s1 = alloca %struct.s41, align 16
// CHECK: %s2 = alloca %struct.s41, align 16
-// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 1
-// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 1
+// CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16
+// CHECK: store i128 %s2.coerce, i128* %{{.*}}, align 16
// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s41, %struct.s41* %s1, i32 0, i32 1
@@ -472,8 +472,8 @@ int f41_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
}
int caller41_stack() {
// CHECK: define i32 @caller41_stack()
-// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 1
-// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 1
+// CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
+// CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
// CHECK: call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]])
return f41_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g41, g41_2);
}
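The two recurring numbers in this file are easy to reproduce directly (a sketch of mine mirroring the test's struct shapes): stores into the coerced alloca use the alloca's own alignment (8 for i64, 16 for the over-aligned cases), while loads from the globals use the struct's natural ABI alignment, which is only 4 for a struct of int-sized members.

#include <stdio.h>

/* Hypothetical mirrors of s38_no_align and s39_with_align. */
typedef struct { int i; short s; } no_align;
typedef struct __attribute__((aligned(16))) { int i; short s; } with_align;

int main(void) {
  /* Expected 4 and 16: hence "align 4" for @g38 and "align 16" for @g39. */
  printf("%zu %zu\n", _Alignof(no_align), _Alignof(with_align));
  return 0;
}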
diff --git a/test/CodeGen/arm64-be-bitfield.c b/test/CodeGen/arm64-be-bitfield.c
index b8d497c5580d..132239ab8313 100644
--- a/test/CodeGen/arm64-be-bitfield.c
+++ b/test/CodeGen/arm64-be-bitfield.c
@@ -7,7 +7,7 @@ struct bt3 { signed b2:10; signed b3:10; } b16;
// Get the high 32-bits and then shift appropriately for big-endian.
signed callee_b0f(struct bt3 bp11) {
// IR: callee_b0f(i64 [[ARG:%.*]])
-// IR: store i64 [[ARG]], i64* [[PTR:%.*]]
+// IR: store i64 [[ARG]], i64* [[PTR:%.*]], align 8
// IR: [[BITCAST:%.*]] = bitcast i64* [[PTR]] to i8*
// IR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* [[BITCAST]], i64 4
// ARM: asr x0, x0, #54
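The shift amount follows from the layout (my arithmetic, not from the commit): on big-endian AArch64 the 4-byte struct rides in the top half of the i64 argument, and bitfields are allocated from the most significant bit down, so b2's 10 bits occupy bits 63..54 and an arithmetic shift right by 64 - 10 = 54 recovers b2 sign-extended.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical register image with b2 = -3 and b3 = 7; assumes >> on a
     negative int64_t is an arithmetic shift, as on mainstream compilers. */
  int64_t reg = (int64_t)(((uint64_t)(-3 & 0x3FF) << 54) |
                          ((uint64_t)( 7 & 0x3FF) << 44));
  printf("b2 = %d\n", (int)(reg >> 54));  /* prints -3 */
  return 0;
}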
diff --git a/test/CodeGen/asm-attrs.c b/test/CodeGen/asm-attrs.c
new file mode 100644
index 000000000000..ae7287953e0c
--- /dev/null
+++ b/test/CodeGen/asm-attrs.c
@@ -0,0 +1,33 @@
+// RUN: %clang_cc1 -triple armv7-apple-darwin -emit-llvm %s -o - | FileCheck %s
+
+// CHECK: call i32 asm "foo0", {{.*}} [[READNONE:#[0-9]+]]
+// CHECK: call i32 asm "foo1", {{.*}} [[READNONE]]
+// CHECK: call i32 asm "foo2", {{.*}} [[NOATTRS:#[0-9]+]]
+// CHECK: call i32 asm sideeffect "foo3", {{.*}} [[NOATTRS]]
+// CHECK: call i32 asm "foo4", {{.*}} [[READONLY:#[0-9]+]]
+// CHECK: call i32 asm "foo5", {{.*}} [[READONLY]]
+// CHECK: call i32 asm "foo6", {{.*}} [[NOATTRS]]
+// CHECK: call void asm sideeffect "foo7", {{.*}} [[NOATTRS]]
+// CHECK: call void asm "foo8", {{.*}} [[NOATTRS]]
+
+// CHECK: attributes [[READNONE]] = { nounwind readnone }
+// CHECK: attributes [[NOATTRS]] = { nounwind }
+// CHECK: attributes [[READONLY]] = { nounwind readonly }
+
+int g0, g1;
+
+struct S {
+ int i;
+} g2;
+
+void test_attrs(int a) {
+ __asm__ ("foo0" : "=r"(g1) : "r"(a));
+ __asm__ ("foo1" : "=r"(g1) : "r"(a) : "cc");
+ __asm__ ("foo2" : "=r"(g1) : "r"(a) : "memory");
+ __asm__ volatile("foo3" : "=r"(g1) : "r"(a));
+ __asm__ ("foo4" : "=r"(g1) : "r"(a), "m"(g0));
+ __asm__ ("foo5" : "=r"(g1) : "r"(a), "Q"(g0));
+ __asm__ ("foo6" : "=r"(g1), "=m"(g0) : "r"(a));
+ __asm__ ("foo7" : : "r"(a));
+ __asm__ ("foo8" : "=r"(g2) : "r"(a));
+}
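The mapping these checks encode, as I read the constraints in test_attrs: no "memory" clobber, no memory operands, and no volatile gives readnone; a memory input ("m" or "Q") gives readonly; a memory output, a "memory" clobber, or volatile gives neither. A compilable distillation with generic constraints (a sketch, not the test itself):

int g;

int pure_op(int a) {
  int r;
  __asm__("" : "=r"(r) : "r"(a));          /* readnone candidate */
  return r;
}

int reads_mem(int a) {
  int r;
  __asm__("" : "=r"(r) : "r"(a), "m"(g));  /* readonly: may read g */
  return r;
}

void writes_mem(int a) {
  __asm__("" : "=m"(g) : "r"(a));          /* no attributes: may write g */
}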
diff --git a/test/CodeGen/atomic-ops.c b/test/CodeGen/atomic-ops.c
index 13ab5f117f93..d8f7d28392a5 100644
--- a/test/CodeGen/atomic-ops.c
+++ b/test/CodeGen/atomic-ops.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -triple=i686-apple-darwin9 | FileCheck %s
+// REQUIRES: x86-registered-target
// Also test serialization of atomic operations here, to avoid duplicating the
// test.
diff --git a/test/CodeGen/attr-target.c b/test/CodeGen/attr-target.c
index 7ea5fe5a07a0..d805d133f365 100644
--- a/test/CodeGen/attr-target.c
+++ b/test/CodeGen/attr-target.c
@@ -13,6 +13,8 @@ int __attribute__((target("sse4"))) panda(int a) { return 4; }
int bar(int a) { return baz(a) + foo(a); }
+int __attribute__((target("avx, sse4.2, arch= ivybridge"))) qux(int a) { return 4; }
+
// Check that we emit the additional subtarget and cpu features for foo and not for baz or bar.
// CHECK: baz{{.*}} #0
// CHECK: foo{{.*}} #1
@@ -22,6 +24,7 @@ int bar(int a) { return baz(a) + foo(a); }
// CHECK: koala{{.*}} #0
// CHECK: echidna{{.*}} #2
// CHECK: bar{{.*}} #0
+// CHECK: qux{{.*}} #1
// CHECK: #0 = {{.*}}"target-cpu"="x86-64" "target-features"="+sse,+sse2"
// CHECK: #1 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3"
// CHECK: #2 = {{.*}}"target-cpu"="x86-64" "target-features"="+sse,-aes,-avx,-avx2,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512pf,-avx512vl,-f16c,-fma,-fma4,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-xop"
diff --git a/test/CodeGen/available-externally-hidden.cpp b/test/CodeGen/available-externally-hidden.cpp
new file mode 100644
index 000000000000..dc13f26b7526
--- /dev/null
+++ b/test/CodeGen/available-externally-hidden.cpp
@@ -0,0 +1,32 @@
+// RUN: %clang_cc1 -O2 -fvisibility hidden -std=c++11 -emit-llvm -o - -triple x86_64-apple-darwin10 %s | FileCheck %s
+
+// Ensure that available_externally functions eliminated at -O2 are now
+// declarations, and are not emitted as hidden with -fvisibility=hidden,
+// but rather with default visibility.
+struct Filter {
+ virtual void Foo();
+ int a;
+};
+
+class Message{};
+class Sender {
+ public:
+ virtual bool Send(Message* msg) = 0;
+
+ protected:
+ virtual ~Sender() {}
+};
+
+// CHECK: declare zeroext i1 @_ZThn16_N17SyncMessageFilter4SendEP7Message
+class SyncMessageFilter : public Filter, public Sender {
+ public:
+ bool Send(Message* message) override;
+};
+
+class TestSyncMessageFilter : public SyncMessageFilter {
+};
+
+int main() {
+TestSyncMessageFilter* f = new TestSyncMessageFilter;
+ f->Send(new Message);
+}
diff --git a/test/CodeGen/available-externally-suppress.c b/test/CodeGen/available-externally-suppress.c
index 390d2017884a..a25a28271277 100644
--- a/test/CodeGen/available-externally-suppress.c
+++ b/test/CodeGen/available-externally-suppress.c
@@ -1,12 +1,18 @@
// RUN: %clang_cc1 -emit-llvm -o - -triple x86_64-apple-darwin10 %s | FileCheck %s
+// RUN: %clang_cc1 -O2 -fno-inline -emit-llvm -o - -triple x86_64-apple-darwin10 %s | FileCheck %s
+// RUN: %clang_cc1 -flto -O2 -fno-inline -emit-llvm -o - -triple x86_64-apple-darwin10 %s | FileCheck %s -check-prefix=LTO
// Ensure that we don't emit available_externally functions at -O0.
+// Also should not emit them at -O2, unless -flto is present in which case
+// we should preserve them for link-time inlining decisions.
int x;
inline void f0(int y) { x = y; }
// CHECK-LABEL: define void @test()
// CHECK: declare void @f0(i32)
+// LTO-LABEL: define void @test()
+// LTO: define available_externally void @f0
void test() {
f0(17);
}
@@ -19,9 +25,13 @@ inline int __attribute__((always_inline)) f1(int x) {
}
// CHECK: @test1
+// LTO: @test1
int test1(int x) {
// CHECK: br i1
// CHECK-NOT: call {{.*}} @f1
// CHECK: ret i32
+ // LTO: br i1
+ // LTO-NOT: call {{.*}} @f1
+ // LTO: ret i32
return f1(x);
}
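Background for the new RUN lines (my summary of the C99 inline rule the test leans on): a function declared only with inline provides no standalone definition, so Clang emits it as available_externally; plain -O2 may discard such bodies once inlining is done, but with -flto they must survive into the IR so the link step can still make inlining decisions. A self-contained illustration:

/* With only the inline declaration, the body is available_externally and
   another TU must own the real definition; the extern inline declaration
   below anchors the definition here so this file links on its own. */
inline int twice(int x) { return 2 * x; }
extern inline int twice(int x);

int use(int x) { return twice(x); }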
diff --git a/test/CodeGen/avx512vlbw-builtins.c b/test/CodeGen/avx512vlbw-builtins.c
index a4a12440dc11..7aa8b873dc42 100644
--- a/test/CodeGen/avx512vlbw-builtins.c
+++ b/test/CodeGen/avx512vlbw-builtins.c
@@ -792,3 +792,588 @@ __m128i test_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
//CHECK: @llvm.x86.avx512.mask.pmull.w.128
return _mm_maskz_mullo_epi16(__U , __A, __B);
}
+
+
+__m128i test_mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W) {
+ // CHECK-LABEL: @test_mm_mask_blend_epi8
+ // CHECK: @llvm.x86.avx512.mask.blend.b.128
+ return _mm_mask_blend_epi8(__U,__A,__W);
+}
+__m256i test_mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W) {
+ // CHECK-LABEL: @test_mm256_mask_blend_epi8
+ // CHECK: @llvm.x86.avx512.mask.blend.b.256
+ return _mm256_mask_blend_epi8(__U,__A,__W);
+}
+
+__m128i test_mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W) {
+ // CHECK-LABEL: @test_mm_mask_blend_epi16
+ // CHECK: @llvm.x86.avx512.mask.blend.w.128
+ return _mm_mask_blend_epi16(__U,__A,__W);
+}
+
+__m256i test_mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) {
+ // CHECK-LABEL: @test_mm256_mask_blend_epi16
+ // CHECK: @llvm.x86.avx512.mask.blend.w.256
+ return _mm256_mask_blend_epi16(__U,__A,__W);
+}
+
+__m128i test_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_mask_abs_epi8
+ // CHECK: @llvm.x86.avx512.mask.pabs.b.128
+ return _mm_mask_abs_epi8(__W,__U,__A);
+}
+
+__m128i test_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_maskz_abs_epi8
+ // CHECK: @llvm.x86.avx512.mask.pabs.b.128
+ return _mm_maskz_abs_epi8(__U,__A);
+}
+
+__m256i test_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_mask_abs_epi8
+ // CHECK: @llvm.x86.avx512.mask.pabs.b.256
+ return _mm256_mask_abs_epi8(__W,__U,__A);
+}
+
+__m256i test_mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_maskz_abs_epi8
+ // CHECK: @llvm.x86.avx512.mask.pabs.b.256
+ return _mm256_maskz_abs_epi8(__U,__A);
+}
+
+__m128i test_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_mask_abs_epi16
+ // CHECK: @llvm.x86.avx512.mask.pabs.w.128
+ return _mm_mask_abs_epi16(__W,__U,__A);
+}
+
+__m128i test_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_maskz_abs_epi16
+ // CHECK: @llvm.x86.avx512.mask.pabs.w.128
+ return _mm_maskz_abs_epi16(__U,__A);
+}
+
+__m256i test_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_mask_abs_epi16
+ // CHECK: @llvm.x86.avx512.mask.pabs.w.256
+ return _mm256_mask_abs_epi16(__W,__U,__A);
+}
+
+__m256i test_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_maskz_abs_epi16
+ // CHECK: @llvm.x86.avx512.mask.pabs.w.256
+ return _mm256_maskz_abs_epi16(__U,__A);
+}
+
+__m128i test_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_packs_epi32
+ // CHECK: @llvm.x86.avx512.mask.packssdw.128
+ return _mm_maskz_packs_epi32(__M,__A,__B);
+}
+__m128i test_mm_mask_packs_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_packs_epi32
+ // CHECK: @llvm.x86.avx512.mask.packssdw.128
+ return _mm_mask_packs_epi32(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_packs_epi32
+ // CHECK: @llvm.x86.avx512.mask.packssdw.256
+ return _mm256_maskz_packs_epi32(__M,__A,__B);
+}
+__m256i test_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_packs_epi32
+ // CHECK: @llvm.x86.avx512.mask.packssdw.256
+ return _mm256_mask_packs_epi32(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_packs_epi16
+ // CHECK: @llvm.x86.avx512.mask.packsswb.128
+ return _mm_maskz_packs_epi16(__M,__A,__B);
+}
+__m128i test_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_packs_epi16
+ // CHECK: @llvm.x86.avx512.mask.packsswb.128
+ return _mm_mask_packs_epi16(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_packs_epi16
+ // CHECK: @llvm.x86.avx512.mask.packsswb.256
+ return _mm256_maskz_packs_epi16(__M,__A,__B);
+}
+__m256i test_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_packs_epi16
+ // CHECK: @llvm.x86.avx512.mask.packsswb.256
+ return _mm256_mask_packs_epi16(__W,__M,__A,__B);
+}
+
+__m128i test_mm_mask_packus_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_packus_epi32
+ // CHECK: @llvm.x86.avx512.mask.packusdw.128
+ return _mm_mask_packus_epi32(__W,__M,__A,__B);
+}
+
+__m128i test_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_packus_epi32
+ // CHECK: @llvm.x86.avx512.mask.packusdw.128
+ return _mm_maskz_packus_epi32(__M,__A,__B);
+}
+
+__m256i test_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_packus_epi32
+ // CHECK: @llvm.x86.avx512.mask.packusdw.256
+ return _mm256_maskz_packus_epi32(__M,__A,__B);
+}
+
+__m256i test_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_packus_epi32
+ // CHECK: @llvm.x86.avx512.mask.packusdw.256
+ return _mm256_mask_packus_epi32(__W,__M,__A,__B);
+}
+
+__m128i test_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_packus_epi16
+ // CHECK: @llvm.x86.avx512.mask.packuswb.128
+ return _mm_maskz_packus_epi16(__M,__A,__B);
+}
+
+__m128i test_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_packus_epi16
+ // CHECK: @llvm.x86.avx512.mask.packuswb.128
+ return _mm_mask_packus_epi16(__W,__M,__A,__B);
+}
+
+__m256i test_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_packus_epi16
+ // CHECK: @llvm.x86.avx512.mask.packuswb.256
+ return _mm256_maskz_packus_epi16(__M,__A,__B);
+}
+
+__m256i test_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_packus_epi16
+ // CHECK: @llvm.x86.avx512.mask.packuswb.256
+ return _mm256_mask_packus_epi16(__W,__M,__A,__B);
+}
+
+__m128i test_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_adds_epi8
+ // CHECK: @llvm.x86.avx512.mask.padds.b.128
+ return _mm_mask_adds_epi8(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_adds_epi8
+ // CHECK: @llvm.x86.avx512.mask.padds.b.128
+ return _mm_maskz_adds_epi8(__U,__A,__B);
+}
+__m256i test_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_adds_epi8
+ // CHECK: @llvm.x86.avx512.mask.padds.b.256
+ return _mm256_mask_adds_epi8(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_adds_epi8
+ // CHECK: @llvm.x86.avx512.mask.padds.b.256
+ return _mm256_maskz_adds_epi8(__U,__A,__B);
+}
+__m128i test_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_adds_epi16
+ // CHECK: @llvm.x86.avx512.mask.padds.w.128
+ return _mm_mask_adds_epi16(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_adds_epi16
+ // CHECK: @llvm.x86.avx512.mask.padds.w.128
+ return _mm_maskz_adds_epi16(__U,__A,__B);
+}
+__m256i test_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_adds_epi16
+ // CHECK: @llvm.x86.avx512.mask.padds.w.256
+ return _mm256_mask_adds_epi16(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_adds_epi16
+ // CHECK: @llvm.x86.avx512.mask.padds.w.256
+ return _mm256_maskz_adds_epi16(__U,__A,__B);
+}
+__m128i test_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_adds_epu8
+ // CHECK: @llvm.x86.avx512.mask.paddus.b.128
+ return _mm_mask_adds_epu8(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_adds_epu8
+ // CHECK: @llvm.x86.avx512.mask.paddus.b.128
+ return _mm_maskz_adds_epu8(__U,__A,__B);
+}
+__m256i test_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_adds_epu8
+ // CHECK: @llvm.x86.avx512.mask.paddus.b.256
+ return _mm256_mask_adds_epu8(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_adds_epu8
+ // CHECK: @llvm.x86.avx512.mask.paddus.b.256
+ return _mm256_maskz_adds_epu8(__U,__A,__B);
+}
+__m128i test_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_adds_epu16
+ // CHECK: @llvm.x86.avx512.mask.paddus.w.128
+ return _mm_mask_adds_epu16(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_adds_epu16
+ // CHECK: @llvm.x86.avx512.mask.paddus.w.128
+ return _mm_maskz_adds_epu16(__U,__A,__B);
+}
+__m256i test_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_adds_epu16
+ // CHECK: @llvm.x86.avx512.mask.paddus.w.256
+ return _mm256_mask_adds_epu16(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_adds_epu16
+ // CHECK: @llvm.x86.avx512.mask.paddus.w.256
+ return _mm256_maskz_adds_epu16(__U,__A,__B);
+}
+__m128i test_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_avg_epu8
+ // CHECK: @llvm.x86.avx512.mask.pavg.b.128
+ return _mm_mask_avg_epu8(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_avg_epu8
+ // CHECK: @llvm.x86.avx512.mask.pavg.b.128
+ return _mm_maskz_avg_epu8(__U,__A,__B);
+}
+__m256i test_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_avg_epu8
+ // CHECK: @llvm.x86.avx512.mask.pavg.b.256
+ return _mm256_mask_avg_epu8(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_avg_epu8
+ // CHECK: @llvm.x86.avx512.mask.pavg.b.256
+ return _mm256_maskz_avg_epu8(__U,__A,__B);
+}
+__m128i test_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_avg_epu16
+ // CHECK: @llvm.x86.avx512.mask.pavg.w.128
+ return _mm_mask_avg_epu16(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_avg_epu16
+ // CHECK: @llvm.x86.avx512.mask.pavg.w.128
+ return _mm_maskz_avg_epu16(__U,__A,__B);
+}
+__m256i test_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_avg_epu16
+ // CHECK: @llvm.x86.avx512.mask.pavg.w.256
+ return _mm256_mask_avg_epu16(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_avg_epu16
+ // CHECK: @llvm.x86.avx512.mask.pavg.w.256
+ return _mm256_maskz_avg_epu16(__U,__A,__B);
+}
+__m128i test_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_max_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.b.128
+ return _mm_maskz_max_epi8(__M,__A,__B);
+}
+__m128i test_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_max_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.b.128
+ return _mm_mask_max_epi8(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_max_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.b.256
+ return _mm256_maskz_max_epi8(__M,__A,__B);
+}
+__m256i test_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_max_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.b.256
+ return _mm256_mask_max_epi8(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_max_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.w.128
+ return _mm_maskz_max_epi16(__M,__A,__B);
+}
+__m128i test_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_max_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.w.128
+ return _mm_mask_max_epi16(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_max_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.w.256
+ return _mm256_maskz_max_epi16(__M,__A,__B);
+}
+__m256i test_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_max_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmaxs.w.256
+ return _mm256_mask_max_epi16(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_max_epu8
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.b.128
+ return _mm_maskz_max_epu8(__M,__A,__B);
+}
+__m128i test_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_max_epu8
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.b.128
+ return _mm_mask_max_epu8(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_max_epu8
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.b.256
+ return _mm256_maskz_max_epu8(__M,__A,__B);
+}
+__m256i test_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_max_epu8
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.b.256
+ return _mm256_mask_max_epu8(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_max_epu16
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.w.128
+ return _mm_maskz_max_epu16(__M,__A,__B);
+}
+__m128i test_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_max_epu16
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.w.128
+ return _mm_mask_max_epu16(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_max_epu16
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.w.256
+ return _mm256_maskz_max_epu16(__M,__A,__B);
+}
+__m256i test_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_max_epu16
+ // CHECK: @llvm.x86.avx512.mask.pmaxu.w.256
+ return _mm256_mask_max_epu16(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_min_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmins.b.128
+ return _mm_maskz_min_epi8(__M,__A,__B);
+}
+__m128i test_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_min_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmins.b.128
+ return _mm_mask_min_epi8(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_min_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmins.b.256
+ return _mm256_maskz_min_epi8(__M,__A,__B);
+}
+__m256i test_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_min_epi8
+ // CHECK: @llvm.x86.avx512.mask.pmins.b.256
+ return _mm256_mask_min_epi8(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_min_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmins.w.128
+ return _mm_maskz_min_epi16(__M,__A,__B);
+}
+__m128i test_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_min_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmins.w.128
+ return _mm_mask_min_epi16(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_min_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmins.w.256
+ return _mm256_maskz_min_epi16(__M,__A,__B);
+}
+__m256i test_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_min_epi16
+ // CHECK: @llvm.x86.avx512.mask.pmins.w.256
+ return _mm256_mask_min_epi16(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_min_epu8
+ // CHECK: @llvm.x86.avx512.mask.pminu.b.128
+ return _mm_maskz_min_epu8(__M,__A,__B);
+}
+__m128i test_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_min_epu8
+ // CHECK: @llvm.x86.avx512.mask.pminu.b.128
+ return _mm_mask_min_epu8(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_min_epu8
+ // CHECK: @llvm.x86.avx512.mask.pminu.b.256
+ return _mm256_maskz_min_epu8(__M,__A,__B);
+}
+__m256i test_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_min_epu8
+ // CHECK: @llvm.x86.avx512.mask.pminu.b.256
+ return _mm256_mask_min_epu8(__W,__M,__A,__B);
+}
+__m128i test_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_min_epu16
+ // CHECK: @llvm.x86.avx512.mask.pminu.w.128
+ return _mm_maskz_min_epu16(__M,__A,__B);
+}
+__m128i test_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_min_epu16
+ // CHECK: @llvm.x86.avx512.mask.pminu.w.128
+ return _mm_mask_min_epu16(__W,__M,__A,__B);
+}
+__m256i test_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_min_epu16
+ // CHECK: @llvm.x86.avx512.mask.pminu.w.256
+ return _mm256_maskz_min_epu16(__M,__A,__B);
+}
+__m256i test_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_min_epu16
+ // CHECK: @llvm.x86.avx512.mask.pminu.w.256
+ return _mm256_mask_min_epu16(__W,__M,__A,__B);
+}
+__m128i test_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_shuffle_epi8
+ // CHECK: @llvm.x86.avx512.mask.pshuf.b.128
+ return _mm_mask_shuffle_epi8(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_shuffle_epi8
+ // CHECK: @llvm.x86.avx512.mask.pshuf.b.128
+ return _mm_maskz_shuffle_epi8(__U,__A,__B);
+}
+__m256i test_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_shuffle_epi8
+ // CHECK: @llvm.x86.avx512.mask.pshuf.b.256
+ return _mm256_mask_shuffle_epi8(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_shuffle_epi8
+ // CHECK: @llvm.x86.avx512.mask.pshuf.b.256
+ return _mm256_maskz_shuffle_epi8(__U,__A,__B);
+}
+__m128i test_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_subs_epi8
+ // CHECK: @llvm.x86.avx512.mask.psubs.b.128
+ return _mm_mask_subs_epi8(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_subs_epi8
+ // CHECK: @llvm.x86.avx512.mask.psubs.b.128
+ return _mm_maskz_subs_epi8(__U,__A,__B);
+}
+__m256i test_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_subs_epi8
+ // CHECK: @llvm.x86.avx512.mask.psubs.b.256
+ return _mm256_mask_subs_epi8(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_subs_epi8
+ // CHECK: @llvm.x86.avx512.mask.psubs.b.256
+ return _mm256_maskz_subs_epi8(__U,__A,__B);
+}
+__m128i test_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_subs_epi16
+ // CHECK: @llvm.x86.avx512.mask.psubs.w.128
+ return _mm_mask_subs_epi16(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_subs_epi16
+ // CHECK: @llvm.x86.avx512.mask.psubs.w.128
+ return _mm_maskz_subs_epi16(__U,__A,__B);
+}
+__m256i test_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_subs_epi16
+ // CHECK: @llvm.x86.avx512.mask.psubs.w.256
+ return _mm256_mask_subs_epi16(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_subs_epi16
+ // CHECK: @llvm.x86.avx512.mask.psubs.w.256
+ return _mm256_maskz_subs_epi16(__U,__A,__B);
+}
+__m128i test_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_subs_epu8
+ // CHECK: @llvm.x86.avx512.mask.psubus.b.128
+ return _mm_mask_subs_epu8(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_subs_epu8
+ // CHECK: @llvm.x86.avx512.mask.psubus.b.128
+ return _mm_maskz_subs_epu8(__U,__A,__B);
+}
+__m256i test_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_subs_epu8
+ // CHECK: @llvm.x86.avx512.mask.psubus.b.256
+ return _mm256_mask_subs_epu8(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_subs_epu8
+ // CHECK: @llvm.x86.avx512.mask.psubus.b.256
+ return _mm256_maskz_subs_epu8(__U,__A,__B);
+}
+__m128i test_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_subs_epu16
+ // CHECK: @llvm.x86.avx512.mask.psubus.w.128
+ return _mm_mask_subs_epu16(__W,__U,__A,__B);
+}
+__m128i test_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_subs_epu16
+ // CHECK: @llvm.x86.avx512.mask.psubus.w.128
+ return _mm_maskz_subs_epu16(__U,__A,__B);
+}
+__m256i test_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_subs_epu16
+ // CHECK: @llvm.x86.avx512.mask.psubus.w.256
+ return _mm256_mask_subs_epu16(__W,__U,__A,__B);
+}
+__m256i test_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_subs_epu16
+ // CHECK: @llvm.x86.avx512.mask.psubus.w.256
+ return _mm256_maskz_subs_epu16(__U,__A,__B);
+}
+
+
+__m128i test_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask2_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpermi2var.hi.128
+ return _mm_mask2_permutex2var_epi16(__A,__I,__U,__B);
+}
+__m256i test_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask2_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpermi2var.hi.256
+ return _mm256_mask2_permutex2var_epi16(__A,__I,__U,__B);
+}
+__m128i test_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B) {
+ // CHECK-LABEL: @test_mm_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpermt2var.hi.128
+ return _mm_permutex2var_epi16(__A,__I,__B);
+}
+__m128i test_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
+ // CHECK-LABEL: @test_mm_mask_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpermt2var.hi.128
+ return _mm_mask_permutex2var_epi16(__A,__U,__I,__B);
+}
+__m128i test_mm_maskz_permutex2var_epi16(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
+ // CHECK-LABEL: @test_mm_maskz_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpermt2var.hi.128
+ return _mm_maskz_permutex2var_epi16(__U,__A,__I,__B);
+}
+
+__m256i test_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpermt2var.hi.256
+ return _mm256_permutex2var_epi16(__A,__I,__B);
+}
+__m256i test_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_mask_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.mask.vpermt2var.hi.256
+ return _mm256_mask_permutex2var_epi16(__A,__U,__I,__B);
+}
+__m256i test_mm256_maskz_permutex2var_epi16(__mmask16 __U, __m256i __A, __m256i __I, __m256i __B) {
+ // CHECK-LABEL: @test_mm256_maskz_permutex2var_epi16
+ // CHECK: @llvm.x86.avx512.maskz.vpermt2var.hi.256
+ return _mm256_maskz_permutex2var_epi16(__U,__A,__I,__B);
+}
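Every intrinsic test added above follows the same masking convention, summarized here with a scalar model (my sketch of the documented mask/maskz semantics, not the intrinsics themselves): the mask form keeps the passthrough __W in each lane whose mask bit is clear, while the maskz form zeroes that lane.

#include <stdint.h>
#include <stdio.h>

/* Scalar model of an 8-lane i16 pair like _mm_mask_adds_epi16 and
   _mm_maskz_adds_epi16 (saturating add written out explicitly). */
static void model(int16_t dst[8], const int16_t w[8], uint8_t u,
                  const int16_t a[8], const int16_t b[8], int zero_masked) {
  for (int i = 0; i < 8; ++i) {
    int32_t s = a[i] + b[i];
    int16_t op = (int16_t)(s > 32767 ? 32767 : s < -32768 ? -32768 : s);
    dst[i] = (u >> i) & 1 ? op : (int16_t)(zero_masked ? 0 : w[i]);
  }
}

int main(void) {
  int16_t w[8] = {9, 9, 9, 9, 9, 9, 9, 9};
  int16_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int16_t b[8] = {1, 1, 1, 1, 1, 1, 1, 1};
  int16_t d[8];
  model(d, w, 0x0F, a, b, /*zero_masked=*/0);
  for (int i = 0; i < 8; ++i) printf("%d ", d[i]);  /* 2 3 4 5 9 9 9 9 */
  printf("\n");
  return 0;
}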
diff --git a/test/CodeGen/big-atomic-ops.c b/test/CodeGen/big-atomic-ops.c
index 28b7b5d70822..6a7a7001f96d 100644
--- a/test/CodeGen/big-atomic-ops.c
+++ b/test/CodeGen/big-atomic-ops.c
@@ -1,5 +1,5 @@
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-macosx10.9.0 | FileCheck %s
-
+// REQUIRES: x86-registered-target
// Also test serialization of atomic operations here, to avoid duplicating the
// test.
// RUN: %clang_cc1 %s -emit-pch -o %t -triple=x86_64-apple-macosx10.9.0
diff --git a/test/CodeGen/bitfield-2.c b/test/CodeGen/bitfield-2.c
index c5154fcb19e2..e4b1b0d9fd5c 100644
--- a/test/CodeGen/bitfield-2.c
+++ b/test/CodeGen/bitfield-2.c
@@ -14,7 +14,7 @@
// CHECK-RECORD: LLVMType:%struct.s0 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageAlignment:1>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0>
struct __attribute((packed)) s0 {
int f0 : 24;
};
@@ -54,8 +54,8 @@ unsigned long long test_0() {
// CHECK-RECORD: LLVMType:%struct.s1 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageAlignment:1>
-// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageAlignment:1>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
+// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
#pragma pack(push)
#pragma pack(1)
@@ -102,7 +102,7 @@ unsigned long long test_1() {
// CHECK-RECORD: LLVMType:%union.u2 = type { i8 }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageAlignment:1>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0>
union __attribute__((packed)) u2 {
unsigned long long f0 : 3;
@@ -274,8 +274,8 @@ _Bool test_6() {
// CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageAlignment:4>
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageAlignment:16>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16>
struct __attribute__((aligned(16))) s7 {
int a, b, c;
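The renamed field reads most clearly in the s7 case (my interpretation of the CHECK-RECORD lines): CGBitFieldInfo now records where each bitfield's storage unit starts inside the record rather than how it is aligned, 12 for the i8 holding the 5-bit field and 16 for the i32 holding the 29-bit one. Restated as C, with field names of my own choosing:

#include <stdio.h>

struct __attribute__((aligned(16))) s7_like {
  int a, b, c;  /* bytes 0..11 */
  int f0 : 5;   /* storage unit: an i8 at byte offset 12 */
  int f1 : 29;  /* storage unit: an i32 at byte offset 16 */
};

int main(void) {
  printf("sizeof = %zu\n", sizeof(struct s7_like));  /* 32, padded to align 16 */
  return 0;
}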
diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c
index c6aa3c2faac2..8e8216b10111 100644
--- a/test/CodeGen/builtins-ppc-altivec.c
+++ b/test/CodeGen/builtins-ppc-altivec.c
@@ -1081,6 +1081,31 @@ void test6() {
// CHECK: @llvm.ppc.altivec.vctuxs
// CHECK-LE: @llvm.ppc.altivec.vctuxs
+ /* vec_div */
+ res_vsc = vec_div(vsc, vsc);
+// CHECK: sdiv <16 x i8>
+// CHECK-LE: sdiv <16 x i8>
+
+ res_vuc = vec_div(vuc, vuc);
+// CHECK: udiv <16 x i8>
+// CHECK-LE: udiv <16 x i8>
+
+ res_vs = vec_div(vs, vs);
+// CHECK: sdiv <8 x i16>
+// CHECK-LE: sdiv <8 x i16>
+
+ res_vus = vec_div(vus, vus);
+// CHECK: udiv <8 x i16>
+// CHECK-LE: udiv <8 x i16>
+
+ res_vi = vec_div(vi, vi);
+// CHECK: sdiv <4 x i32>
+// CHECK-LE: sdiv <4 x i32>
+
+ res_vui = vec_div(vui, vui);
+// CHECK: udiv <4 x i32>
+// CHECK-LE: udiv <4 x i32>
+
/* vec_dss */
vec_dss(0);
// CHECK: @llvm.ppc.altivec.dss
@@ -2127,6 +2152,31 @@ void test6() {
// CHECK: @llvm.ppc.altivec.mtvscr
// CHECK-LE: @llvm.ppc.altivec.mtvscr
+ /* vec_mul */
+ res_vsc = vec_mul(vsc, vsc);
+// CHECK: mul <16 x i8>
+// CHECK-LE: mul <16 x i8>
+
+ res_vuc = vec_mul(vuc, vuc);
+// CHECK: mul <16 x i8>
+// CHECK-LE: mul <16 x i8>
+
+ res_vs = vec_mul(vs, vs);
+// CHECK: mul <8 x i16>
+// CHECK-LE: mul <8 x i16>
+
+ res_vus = vec_mul(vus, vus);
+// CHECK: mul <8 x i16>
+// CHECK-LE: mul <8 x i16>
+
+ res_vi = vec_mul(vi, vi);
+// CHECK: mul <4 x i32>
+// CHECK-LE: mul <4 x i32>
+
+ res_vui = vec_mul(vui, vui);
+// CHECK: mul <4 x i32>
+// CHECK-LE: mul <4 x i32>
+
/* vec_mule */
res_vs = vec_mule(vsc, vsc);
// CHECK: @llvm.ppc.altivec.vmulesb
@@ -3272,6 +3322,15 @@ void test6() {
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
+ res_vbs = vec_sld(vbs, vbs, 0);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
res_vp = vec_sld(vp, vp, 0);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
@@ -3284,6 +3343,11 @@ void test6() {
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
+ res_vbi = vec_sld(vbi, vbi, 0);
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+
res_vf = vec_sld(vf, vf, 0);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
@@ -3802,28 +3866,28 @@ void test6() {
/* vec_sr */
res_vsc = vec_sr(vsc, vuc);
-// CHECK: shr <16 x i8>
-// CHECK-LE: shr <16 x i8>
+// CHECK: lshr <16 x i8>
+// CHECK-LE: lshr <16 x i8>
res_vuc = vec_sr(vuc, vuc);
-// CHECK: shr <16 x i8>
-// CHECK-LE: shr <16 x i8>
+// CHECK: lshr <16 x i8>
+// CHECK-LE: lshr <16 x i8>
res_vs = vec_sr(vs, vus);
-// CHECK: shr <8 x i16>
-// CHECK-LE: shr <8 x i16>
+// CHECK: lshr <8 x i16>
+// CHECK-LE: lshr <8 x i16>
res_vus = vec_sr(vus, vus);
-// CHECK: shr <8 x i16>
-// CHECK-LE: shr <8 x i16>
+// CHECK: lshr <8 x i16>
+// CHECK-LE: lshr <8 x i16>
res_vi = vec_sr(vi, vui);
-// CHECK: shr <4 x i32>
-// CHECK-LE: shr <4 x i32>
+// CHECK: lshr <4 x i32>
+// CHECK-LE: lshr <4 x i32>
res_vui = vec_sr(vui, vui);
-// CHECK: shr <4 x i32>
-// CHECK-LE: shr <4 x i32>
+// CHECK: lshr <4 x i32>
+// CHECK-LE: lshr <4 x i32>
res_vsc = vec_vsrb(vsc, vuc);
// CHECK: shr <16 x i8>
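The vec_sr tightening deserves a note (my reading): vec_sr is specified as a logical shift regardless of element signedness, and because FileCheck matches substrings, the old bare "shr" pattern would also have accepted a wrong "ashr". The difference in scalar form:

#include <stdio.h>

int main(void) {
  signed char x = -16;                    /* bit pattern 0xF0 */
  printf("%d\n", (unsigned char)x >> 2);  /* logical (lshr): 60 */
  printf("%d\n", x >> 2);                 /* arithmetic (ashr): -4 */
  return 0;
}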
diff --git a/test/CodeGen/builtins-ppc-p8vector.c b/test/CodeGen/builtins-ppc-p8vector.c
index 61e14ba283d2..208dd4347f65 100644
--- a/test/CodeGen/builtins-ppc-p8vector.c
+++ b/test/CodeGen/builtins-ppc-p8vector.c
@@ -9,23 +9,41 @@
vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
-vector int vi = { -1, 2, -3, 4 };
+vector bool char vbc = { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1 };
+
+vector signed short vss = { 0, 1, 2, 3, 4, 5, 6, 7 };
+vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 };
+vector bool short vbs = { 1, 1, 0, 0, 0, 0, 1, 1 };
+
+vector signed int vsi = { -1, 2, -3, 4 };
vector unsigned int vui = { 1, 2, 3, 4 };
vector bool int vbi = {0, -1, -1, 0};
-vector bool long long vbll = { 1, 0 };
+
vector signed long long vsll = { 1, 2 };
vector unsigned long long vull = { 1, 2 };
+vector bool long long vbll = { 1, 0 };
+
+vector float vfa = { 1.e-4f, -132.23f, -22.1, 32.00f };
vector double vda = { 1.e-11, -132.23e10 };
int res_i;
vector signed char res_vsc;
vector unsigned char res_vuc;
-vector int res_vi;
+vector bool char res_vbc;
+
+vector signed short res_vss;
+vector unsigned short res_vus;
+vector bool short res_vbs;
+
+vector signed int res_vsi;
vector unsigned int res_vui;
vector bool int res_vbi;
-vector bool long long res_vbll;
+
vector signed long long res_vsll;
vector unsigned long long res_vull;
+vector bool long long res_vbll;
+
+vector double res_vf;
vector double res_vd;
// CHECK-LABEL: define void @test1
@@ -60,7 +78,7 @@ void test1() {
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
- res_vi = vec_mergee(vi, vi);
+ res_vsi = vec_mergee(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
@@ -74,7 +92,7 @@ void test1() {
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
- res_vi = vec_mergeo(vi, vi);
+ res_vsi = vec_mergeo(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
@@ -138,6 +156,325 @@ void test1() {
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
// CHECK-PPC: error: call to 'vec_cmplt' is ambiguous
+ /* vec_eqv */
+ res_vsc = vec_eqv(vsc, vsc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+ res_vsc = vec_eqv(vbc, vsc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+ res_vsc = vec_eqv(vsc, vbc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+ res_vuc = vec_eqv(vuc, vuc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+ res_vuc = vec_eqv(vbc, vuc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+ res_vuc = vec_eqv(vuc, vbc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+ res_vss = vec_eqv(vss, vss);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+ res_vss = vec_eqv(vbs, vss);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+ res_vss = vec_eqv(vss, vbs);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+ res_vus = vec_eqv(vus, vus);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+ res_vus = vec_eqv(vbs, vus);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+ res_vus = vec_eqv(vus, vbs);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+ res_vsi = vec_eqv(vsi, vsi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+ res_vsi = vec_eqv(vbi, vsi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+ res_vsi = vec_eqv(vsi, vbi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+ res_vui = vec_eqv(vui, vui);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+ res_vui = vec_eqv(vbi, vui);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+ res_vui = vec_eqv(vui, vbi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+ res_vsll = vec_eqv(vsll, vsll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+ res_vsll = vec_eqv(vbll, vsll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+ res_vsll = vec_eqv(vsll, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+ res_vull = vec_eqv(vull, vull);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+ res_vull = vec_eqv(vbll, vull);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+ res_vull = vec_eqv(vull, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+ res_vf = vec_eqv(vfa, vfa);
+// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-PPC: error: assigning to
+
+ res_vf = vec_eqv(vbi, vfa);
+// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK-PPC: error: assigning to
+
+ res_vf = vec_eqv(vfa, vbi);
+// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-PPC: error: assigning to
+
+ res_vd = vec_eqv(vda, vda);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-PPC: error: assigning to
+
+ res_vd = vec_eqv(vbll, vda);
+// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-PPC: error: assigning to
+
+ res_vd = vec_eqv(vda, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-PPC: error: assigning to
+
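+ /* vec_cntlz */
+// vec_cntlz counts the leading zero bits of each element; the trailing
+// 'i1 false' argument selects the ctlz variant that is fully defined for
+// zero inputs.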
+ res_vsc = vec_cntlz(vsc);
+// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
+// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
+// CHECK-PPC: warning: implicit declaration of function 'vec_cntlz' is invalid in C99
+
+ res_vuc = vec_cntlz(vuc);
+// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
+// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
+
+ res_vss = vec_cntlz(vss);
+// CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
+// CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
+
+ res_vus = vec_cntlz(vus);
+// CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
+// CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
+
+ res_vsi = vec_cntlz(vsi);
+// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
+// CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
+
+ res_vui = vec_cntlz(vui);
+// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
+// CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
+
+ res_vsll = vec_cntlz(vsll);
+// CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
+// CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
+
+ res_vull = vec_cntlz(vull);
+// CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
+// CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
+
/* ----------------------- predicates --------------------------- */
/* vec_all_eq */
res_i = vec_all_eq(vsll, vsll);
@@ -634,7 +971,7 @@ void test1() {
// CHECK-PPC: error: call to 'vec_min' is ambiguous
/* vec_mule */
- res_vsll = vec_mule(vi, vi);
+ res_vsll = vec_mule(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vmulesw
// CHECK-LE: @llvm.ppc.altivec.vmulosw
// CHECK-PPC: error: call to 'vec_mule' is ambiguous
@@ -645,7 +982,7 @@ void test1() {
// CHECK-PPC: error: call to 'vec_mule' is ambiguous
/* vec_mulo */
- res_vsll = vec_mulo(vi, vi);
+ res_vsll = vec_mulo(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vmulosw
// CHECK-LE: @llvm.ppc.altivec.vmulesw
// CHECK-PPC: error: call to 'vec_mulo' is ambiguous
@@ -656,7 +993,7 @@ void test1() {
// CHECK-PPC: error: call to 'vec_mulo' is ambiguous
/* vec_packs */
- res_vi = vec_packs(vsll, vsll);
+ res_vsi = vec_packs(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vpksdss
// CHECK-LE: @llvm.ppc.altivec.vpksdss
// CHECK-PPC: error: call to 'vec_packs' is ambiguous
@@ -701,8 +1038,8 @@ void test1() {
/* vec_sr */
res_vsll = vec_sr(vsll, vull);
-// CHECK: ashr <2 x i64>
-// CHECK-LE: ashr <2 x i64>
+// CHECK: lshr <2 x i64>
+// CHECK-LE: lshr <2 x i64>
// CHECK-PPC: error: call to 'vec_sr' is ambiguous
res_vull = vec_sr(vull, vull);
@@ -722,7 +1059,7 @@ void test1() {
// CHECK-PPC: error: call to 'vec_sra' is ambiguous
/* vec_unpackh */
- res_vsll = vec_unpackh(vi);
+ res_vsll = vec_unpackh(vsi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: error: call to 'vec_unpackh' is ambiguous
@@ -733,7 +1070,7 @@ void test1() {
// CHECK-PPC: error: call to 'vec_unpackh' is ambiguous
/* vec_unpackl */
- res_vsll = vec_unpackl(vi);
+ res_vsll = vec_unpackl(vsi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: error: call to 'vec_unpackl' is ambiguous
@@ -744,7 +1081,7 @@ void test1() {
// CHECK-PPC: error: call to 'vec_unpackl' is ambiguous
/* vec_vpksdss */
- res_vi = vec_vpksdss(vsll, vsll);
+ res_vsi = vec_vpksdss(vsll, vsll);
// CHECK: llvm.ppc.altivec.vpksdss
// CHECK-LE: llvm.ppc.altivec.vpksdss
// CHECK-PPC: warning: implicit declaration of function 'vec_vpksdss'
@@ -756,7 +1093,7 @@ void test1() {
// CHECK-PPC: warning: implicit declaration of function 'vec_vpksdus'
/* vec_vpkudum */
- res_vi = vec_vpkudum(vsll, vsll);
+ res_vsi = vec_vpkudum(vsll, vsll);
// CHECK: vperm
// CHECK-LE: vperm
// CHECK-PPC: warning: implicit declaration of function 'vec_vpkudum'
@@ -771,7 +1108,7 @@ void test1() {
// CHECK-PPC: warning: implicit declaration of function 'vec_vpkudus'
/* vec_vupkhsw */
- res_vsll = vec_vupkhsw(vi);
+ res_vsll = vec_vupkhsw(vsi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: warning: implicit declaration of function 'vec_vupkhsw'
@@ -781,7 +1118,7 @@ void test1() {
// CHECK-LE: llvm.ppc.altivec.vupklsw
/* vec_vupklsw */
- res_vsll = vec_vupklsw(vi);
+ res_vsll = vec_vupklsw(vsi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: warning: implicit declaration of function 'vec_vupklsw'
@@ -832,6 +1169,298 @@ void test1() {
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
+ /* vec_nand */
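+// vec_nand(a, b) computes ~(a & b): an 'and' followed by an xor with an
+// all-ones vector, as the checks below verify.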
+ res_vsc = vec_nand(vsc, vsc);
+// CHECK: [[T1:%.+]] = and <16 x i8>
+// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: [[T1:%.+]] = and <16 x i8>
+// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-PPC: warning: implicit declaration of function 'vec_nand' is invalid in C99
+
+ res_vsc = vec_nand(vsc, vbc);
+// CHECK: [[T1:%.+]] = and <16 x i8>
+// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: [[T1:%.+]] = and <16 x i8>
+// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ res_vsc = vec_nand(vbc, vsc);
+// CHECK: [[T1:%.+]] = and <16 x i8>
+// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: [[T1:%.+]] = and <16 x i8>
+// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ res_vuc = vec_nand(vuc, vuc);
+// CHECK: [[T1:%.+]] = and <16 x i8>
+// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: [[T1:%.+]] = and <16 x i8>
+// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ res_vuc = vec_nand(vuc, vbc);
+// CHECK: [[T1:%.+]] = and <16 x i8>
+// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: [[T1:%.+]] = and <16 x i8>
+// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ res_vuc = vec_nand(vbc, vuc);
+// CHECK: [[T1:%.+]] = and <16 x i8>
+// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: [[T1:%.+]] = and <16 x i8>
+// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+
+ res_vss = vec_nand(vss, vss);
+// CHECK: [[T1:%.+]] = and <8 x i16>
+// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: [[T1:%.+]] = and <8 x i16>
+// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+
+ res_vss = vec_nand(vss, vbs);
+// CHECK: [[T1:%.+]] = and <8 x i16>
+// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: [[T1:%.+]] = and <8 x i16>
+// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+
+ res_vss = vec_nand(vbs, vss);
+// CHECK: [[T1:%.+]] = and <8 x i16>
+// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: [[T1:%.+]] = and <8 x i16>
+// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+
+ res_vus = vec_nand(vus, vus);
+// CHECK: [[T1:%.+]] = and <8 x i16>
+// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: [[T1:%.+]] = and <8 x i16>
+// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+
+ res_vus = vec_nand(vus, vbs);
+// CHECK: [[T1:%.+]] = and <8 x i16>
+// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: [[T1:%.+]] = and <8 x i16>
+// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+
+ res_vus = vec_nand(vbs, vus);
+// CHECK: [[T1:%.+]] = and <8 x i16>
+// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: [[T1:%.+]] = and <8 x i16>
+// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+
+ res_vsi = vec_nand(vsi, vsi);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
+ res_vsi = vec_nand(vsi, vbi);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
+ res_vsi = vec_nand(vbi, vsi);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
+ res_vui = vec_nand(vui, vui);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
+ res_vui = vec_nand(vui, vbi);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
+ res_vui = vec_nand(vbi, vui);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
+ res_vsll = vec_nand(vsll, vsll);
+// CHECK: [[T1:%.+]] = and <2 x i64>
+// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+// CHECK-LE: [[T1:%.+]] = and <2 x i64>
+// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+
+ res_vsll = vec_nand(vsll, vbll);
+// CHECK: [[T1:%.+]] = and <2 x i64>
+// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+// CHECK-LE: [[T1:%.+]] = and <2 x i64>
+// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+
+ res_vsll = vec_nand(vbll, vsll);
+// CHECK: [[T1:%.+]] = and <2 x i64>
+// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+// CHECK-LE: [[T1:%.+]] = and <2 x i64>
+// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+
+ res_vull = vec_nand(vull, vull);
+// CHECK: [[T1:%.+]] = and <2 x i64>
+// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+// CHECK-LE: [[T1:%.+]] = and <2 x i64>
+// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+
+ res_vull = vec_nand(vull, vbll);
+// CHECK: [[T1:%.+]] = and <2 x i64>
+// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+// CHECK-LE: [[T1:%.+]] = and <2 x i64>
+// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+
+ res_vull = vec_nand(vbll, vull);
+// CHECK: [[T1:%.+]] = and <2 x i64>
+// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+// CHECK-LE: [[T1:%.+]] = and <2 x i64>
+// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+
+ /* vec_orc */
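+// vec_orc(a, b) computes a | ~b: the second operand is complemented with an
+// all-ones xor before the 'or'.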
+ res_vsc = vec_orc(vsc, vsc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-PPC: warning: implicit declaration of function 'vec_orc' is invalid in C99
+
+ res_vsc = vec_orc(vsc, vbc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+
+ res_vsc = vec_orc(vbc, vsc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+
+ res_vuc = vec_orc(vuc, vuc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+
+ res_vuc = vec_orc(vuc, vbc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+
+ res_vuc = vec_orc(vbc, vuc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+
+ res_vss = vec_orc(vss, vss);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
+ res_vss = vec_orc(vss, vbs);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
+ res_vss = vec_orc(vbs, vss);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
+ res_vus = vec_orc(vus, vus);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
+ res_vus = vec_orc(vus, vbs);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
+ res_vus = vec_orc(vbs, vus);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
+ res_vsi = vec_orc(vsi, vsi);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
+ res_vsi = vec_orc(vsi, vbi);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
+ res_vsi = vec_orc(vbi, vsi);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
+ res_vui = vec_orc(vui, vui);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
+ res_vui = vec_orc(vui, vbi);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
+ res_vui = vec_orc(vbi, vui);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
+ res_vsll = vec_orc(vsll, vsll);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
+ res_vsll = vec_orc(vsll, vbll);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
+ res_vsll = vec_orc(vbll, vsll);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
+ res_vull = vec_orc(vull, vull);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
+ res_vull = vec_orc(vull, vbll);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
+ res_vull = vec_orc(vbll, vull);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
/* vec_vbpermq */
res_vsll = vec_vbpermq(vsc, vsc);
// CHECK: llvm.ppc.altivec.vbpermq
diff --git a/test/CodeGen/builtins-ppc-vsx.c b/test/CodeGen/builtins-ppc-vsx.c
index 99362133dd9a..9a40d3041db1 100644
--- a/test/CodeGen/builtins-ppc-vsx.c
+++ b/test/CodeGen/builtins-ppc-vsx.c
@@ -1,6 +1,6 @@
// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
vector unsigned char vuc = { 8, 9, 10, 11, 12, 13, 14, 15,
0, 1, 2, 3, 4, 5, 6, 7};
@@ -27,39 +27,57 @@ void dummy() { }
void test1() {
// CHECK-LABEL: define void @test1
+// CHECK-LE-LABEL: define void @test1
res_vd = vec_add(vd, vd);
// CHECK: fadd <2 x double>
+// CHECK-LE: fadd <2 x double>
res_vd = vec_and(vbll, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
res_vd = vec_and(vd, vbll);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
res_vd = vec_and(vd, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
dummy();
// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
res_vd = vec_andc(vbll, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
+// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
dummy();
// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
res_vd = vec_andc(vd, vbll);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
+// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
dummy();
// CHECK: call void @dummy()
@@ -72,307 +90,759 @@ void test1() {
dummy();
// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
res_vd = vec_ceil(vd);
// CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
res_vf = vec_ceil(vf);
// CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
res_vbll = vec_cmpeq(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
res_vbi = vec_cmpeq(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
res_vbll = vec_cmpge(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
res_vbi = vec_cmpge(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
res_vbll = vec_cmpgt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
res_vbi = vec_cmpgt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
res_vbll = vec_cmple(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
res_vbi = vec_cmple(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
res_vbll = vec_cmplt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
res_vbi = vec_cmplt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+
+ /* vec_cpsgn */
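+// vec_cpsgn combines the magnitude of one operand with the sign of the
+// other, mapping straight onto the target-independent llvm.copysign
+// intrinsic.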
+ res_vf = vec_cpsgn(vf, vf);
+// CHECK: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
+// CHECK-LE: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
+
+ res_vd = vec_cpsgn(vd, vd);
+// CHECK: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
+// CHECK-LE: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
/* vec_div */
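+// vec_div is element-wise: the new 64-bit integer overloads emit plain
+// sdiv/udiv, and the floating-point forms now emit fdiv rather than the
+// target-specific xvdiv intrinsics.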
+ res_vsll = vec_div(vsll, vsll);
+// CHECK: sdiv <2 x i64>
+// CHECK-LE: sdiv <2 x i64>
+
+ res_vull = vec_div(vull, vull);
+// CHECK: udiv <2 x i64>
+// CHECK-LE: udiv <2 x i64>
+
res_vf = vec_div(vf, vf);
-// CHECK: @llvm.ppc.vsx.xvdivsp
+// CHECK: fdiv <4 x float>
+// CHECK-LE: fdiv <4 x float>
res_vd = vec_div(vd, vd);
-// CHECK: @llvm.ppc.vsx.xvdivdp
+// CHECK: fdiv <2 x double>
+// CHECK-LE: fdiv <2 x double>
/* vec_max */
res_vf = vec_max(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
+// CHECK-LE: @llvm.ppc.vsx.xvmaxsp
res_vd = vec_max(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmaxdp
+// CHECK-LE: @llvm.ppc.vsx.xvmaxdp
res_vf = vec_vmaxfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
+// CHECK-LE: @llvm.ppc.vsx.xvmaxsp
/* vec_min */
res_vf = vec_min(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
+// CHECK-LE: @llvm.ppc.vsx.xvminsp
res_vd = vec_min(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmindp
+// CHECK-LE: @llvm.ppc.vsx.xvmindp
res_vf = vec_vminfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
+// CHECK-LE: @llvm.ppc.vsx.xvminsp
res_d = __builtin_vsx_xsmaxdp(d, d);
// CHECK: @llvm.ppc.vsx.xsmaxdp
+// CHECK-LE: @llvm.ppc.vsx.xsmaxdp
res_d = __builtin_vsx_xsmindp(d, d);
// CHECK: @llvm.ppc.vsx.xsmindp
+// CHECK-LE: @llvm.ppc.vsx.xsmindp
/* vec_perm */
res_vsll = vec_perm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
res_vull = vec_perm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbll = vec_perm(vbll, vbll, vuc);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+ res_vf = vec_round(vf);
+// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
+// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float>
+
+ res_vd = vec_round(vd);
+// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
+// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double>
res_vd = vec_perm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
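+// vec_splat replicates the element at the given index across the whole
+// vector via vperm; on little-endian the permute mask is complemented first
+// (the leading xor in the CHECK-LE lines).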
+ res_vd = vec_splat(vd, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+ res_vbll = vec_splat(vbll, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+ res_vsll = vec_splat(vsll, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+ res_vull = vec_splat(vull, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
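+// vec_pack narrows each doubleword to its low-order word, packing two
+// two-element vectors into one four-element vector via vperm.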
+ res_vsi = vec_pack(vsll, vsll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_pack(vull, vull);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbi = vec_pack(vbll, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
res_vsll = vec_vperm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
res_vull = vec_vperm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
res_vd = vec_vperm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
/* vec_vsx_ld */
res_vsi = vec_vsx_ld(0, &vsi);
// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
res_vui = vec_vsx_ld(0, &vui);
// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
res_vf = vec_vsx_ld (0, &vf);
// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
res_vsll = vec_vsx_ld(0, &vsll);
// CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
res_vull = vec_vsx_ld(0, &vull);
// CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
res_vd = vec_vsx_ld(0, &vd);
// CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
/* vec_vsx_st */
vec_vsx_st(vsi, 0, &res_vsi);
// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
vec_vsx_st(vui, 0, &res_vui);
// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
vec_vsx_st(vf, 0, &res_vf);
// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
vec_vsx_st(vsll, 0, &res_vsll);
// CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
vec_vsx_st(vull, 0, &res_vull);
// CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
vec_vsx_st(vd, 0, &res_vd);
// CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
/* vec_and */
res_vsll = vec_and(vsll, vsll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vsll = vec_and(vbll, vsll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vsll = vec_and(vsll, vbll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_and(vull, vull);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_and(vbll, vull);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_and(vull, vbll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vbll = vec_and(vbll, vbll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
/* vec_vand */
res_vsll = vec_vand(vsll, vsll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vsll = vec_vand(vbll, vsll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vsll = vec_vand(vsll, vbll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_vand(vull, vull);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_vand(vbll, vull);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_vand(vull, vbll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vbll = vec_vand(vbll, vbll);
// CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
/* vec_andc */
res_vsll = vec_andc(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vsll = vec_andc(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vsll = vec_andc(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_andc(vull, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_andc(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vull = vec_andc(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
res_vbll = vec_andc(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
+
+ res_vf = vec_floor(vf);
+// CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
+
+ res_vd = vec_floor(vd);
+// CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
+
+ res_vf = vec_madd(vf, vf, vf);
+// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
+
+ res_vd = vec_madd(vd, vd, vd);
+// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
+
+ /* vec_mergeh */
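+// vec_mergeh interleaves elements from the high halves of its two operands
+// (vec_mergel, below, from the low halves); for doublewords both are
+// expressed as vperm.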
+ res_vsll = vec_mergeh(vsll, vsll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_mergeh(vsll, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_mergeh(vbll, vsll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_mergeh(vull, vull);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_mergeh(vull, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_mergeh(vbll, vull);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ /* vec_mergel */
+ res_vsll = vec_mergel(vsll, vsll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_mergel(vsll, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_mergel(vbll, vsll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_mergel(vull, vull);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_mergel(vull, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_mergel(vbll, vull);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ /* vec_msub */
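+// vec_msub(a, b, c) computes a*b - c: the addend is negated with an fsub
+// from -0.0 and then fed to llvm.fma. vec_nmadd and vec_nmsub, below,
+// additionally negate the result.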
+ res_vf = vec_msub(vf, vf, vf);
+// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
+// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
+
+ res_vd = vec_msub(vd, vd, vd);
+// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
+// CHECK-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
+// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
+
+ res_vsll = vec_mul(vsll, vsll);
+// CHECK: mul <2 x i64>
+// CHECK-LE: mul <2 x i64>
+
+ res_vull = vec_mul(vull, vull);
+// CHECK: mul <2 x i64>
+// CHECK-LE: mul <2 x i64>
+
+ res_vf = vec_mul(vf, vf);
+// CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
+
+ res_vd = vec_mul(vd, vd);
+// CHECK: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
+
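+// Note the crossed mapping: vec_nearbyint lowers to llvm.round (round to
+// nearest, ties away from zero), while vec_rint further down lowers to
+// llvm.nearbyint (round using the current rounding mode).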
+ res_vf = vec_nearbyint(vf);
+// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
+
+ res_vd = vec_nearbyint(vd);
+// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
+
+ res_vf = vec_nmadd(vf, vf, vf);
+// CHECK: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
+// CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
+// CHECK-LE: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
+// CHECK-LE-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
+
+ res_vd = vec_nmadd(vd, vd, vd);
+// CHECK: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
+// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
+// CHECK-LE: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
+// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
+
+ res_vf = vec_nmsub(vf, vf, vf);
+// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
+// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
+// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+
+ res_vd = vec_nmsub(vd, vd, vd);
+// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
+// CHECK-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
+// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
+// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
+// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
/* vec_nor */
res_vsll = vec_nor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_nor(vull, vull);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_nor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: xor <2 x i64>
+
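+// vec_nor on doubles works on the bit pattern: bitcast to <2 x i64>, 'or',
+// then complement via an all-ones xor.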
+ res_vd = vec_nor(vd, vd);
+// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
+// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK-LE: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
/* vec_or */
res_vsll = vec_or(vsll, vsll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vsll = vec_or(vbll, vsll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vsll = vec_or(vsll, vbll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vull = vec_or(vull, vull);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vull = vec_or(vbll, vull);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vull = vec_or(vull, vbll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vbll = vec_or(vbll, vbll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
+
+ res_vd = vec_or(vd, vd);
+// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+
+ res_vd = vec_or(vbll, vd);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
+// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK-LE: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
+// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>
+
+ res_vd = vec_or(vd, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
+// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK-LE: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>
+
+ res_vf = vec_re(vf);
+// CHECK: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
+// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
+
+ res_vd = vec_re(vd);
+// CHECK: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
+// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
+
+ res_vf = vec_rint(vf);
+// CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
+
+ res_vd = vec_rint(vd);
+// CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
+
+ res_vf = vec_rsqrte(vf);
+// CHECK: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
+
+ res_vd = vec_rsqrte(vd);
+// CHECK: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
+
+ dummy();
+// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
+
+  res_vd = vec_sel(vd, vd, vbll);
+// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
+// CHECK: and <2 x i64> %{{[0-9]+}},
+// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK: or <2 x i64>
+// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
+// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64> %{{[0-9]+}},
+// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
+
+ dummy();
+// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
+
+ res_vd = vec_sel(vd, vd, vull);
+// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
+// CHECK: and <2 x i64> %{{[0-9]+}},
+// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK: or <2 x i64>
+// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
+// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64> %{{[0-9]+}},
+// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
+
+ res_vf = vec_sqrt(vf);
+// CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
+
+ res_vd = vec_sqrt(vd);
+// CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
+
+ res_vd = vec_sub(vd, vd);
+// CHECK: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
+
+ res_vf = vec_trunc(vf);
+// CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
+
+ res_vd = vec_trunc(vd);
+// CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
/* vec_vor */
res_vsll = vec_vor(vsll, vsll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vsll = vec_vor(vbll, vsll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vsll = vec_vor(vsll, vbll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vull = vec_vor(vull, vull);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vull = vec_vor(vbll, vull);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vull = vec_vor(vull, vbll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
res_vbll = vec_vor(vbll, vbll);
// CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
/* vec_xor */
res_vsll = vec_xor(vsll, vsll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vsll = vec_xor(vbll, vsll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vsll = vec_xor(vsll, vbll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_xor(vull, vull);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_xor(vbll, vull);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_xor(vull, vbll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vbll = vec_xor(vbll, vbll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
+
+ dummy();
+// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
+
+ res_vd = vec_xor(vd, vd);
+// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
+// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
+
+ dummy();
+// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
+
+ res_vd = vec_xor(vd, vbll);
+// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
+// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
+
+ dummy();
+// CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
+
+ res_vd = vec_xor(vbll, vd);
+// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
+// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
/* vec_vxor */
res_vsll = vec_vxor(vsll, vsll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vsll = vec_vxor(vbll, vsll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vsll = vec_vxor(vsll, vbll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_vxor(vull, vull);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_vxor(vbll, vull);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vull = vec_vxor(vull, vbll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
res_vbll = vec_vxor(vbll, vbll);
// CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
}
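
The double-precision overloads exercised above (vec_or, vec_xor, vec_nor, vec_sel on vector double) have no floating-point bitwise instructions to map to, which is why every CHECK group routes through <2 x i64>. For reference, a minimal C sketch of the same bitcast/and/or pattern; the helper names are illustrative, not from the test, and it assumes clang with -maltivec -mvsx on a powerpc64 target:

#include <altivec.h>

// OR two double vectors: bitcast to integers, or, bitcast back,
// matching the vec_or CHECK lines above.
vector double or_vd(vector double a, vector double b) {
  vector unsigned long long ai = (vector unsigned long long)a;
  vector unsigned long long bi = (vector unsigned long long)b;
  return (vector double)(ai | bi);
}

// Select: (a & ~m) | (b & m), the xor/and/and/or sequence the
// vec_sel CHECK lines expect.
vector double sel_vd(vector double a, vector double b,
                     vector bool long long m) {
  vector unsigned long long ai = (vector unsigned long long)a;
  vector unsigned long long bi = (vector unsigned long long)b;
  vector unsigned long long mi = (vector unsigned long long)m;
  return (vector double)((ai & ~mi) | (bi & mi));
}
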
diff --git a/test/CodeGen/debug-info-block.c b/test/CodeGen/debug-info-block.c
index c4930bfc3c86..48a9f66877e1 100644
--- a/test/CodeGen/debug-info-block.c
+++ b/test/CodeGen/debug-info-block.c
@@ -1,10 +1,29 @@
// RUN: %clang_cc1 -fblocks -g -emit-llvm -o - %s | FileCheck %s
// Verify that the desired debugging type is generated for a structure
-// member that is a pointer to a block.
+// member that is a pointer to a block.
-// CHECK: !DICompositeType(tag: DW_TAG_structure_type, name: "__block_literal_generic"
+// CHECK: !DICompositeType(tag: DW_TAG_structure_type, scope
+// CHECK-NOT: line
+// CHECK-SAME: elements: ![[ELEMS1:.*]])
+// CHECK: ![[ELEMS1]] = {{.*, .*, .*,}} ![[FPEL1:.*]], {{.*}}
+// CHECK: ![[INT:.*]] = !DIBasicType(name: "int"
+// CHECK: ![[FPEL1]] = {{.*}}"__FuncPtr", {{.*}}, baseType: ![[FPTY1:[0-9]+]]
+// CHECK: ![[FPTY1]] = {{.*}}baseType: ![[FNTY1:[0-9]+]]
+// CHECK: ![[FNTY1]] = !DISubroutineType(types: ![[VOIDVOID:[0-9]+]])
+// CHECK: ![[VOIDVOID]] = !{null, null}
// CHECK: !DICompositeType(tag: DW_TAG_structure_type, name: "__block_descriptor"
+// CHECK-NOT: line
+// CHECK-SAME: )
+
+// CHECK: !DICompositeType(tag: DW_TAG_structure_type, scope
+// CHECK-NOT: line
+// CHECK-SAME: elements: ![[ELEMS2:.*]])
+// CHECK: ![[ELEMS2]] = {{.*,.*,.*}}, ![[FPEL2:.*]], {{.*}}
+// CHECK: ![[FPEL2]] = {{.*}}"__FuncPtr", {{.*}}, baseType: ![[FPTY2:[0-9]+]]
+// CHECK: ![[FPTY2]] = {{.*}}baseType: ![[FNTY2:[0-9]+]]
+// CHECK: ![[FNTY2]] = !DISubroutineType(types: ![[INTINT:[0-9]+]])
+// CHECK: ![[INTINT]] = !{![[INT]], ![[INT]]}
struct inStruct {
- void (^genericBlockPtr)();
+ void (^voidBlockPtr)();
+ int (^intBlockPtr)(int);
} is;
-
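
The rewritten test splits the single generic block pointer into two differently typed members so that each gets its own DISubroutineType: !{null, null} for the void block, !{int, int} for the int one. A hedged usage sketch of the same shape, illustrative only (build with clang -fblocks, plus -lBlocksRuntime outside Darwin):

#include <stdio.h>

struct inStruct {
  void (^voidBlockPtr)(void);
  int (^intBlockPtr)(int);
};

int main(void) {
  struct inStruct s = {
    ^{ puts("void block"); },    // signature maps to !{null, null}
    ^(int x) { return x + 1; },  // signature maps to !{int, int}
  };
  s.voidBlockPtr();
  printf("%d\n", s.intBlockPtr(41));
  return 0;
}
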
diff --git a/test/CodeGen/exceptions-seh-finally.c b/test/CodeGen/exceptions-seh-finally.c
index 345d514611e3..772e28306b58 100644
--- a/test/CodeGen/exceptions-seh-finally.c
+++ b/test/CodeGen/exceptions-seh-finally.c
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -triple x86_64-pc-win32 -fms-extensions -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple i686-pc-win32 -fms-extensions -emit-llvm -o - | FileCheck %s
void abort(void) __attribute__((noreturn));
void might_crash(void);
@@ -17,18 +18,18 @@ void basic_finally(void) {
// CHECK: to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[invoke_cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@basic_finally@@"(i8 0, i8* %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
// CHECK-NEXT: ret void
//
// CHECK: [[lpad]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@basic_finally@@"(i8 1, i8* %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 1, i8* %[[fp]])
// CHECK: resume { i8*, i32 }
-// CHECK: define internal void @"\01?fin$0@0@basic_finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK: define internal void @"\01?fin$0@0@basic_finally@@"({{.*}})
// CHECK: call void @cleanup()
// Mostly check that we don't double emit 'r' which would crash.
@@ -57,11 +58,11 @@ l:
// CHECK: to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[invoke_cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@label_in_finally@@"(i8 0, i8* %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@label_in_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
// CHECK: ret void
-// CHECK: define internal void @"\01?fin$0@0@label_in_finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK: define internal void @"\01?fin$0@0@label_in_finally@@"({{.*}})
// CHECK: br label %[[l:[^ ]*]]
//
// CHECK: [[l]]
@@ -84,19 +85,19 @@ void use_abnormal_termination(void) {
// CHECK: to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[invoke_cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@use_abnormal_termination@@"(i8 0, i8* %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
// CHECK: ret void
//
// CHECK: [[lpad]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@use_abnormal_termination@@"(i8 1, i8* %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} 1, i8* %[[fp]])
// CHECK: resume { i8*, i32 }
-// CHECK: define internal void @"\01?fin$0@0@use_abnormal_termination@@"(i8 %abnormal_termination, i8* %frame_pointer)
-// CHECK: %[[abnormal_zext:[^ ]*]] = zext i8 %abnormal_termination to i32
+// CHECK: define internal void @"\01?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} %[[abnormal:abnormal_termination]], i8* %frame_pointer)
+// CHECK: %[[abnormal_zext:[^ ]*]] = zext i8 %[[abnormal]] to i32
// CHECK: store i32 %[[abnormal_zext]], i32* @crashed
// CHECK-NEXT: ret void
@@ -109,11 +110,10 @@ void noreturn_noop_finally() {
}
// CHECK-LABEL: define void @noreturn_noop_finally()
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@noreturn_noop_finally@@"(i8 0, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@noreturn_noop_finally@@"({{.*}})
// CHECK: ret void
-// CHECK: define internal void @"\01?fin$0@0@noreturn_noop_finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK: define internal void @"\01?fin$0@0@noreturn_noop_finally@@"({{.*}})
// CHECK: call void @abort()
// CHECK: unreachable
@@ -130,18 +130,16 @@ void noreturn_finally() {
// CHECK: to label %[[cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@noreturn_finally@@"(i8 0, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@noreturn_finally@@"({{.*}})
// CHECK: ret void
//
// CHECK: [[lpad]]
// CHECK: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@noreturn_finally@@"(i8 1, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@noreturn_finally@@"({{.*}})
// CHECK: resume { i8*, i32 }
-// CHECK: define internal void @"\01?fin$0@0@noreturn_finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK: define internal void @"\01?fin$0@0@noreturn_finally@@"({{.*}})
// CHECK: call void @abort()
// CHECK: unreachable
@@ -152,11 +150,10 @@ int finally_with_return() {
}
}
// CHECK-LABEL: define i32 @finally_with_return()
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK-NEXT: call void @"\01?fin$0@0@finally_with_return@@"(i8 0, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@finally_with_return@@"({{.*}})
// CHECK-NEXT: ret i32 42
-// CHECK: define internal void @"\01?fin$0@0@finally_with_return@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK: define internal void @"\01?fin$0@0@finally_with_return@@"({{.*}})
// CHECK-NOT: br i1
// CHECK-NOT: br label
// CHECK: ret void
@@ -174,25 +171,22 @@ int nested___finally___finally() {
}
// CHECK-LABEL: define i32 @nested___finally___finally
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally@@"(i8 0, i8* %[[fp]])
+// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally@@"({{.*}})
// CHECK: to label %[[outercont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[outercont]]
-// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___finally@@"(i8 0, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@nested___finally___finally@@"({{.*}})
// CHECK-NEXT: ret i32 0
//
// CHECK: [[lpad]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___finally@@"(i8 1, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@nested___finally___finally@@"({{.*}})
-// CHECK-LABEL: define internal void @"\01?fin$0@0@nested___finally___finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK-LABEL: define internal void @"\01?fin$0@0@nested___finally___finally@@"({{.*}})
// CHECK: ret void
-// CHECK-LABEL: define internal void @"\01?fin$1@0@nested___finally___finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK-LABEL: define internal void @"\01?fin$1@0@nested___finally___finally@@"({{.*}})
// CHECK: unreachable
int nested___finally___finally_with_eh_edge() {
@@ -212,31 +206,27 @@ int nested___finally___finally_with_eh_edge() {
// CHECK-NEXT: to label %[[invokecont:[^ ]*]] unwind label %[[lpad1:[^ ]*]]
//
// [[invokecont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"(i8 0, i8* %[[fp]])
+// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: to label %[[outercont:[^ ]*]] unwind label %[[lpad2:[^ ]*]]
//
// CHECK: [[outercont]]
-// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"(i8 0, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK-NEXT: ret i32 912
//
// CHECK: [[lpad1]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"(i8 1, i8* %[[fp]])
+// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: to label %[[outercont:[^ ]*]] unwind label %[[lpad2]]
//
// CHECK: [[lpad2]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"(i8 1, i8* %[[fp]])
+// CHECK: call void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: resume
-// CHECK-LABEL: define internal void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK-LABEL: define internal void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: ret void
-// CHECK-LABEL: define internal void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"(i8 %abnormal_termination, i8* %frame_pointer)
+// CHECK-LABEL: define internal void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: unreachable
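
All of the churn above is one mechanical change: each __finally body is outlined into a "?fin$N@0@func@@" helper taking (abnormal_termination, frame_pointer), and the frame pointer argument now comes from llvm.localaddress rather than llvm.frameaddress(i32 0), which also works on i686. A rough C model of the outlining, with illustrative names only (fin0 stands in for the compiler-generated funclet):

void cleanup(void);
void might_crash(void);

// Stand-in for the generated "\01?fin$0@0@basic_finally@@" funclet.
static void fin0(char abnormal_termination, char *frame_pointer) {
  (void)frame_pointer;        // real funclets use it via llvm.localrecover
  (void)abnormal_termination; // 0 = normal exit, 1 = unwinding
  cleanup();                  // the __finally body
}

void basic_finally_model(void) {
  might_crash();
  fin0(0, (char *)0);         // normal path: abnormal_termination = 0
  // On the EH path (the landingpad), the same helper runs with 1,
  // followed by the resume.
}
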
diff --git a/test/CodeGen/exceptions-seh-leave.c b/test/CodeGen/exceptions-seh-leave.c
index 36b896d06240..e56da17d0a80 100644
--- a/test/CodeGen/exceptions-seh-leave.c
+++ b/test/CodeGen/exceptions-seh-leave.c
@@ -74,7 +74,7 @@ int __leave_with___finally_simple() {
// CHECK-NEXT: br label %[[tryleave:[^ ]*]]
// CHECK-NOT: store i32 23
// CHECK: [[tryleave]]
-// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@__leave_with___finally_simple@@"(i8 0, i8* %[[fp]])
// __finally block doesn't return, __finally.cont doesn't exist.
@@ -94,7 +94,7 @@ int __leave_with___finally_noreturn() {
// CHECK-NEXT: br label %[[tryleave:[^ ]*]]
// CHECK-NOT: store i32 23
// CHECK: [[tryleave]]
-// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@__leave_with___finally_noreturn@@"(i8 0, i8* %[[fp]])
// The "normal" case.
@@ -118,7 +118,7 @@ int __leave_with___finally() {
// CHECK-NEXT: br label %[[tryleave:[^ ]*]]
// CHECK-NOT: store i32 23
// CHECK: [[tryleave]]
-// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@__leave_with___finally@@"(i8 0, i8* %[[fp]])
@@ -148,7 +148,7 @@ int nested___except___finally() {
// CHECK-NEXT: to label %[[g1_cont1:.*]] unwind label %[[g1_lpad:.*]]
// CHECK: [[g1_cont1]]
-// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: invoke void @"\01?fin$0@0@nested___except___finally@@"(i8 0, i8* %[[fp]])
// CHECK-NEXT: to label %[[fin_cont:.*]] unwind label %[[g2_lpad:.*]]
@@ -159,7 +159,7 @@ int nested___except___finally() {
// CHECK: [[g1_lpad]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: catch i8* null
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: invoke void @"\01?fin$0@0@nested___except___finally@@"(i8 1, i8* %[[fp]])
// CHECK-NEXT: to label %[[g1_resume:.*]] unwind label %[[g2_lpad]]
@@ -205,7 +205,7 @@ int nested___except___except() {
// CHECK: br label %[[except:[^ ]*]]
// CHECK: [[except]]
-// CHECK-NEXT: invoke void @g()
+// CHECK: invoke void @g()
// CHECK-NEXT: to label %[[g2_cont:.*]] unwind label %[[g2_lpad:.*]]
// CHECK: [[g2_cont]]
@@ -216,7 +216,7 @@ int nested___except___except() {
// CHECK: br label %[[outerexcept:[^ ]*]]
// CHECK: [[outerexcept]]
-// CHECK-NEXT: br label %[[trycont4:[^ ]*]]
+// CHECK: br label %[[trycont4:[^ ]*]]
// CHECK: [[trycont4]]
// CHECK-NEXT: ret i32 1
@@ -258,15 +258,15 @@ int nested___finally___except() {
// CHECK: br label %[[except:[^ ]*]]
// CHECK: [[except]]
-// CHECK-NEXT: invoke void @g()
+// CHECK: invoke void @g()
// CHECK-NEXT: to label %[[g2_cont:.*]] unwind label %[[g2_lpad:.*]]
// CHECK: [[g2_cont]]
-// CHECK-NEXT: br label %[[tryleave:[^ ]*]]
+// CHECK: br label %[[tryleave:[^ ]*]]
// CHECK-NOT: 23
// CHECK: [[g2_lpad]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___except@@"(i8 1, i8* %[[fp]])
// CHECK-NEXT: br label %[[ehresume:[^ ]*]]
@@ -275,7 +275,7 @@ int nested___finally___except() {
// CHECK-NEXT: br label %[[tryleave]]
// CHECK: [[tryleave]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___except@@"(i8 0, i8* %[[fp]])
// CHECK-NEXT: ret i32 1
@@ -311,20 +311,20 @@ int nested___finally___finally() {
// CHECK: [[g1_cont]]
// CHECK: store i32 16, i32* %[[myres:[^ ]*]],
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: invoke void @"\01?fin$1@0@nested___finally___finally@@"(i8 0, i8* %[[fp]])
// CHECK-NEXT: to label %[[finally_cont:.*]] unwind label %[[g2_lpad:.*]]
// CHECK: [[finally_cont]]
// CHECK: store i32 51, i32* %[[myres]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___finally@@"(i8 0, i8* %[[fp]])
// CHECK-NEXT: ret i32 1
// CHECK: [[g1_lpad]]
// CHECK-NEXT: landingpad
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: invoke void @"\01?fin$1@0@nested___finally___finally@@"(i8 1, i8* %[[fp]])
// CHECK-NEXT: to label %[[finally_cont2:.*]] unwind label %[[g2_lpad]]
@@ -337,7 +337,7 @@ int nested___finally___finally() {
// CHECK: br label %[[ehcleanup]]
// CHECK: [[ehcleanup]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK-NEXT: call void @"\01?fin$0@0@nested___finally___finally@@"(i8 1, i8* %[[fp]])
// CHECK: resume
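
These hunks apply the same llvm.frameaddress to llvm.localaddress substitution to the __leave tests; the control flow being checked is unchanged. For reference, a sketch of what __leave means (illustrative code, requires a Windows triple and -fms-extensions):

int leave_model(void) {
  int r = 0;
  __try {
    r = 1;
    __leave;    // branch to the end of the __try body ("tryleave" above)
    r = 23;     // unreachable, hence the CHECK-NOT: store i32 23
  } __finally {
    r += 100;   // still runs, with abnormal_termination = 0
  }
  return r;     // 101
}
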
diff --git a/test/CodeGen/exceptions-seh.c b/test/CodeGen/exceptions-seh.c
index 1b77ad616278..9707a9a31beb 100644
--- a/test/CodeGen/exceptions-seh.c
+++ b/test/CodeGen/exceptions-seh.c
@@ -1,4 +1,7 @@
-// RUN: %clang_cc1 %s -triple x86_64-pc-win32 -fms-extensions -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple x86_64-pc-win32 -fms-extensions -emit-llvm -o - \
+// RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+// RUN: %clang_cc1 %s -triple i686-pc-win32 -fms-extensions -emit-llvm -o - \
+// RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=X86
void try_body(int numerator, int denominator, int *myres) {
*myres = numerator / denominator;
@@ -19,20 +22,46 @@ int safe_div(int numerator, int denominator, int *res) {
*res = myres;
return success;
}
-// CHECK-LABEL: define i32 @safe_div(i32 %numerator, i32 %denominator, i32* %res) {{.*}} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
-// CHECK: invoke void @try_body(i32 %{{.*}}, i32 %{{.*}}, i32* %{{.*}}) #[[NOINLINE:[0-9]+]]
-// CHECK: to label %{{.*}} unwind label %[[lpad:[^ ]*]]
-//
-// CHECK: [[lpad]]
-// CHECK: landingpad { i8*, i32 }
-// CHECK-NEXT: catch i8* null
-// CHECK-NOT: br i1
-// CHECK: br label %[[except:[^ ]*]]
-// CHECK: [[except]]
-// CHECK-NEXT: store i32 -42, i32* %[[success:[^ ]*]]
-//
-// CHECK: %[[res:[^ ]*]] = load i32, i32* %[[success]]
-// CHECK: ret i32 %[[res]]
+
+// X64-LABEL: define i32 @safe_div(i32 %numerator, i32 %denominator, i32* %res) {{.*}} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+// X64: invoke void @try_body(i32 %{{.*}}, i32 %{{.*}}, i32* %{{.*}}) #[[NOINLINE:[0-9]+]]
+// X64: to label %{{.*}} unwind label %[[lpad:[^ ]*]]
+//
+// X64: [[lpad]]
+// X64: landingpad { i8*, i32 }
+// X64-NEXT: catch i8* null
+// X64-NOT: br i1
+// X64: br label %[[except:[^ ]*]]
+// X64: [[except]]
+// X64: store i32 -42, i32* %[[success:[^ ]*]]
+//
+// X64: %[[res:[^ ]*]] = load i32, i32* %[[success]]
+// X64: ret i32 %[[res]]
+
+// X86-LABEL: define i32 @safe_div(i32 %numerator, i32 %denominator, i32* %res) {{.*}} personality i8* bitcast (i32 (...)* @_except_handler3 to i8*)
+// X86: invoke void @try_body(i32 %{{.*}}, i32 %{{.*}}, i32* %{{.*}}) #[[NOINLINE:[0-9]+]]
+// X86: to label %{{.*}} unwind label %[[lpad:[^ ]*]]
+//
+// X86: [[lpad]]
+// X86: landingpad { i8*, i32 }
+// X86-NEXT: catch i8* bitcast (i32 ()* @"\01?filt$0@0@safe_div@@" to i8*)
+// X86-NOT: br i1
+// X86: br label %[[except:[^ ]*]]
+// X86: [[except]]
+// X86: store i32 -42, i32* %[[success:[^ ]*]]
+//
+// X86: %[[res:[^ ]*]] = load i32, i32* %[[success]]
+// X86: ret i32 %[[res]]
+
+// X86-LABEL: define internal i32 @"\01?filt$0@0@safe_div@@"()
+// X86: %[[ebp:[^ ]*]] = call i8* @llvm.frameaddress(i32 1)
+// X86: %[[fp:[^ ]*]] = call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 (i32, i32, i32*)* @safe_div to i8*), i8* %[[ebp]])
+// X86: call i8* @llvm.localrecover(i8* bitcast (i32 (i32, i32, i32*)* @safe_div to i8*), i8* %[[fp]], i32 0)
+// X86: load i8*, i8**
+// X86: load i32*, i32**
+// X86: load i32, i32*
+// X86: store i32 %{{.*}}, i32*
+// X86: ret i32 1
void j(void);
@@ -46,20 +75,29 @@ int filter_expr_capture(void) {
return r;
}
-// CHECK-LABEL: define i32 @filter_expr_capture() {{.*}} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
-// CHECK: call void (...) @llvm.frameescape(i32* %[[r:[^ ,]*]])
+// CHECK-LABEL: define i32 @filter_expr_capture()
+// X64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+// X86-SAME: personality i8* bitcast (i32 (...)* @_except_handler3 to i8*)
+// X64: call void (...) @llvm.localescape(i32* %[[r:[^ ,]*]])
+// X86: call void (...) @llvm.localescape(i32* %[[r:[^ ,]*]], i32* %[[code:[^ ,]*]])
// CHECK: store i32 42, i32* %[[r]]
// CHECK: invoke void @j() #[[NOINLINE]]
//
// CHECK: landingpad
-// CHECK-NEXT: catch i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@filter_expr_capture@@" to i8*)
+// CHECK-NEXT: catch i8* bitcast (i32 ({{.*}})* @"\01?filt$0@0@filter_expr_capture@@" to i8*)
// CHECK: store i32 13, i32* %[[r]]
//
// CHECK: %[[rv:[^ ]*]] = load i32, i32* %[[r]]
// CHECK: ret i32 %[[rv]]
-// CHECK-LABEL: define internal i32 @"\01?filt$0@0@filter_expr_capture@@"(i8* %exception_pointers, i8* %frame_pointer)
-// CHECK: call i8* @llvm.framerecover(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %frame_pointer, i32 0)
+// X64-LABEL: define internal i32 @"\01?filt$0@0@filter_expr_capture@@"(i8* %exception_pointers, i8* %frame_pointer)
+// X64: call i8* @llvm.localrecover(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %frame_pointer, i32 0)
+//
+// X86-LABEL: define internal i32 @"\01?filt$0@0@filter_expr_capture@@"()
+// X86: %[[ebp:[^ ]*]] = call i8* @llvm.frameaddress(i32 1)
+// X86: %[[fp:[^ ]*]] = call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %[[ebp]])
+// X86: call i8* @llvm.localrecover(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %[[fp]], i32 0)
+//
// CHECK: store i32 -1, i32* %{{.*}}
// CHECK: ret i32 -1
@@ -77,7 +115,9 @@ int nested_try(void) {
}
return r;
}
-// CHECK-LABEL: define i32 @nested_try() {{.*}} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+// CHECK-LABEL: define i32 @nested_try()
+// X64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+// X86-SAME: personality i8* bitcast (i32 (...)* @_except_handler3 to i8*)
// CHECK: store i32 42, i32* %[[r:[^ ,]*]]
// CHECK: invoke void @j() #[[NOINLINE]]
// CHECK: to label %[[cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
@@ -88,18 +128,18 @@ int nested_try(void) {
//
// CHECK: [[lpad]]
// CHECK: landingpad { i8*, i32 }
-// CHECK: catch i8* bitcast (i32 (i8*, i8*)* @"\01?filt$1@0@nested_try@@" to i8*)
-// CHECK: catch i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@nested_try@@" to i8*)
+// CHECK: catch i8* bitcast (i32 ({{.*}})* @"\01?filt$1@0@nested_try@@" to i8*)
+// CHECK: catch i8* bitcast (i32 ({{.*}})* @"\01?filt$0@0@nested_try@@" to i8*)
// CHECK: store i8* %{{.*}}, i8** %[[ehptr_slot:[^ ]*]]
// CHECK: store i32 %{{.*}}, i32* %[[sel_slot:[^ ]*]]
//
// CHECK: load i32, i32* %[[sel_slot]]
-// CHECK: call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @"\01?filt$1@0@nested_try@@" to i8*))
+// CHECK: call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ({{.*}})* @"\01?filt$1@0@nested_try@@" to i8*))
// CHECK: icmp eq i32
// CHECK: br i1
//
// CHECK: load i32, i32* %[[sel_slot]]
-// CHECK: call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@nested_try@@" to i8*))
+// CHECK: call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ({{.*}})* @"\01?filt$0@0@nested_try@@" to i8*))
// CHECK: icmp eq i32
// CHECK: br i1
//
@@ -115,40 +155,55 @@ int nested_try(void) {
//
// CHECK: [[inner_try_cont]]
// CHECK: br label %[[outer_try_cont]]
+//
+// CHECK-LABEL: define internal i32 @"\01?filt$0@0@nested_try@@"({{.*}})
+// X86: call i8* @llvm.x86.seh.recoverfp({{.*}})
+// CHECK: load i32*, i32**
+// CHECK: load i32, i32*
+// CHECK: icmp eq i32 %{{.*}}, 456
+//
+// CHECK-LABEL: define internal i32 @"\01?filt$1@0@nested_try@@"({{.*}})
+// X86: call i8* @llvm.x86.seh.recoverfp({{.*}})
+// CHECK: load i32*, i32**
+// CHECK: load i32, i32*
+// CHECK: icmp eq i32 %{{.*}}, 123
-static unsigned g = 0;
-void basic_finally(void) {
- ++g;
+int basic_finally(int g) {
__try {
j();
} __finally {
- --g;
+ ++g;
}
+ return g;
}
-// CHECK-LABEL: define void @basic_finally() {{.*}} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
-// CHECK: load i32, i32* @g
-// CHECK: add i32 %{{.*}}, 1
-// CHECK: store i32 %{{.*}}, i32* @g
+// CHECK-LABEL: define i32 @basic_finally(i32 %g)
+// X64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+// X86-SAME: personality i8* bitcast (i32 (...)* @_except_handler3 to i8*)
+// CHECK: %[[g_addr:[^ ]*]] = alloca i32, align 4
+// CHECK: call void (...) @llvm.localescape(i32* %[[g_addr]])
+// CHECK: store i32 %g, i32* %[[g_addr]]
//
// CHECK: invoke void @j()
// CHECK: to label %[[cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@basic_finally@@"(i8 0, i8* %[[fp]])
-// CHECK: ret void
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
+// CHECK: load i32, i32* %[[g_addr]], align 4
+// CHECK: ret i32
//
// CHECK: [[lpad]]
// CHECK: landingpad { i8*, i32 }
// CHECK-NEXT: cleanup
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.frameaddress(i32 0)
-// CHECK: call void @"\01?fin$0@0@basic_finally@@"(i8 1, i8* %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
+// CHECK: call void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 1, i8* %[[fp]])
// CHECK: resume
-// CHECK: define internal void @"\01?fin$0@0@basic_finally@@"(i8 %abnormal_termination, i8* %frame_pointer)
-// CHECK: load i32, i32* @g, align 4
-// CHECK: add i32 %{{.*}}, -1
-// CHECK: store i32 %{{.*}}, i32* @g, align 4
+// CHECK: define internal void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} %abnormal_termination, i8* %frame_pointer)
+// CHECK: call i8* @llvm.localrecover(i8* bitcast (i32 (i32)* @basic_finally to i8*), i8* %frame_pointer, i32 0)
+// CHECK: load i32, i32* %{{.*}}, align 4
+// CHECK: add nsw i32 %{{.*}}, 1
+// CHECK: store i32 %{{.*}}, i32* %{{.*}}, align 4
// CHECK: ret void
int returns_int(void);
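
The new X86 prefix pins down the 32-bit SEH lowering: _except_handler3 as the personality, and filters outlined as zero-argument functions that rebuild the parent frame via llvm.frameaddress(i32 1), llvm.x86.seh.recoverfp, and llvm.localrecover before touching captured locals. A hedged source-level sketch of such a filter (illustrative names; assumes -fms-extensions and MS headers):

#include <windows.h>

int filter_capture_model(void) {
  int code = 0;
  __try {
    *(volatile int *)0 = 1;               // fault inside __try
  } __except (code = GetExceptionCode(),  // filter writes a local...
              EXCEPTION_EXECUTE_HANDLER) {
    // ...which, on x86, the outlined filter can only reach after
    // recovering the parent frame pointer as described above.
  }
  return code;
}
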
diff --git a/test/CodeGen/inline.c b/test/CodeGen/inline.c
index a45bccc51328..16e95c03c875 100644
--- a/test/CodeGen/inline.c
+++ b/test/CodeGen/inline.c
@@ -54,6 +54,7 @@
// RUN: echo "MS C Mode tests:"
// RUN: %clang_cc1 %s -triple i386-unknown-unknown -O1 -disable-llvm-optzns -emit-llvm -o - -std=c99 -fms-compatibility | FileCheck %s --check-prefix=CHECK4
+// CHECK4-NOT: define weak_odr void @_Exit(
// CHECK4-LABEL: define weak_odr i32 @ei()
// CHECK4-LABEL: define i32 @bar()
// CHECK4-NOT: unreferenced1
@@ -62,6 +63,9 @@
// CHECK4-LABEL: define linkonce_odr i32 @foo()
// CHECK4-LABEL: define available_externally void @gnu_ei_inline()
+__attribute__((noreturn)) void __cdecl _exit(int _Code);
+__inline void __cdecl _Exit(int status) { _exit(status); }
+
extern __inline int ei() { return 123; }
__inline int foo() {
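
The added CHECK4-NOT line guards the point of this change: under -fms-compatibility a plain __inline definition follows the C++ inline model, so a definition that is never referenced (such as _Exit above) must not be emitted as a weak_odr symbol at all. A minimal illustration of the rule (hypothetical functions, same flags as the CHECK4 RUN line):

// Never referenced: under -fms-compatibility no definition is emitted.
__inline int unused_helper(int x) { return x + x; }

// Non-inline functions still define symbols as usual.
int anchor(void) { return 0; }
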
diff --git a/test/CodeGen/packed-nest-unpacked.c b/test/CodeGen/packed-nest-unpacked.c
index 1dcd2ec468d7..e2bbd41a9daf 100644
--- a/test/CodeGen/packed-nest-unpacked.c
+++ b/test/CodeGen/packed-nest-unpacked.c
@@ -60,6 +60,35 @@ struct YBitfield gbitfield;
unsigned test7() {
// CHECK: @test7
- // CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 4
+ // CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 1
return gbitfield.y.b2;
}
+
+void test8(unsigned x) {
+ // CHECK: @test8
+ // CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 1
+ // CHECK: store i32 {{.*}}, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 1
+ gbitfield.y.b2 = x;
+}
+
+struct TBitfield
+{
+ long a;
+ char b;
+ unsigned c:15;
+};
+struct TBitfield tbitfield;
+
+unsigned test9() {
+ // CHECK: @test9
+ // CHECK: load i16, i16* getelementptr inbounds (%struct.TBitfield, %struct.TBitfield* @tbitfield, i32 0, i32 2), align 1
+ return tbitfield.c;
+}
+
+void test10(unsigned x) {
+ // CHECK: @test10
+ // CHECK: load i16, i16* getelementptr inbounds (%struct.TBitfield, %struct.TBitfield* @tbitfield, i32 0, i32 2), align 1
+ // CHECK: store i16 {{.*}}, i16* getelementptr inbounds (%struct.TBitfield, %struct.TBitfield* @tbitfield, i32 0, i32 2), align 1
+ tbitfield.c = x;
+}
+
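
The new tests lock in align-1 accesses for bitfield storage inside packed layouts: once a struct (or an enclosing struct) is packed, the bitfield's storage unit can land at any byte offset, so its natural alignment cannot be assumed. A small standalone illustration, using a hypothetical struct and assuming a typical ABI:

#include <stdio.h>

struct Packed {
  char c;
  unsigned bits : 15;  // storage unit starts right after the char
} __attribute__((packed));

int main(void) {
  // Commonly prints sizeof=3 alignof=1: the 15-bit field is packed
  // at byte offset 1, so any load of its storage must be align 1.
  printf("sizeof=%zu alignof=%zu\n",
         sizeof(struct Packed), _Alignof(struct Packed));
  return 0;
}
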
diff --git a/test/CodeGen/ppc64-struct-onefloat.c b/test/CodeGen/ppc64-struct-onefloat.c
index d0ccfbe34a44..534e5116f9b0 100644
--- a/test/CodeGen/ppc64-struct-onefloat.c
+++ b/test/CodeGen/ppc64-struct-onefloat.c
@@ -14,15 +14,15 @@ void bar(Sf a, Sd b, SSf d, SSd e) {}
// CHECK: %d = alloca %struct.s4, align 4
// CHECK: %e = alloca %struct.s5, align 8
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %a, i32 0, i32 0
-// CHECK: store float %a.coerce, float* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: store float %a.coerce, float* %{{[a-zA-Z0-9.]+}}, align 4
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %b, i32 0, i32 0
-// CHECK: store double %b.coerce, double* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: store double %b.coerce, double* %{{[a-zA-Z0-9.]+}}, align 8
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4, %struct.s4* %d, i32 0, i32 0
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
-// CHECK: store float %d.coerce, float* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: store float %d.coerce, float* %{{[a-zA-Z0-9.]+}}, align 4
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5, %struct.s5* %e, i32 0, i32 0
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
-// CHECK: store double %e.coerce, double* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: store double %e.coerce, double* %{{[a-zA-Z0-9.]+}}, align 8
// CHECK: ret void
void foo(void)
@@ -36,14 +36,14 @@ void foo(void)
// CHECK-LABEL: define void @foo
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %p1, i32 0, i32 0
-// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 4
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %p2, i32 0, i32 0
-// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 8
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4, %struct.s4* %p4, i32 0, i32 0
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
-// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 4
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5, %struct.s5* %p5, i32 0, i32 0
// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
-// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 1
+// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 8
// CHECK: call void @bar(float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}}, float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}})
// CHECK: ret void
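
The alignment fixes here reflect that the coerced FPR argument is stored back through a correctly typed member pointer, so float members can use align 4 and double members align 8 instead of the old conservative align 1. A sketch of the shapes involved (illustrative types, ppc64 ELF ABI assumed):

typedef struct { float f; } Sf;   // passed in an FPR as a bare float
typedef struct { double d; } Sd;  // passed in an FPR as a bare double

float use(Sf a, Sd b) {
  // The spills and reloads of a.f and b.d can use align 4 / align 8.
  return a.f + (float)b.d;
}
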
diff --git a/test/CodeGen/ppc64le-aggregates.c b/test/CodeGen/ppc64le-aggregates.c
index 76798c132734..3ad4b06c688a 100644
--- a/test/CodeGen/ppc64le-aggregates.c
+++ b/test/CodeGen/ppc64le-aggregates.c
@@ -54,49 +54,49 @@ struct fabc func_fabc(struct fabc x) { return x; }
struct f2a2b func_f2a2b(struct f2a2b x) { return x; }
// CHECK-LABEL: @call_f1
-// CHECK: %[[TMP:[^ ]+]] = load float, float* getelementptr inbounds (%struct.f1, %struct.f1* @global_f1, i32 0, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load float, float* getelementptr inbounds (%struct.f1, %struct.f1* @global_f1, i32 0, i32 0, i32 0), align 4
// CHECK: call [1 x float] @func_f1(float inreg %[[TMP]])
struct f1 global_f1;
void call_f1(void) { global_f1 = func_f1(global_f1); }
// CHECK-LABEL: @call_f2
-// CHECK: %[[TMP:[^ ]+]] = load [2 x float], [2 x float]* getelementptr inbounds (%struct.f2, %struct.f2* @global_f2, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [2 x float], [2 x float]* getelementptr inbounds (%struct.f2, %struct.f2* @global_f2, i32 0, i32 0), align 4
// CHECK: call [2 x float] @func_f2([2 x float] %[[TMP]])
struct f2 global_f2;
void call_f2(void) { global_f2 = func_f2(global_f2); }
// CHECK-LABEL: @call_f3
-// CHECK: %[[TMP:[^ ]+]] = load [3 x float], [3 x float]* getelementptr inbounds (%struct.f3, %struct.f3* @global_f3, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [3 x float], [3 x float]* getelementptr inbounds (%struct.f3, %struct.f3* @global_f3, i32 0, i32 0), align 4
// CHECK: call [3 x float] @func_f3([3 x float] %[[TMP]])
struct f3 global_f3;
void call_f3(void) { global_f3 = func_f3(global_f3); }
// CHECK-LABEL: @call_f4
-// CHECK: %[[TMP:[^ ]+]] = load [4 x float], [4 x float]* getelementptr inbounds (%struct.f4, %struct.f4* @global_f4, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [4 x float], [4 x float]* getelementptr inbounds (%struct.f4, %struct.f4* @global_f4, i32 0, i32 0), align 4
// CHECK: call [4 x float] @func_f4([4 x float] %[[TMP]])
struct f4 global_f4;
void call_f4(void) { global_f4 = func_f4(global_f4); }
// CHECK-LABEL: @call_f5
-// CHECK: %[[TMP:[^ ]+]] = load [5 x float], [5 x float]* getelementptr inbounds (%struct.f5, %struct.f5* @global_f5, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [5 x float], [5 x float]* getelementptr inbounds (%struct.f5, %struct.f5* @global_f5, i32 0, i32 0), align 4
// CHECK: call [5 x float] @func_f5([5 x float] %[[TMP]])
struct f5 global_f5;
void call_f5(void) { global_f5 = func_f5(global_f5); }
// CHECK-LABEL: @call_f6
-// CHECK: %[[TMP:[^ ]+]] = load [6 x float], [6 x float]* getelementptr inbounds (%struct.f6, %struct.f6* @global_f6, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [6 x float], [6 x float]* getelementptr inbounds (%struct.f6, %struct.f6* @global_f6, i32 0, i32 0), align 4
// CHECK: call [6 x float] @func_f6([6 x float] %[[TMP]])
struct f6 global_f6;
void call_f6(void) { global_f6 = func_f6(global_f6); }
// CHECK-LABEL: @call_f7
-// CHECK: %[[TMP:[^ ]+]] = load [7 x float], [7 x float]* getelementptr inbounds (%struct.f7, %struct.f7* @global_f7, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [7 x float], [7 x float]* getelementptr inbounds (%struct.f7, %struct.f7* @global_f7, i32 0, i32 0), align 4
// CHECK: call [7 x float] @func_f7([7 x float] %[[TMP]])
struct f7 global_f7;
void call_f7(void) { global_f7 = func_f7(global_f7); }
// CHECK-LABEL: @call_f8
-// CHECK: %[[TMP:[^ ]+]] = load [8 x float], [8 x float]* getelementptr inbounds (%struct.f8, %struct.f8* @global_f8, i32 0, i32 0), align 1
+// CHECK: %[[TMP:[^ ]+]] = load [8 x float], [8 x float]* getelementptr inbounds (%struct.f8, %struct.f8* @global_f8, i32 0, i32 0), align 4
// CHECK: call [8 x float] @func_f8([8 x float] %[[TMP]])
struct f8 global_f8;
void call_f8(void) { global_f8 = func_f8(global_f8); }
@@ -104,7 +104,7 @@ void call_f8(void) { global_f8 = func_f8(global_f8); }
// CHECK-LABEL: @call_f9
// CHECK: %[[TMP1:[^ ]+]] = alloca [5 x i64]
// CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %[[TMP2]], i8* bitcast (%struct.f9* @global_f9 to i8*), i64 36, i32 1, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %[[TMP2]], i8* bitcast (%struct.f9* @global_f9 to i8*), i64 36, i32 4, i1 false)
// CHECK: %[[TMP3:[^ ]+]] = load [5 x i64], [5 x i64]* %[[TMP1]]
// CHECK: call void @func_f9(%struct.f9* sret %{{[^ ]+}}, [5 x i64] %[[TMP3]])
struct f9 global_f9;
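
The same fix applies at the end of the file: the [5 x i64] copy for struct f9 now uses the struct's real alignment (4) in the memcpy. The surrounding tests encode the ELFv2 homogeneous-aggregate rule: up to eight floats travel in FPRs as [N x float], while nine no longer qualify and are shuffled through GPR-sized words. A compact restatement with illustrative structs (not the test's definitions):

struct eight { float v[8]; };  // homogeneous aggregate: FPRs, [8 x float]
struct nine  { float v[9]; };  // over the limit: passed as [5 x i64] words

float sum8(struct eight x) { return x.v[0] + x.v[7]; }
float sum9(struct nine  x) { return x.v[0] + x.v[8]; }
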