Diffstat (limited to 'test/CodeGen')
38 files changed, 1471 insertions, 229 deletions
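Most of the churn below is mechanical: grep-based RUN lines are replaced with FileCheck, and expected IR is updated now that clang emits a direct return instead of an alloca/store/load sequence (see the first hunk). As a minimal sketch of the test pattern this commit converges on (the function name is illustrative, not part of the commit):

// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s

// (illustrative example, not a file from this commit)
// CHECK: define i32 @answer()
// CHECK: ret i32 42
int answer(void) { return 42; }

FileCheck matches the CHECK lines in order against the compiler's textual IR output, which keeps each assertion next to the code that produces it instead of in a separate grep pipeline.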
diff --git a/test/CodeGen/2008-07-29-override-alias-decl.c b/test/CodeGen/2008-07-29-override-alias-decl.c index a4bea0e06cd8..dbe10b395f4f 100644 --- a/test/CodeGen/2008-07-29-override-alias-decl.c +++ b/test/CodeGen/2008-07-29-override-alias-decl.c @@ -2,10 +2,7 @@ int x() { return 1; } -// CHECK: [[retval:%.*]] = alloca i32 -// CHECK: store i32 1, i32* [[retval]] -// CHECK: [[load:%.*]] = load i32* [[retval]] -// CHECK: ret i32 [[load]] +// CHECK: ret i32 1 int f() __attribute__((weak, alias("x"))); @@ -17,9 +14,6 @@ int h() { return f(); } -// CHECK: [[retval:%.*]] = alloca i32 // CHECK: [[call:%.*]] = call i32 (...)* @f() -// CHECK: store i32 [[call]], i32* [[retval]] -// CHECK: [[load:%.*]] = load i32* [[retval]] -// CHECK: ret i32 [[load]] +// CHECK: ret i32 [[call]] diff --git a/test/CodeGen/2008-12-02-logical-or-fold.c b/test/CodeGen/2008-12-02-logical-or-fold.c deleted file mode 100644 index 167ad299ce61..000000000000 --- a/test/CodeGen/2008-12-02-logical-or-fold.c +++ /dev/null @@ -1,4 +0,0 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | grep "store i32 1" -// PR3150 - -int a() {return 1||1;} diff --git a/test/CodeGen/address-space-field2.c b/test/CodeGen/address-space-field2.c index 198fd22a3a74..9c21cab3a566 100644 --- a/test/CodeGen/address-space-field2.c +++ b/test/CodeGen/address-space-field2.c @@ -16,10 +16,6 @@ // CHECK: addrspace(1) // CHECK: addrspace(1) // CHECK: addrspace(1) -// CHECK: addrspace(1) -// CHECK: addrspace(1) -// CHECK: addrspace(2) -// CHECK: addrspace(2) // CHECK: addrspace(2) // CHECK: addrspace(2) // CHECK: addrspace(2) diff --git a/test/CodeGen/address-space-field3.c b/test/CodeGen/address-space-field3.c index 090f4a104b05..c17085cdf48b 100644 --- a/test/CodeGen/address-space-field3.c +++ b/test/CodeGen/address-space-field3.c @@ -16,10 +16,6 @@ // CHECK: addrspace(2) // CHECK: addrspace(2) // CHECK: addrspace(2) -// CHECK: addrspace(2) -// CHECK: addrspace(2) -// CHECK: addrspace(1) -// CHECK: addrspace(1) // CHECK: addrspace(1) // CHECK: addrspace(1) // CHECK: addrspace(1) diff --git a/test/CodeGen/address-space-field4.c b/test/CodeGen/address-space-field4.c index a1906c0c0059..a896ab652d98 100644 --- a/test/CodeGen/address-space-field4.c +++ b/test/CodeGen/address-space-field4.c @@ -23,9 +23,6 @@ // CHECK: addrspace(3) // CHECK: addrspace(3) // CHECK: addrspace(1) -// CHECK: addrspace(3) -// CHECK: addrspace(3) -// CHECK: addrspace(1) // CHECK: addrspace(1) // CHECK: addrspace(1) // CHECK: addrspace(1) @@ -35,9 +32,6 @@ // CHECK: addrspace(1) // CHECK: addrspace(1) // CHECK: addrspace(2) -// CHECK: addrspace(1) -// CHECK: addrspace(2) -// CHECK: addrspace(2) // CHECK: addrspace(2) // Check the load and store are using the correct address space to access diff --git a/test/CodeGen/address-space.c b/test/CodeGen/address-space.c index 5b5891955714..04f88dc20a1a 100644 --- a/test/CodeGen/address-space.c +++ b/test/CodeGen/address-space.c @@ -1,20 +1,44 @@ -// RUN: %clang_cc1 -emit-llvm < %s | grep '@foo.*global.*addrspace(1)' -// RUN: %clang_cc1 -emit-llvm < %s | grep '@ban.*global.*addrspace(1)' -// RUN: %clang_cc1 -emit-llvm < %s | grep 'load.*addrspace(1)' | count 2 +// RUN: %clang_cc1 -emit-llvm < %s | FileCheck %s // RUN: %clang_cc1 -emit-llvm < %s | grep 'load.*addrspace(2).. @A' // RUN: %clang_cc1 -emit-llvm < %s | grep 'load.*addrspace(2).. 
@B' + +// CHECK: @foo = common addrspace(1) global int foo __attribute__((address_space(1))); + +// CHECK: @ban = common addrspace(1) global int ban[10] __attribute__((address_space(1))); -int bar() { return foo; } +// CHECK: define i32 @test1() +// CHECK: load i32 addrspace(1)* @foo +int test1() { return foo; } -int baz(int i) { return ban[i]; } +// CHECK: define i32 @test2(i32 %i) +// CHECK: load i32 addrspace(1)* +// CHECK-NEXT: ret i32 +int test2(int i) { return ban[i]; } // Both A and B point into addrspace(2). __attribute__((address_space(2))) int *A, *B; +// CHECK: define void @test3() +// CHECK: load i32 addrspace(2)** @B +// CHECK: load i32 addrspace(2)* +// CHECK: load i32 addrspace(2)** @A +// CHECK: store i32 {{.*}}, i32 addrspace(2)* void test3() { *A = *B; } +// PR7437 +typedef struct { + float aData[1]; +} MyStruct; + +// CHECK: define void @test4( +// CHECK: call void @llvm.memcpy.p0i8.p2i8 +// CHECK: call void @llvm.memcpy.p2i8.p0i8 +void test4(MyStruct __attribute__((address_space(2))) *pPtr) { + MyStruct s = pPtr[0]; + pPtr[0] = s; +} diff --git a/test/CodeGen/altivec.c b/test/CodeGen/altivec.c new file mode 100644 index 000000000000..9e38df50930c --- /dev/null +++ b/test/CodeGen/altivec.c @@ -0,0 +1,4 @@ +// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s + +// CHECK: @test0 = global <4 x i32> <i32 1, i32 1, i32 1, i32 1> +vector int test0 = (vector int)(1); diff --git a/test/CodeGen/arm-arguments.c b/test/CodeGen/arm-arguments.c index 72fd7c3f8b71..73bc03dac7f5 100644 --- a/test/CodeGen/arm-arguments.c +++ b/test/CodeGen/arm-arguments.c @@ -1,131 +1,131 @@ // RUN: %clang_cc1 -triple armv7-apple-darwin9 -target-abi apcs-gnu -emit-llvm -w -o - %s | FileCheck -check-prefix=APCS-GNU %s // RUN: %clang_cc1 -triple armv7-apple-darwin9 -target-abi aapcs -emit-llvm -w -o - %s | FileCheck -check-prefix=AAPCS %s -// APCS-GNU: define arm_apcscc signext i8 @f0() +// APCS-GNU: define signext i8 @f0() // AAPCS: define arm_aapcscc signext i8 @f0() char f0(void) { return 0; } -// APCS-GNU: define arm_apcscc i8 @f1() +// APCS-GNU: define i8 @f1() // AAPCS: define arm_aapcscc i8 @f1() struct s1 { char f0; }; struct s1 f1(void) {} -// APCS-GNU: define arm_apcscc i16 @f2() +// APCS-GNU: define i16 @f2() // AAPCS: define arm_aapcscc i16 @f2() struct s2 { short f0; }; struct s2 f2(void) {} -// APCS-GNU: define arm_apcscc i32 @f3() +// APCS-GNU: define i32 @f3() // AAPCS: define arm_aapcscc i32 @f3() struct s3 { int f0; }; struct s3 f3(void) {} -// APCS-GNU: define arm_apcscc i32 @f4() +// APCS-GNU: define i32 @f4() // AAPCS: define arm_aapcscc i32 @f4() struct s4 { struct s4_0 { int f0; } f0; }; struct s4 f4(void) {} -// APCS-GNU: define arm_apcscc void @f5( +// APCS-GNU: define void @f5( // APCS-GNU: struct.s5* sret // AAPCS: define arm_aapcscc i32 @f5() struct s5 { struct { } f0; int f1; }; struct s5 f5(void) {} -// APCS-GNU: define arm_apcscc void @f6( +// APCS-GNU: define void @f6( // APCS-GNU: struct.s6* sret // AAPCS: define arm_aapcscc i32 @f6() struct s6 { int f0[1]; }; struct s6 f6(void) {} -// APCS-GNU: define arm_apcscc void @f7() +// APCS-GNU: define void @f7() // AAPCS: define arm_aapcscc void @f7() struct s7 { struct { int : 0; } f0; }; struct s7 f7(void) {} -// APCS-GNU: define arm_apcscc void @f8( +// APCS-GNU: define void @f8( // APCS-GNU: struct.s8* sret // AAPCS: define arm_aapcscc void @f8() struct s8 { struct { int : 0; } f0[1]; }; struct s8 f8(void) {} -// APCS-GNU: define arm_apcscc i32 @f9() +// APCS-GNU: define i32 @f9() // 
AAPCS: define arm_aapcscc i32 @f9() struct s9 { int f0; int : 0; }; struct s9 f9(void) {} -// APCS-GNU: define arm_apcscc i32 @f10() +// APCS-GNU: define i32 @f10() // AAPCS: define arm_aapcscc i32 @f10() struct s10 { int f0; int : 0; int : 0; }; struct s10 f10(void) {} -// APCS-GNU: define arm_apcscc void @f11( +// APCS-GNU: define void @f11( // APCS-GNU: struct.s10* sret // AAPCS: define arm_aapcscc i32 @f11() struct s11 { int : 0; int f0; }; struct s11 f11(void) {} -// APCS-GNU: define arm_apcscc i32 @f12() +// APCS-GNU: define i32 @f12() // AAPCS: define arm_aapcscc i32 @f12() union u12 { char f0; short f1; int f2; }; union u12 f12(void) {} -// APCS-GNU: define arm_apcscc void @f13( +// APCS-GNU: define void @f13( // APCS-GNU: struct.s13* sret // FIXME: This should return a float. -// AAPCS-FIXME: define arm_aapcscc float @f13() +// AAPCS-FIXME: darm_aapcscc efine float @f13() struct s13 { float f0; }; struct s13 f13(void) {} -// APCS-GNU: define arm_apcscc void @f14( +// APCS-GNU: define void @f14( // APCS-GNU: struct.s13* sret // AAPCS: define arm_aapcscc i32 @f14() union u14 { float f0; }; union u14 f14(void) {} -// APCS-GNU: define arm_apcscc void @f15() +// APCS-GNU: define void @f15() // AAPCS: define arm_aapcscc void @f15() void f15(struct s7 a0) {} -// APCS-GNU: define arm_apcscc void @f16() +// APCS-GNU: define void @f16() // AAPCS: define arm_aapcscc void @f16() void f16(struct s8 a0) {} -// APCS-GNU: define arm_apcscc i32 @f17() +// APCS-GNU: define i32 @f17() // AAPCS: define arm_aapcscc i32 @f17() struct s17 { short f0 : 13; char f1 : 4; }; struct s17 f17(void) {} -// APCS-GNU: define arm_apcscc i32 @f18() +// APCS-GNU: define i32 @f18() // AAPCS: define arm_aapcscc i32 @f18() struct s18 { short f0; char f1 : 4; }; struct s18 f18(void) {} -// APCS-GNU: define arm_apcscc void @f19( +// APCS-GNU: define void @f19( // APCS-GNU: struct.s19* sret // AAPCS: define arm_aapcscc i32 @f19() struct s19 { int f0; struct s8 f1; }; struct s19 f19(void) {} -// APCS-GNU: define arm_apcscc void @f20( +// APCS-GNU: define void @f20( // APCS-GNU: struct.s20* sret // AAPCS: define arm_aapcscc i32 @f20() struct s20 { struct s8 f1; int f0; }; struct s20 f20(void) {} -// APCS-GNU: define arm_apcscc i8 @f21() +// APCS-GNU: define i8 @f21() // AAPCS: define arm_aapcscc i32 @f21() struct s21 { struct {} f1; int f0 : 4; }; struct s21 f21(void) {} -// APCS-GNU: define arm_apcscc i16 @f22() -// APCS-GNU: define arm_apcscc i32 @f23() -// APCS-GNU: define arm_apcscc i64 @f24() -// APCS-GNU: define arm_apcscc i128 @f25() -// APCS-GNU: define arm_apcscc i64 @f26() -// APCS-GNU: define arm_apcscc i128 @f27() +// APCS-GNU: define i16 @f22() +// APCS-GNU: define i32 @f23() +// APCS-GNU: define i64 @f24() +// APCS-GNU: define i128 @f25() +// APCS-GNU: define i64 @f26() +// APCS-GNU: define i128 @f27() // AAPCS: define arm_aapcscc i16 @f22() // AAPCS: define arm_aapcscc i32 @f23() // AAPCS: define arm_aapcscc void @f24({{.*}} sret @@ -139,17 +139,17 @@ _Complex long long f25(void) {} _Complex float f26(void) {} _Complex double f27(void) {} -// APCS-GNU: define arm_apcscc i16 @f28() +// APCS-GNU: define i16 @f28() // AAPCS: define arm_aapcscc i16 @f28() struct s28 { _Complex char f0; }; struct s28 f28() {} -// APCS-GNU: define arm_apcscc i32 @f29() +// APCS-GNU: define i32 @f29() // AAPCS: define arm_aapcscc i32 @f29() struct s29 { _Complex short f0; }; struct s29 f29() {} -// APCS-GNU: define arm_apcscc void @f30({{.*}} sret +// APCS-GNU: define void @f30({{.*}} sret // AAPCS: define arm_aapcscc void 
@f30({{.*}} sret struct s30 { _Complex int f0; }; struct s30 f30() {} diff --git a/test/CodeGen/arm-cc.c b/test/CodeGen/arm-cc.c new file mode 100644 index 000000000000..74eecc755f75 --- /dev/null +++ b/test/CodeGen/arm-cc.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple armv7-apple-darwin9 -target-abi apcs-gnu -emit-llvm -w -o - %s | FileCheck -check-prefix=DARWIN-APCS %s +// RUN: %clang_cc1 -triple armv7-apple-darwin9 -target-abi aapcs -emit-llvm -w -o - %s | FileCheck -check-prefix=DARWIN-AAPCS %s +// RUN: %clang_cc1 -triple arm-none-linux-gnueabi -target-abi apcs-gnu -emit-llvm -w -o - %s | FileCheck -check-prefix=LINUX-APCS %s +// RUN: %clang_cc1 -triple arm-none-linux-gnueabi -target-abi aapcs -emit-llvm -w -o - %s | FileCheck -check-prefix=LINUX-AAPCS %s + + +// DARWIN-APCS: define void @f() +// DARWIN-APCS: call void @g +// DARWIN-AAPCS: define arm_aapcscc void @f() +// DARWIN-AAPCS: call arm_aapcscc void @g +// LINUX-APCS: define arm_apcscc void @f() +// LINUX-APCS: call arm_apcscc void @g +// LINUX-AAPCS: define void @f() +// LINUX-AAPCS: call void @g +void g(void); +void f(void) { + g(); +} diff --git a/test/CodeGen/assign.c b/test/CodeGen/assign.c new file mode 100644 index 000000000000..eab3d357692d --- /dev/null +++ b/test/CodeGen/assign.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64 -emit-llvm -o - %s | FileCheck %s + +// Check that we don't generate unnecessary reloads. +// +// CHECK: define void @f0() +// CHECK: [[x_0:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[y_0:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 1, i32* [[x_0]] +// CHECK-NEXT: store i32 1, i32* [[x_0]] +// CHECK-NEXT: store i32 1, i32* [[y_0]] +// CHECK: } +void f0() { + int x, y; + x = 1; + y = (x = 1); +} + +// Check that we do generate reloads for volatile access. +// +// CHECK: define void @f1() +// CHECK: [[x_1:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[y_1:%.*]] = alloca i32, align 4 +// CHECK-NEXT: volatile store i32 1, i32* [[x_1]] +// CHECK-NEXT: volatile store i32 1, i32* [[x_1]] +// CHECK-NEXT: [[tmp_1:%.*]] = volatile load i32* [[x_1]] +// CHECK-NEXT: volatile store i32 [[tmp_1]], i32* [[y_1]] +// CHECK: } +void f1() { + volatile int x, y; + x = 1; + y = (x = 1); +} diff --git a/test/CodeGen/available-externally-suppress.c b/test/CodeGen/available-externally-suppress.c new file mode 100644 index 000000000000..c3b7a213baf6 --- /dev/null +++ b/test/CodeGen/available-externally-suppress.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -emit-llvm -o - -O0 -triple x86_64-apple-darwin10 %s | FileCheck %s + +// Ensure that we don't emit available_externally functions at -O0. 
+int x; + +inline void f0(int y) { x = y; } + +// CHECK: define void @test() +// CHECK: declare void @f0(i32) +void test() { + f0(17); +} diff --git a/test/CodeGen/blocks-aligned-byref-variable.c b/test/CodeGen/blocks-aligned-byref-variable.c index 79ac41dcd5d5..07d683c3526e 100644 --- a/test/CodeGen/blocks-aligned-byref-variable.c +++ b/test/CodeGen/blocks-aligned-byref-variable.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -emit-llvm -o - -triple x86_64-apple-darwin10 -// RUN: %clang_cc1 -emit-llvm -o - -triple i386-apple-darwin10 +// RUN: %clang_cc1 -emit-llvm -o - -triple x86_64-apple-darwin10 -fblocks %s +// RUN: %clang_cc1 -emit-llvm -o - -triple i386-apple-darwin10 -fblocks %s typedef int __attribute__((aligned(32))) ai; void f() { diff --git a/test/CodeGen/blocks.c b/test/CodeGen/blocks.c index a0f5dae6f44d..6888356a5a1d 100644 --- a/test/CodeGen/blocks.c +++ b/test/CodeGen/blocks.c @@ -27,3 +27,9 @@ void (^test1)(void) = ^(void) { ^ { i = 1; }(); }; +typedef double ftype(double); +// It's not clear that we *should* support this syntax, but until that decision +// is made, we should support it properly and not crash. +ftype ^test2 = ^ftype { + return 0; +}; diff --git a/test/CodeGen/builtin-attributes.c b/test/CodeGen/builtin-attributes.c index 944aac3f521f..afde3fab8481 100644 --- a/test/CodeGen/builtin-attributes.c +++ b/test/CodeGen/builtin-attributes.c @@ -1,11 +1,11 @@ -// RUN: %clang_cc1 -triple arm-unknown-unknown -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple arm-unknown-linux-gnueabi -emit-llvm -o - %s | FileCheck %s -// CHECK: declare arm_aapcscc i32 @printf(i8*, ...) +// CHECK: declare i32 @printf(i8*, ...) void f0() { printf("a\n"); } -// CHECK: call arm_aapcscc void @exit +// CHECK: call void @exit // CHECK: unreachable void f1() { exit(1); diff --git a/test/CodeGen/builtins-arm.c b/test/CodeGen/builtins-arm.c index 555375754959..546f57a4a18a 100644 --- a/test/CodeGen/builtins-arm.c +++ b/test/CodeGen/builtins-arm.c @@ -1,6 +1,12 @@ -// RUN: %clang_cc1 -triple thumbv7-eabi -target-cpu cortex-a8 -O3 -emit-llvm -o %t %s +// RUN: %clang_cc1 -Wall -Werror -triple thumbv7-eabi -target-cpu cortex-a8 -O3 -emit-llvm -o - %s | FileCheck %s void *f0() { return __builtin_thread_pointer(); } + +void f1(char *a, char *b) { + __clear_cache(a,b); +} + +// CHECK: call void @__clear_cache diff --git a/test/CodeGen/builtins-ppc-altivec.c b/test/CodeGen/builtins-ppc-altivec.c index 04249cc1ee70..6f65866ae56a 100644 --- a/test/CodeGen/builtins-ppc-altivec.c +++ b/test/CodeGen/builtins-ppc-altivec.c @@ -1,191 +1,1099 @@ // RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s -#include "altivec.h" - -int main () -{ - vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 }; - vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; - vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 }; - vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 }; - vector int vi = { -1, 2, -3, 4 }; - vector unsigned int vui = { 1, 2, 3, 4 }; - vector float vf = { -1.5, 2.5, -3.5, 4.5 }; - - vector signed char res_vsc; - vector unsigned char res_vuc; - vector short res_vs; - vector unsigned short res_vus; - vector int res_vi; - vector unsigned int res_vui; - vector float res_vf; - - int param_i; - int res_i; +// TODO: uncomment +/* vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }; */ +vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 }; +vector 
unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; +// TODO: uncomment +/* vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 }; */ +vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 }; +vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 }; +// TODO: uncomment +/* vector bool int vbi = { 1, 0, 1, 0 }; */ +vector int vi = { -1, 2, -3, 4 }; +vector unsigned int vui = { 1, 2, 3, 4 }; +vector float vf = { -1.5, 2.5, -3.5, 4.5 }; + +// TODO: uncomment +/* vector bool char res_vbc; */ +vector signed char res_vsc; +vector unsigned char res_vuc; +// TODO: uncomment +/* vector bool short res_vbs; */ +vector short res_vs; +vector unsigned short res_vus; +// TODO: uncomment +vector pixel res_vp; +// TODO: uncomment +/* vector bool int res_vbi; */ +vector int res_vi; +vector unsigned int res_vui; +vector float res_vf; + +signed char param_sc; +unsigned char param_uc; +short param_s; +unsigned short param_us; +int param_i; +unsigned int param_ui; +float param_f; + +int res_i; + +int test1() { +// CHECK: define i32 @test1 /* vec_abs */ - vsc = vec_abs(vsc); // CHECK: sub <16 x i8> zeroinitializer - // CHECK: @llvm.ppc.altivec.vmaxsb + vsc = vec_abs(vsc); // CHECK: sub nsw <16 x i8> zeroinitializer + // CHECK: @llvm.ppc.altivec.vmaxsb - vs = __builtin_vec_abs(vs); // CHECK: sub <8 x i16> zeroinitializer - // CHECK: @llvm.ppc.altivec.vmaxsh + vs = vec_abs(vs); // CHECK: sub nsw <8 x i16> zeroinitializer + // CHECK: @llvm.ppc.altivec.vmaxsh - vi = vec_abs(vi); // CHECK: sub <4 x i32> zeroinitializer - // CHECK: @llvm.ppc.altivec.vmaxsw + vi = vec_abs(vi); // CHECK: sub nsw <4 x i32> zeroinitializer + // CHECK: @llvm.ppc.altivec.vmaxsw - vf = vec_abs(vf); // CHECK: store <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647> - // CHECK: and <4 x i32> + vf = vec_abs(vf); // CHECK: and <4 x i32> /* vec_abs */ - vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs - // CHECK: @llvm.ppc.altivec.vmaxsb + vsc = vec_abss(vsc); // CHECK: @llvm.ppc.altivec.vsubsbs + // CHECK: @llvm.ppc.altivec.vmaxsb - vs = __builtin_vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs - // CHECK: @llvm.ppc.altivec.vmaxsh + vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs + // CHECK: @llvm.ppc.altivec.vmaxsh - vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws - // CHECK: @llvm.ppc.altivec.vmaxsw + vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws + // CHECK: @llvm.ppc.altivec.vmaxsw /* vec_add */ - res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8> + res_vsc = vec_add(vsc, vsc); // CHECK: add nsw <16 x i8> + res_vuc = vec_add(vuc, vuc); // CHECK: add <16 x i8> + res_vs = vec_add(vs, vs); // CHECK: add nsw <8 x i16> + res_vus = vec_add(vus, vus); // CHECK: add <8 x i16> + res_vi = vec_add(vi, vi); // CHECK: add nsw <4 x i32> + res_vui = vec_add(vui, vui); // CHECK: add <4 x i32> + res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float> + res_vsc = vec_vaddubm(vsc, vsc); // CHECK: add nsw <16 x i8> res_vuc = vec_vaddubm(vuc, vuc); // CHECK: add <16 x i8> - res_vs = __builtin_altivec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16> + res_vs = vec_vadduhm(vs, vs); // CHECK: add nsw <8 x i16> res_vus = vec_vadduhm(vus, vus); // CHECK: add <8 x i16> - res_vi = __builtin_vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32> + res_vi = vec_vadduwm(vi, vi); // CHECK: add nsw <4 x i32> res_vui = vec_vadduwm(vui, vui); // CHECK: add <4 x i32> - res_vf = __builtin_vec_vaddfp(vf, vf); // CHECK: fadd <4 x float> + res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float> /* vec_addc */ + 
res_vui = vec_addc(vui, vui); // HECK: @llvm.ppc.altivec.vaddcuw res_vui = vec_vaddcuw(vui, vui); // HECK: @llvm.ppc.altivec.vaddcuw /* vec_adds */ - res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs + res_vsc = vec_adds(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs + res_vuc = vec_adds(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs + res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs + res_vus = vec_adds(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs + res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws + res_vui = vec_adds(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws + res_vsc = vec_vaddsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vaddsbs res_vuc = vec_vaddubs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vaddubs - res_vs = __builtin_vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs + res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs res_vus = vec_vadduhs(vus, vus); // CHECK: @llvm.ppc.altivec.vadduhs - res_vi = __builtin_vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws + res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws res_vui = vec_vadduws(vui, vui); // CHECK: @llvm.ppc.altivec.vadduws - /* vec_sub */ - res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8> - res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8> - res_vs = __builtin_altivec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16> - res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16> - res_vi = __builtin_vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32> - res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32> - res_vf = __builtin_vec_vsubfp(vf, vf); // CHECK: fsub <4 x float> + /* vec_and */ + res_vsc = vec_and(vsc, vsc); // CHECK: and <16 x i8> + res_vuc = vec_and(vuc, vuc); // CHECK: and <16 x i8> + res_vs = vec_and(vs, vs); // CHECK: and <8 x i16> + res_vus = vec_and(vus, vus); // CHECK: and <8 x i16> + res_vi = vec_and(vi, vi); // CHECK: and <4 x i32> + res_vui = vec_and(vui, vui); // CHECK: and <4 x i32> + res_vsc = vec_vand(vsc, vsc); // CHECK: and <16 x i8> + res_vuc = vec_vand(vuc, vuc); // CHECK: and <16 x i8> + res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16> + res_vus = vec_vand(vus, vus); // CHECK: and <8 x i16> + res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32> + res_vui = vec_vand(vui, vui); // CHECK: and <4 x i32> - /* vec_subs */ - res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs - res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs - res_vs = __builtin_vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs - res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs - res_vi = __builtin_vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws - res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws + /* vec_andc */ + res_vsc = vec_andc(vsc, vsc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + + res_vuc = vec_andc(vuc, vuc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + + res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + + res_vus = vec_andc(vus, vus); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + + res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + + res_vui = vec_andc(vui, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + + res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + + res_vsc = vec_vandc(vsc, vsc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + + res_vuc = vec_vandc(vuc, vuc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + res_vs = 
vec_vandc(vs, vs); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + + res_vus = vec_vandc(vus, vus); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + + res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + + res_vui = vec_vandc(vui, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + + res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> +} + +// CHECK: i32 @test2 +int test2() { /* vec_avg */ - res_vsc = vec_avg(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb - res_vuc = __builtin_vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub + res_vsc = vec_avg(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vavgsb + res_vuc = vec_avg(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub + res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh + res_vus = vec_avg(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh + res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw + res_vui = vec_avg(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw + res_vsc = vec_vavgsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vavgsb + res_vuc = vec_vavgub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vavgub res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh - res_vus = __builtin_vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh + res_vus = vec_vavguh(vus, vus); // CHECK: @llvm.ppc.altivec.vavguh res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw - res_vui = __builtin_vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw + res_vui = vec_vavguw(vui, vui); // CHECK: @llvm.ppc.altivec.vavguw - /* vec_st */ - param_i = 5; - vec_st(vsc, 0, &res_vsc); // CHECK: @llvm.ppc.altivec.stvx - __builtin_vec_st(vuc, param_i, &res_vuc); // CHECK: @llvm.ppc.altivec.stvx - vec_stvx(vs, 1, &res_vs); // CHECK: @llvm.ppc.altivec.stvx - vec_st(vus, 1000, &res_vus); // CHECK: @llvm.ppc.altivec.stvx - vec_st(vi, 0, &res_vi); // CHECK: @llvm.ppc.altivec.stvx - vec_st(vui, 0, &res_vui); // CHECK: @llvm.ppc.altivec.stvx - vec_st(vf, 0, &res_vf); // CHECK: @llvm.ppc.altivec.stvx - - /* vec_stl */ - param_i = 10000; - vec_stl(vsc, param_i, &res_vsc); // CHECK: @llvm.ppc.altivec.stvxl - __builtin_vec_stl(vuc, 1, &res_vuc); // CHECK: @llvm.ppc.altivec.stvxl - vec_stvxl(vs, 0, &res_vs); // CHECK: @llvm.ppc.altivec.stvxl - vec_stl(vus, 0, &res_vus); // CHECK: @llvm.ppc.altivec.stvxl - vec_stl(vi, 0, &res_vi); // CHECK: @llvm.ppc.altivec.stvxl - vec_stl(vui, 0, &res_vui); // CHECK: @llvm.ppc.altivec.stvxl - vec_stl(vf, 0, &res_vf); // CHECK: @llvm.ppc.altivec.stvxl - - /* vec_ste */ - param_i = 10000; - vec_ste(vsc, param_i, &res_vsc); // CHECK: @llvm.ppc.altivec.stvebx - vec_stvebx(vuc, 1, &res_vuc); // CHECK: @llvm.ppc.altivec.stvebx - __builtin_vec_stvehx(vs, 0, &res_vs); // CHECK: @llvm.ppc.altivec.stvehx - vec_stvehx(vus, 0, &res_vus); // CHECK: @llvm.ppc.altivec.stvehx - vec_stvewx(vi, 0, &res_vi); // CHECK: @llvm.ppc.altivec.stvewx - __builtin_vec_stvewx(vui, 0, &res_vui); // CHECK: @llvm.ppc.altivec.stvewx - vec_stvewx(vf, 0, &res_vf); // CHECK: @llvm.ppc.altivec.stvewx + /* vec_ceil */ + res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip + res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip /* vec_cmpb */ + res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp /* vec_cmpeq */ - res_vi = vec_cmpeq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb - res_vi = __builtin_vec_cmpeq(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpequb - res_vi = vec_cmpeq(vs, vs); // CHECK: 
@llvm.ppc.altivec.vcmpequh - res_vi = vec_cmpeq(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpequh - res_vi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw - res_vi = vec_cmpeq(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpequw - res_vi = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp + vsc = vec_cmpeq(vsc, vsc); // CHCK: call {{.*}}@llvm.ppc.altivec.vcmpequb + vuc = vec_cmpeq(vuc, vuc); // CHCK: @llvm.ppc.altivec.vcmpequb + vs = vec_cmpeq(vs, vs); // CHCK: @llvm.ppc.altivec.vcmpequh + vs = vec_cmpeq(vus, vus); // CHCK: @llvm.ppc.altivec.vcmpequh + vi = vec_cmpeq(vi, vi); // CHCK: @llvm.ppc.altivec.vcmpequw + vui = vec_cmpeq(vui, vui); // CHCK: @llvm.ppc.altivec.vcmpequw + vf = vec_cmpeq(vf, vf); // CHCK: @llvm.ppc.altivec.vcmpeqfp /* vec_cmpge */ - res_vi = __builtin_vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp + vf = vec_cmpge(vf, vf); // CHCK: @llvm.ppc.altivec.vcmpgefp + vf = vec_vcmpgefp(vf, vf); // CHCK: call {{.*}}@llvm.ppc.altivec.vcmpgefp + +} +// CHECK: define i32 @test5 +int test5() { + /* vec_cmpgt */ - res_vi = vec_cmpgt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb - res_vi = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub - res_vi = __builtin_vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh - res_vi = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh - res_vi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw - res_vi = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw - res_vi = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp + vsc = vec_cmpgt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb + vuc = vec_cmpgt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub + vs = vec_cmpgt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh + vus = vec_cmpgt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh + vi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw + vui = vec_cmpgt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw + vf = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp + vsc = vec_vcmpgtsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb + vuc = vec_vcmpgtub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub + vs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh + vus = vec_vcmpgtuh(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh + vi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw + vui = vec_vcmpgtuw(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw + vf = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp /* vec_cmple */ - res_vi = __builtin_vec_cmple(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp + vf = vec_cmple(vf, vf); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp +} +// CHECK: define i32 @test6 +int test6() { /* vec_cmplt */ - res_vi = vec_cmplt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb - res_vi = __builtin_vec_cmplt(vuc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub - res_vi = vec_cmplt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh - res_vi = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh - res_vi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw - res_vi = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw - res_vi = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp + vsc =vec_cmplt(vsc, vsc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb + vsc =vec_cmplt(vuc, vuc); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub + vs = vec_cmplt(vs, vs); // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh + vs = vec_cmplt(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh + vi = vec_cmplt(vi, vi); // CHECK: 
@llvm.ppc.altivec.vcmpgtsw +  vui = vec_cmplt(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw +  vf = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp + +  /* vec_ctf */ +  res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx +  res_vf = vec_ctf(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux +  res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx +  res_vf = vec_vcfux(vui, 0); // CHECK: @llvm.ppc.altivec.vcfux + +  /* vec_cts */ +  res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs +  res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs + +  /* vec_ctu */ +  res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs +  res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs + +  /* vec_dss */ +  vec_dss(param_i); // CHECK: @llvm.ppc.altivec.dss + +  /* vec_dssall */ +  vec_dssall(); // CHECK: @llvm.ppc.altivec.dssall + +  /* vec_dst */ +  vec_dst(&vsc, 0, 0); // CHECK: @llvm.ppc.altivec.dst + +  /* vec_dstst */ +  vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst + +  /* vec_dststt */ +  vec_dststt(&param_i, 0, 0); // CHECK: @llvm.ppc.altivec.dststt + +  /* vec_dstt */ +  vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt + +  /* vec_expte */ +  res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp +  res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp + +  /* vec_floor */ +  res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim +  res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim + +  /* vec_ld */ +  res_vsc = vec_ld(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx +  res_vsc = vec_ld(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx +  res_vuc = vec_ld(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx +  res_vuc = vec_ld(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx +  res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx +  res_vs = vec_ld(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx +  res_vus = vec_ld(0, &vus); // CHECK: @llvm.ppc.altivec.lvx +  res_vus = vec_ld(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx +  res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx +  res_vi = vec_ld(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx +  res_vui = vec_ld(0, &vui); // CHECK: @llvm.ppc.altivec.lvx +  res_vui = vec_ld(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx +  res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx +  res_vf = vec_ld(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx +  res_vsc = vec_lvx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvx +  res_vsc = vec_lvx(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvx +  res_vuc = vec_lvx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvx +  res_vuc = vec_lvx(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvx +  res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx +  res_vs = vec_lvx(0, &param_s); // CHECK: @llvm.ppc.altivec.lvx +  res_vus = vec_lvx(0, &vus); // CHECK: @llvm.ppc.altivec.lvx +  res_vus = vec_lvx(0, &param_us); // CHECK: @llvm.ppc.altivec.lvx +  res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx +  res_vi = vec_lvx(0, &param_i); // CHECK: @llvm.ppc.altivec.lvx +  res_vui = vec_lvx(0, &vui); // CHECK: @llvm.ppc.altivec.lvx +  res_vui = vec_lvx(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvx +  res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx +  res_vf = vec_lvx(0, &param_f); // CHECK: @llvm.ppc.altivec.lvx + +  /* vec_lde */ +  res_vsc = vec_lde(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx +  res_vuc = vec_lde(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx +  res_vs = vec_lde(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx +  res_vus = vec_lde(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx +  res_vi = vec_lde(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx +  res_vui = vec_lde(0, &vui); 
// CHECK: @llvm.ppc.altivec.lvewx +  res_vf = vec_lde(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx +  res_vsc = vec_lvebx(0, &vsc); // CHECK: @llvm.ppc.altivec.lvebx +  res_vuc = vec_lvebx(0, &vuc); // CHECK: @llvm.ppc.altivec.lvebx +  res_vs = vec_lvehx(0, &vs); // CHECK: @llvm.ppc.altivec.lvehx +  res_vus = vec_lvehx(0, &vus); // CHECK: @llvm.ppc.altivec.lvehx +  res_vi = vec_lvewx(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx +  res_vui = vec_lvewx(0, &vui); // CHECK: @llvm.ppc.altivec.lvewx +  res_vf = vec_lvewx(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx + +  /* vec_ldl */ +  res_vsc = vec_ldl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vsc = vec_ldl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vuc = vec_ldl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vuc = vec_ldl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl +  res_vs = vec_ldl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl +  res_vus = vec_ldl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl +  res_vus = vec_ldl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl +  res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl +  res_vi = vec_ldl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl +  res_vui = vec_ldl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl +  res_vui = vec_ldl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl +  res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl +  res_vf = vec_ldl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl +  res_vsc = vec_lvxl(0, &vsc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vsc = vec_lvxl(0, &param_sc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vuc = vec_lvxl(0, &vuc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vuc = vec_lvxl(0, &param_uc); // CHECK: @llvm.ppc.altivec.lvxl +  res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl +  res_vs = vec_lvxl(0, &param_s); // CHECK: @llvm.ppc.altivec.lvxl +  res_vus = vec_lvxl(0, &vus); // CHECK: @llvm.ppc.altivec.lvxl +  res_vus = vec_lvxl(0, &param_us); // CHECK: @llvm.ppc.altivec.lvxl +  res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl +  res_vi = vec_lvxl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvxl +  res_vui = vec_lvxl(0, &vui); // CHECK: @llvm.ppc.altivec.lvxl +  res_vui = vec_lvxl(0, &param_ui); // CHECK: @llvm.ppc.altivec.lvxl +  res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl +  res_vf = vec_lvxl(0, &param_f); // CHECK: @llvm.ppc.altivec.lvxl + +  /* vec_loge */ +  res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp +  res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp + +  /* vec_lvsl */ +  res_vuc = vec_lvsl(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsl + +  /* vec_lvsr */ +  res_vuc = vec_lvsr(0, &param_i); // CHECK: @llvm.ppc.altivec.lvsr + +  /* vec_madd */ +  res_vf =vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp +  res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp + +  /* vec_madds */ +  res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs +  res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs /* vec_max */ -  res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb -  res_vuc = __builtin_vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub -  res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh +  res_vsc = vec_max(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb +  res_vuc = vec_max(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub +  res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh res_vus = vec_max(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh -  res_vi = 
vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw + res_vui = vec_max(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw + res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp + res_vsc = vec_vmaxsb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmaxsb + res_vuc = vec_vmaxub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmaxub + res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh + res_vus = vec_vmaxuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmaxuh + res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw res_vui = vec_vmaxuw(vui, vui); // CHECK: @llvm.ppc.altivec.vmaxuw - res_vf = __builtin_vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp + res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp + + /* vec_mergeh */ + res_vsc = vec_mergeh(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_mergeh(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_mergeh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_mergeh(vui, vui); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm + res_vsc = vec_vmrghb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_vmrghb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_vmrghh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_vmrghw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm + + /* vec_mergel */ + res_vsc = vec_mergel(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_mergel(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_mergel(vus, vus); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_mergel(vui, vui); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm + res_vsc = vec_vmrglb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_vmrglb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_vmrglh(vus, vus); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_vmrglw(vui, vui); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm /* vec_mfvscr */ - vf = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr + vus = vec_mfvscr(); // CHECK: @llvm.ppc.altivec.mfvscr /* vec_min */ - res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb - res_vuc = __builtin_vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub - res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh + res_vsc = vec_min(vsc, vsc); // CHECK: @llvm.ppc.altivec.vminsb + res_vuc = vec_min(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub + res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh res_vus = vec_min(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh - res_vi = __builtin_vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw + res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw + res_vui = vec_min(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw + res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp + res_vsc = vec_vminsb(vsc, vsc); // 
CHECK: @llvm.ppc.altivec.vminsb + res_vuc = vec_vminub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vminub + res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh + res_vus = vec_vminuh(vus, vus); // CHECK: @llvm.ppc.altivec.vminuh + res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw res_vui = vec_vminuw(vui, vui); // CHECK: @llvm.ppc.altivec.vminuw - res_vf = __builtin_vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp + res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp + + /* vec_mladd */ + res_vus = vec_mladd(vus, vus, vus); // CHECK: mul <8 x i16> + // CHECK: add <8 x i16> + + res_vs = vec_mladd(vus, vs, vs); // CHECK: mul nsw <8 x i16> + // CHECK: add nsw <8 x i16> + + res_vs = vec_mladd(vs, vus, vus); // CHECK: mul nsw <8 x i16> + // CHECK: add nsw <8 x i16> + + res_vs = vec_mladd(vs, vs, vs); // CHECK: mul nsw <8 x i16> + // CHECK: add nsw <8 x i16> + + /* vec_mradds */ + res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs + res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs + + /* vec_msum */ + res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm + res_vui = vec_msum(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm + res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm + res_vui = vec_msum(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm + res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm + res_vui = vec_vmsumubm(vuc, vuc, vui); // CHECK: @llvm.ppc.altivec.vmsumubm + res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm + res_vui = vec_vmsumuhm(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhm + + /* vec_msums */ + res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs + res_vui = vec_msums(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs + res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs + res_vui = vec_vmsumuhs(vus, vus, vui); // CHECK: @llvm.ppc.altivec.vmsumuhs /* vec_mtvscr */ vec_mtvscr(vsc); // CHECK: @llvm.ppc.altivec.mtvscr - /* ------------------------------ predicates -------------------------------------- */ + /* vec_mule */ + res_vs = vec_mule(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb + res_vus = vec_mule(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub + res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh + res_vui = vec_mule(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh + res_vs = vec_vmulesb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulesb + res_vus = vec_vmuleub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuleub + res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh + res_vui = vec_vmuleuh(vus, vus); // CHECK: @llvm.ppc.altivec.vmuleuh + + /* vec_mulo */ + res_vs = vec_mulo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb + res_vus = vec_mulo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub + res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh + res_vui = vec_mulo(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh + res_vs = vec_vmulosb(vsc, vsc); // CHECK: @llvm.ppc.altivec.vmulosb + res_vus = vec_vmuloub(vuc, vuc); // CHECK: @llvm.ppc.altivec.vmuloub + res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh + res_vui = vec_vmulouh(vus, vus); // CHECK: @llvm.ppc.altivec.vmulouh + + /* vec_nmsub */ + res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp + res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp + + /* vec_nor */ + res_vsc = vec_nor(vsc, vsc); // CHECK: or <16 x i8> + // CHECK: xor <16 
x i8> + + res_vuc = vec_nor(vuc, vuc); // CHECK: or <16 x i8> + // CHECK: xor <16 x i8> + + res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16> + // CHECK: xor <8 x i16> + + res_vus = vec_nor(vus, vus); // CHECK: or <8 x i16> + // CHECK: xor <8 x i16> + + res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32> + // CHECK: xor <4 x i32> + + res_vui = vec_nor(vui, vui); // CHECK: or <4 x i32> + // CHECK: xor <4 x i32> + + res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32> + // CHECK: xor <4 x i32> + + res_vsc = vec_vnor(vsc, vsc); // CHECK: or <16 x i8> + // CHECK: xor <16 x i8> + + res_vuc = vec_vnor(vuc, vuc); // CHECK: or <16 x i8> + // CHECK: xor <16 x i8> - res_i = __builtin_vec_vcmpeq_p(__CR6_EQ, vsc, vui); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p - res_i = __builtin_vec_vcmpge_p(__CR6_EQ, vs, vi); // CHECK: @llvm.ppc.altivec.vcmpgefp.p - res_i = __builtin_vec_vcmpgt_p(__CR6_EQ, vuc, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p + res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16> + // CHECK: xor <8 x i16> + + res_vus = vec_vnor(vus, vus); // CHECK: or <8 x i16> + // CHECK: xor <8 x i16> + + res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32> + // CHECK: xor <4 x i32> + + res_vui = vec_vnor(vui, vui); // CHECK: or <4 x i32> + // CHECK: xor <4 x i32> + + res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32> + // CHECK: xor <4 x i32> + + /* vec_or */ + res_vsc = vec_or(vsc, vsc); // CHECK: or <16 x i8> + res_vuc = vec_or(vuc, vuc); // CHECK: or <16 x i8> + res_vs = vec_or(vs, vs); // CHECK: or <8 x i16> + res_vus = vec_or(vus, vus); // CHECK: or <8 x i16> + res_vi = vec_or(vi, vi); // CHECK: or <4 x i32> + res_vui = vec_or(vui, vui); // CHECK: or <4 x i32> + res_vf = vec_or(vf, vf); // CHECK: or <4 x i32> + res_vsc = vec_vor(vsc, vsc); // CHECK: or <16 x i8> + res_vuc = vec_vor(vuc, vuc); // CHECK: or <16 x i8> + res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16> + res_vus = vec_vor(vus, vus); // CHECK: or <8 x i16> + res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32> + res_vui = vec_vor(vui, vui); // CHECK: or <4 x i32> + res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32> + + /* vec_pack */ + res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_pack(vus, vus); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_pack(vui, vui); // CHECK: @llvm.ppc.altivec.vperm + res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_vpkuhum(vus, vus); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_vpkuwum(vui, vui); // CHECK: @llvm.ppc.altivec.vperm + + /* vec_packpx */ + res_vp = vec_packpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx + res_vp = vec_vpkpx(vui, vui); // CHECK: @llvm.ppc.altivec.vpkpx + + /* vec_packs */ + res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss + res_vuc = vec_packs(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus + res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss + res_vus = vec_packs(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus + res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss + res_vuc = vec_vpkuhus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus + res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss + res_vus = vec_vpkuwus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus + + /* vec_packsu */ + res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus + res_vuc = vec_packsu(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus + res_vus = vec_packsu(vi, vi); // 
CHECK: @llvm.ppc.altivec.vpkswus + res_vus = vec_packsu(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus + res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus + res_vuc = vec_vpkshus(vus, vus); // CHECK: @llvm.ppc.altivec.vpkuhus + res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus + res_vus = vec_vpkswus(vui, vui); // CHECK: @llvm.ppc.altivec.vpkuwus + + /* vec_perm */ + res_vsc = vec_perm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_perm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_perm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_perm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vsc = vec_vperm(vsc, vsc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_vperm(vuc, vuc, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_vperm(vus, vus, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_vperm(vui, vui, vuc); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm + + /* vec_re */ + res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp + res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp + + /* vec_rl */ + res_vsc = vec_rl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb + res_vuc = vec_rl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb + res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh + res_vus = vec_rl(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh + res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw + res_vui = vec_rl(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw + res_vsc = vec_vrlb(vsc, vuc); // CHECK: @llvm.ppc.altivec.vrlb + res_vuc = vec_vrlb(vuc, vuc); // CHECK: @llvm.ppc.altivec.vrlb + res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh + res_vus = vec_vrlh(vus, vus); // CHECK: @llvm.ppc.altivec.vrlh + res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw + res_vui = vec_vrlw(vui, vui); // CHECK: @llvm.ppc.altivec.vrlw + + /* vec_round */ + res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin + res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin + + /* vec_rsqrte */ + res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp + res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp + + /* vec_sel */ + res_vsc = vec_sel(vsc, vsc, vuc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + // CHECK: and <16 x i8> + // CHECK: or <16 x i8> + + res_vuc = vec_sel(vuc, vuc, vuc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + // CHECK: and <16 x i8> + // CHECK: or <16 x i8> + + res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + // CHECK: and <8 x i16> + // CHECK: or <8 x i16> + + + res_vus = vec_sel(vus, vus, vus); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + // CHECK: and <8 x i16> + // CHECK: or <8 x i16> + + res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + // CHECK: and <4 x i32> + // CHECK: or <4 x i32> + + + res_vui = vec_sel(vui, vui, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + // CHECK: and <4 x i32> + // CHECK: or <4 x i32> + + + res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + // CHECK: and <4 x i32> + 
// CHECK: or <4 x i32> + + res_vsc = vec_vsel(vsc, vsc, vuc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + // CHECK: and <16 x i8> + // CHECK: or <16 x i8> + + res_vuc = vec_vsel(vuc, vuc, vuc); // CHECK: xor <16 x i8> + // CHECK: and <16 x i8> + // CHECK: and <16 x i8> + // CHECK: or <16 x i8> + + res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + // CHECK: and <8 x i16> + // CHECK: or <8 x i16> + + + res_vus = vec_vsel(vus, vus, vus); // CHECK: xor <8 x i16> + // CHECK: and <8 x i16> + // CHECK: and <8 x i16> + // CHECK: or <8 x i16> + + res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + // CHECK: and <4 x i32> + // CHECK: or <4 x i32> + + + res_vui = vec_vsel(vui, vui, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + // CHECK: and <4 x i32> + // CHECK: or <4 x i32> + + + res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32> + // CHECK: and <4 x i32> + // CHECK: and <4 x i32> + // CHECK: or <4 x i32> + + + /* vec_sl */ + res_vsc = vec_sl(vsc, vuc); // CHECK: shl <16 x i8> + res_vuc = vec_sl(vuc, vuc); // CHECK: shl <16 x i8> + res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16> + res_vus = vec_sl(vus, vus); // CHECK: shl <8 x i16> + res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32> + res_vui = vec_sl(vui, vui); // CHECK: shl <4 x i32> + res_vsc = vec_vslb(vsc, vuc); // CHECK: shl <16 x i8> + res_vuc = vec_vslb(vuc, vuc); // CHECK: shl <16 x i8> + res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16> + res_vus = vec_vslh(vus, vus); // CHECK: shl <8 x i16> + res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32> + res_vui = vec_vslw(vui, vui); // CHECK: shl <4 x i32> + + /* vec_sld */ + res_vsc = vec_sld(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_sld(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_sld(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_sld(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vsc = vec_vsldoi(vsc, vsc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_vsldoi(vuc, vuc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_vsldoi(vus, vus, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_vsldoi(vui, vui, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm + + /* vec_sll */ + res_vsc = vec_sll(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vsc = vec_sll(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vsc = vec_sll(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vuc = vec_sll(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vuc = vec_sll(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vuc = vec_sll(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vus = vec_sll(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vus = vec_sll(vus, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vus = vec_sll(vus, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vi = 
vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vui = vec_sll(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vui = vec_sll(vui, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vui = vec_sll(vui, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vsc = vec_vsl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vsc = vec_vsl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vsc = vec_vsl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vuc = vec_vsl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vuc = vec_vsl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vuc = vec_vsl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vus = vec_vsl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vus = vec_vsl(vus, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vus = vec_vsl(vus, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl + res_vui = vec_vsl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsl + res_vui = vec_vsl(vui, vus); // CHECK: @llvm.ppc.altivec.vsl + res_vui = vec_vsl(vui, vui); // CHECK: @llvm.ppc.altivec.vsl + + /* vec_slo */ + res_vsc = vec_slo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vsc = vec_slo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vuc = vec_slo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vuc = vec_slo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vus = vec_slo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vus = vec_slo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vui = vec_slo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vui = vec_slo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vsc = vec_vslo(vsc, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vsc = vec_vslo(vsc, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vuc = vec_vslo(vuc, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vuc = vec_vslo(vuc, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vus = vec_vslo(vus, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vus = vec_vslo(vus, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vui = vec_vslo(vui, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vui = vec_vslo(vui, vuc); // CHECK: @llvm.ppc.altivec.vslo + res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo + res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo + + /* vec_splat */ + res_vsc = vec_splat(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_splat(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_splat(vus, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_splat(vui, 0); // CHECK: 
@llvm.ppc.altivec.vperm + res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vsc = vec_vspltb(vsc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vuc = vec_vspltb(vuc, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vus = vec_vsplth(vus, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vui = vec_vspltw(vui, 0); // CHECK: @llvm.ppc.altivec.vperm + res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm + + /* vec_splat_s8 */ + res_vsc = vec_splat_s8(0x09); // TODO: add check + res_vsc = vec_vspltisb(0x09); // TODO: add check + + /* vec_splat_s16 */ + res_vs = vec_splat_s16(0x09); // TODO: add check + res_vs = vec_vspltish(0x09); // TODO: add check + + /* vec_splat_s32 */ + res_vi = vec_splat_s32(0x09); // TODO: add check + res_vi = vec_vspltisw(0x09); // TODO: add check + + /* vec_splat_u8 */ + res_vuc = vec_splat_u8(0x09); // TODO: add check + + /* vec_splat_u16 */ + res_vus = vec_splat_u16(0x09); // TODO: add check + + /* vec_splat_u32 */ + res_vui = vec_splat_u32(0x09); // TODO: add check + + /* vec_sr */ + res_vsc = vec_sr(vsc, vuc); // CHECK: shr <16 x i8> + res_vuc = vec_sr(vuc, vuc); // CHECK: shr <16 x i8> + res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16> + res_vus = vec_sr(vus, vus); // CHECK: shr <8 x i16> + res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32> + res_vui = vec_sr(vui, vui); // CHECK: shr <4 x i32> + res_vsc = vec_vsrb(vsc, vuc); // CHECK: shr <16 x i8> + res_vuc = vec_vsrb(vuc, vuc); // CHECK: shr <16 x i8> + res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16> + res_vus = vec_vsrh(vus, vus); // CHECK: shr <8 x i16> + res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32> + res_vui = vec_vsrw(vui, vui); // CHECK: shr <4 x i32> + + /* vec_sra */ + res_vsc = vec_sra(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab + res_vuc = vec_sra(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab + res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah + res_vus = vec_sra(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah + res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw + res_vui = vec_sra(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw + res_vsc = vec_vsrab(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsrab + res_vuc = vec_vsrab(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsrab + res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah + res_vus = vec_vsrah(vus, vus); // CHECK: @llvm.ppc.altivec.vsrah + res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw + res_vui = vec_vsraw(vui, vui); // CHECK: @llvm.ppc.altivec.vsraw + + /* vec_srl */ + res_vsc = vec_srl(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr + res_vsc = vec_srl(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr + res_vsc = vec_srl(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr + res_vuc = vec_srl(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr + res_vuc = vec_srl(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr + res_vuc = vec_srl(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr + res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr + res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr + res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr + res_vus = vec_srl(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr + res_vus = vec_srl(vus, vus); // CHECK: @llvm.ppc.altivec.vsr + res_vus = vec_srl(vus, vui); // CHECK: @llvm.ppc.altivec.vsr + res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr + res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr + res_vi = vec_srl(vi, vui); // CHECK: 
@llvm.ppc.altivec.vsr
+  res_vui = vec_srl(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vui = vec_srl(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vui = vec_srl(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
+  res_vsc = vec_vsr(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vsc = vec_vsr(vsc, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vsc = vec_vsr(vsc, vui); // CHECK: @llvm.ppc.altivec.vsr
+  res_vuc = vec_vsr(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vuc = vec_vsr(vuc, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vuc = vec_vsr(vuc, vui); // CHECK: @llvm.ppc.altivec.vsr
+  res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr
+  res_vus = vec_vsr(vus, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vus = vec_vsr(vus, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vus = vec_vsr(vus, vui); // CHECK: @llvm.ppc.altivec.vsr
+  res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr
+  res_vui = vec_vsr(vui, vuc); // CHECK: @llvm.ppc.altivec.vsr
+  res_vui = vec_vsr(vui, vus); // CHECK: @llvm.ppc.altivec.vsr
+  res_vui = vec_vsr(vui, vui); // CHECK: @llvm.ppc.altivec.vsr
+
+  /* vec_sro */
+  res_vsc = vec_sro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vsc = vec_sro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vuc = vec_sro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vuc = vec_sro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vus = vec_sro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vus = vec_sro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vui = vec_sro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vui = vec_sro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vsc = vec_vsro(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vsc = vec_vsro(vsc, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vuc = vec_vsro(vuc, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vuc = vec_vsro(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vus = vec_vsro(vus, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vus = vec_vsro(vus, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vui = vec_vsro(vui, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vui = vec_vsro(vui, vuc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
+  res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
+
+  /* vec_st */
+  vec_st(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
+  vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
+  vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
+
+  /* vec_ste */
+  vec_ste(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
+  vec_ste(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
+  vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
+  vec_ste(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
+  vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
+  vec_ste(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
+  vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
+  vec_stvebx(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvebx
+  vec_stvebx(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvebx
+  vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
+  vec_stvehx(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
+  vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx
+  vec_stvewx(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx
+  vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
+
+  /* vec_stl */
+  vec_stl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vsc, 0, &vsc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vsc, 0, &param_sc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vuc, 0, &vuc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vuc, 0, &param_uc); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vus, 0, &vus); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vus, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vui, 0, &vui); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vui, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
+  vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
+
+  /* vec_sub */
+  res_vsc = vec_sub(vsc, vsc); // CHECK: sub nsw <16 x i8>
+  res_vuc = vec_sub(vuc, vuc); // CHECK: sub <16 x i8>
+  res_vs = vec_sub(vs, vs); // CHECK: sub nsw <8 x i16>
+  res_vus = vec_sub(vus, vus); // CHECK: sub <8 x i16>
+  res_vi = vec_sub(vi, vi); // CHECK: sub nsw <4 x i32>
+  res_vui = vec_sub(vui, vui); // CHECK: sub <4 x i32>
+  res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
+  res_vsc = vec_vsububm(vsc, vsc); // CHECK: sub nsw <16 x i8>
+  res_vuc = vec_vsububm(vuc, vuc); // CHECK: sub <16 x i8>
+  res_vs = vec_vsubuhm(vs, vs); // CHECK: sub nsw <8 x i16>
+  res_vus = vec_vsubuhm(vus, vus); // CHECK: sub <8 x i16>
+  res_vi = vec_vsubuwm(vi, vi); // CHECK: sub nsw <4 x i32>
+  res_vui = vec_vsubuwm(vui, vui); // CHECK: sub <4 x i32>
+  res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>
+
+  /* vec_subc */
+  res_vui = vec_subc(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
+  res_vui = vec_vsubcuw(vui, vui); // CHECK: @llvm.ppc.altivec.vsubcuw
+
+  /* vec_subs */
+  res_vsc = vec_subs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
+  res_vuc = vec_subs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
+  res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
+  res_vus = vec_subs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
+  res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
+  res_vui = vec_subs(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
+  res_vsc = vec_vsubsbs(vsc, vsc); // CHECK: @llvm.ppc.altivec.vsubsbs
+  res_vuc = vec_vsububs(vuc, vuc); // CHECK: @llvm.ppc.altivec.vsububs
+  res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
+  res_vus = vec_vsubuhs(vus, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
+  res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws
+  res_vui = vec_vsubuws(vui, vui); // CHECK: @llvm.ppc.altivec.vsubuws
+
+  /* vec_sum4s */
+  res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
+  res_vui = vec_sum4s(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
+  res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
+  res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs
+  res_vui = vec_vsum4ubs(vuc, vui); // CHECK: @llvm.ppc.altivec.vsum4ubs
+  res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs
+
+  /* vec_sum2s */
+  res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
+  res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws
+
+  /* vec_sums */
+  res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
+  res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws
+
+  /* vec_trunc */
+  res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
+  res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz
+
+  /* vec_unpackh */
+  res_vs = vec_unpackh(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
+  res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
+  res_vs = vec_vupkhsb(vsc); // CHECK: @llvm.ppc.altivec.vupkhsb
+  res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh
+
+  /* vec_unpackl */
+  res_vs = vec_unpackl(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
+  res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh
+  res_vs = vec_vupklsb(vsc); // CHECK: @llvm.ppc.altivec.vupklsb
+  res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh
+
+  /* vec_xor */
+  res_vsc = 
vec_xor(vsc, vsc); // CHECK: xor <16 x i8> + res_vuc = vec_xor(vuc, vuc); // CHECK: xor <16 x i8> + res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16> + res_vus = vec_xor(vus, vus); // CHECK: xor <8 x i16> + res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32> + res_vui = vec_xor(vui, vui); // CHECK: xor <4 x i32> + res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32> + res_vsc = vec_vxor(vsc, vsc); // CHECK: xor <16 x i8> + res_vuc = vec_vxor(vuc, vuc); // CHECK: xor <16 x i8> + res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16> + res_vus = vec_vxor(vus, vus); // CHECK: xor <8 x i16> + res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32> + res_vui = vec_vxor(vui, vui); // CHECK: xor <4 x i32> + res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32> + + /* ------------------------------ predicates -------------------------------------- */ /* vec_all_eq */ res_i = vec_all_eq(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpequb.p @@ -203,7 +1111,7 @@ int main () res_i = vec_all_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_all_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p - res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p + res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p /* vec_all_gt */ res_i = vec_all_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p @@ -224,7 +1132,7 @@ int main () res_i = vec_all_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_all_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p - res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p + res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p /* vec_all_nan */ res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p @@ -269,7 +1177,7 @@ int main () res_i = vec_any_ge(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_any_ge(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p - res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p + res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p /* vec_any_gt */ res_i = vec_any_gt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p @@ -287,7 +1195,7 @@ int main () res_i = vec_any_le(vus, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_any_le(vui, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p - res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p + res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p /* vec_any_lt */ res_i = vec_any_lt(vsc, vsc); // CHECK: @llvm.ppc.altivec.vcmpgtsb.p diff --git a/test/CodeGen/builtins.c b/test/CodeGen/builtins.c index 8b6125806eff..40f77249f918 100644 --- a/test/CodeGen/builtins.c +++ b/test/CodeGen/builtins.c @@ -39,9 +39,10 @@ int main() { Q(inff, ()); Q(infl, ()); + P(fpclassify, (0, 1, 2, 3, 4, 1.0)); + P(fpclassify, (0, 1, 2, 3, 4, 1.0f)); + P(fpclassify, (0, 1, 2, 3, 4, 1.0l)); // FIXME: - // XXX note funny semantics for the (last) argument - // P(fpclassify, (FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO, 1.0)); // P(isinf_sign, (1.0)); Q(nan, ("")); @@ -195,3 +196,10 @@ void test_float_builtins(float F, double D, long double LD) { // CHECK: and i1 } +// CHECK: define void @test_builtin_longjmp +void test_builtin_longjmp(void **buffer) { + // CHECK: [[BITCAST:%.*]] = bitcast + // CHECK-NEXT: call void 
@llvm.eh.sjlj.longjmp(i8* [[BITCAST]]) + __builtin_longjmp(buffer, 1); + // CHECK-NEXT: unreachable +} diff --git a/test/CodeGen/const-arithmetic.c b/test/CodeGen/const-arithmetic.c index e12b4f6d92c1..92c02f0b3dfb 100644 --- a/test/CodeGen/const-arithmetic.c +++ b/test/CodeGen/const-arithmetic.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s -// CHECK: @g1 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 8 ; <[2 x i8*]*> [#uses=0] -// CHECK: @g2 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 8 ; <[2 x i8*]*> [#uses=0] +// CHECK: @g1 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 ; <[2 x i8*]*> [#uses=0] +// CHECK: @g2 = global [2 x i8*] [i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -2), i8* getelementptr (i8* getelementptr inbounds ([0 x %struct.anon]* @g0, i32 0, i32 0, i32 0), i64 -46)], align 16 ; <[2 x i8*]*> [#uses=0] extern struct { unsigned char a, b; } g0[]; void *g1[] = {g0 + -1, g0 + -23 }; diff --git a/test/CodeGen/const-unordered-compare.c b/test/CodeGen/const-unordered-compare.c index ac7d35bcd542..ffd04db6f862 100644 --- a/test/CodeGen/const-unordered-compare.c +++ b/test/CodeGen/const-unordered-compare.c @@ -2,6 +2,6 @@ // Checks folding of an unordered comparison int nan_ne_check() { - // CHECK: store i32 1 + // CHECK: ret i32 1 return (__builtin_nanf("") != __builtin_nanf("")) ? 1 : 0; } diff --git a/test/CodeGen/decl.c b/test/CodeGen/decl.c index 7ffb7006b05b..7a9971ee1812 100644 --- a/test/CodeGen/decl.c +++ b/test/CodeGen/decl.c @@ -89,3 +89,31 @@ struct test7s { int a; int b; } test7[] = { struct test8s { int f0; char f1; } test8g = {}; +// PR7519 + +struct S { + void (*x) (struct S *); +}; + +extern struct S *global_dc; +void cp_diagnostic_starter(struct S *); + +void init_error(void) { + global_dc->x = cp_diagnostic_starter; +} + + + +// rdar://8147692 - ABI crash in recursive struct-through-function-pointer. +typedef struct { + int x5a; +} x5; + +typedef struct x2 *x0; +typedef long (*x1)(x0 x0a, x5 x6); +struct x2 { + x1 x4; +}; +long x3(x0 x0a, x5 a) { + return x0a->x4(x0a, a); +} diff --git a/test/CodeGen/exprs.c b/test/CodeGen/exprs.c index d82cbf48d30a..7cc1134077ee 100644 --- a/test/CodeGen/exprs.c +++ b/test/CodeGen/exprs.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -o - +// RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -emit-llvm -o - | FileCheck %s // PR1895 // sizeof function @@ -119,3 +119,29 @@ void f9(struct S *x) { void f10() { __builtin_sin(0); } + +// rdar://7530813 +// CHECK: define i32 @f11 +int f11(long X) { + int A[100]; + return A[X]; + +// CHECK: [[Xaddr:%[^ ]+]] = alloca i64, align 8 +// CHECK: load {{.*}}* [[Xaddr]] +// CHECK-NEXT: getelementptr inbounds [100 x i32]* %A, i32 0, +// CHECK-NEXT: load i32* +} + +int f12() { + // PR3150 + // CHECK: define i32 @f12 + // CHECK: ret i32 1 + return 1||1; +} + +// Make sure negate of fp uses -0.0 for proper -0 handling. 
+double f13(double X) { + // CHECK: define double @f13 + // CHECK: fsub double -0.0 + return -X; +} diff --git a/test/CodeGen/extern-inline.c b/test/CodeGen/extern-inline.c index 5dd9bfda574c..60f6d034bf1f 100644 --- a/test/CodeGen/extern-inline.c +++ b/test/CodeGen/extern-inline.c @@ -19,7 +19,7 @@ int g2(void) {return f2(0,1);} static int f2(int a, int b) {return a*b;} // CHECK: load i32* %{{.*}} // CHECK: load i32* %{{.*}} -// CHECK: mul i32 %{{.*}}, %{{.*}} +// CHECK: mul nsw i32 %{{.*}}, %{{.*}} int h2(void) {return f2(1,2);} // CHECK: call i32 @f2 diff --git a/test/CodeGen/frame-pointer-elim.c b/test/CodeGen/frame-pointer-elim.c new file mode 100644 index 000000000000..79c0599467a9 --- /dev/null +++ b/test/CodeGen/frame-pointer-elim.c @@ -0,0 +1,29 @@ +// RUN: %clang -ccc-host-triple i386 -S -o - %s | \ +// RUN: FileCheck --check-prefix=DEFAULT %s +// DEFAULT: f0: +// DEFAULT: pushl %ebp +// DEFAULT: ret +// DEFAULT: f1: +// DEFAULT: pushl %ebp +// DEFAULT: ret + +// RUN: %clang -ccc-host-triple i386 -S -o - -fomit-frame-pointer %s | \ +// RUN: FileCheck --check-prefix=OMIT_ALL %s +// OMIT_ALL: f0: +// OMIT_ALL-NOT: pushl %ebp +// OMIT_ALL: ret +// OMIT_ALL: f1: +// OMIT_ALL-NOT: pushl %ebp +// OMIT_ALL: ret + +// RUN: %clang -ccc-host-triple i386 -S -o - -momit-leaf-frame-pointer %s | \ +// RUN: FileCheck --check-prefix=OMIT_LEAF %s +// OMIT_LEAF: f0: +// OMIT_LEAF-NOT: pushl %ebp +// OMIT_LEAF: ret +// OMIT_LEAF: f1: +// OMIT_LEAF: pushl %ebp +// OMIT_LEAF: ret + +void f0() {} +void f1() { f0(); } diff --git a/test/CodeGen/func-in-block.c b/test/CodeGen/func-in-block.c new file mode 100644 index 000000000000..27e0c0960997 --- /dev/null +++ b/test/CodeGen/func-in-block.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fblocks -emit-llvm -o - %s | FileCheck %s +// rdar: // 7860965 + +extern void PRINTF(const char *); +extern void B(void (^)(void)); + +int main() +{ + PRINTF(__func__); + B( + ^{ + PRINTF(__func__); + } + ); + return 0; // not reached +} + +// CHECK: call void @PRINTF({{.*}}@__func__.__main_block_invoke_ diff --git a/test/CodeGen/init.c b/test/CodeGen/init.c index d48e723c58a1..c8de99d90177 100644 --- a/test/CodeGen/init.c +++ b/test/CodeGen/init.c @@ -40,3 +40,9 @@ vec3 f5(vec3 value) { .x = value.x }}; } + +// rdar://problem/8154689 +void f6() { + int x; + long ids[] = { (long) &x }; +} diff --git a/test/CodeGen/inline.c b/test/CodeGen/inline.c index a17b06992968..a6b4b3e4483b 100644 --- a/test/CodeGen/inline.c +++ b/test/CodeGen/inline.c @@ -1,5 +1,5 @@ // RUN: echo "GNU89 tests:" -// RUN: %clang %s -emit-llvm -S -o %t -std=gnu89 +// RUN: %clang %s -O1 -emit-llvm -S -o %t -std=gnu89 // RUN: grep "define available_externally i32 @ei()" %t // RUN: grep "define i32 @foo()" %t // RUN: grep "define i32 @bar()" %t @@ -14,7 +14,7 @@ // RUN: grep "define available_externally i32 @test5" %t // RUN: echo "\nC99 tests:" -// RUN: %clang %s -emit-llvm -S -o %t -std=c99 +// RUN: %clang %s -O1 -emit-llvm -S -o %t -std=c99 // RUN: grep "define i32 @ei()" %t // RUN: grep "define available_externally i32 @foo()" %t // RUN: grep "define i32 @bar()" %t @@ -29,7 +29,7 @@ // RUN: grep "define available_externally i32 @test5" %t // RUN: echo "\nC++ tests:" -// RUN: %clang %s -emit-llvm -S -o %t -std=c++98 +// RUN: %clang %s -O1 -emit-llvm -S -o %t -std=c++98 // RUN: grep "define linkonce_odr i32 @_Z2eiv()" %t // RUN: grep "define linkonce_odr i32 @_Z3foov()" %t // RUN: grep "define i32 @_Z3barv()" %t diff --git a/test/CodeGen/inline2.c b/test/CodeGen/inline2.c index 
737b58fa44c6..fca4fff7ca8d 100644 --- a/test/CodeGen/inline2.c +++ b/test/CodeGen/inline2.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -std=gnu89 -triple i386-apple-darwin9 -emit-llvm %s -o - | FileCheck -check-prefix GNU89 %s -// RUN: %clang_cc1 -std=c99 -triple i386-apple-darwin9 -emit-llvm %s -o - | FileCheck -check-prefix C99 %s +// RUN: %clang_cc1 -O1 -std=gnu89 -triple i386-apple-darwin9 -emit-llvm %s -o - | FileCheck -check-prefix GNU89 %s +// RUN: %clang_cc1 -O1 -std=c99 -triple i386-apple-darwin9 -emit-llvm %s -o - | FileCheck -check-prefix C99 %s // CHECK-GNU89: define i32 @f0() // CHECK-C99: define i32 @f0() diff --git a/test/CodeGen/instrument-functions.c b/test/CodeGen/instrument-functions.c new file mode 100644 index 000000000000..d80385e2239a --- /dev/null +++ b/test/CodeGen/instrument-functions.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -S -emit-llvm -o - %s -finstrument-functions | FileCheck %s + +// CHECK: @test1 +int test1(int x) { +// CHECK: __cyg_profile_func_enter +// CHECK: __cyg_profile_func_exit +// CHECK: ret + return x; +} + +// CHECK: @test2 +int test2(int) __attribute__((no_instrument_function)); +int test2(int x) { +// CHECK-NOT: __cyg_profile_func_enter +// CHECK-NOT: __cyg_profile_func_exit +// CHECK: ret + return x; +} diff --git a/test/CodeGen/integer-overflow.c b/test/CodeGen/integer-overflow.c new file mode 100644 index 000000000000..9bed741b3236 --- /dev/null +++ b/test/CodeGen/integer-overflow.c @@ -0,0 +1,45 @@ +// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s --check-prefix=DEFAULT +// RUN: %clang_cc1 %s -emit-llvm -o - -fwrapv | FileCheck %s --check-prefix=WRAPV +// RUN: %clang_cc1 %s -emit-llvm -o - -ftrapv | FileCheck %s --check-prefix=TRAPV + + +// Tests for signed integer overflow stuff. +// rdar://7432000 rdar://7221421 +void test1() { + // DEFAULT: define void @test1 + // WRAPV: define void @test1 + // TRAPV: define void @test1 + extern volatile int f11G, a, b; + + // DEFAULT: add nsw i32 + // WRAPV: add i32 + // TRAPV: llvm.sadd.with.overflow.i32 + f11G = a + b; + + // DEFAULT: sub nsw i32 + // WRAPV: sub i32 + // TRAPV: llvm.ssub.with.overflow.i32 + f11G = a - b; + + // DEFAULT: mul nsw i32 + // WRAPV: mul i32 + // TRAPV: llvm.smul.with.overflow.i32 + f11G = a * b; + + // DEFAULT: sub nsw i32 0, + // WRAPV: sub i32 0, + // TRAPV: llvm.ssub.with.overflow.i32(i32 0 + f11G = -a; + + // PR7426 - Overflow checking for increments. 
+ + // DEFAULT: add nsw i32 {{.*}}, 1 + // WRAPV: add i32 {{.*}}, 1 + // TRAPV: llvm.sadd.with.overflow.i32({{.*}}, i32 1) + ++a; + + // DEFAULT: add nsw i32 {{.*}}, -1 + // WRAPV: add i32 {{.*}}, -1 + // TRAPV: llvm.sadd.with.overflow.i32({{.*}}, i32 -1) + --a; +} diff --git a/test/CodeGen/object-size.c b/test/CodeGen/object-size.c index 3920ec5934de..287d742b877d 100644 --- a/test/CodeGen/object-size.c +++ b/test/CodeGen/object-size.c @@ -13,32 +13,38 @@ char gbuf[63]; char *gp; int gi, gj; +// CHECK: define void @test1 void test1() { // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8]* @gbuf, i32 0, i64 4), i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i64 59) strcpy(&gbuf[4], "Hi there"); } +// CHECK: define void @test2 void test2() { // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8]* @gbuf, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i64 63) strcpy(gbuf, "Hi there"); } +// CHECK: define void @test3 void test3() { // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8]* @gbuf, i64 1, i64 37), i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i64 0) strcpy(&gbuf[100], "Hi there"); } +// CHECK: define void @test4 void test4() { // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8]* @gbuf, i32 0, i64 -1), i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), i64 0) strcpy((char*)(void*)&gbuf[-1], "Hi there"); } +// CHECK: define void @test5 void test5() { // CHECK: = load i8** @gp // CHECK-NEXT:= call i64 @llvm.objectsize.i64(i8* %{{.*}}, i1 false) strcpy(gp, "Hi there"); } +// CHECK: define void @test6 void test6() { char buf[57]; @@ -46,6 +52,7 @@ void test6() { strcpy(&buf[4], "Hi there"); } +// CHECK: define void @test7 void test7() { int i; // CHECK-NOT: __strcpy_chk @@ -53,6 +60,7 @@ void test7() { strcpy((++i, gbuf), "Hi there"); } +// CHECK: define void @test8 void test8() { char *buf[50]; // CHECK-NOT: __strcpy_chk @@ -60,12 +68,14 @@ void test8() { strcpy(buf[++gi], "Hi there"); } +// CHECK: define void @test9 void test9() { // CHECK-NOT: __strcpy_chk // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) strcpy((char *)((++gi) + gj), "Hi there"); } +// CHECK: define void @test10 char **p; void test10() { // CHECK-NOT: __strcpy_chk @@ -73,36 +83,42 @@ void test10() { strcpy(*(++p), "Hi there"); } +// CHECK: define void @test11 void test11() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* getelementptr inbounds ([63 x i8]* @gbuf, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) strcpy(gp = gbuf, "Hi there"); } +// CHECK: define void @test12 void test12() { // CHECK-NOT: __strcpy_chk // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) strcpy(++gp, "Hi there"); } +// CHECK: define void @test13 void test13() { // CHECK-NOT: __strcpy_chk // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) strcpy(gp++, "Hi there"); } +// CHECK: define void @test14 void test14() { // CHECK-NOT: __strcpy_chk // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) strcpy(--gp, "Hi there"); } +// CHECK: define void @test15 void test15() { // CHECK-NOT: 
__strcpy_chk // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{..*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) strcpy(gp--, "Hi there"); } +// CHECK: define void @test16 void test16() { // CHECK-NOT: __strcpy_chk // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0)) diff --git a/test/CodeGen/pascal-wchar-string.c b/test/CodeGen/pascal-wchar-string.c new file mode 100644 index 000000000000..89e4de489f09 --- /dev/null +++ b/test/CodeGen/pascal-wchar-string.c @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -emit-llvm -o - %s -fpascal-strings -fshort-wchar | FileCheck %s +// rdar: // 8020384 + +extern void abort (void); + +typedef unsigned short UInt16; + +typedef UInt16 UniChar; + +int main(int argc, char* argv[]) +{ + + char st[] = "\pfoo"; // pascal string + UniChar wt[] = L"\pbar"; // pascal Unicode string + UniChar wt1[] = L"\p"; + UniChar wt2[] = L"\pgorf"; + + if (st[0] != 3) + abort (); + if (wt[0] != 3) + abort (); + if (wt1[0] != 0) + abort (); + if (wt2[0] != 4) + abort (); + + return 0; +} + +// CHECK: c"\03\00b\00a\00r\00\00\00" +// CHECK: c"\04\00g\00o\00r\00f\00\00\00" diff --git a/test/CodeGen/pragma-pack-1.c b/test/CodeGen/pragma-pack-1.c index f5d301639e05..c30a62ac3c4e 100644 --- a/test/CodeGen/pragma-pack-1.c +++ b/test/CodeGen/pragma-pack-1.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm -o - +// RUN: %clang_cc1 -emit-llvm -o - %s // PR4610 #pragma pack(4) diff --git a/test/CodeGen/regparm.c b/test/CodeGen/regparm.c index b60f8c70d762..ec5cbab16a5b 100644 --- a/test/CodeGen/regparm.c +++ b/test/CodeGen/regparm.c @@ -14,6 +14,11 @@ FType bar; static void FASTCALL reduced(char b, double c, foo* d, double e, int f); +// PR7025 +void FASTCALL f1(int i, int j, int k); +// CHECK: define void @f1(i32 inreg %i, i32 inreg %j, i32 %k) +void f1(int i, int j, int k) { } + int main(void) { // CHECK: call void @reduced(i8 signext inreg 0, {{.*}} %struct.anon* inreg null diff --git a/test/CodeGen/statements.c b/test/CodeGen/statements.c index e3835f062a69..7ed82add69b0 100644 --- a/test/CodeGen/statements.c +++ b/test/CodeGen/statements.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 < %s -emit-llvm +// RUN: %clang_cc1 -Wreturn-type < %s -emit-llvm void test1(int x) { switch (x) { diff --git a/test/CodeGen/struct-init.c b/test/CodeGen/struct-init.c index 88b57a26478a..926e5a7f5dd9 100644 --- a/test/CodeGen/struct-init.c +++ b/test/CodeGen/struct-init.c @@ -10,3 +10,11 @@ char a; const zend_ini_entry ini_entries[] = { { ((char*)&((zend_ini_entry*)0)->mh_arg1 - (char*)(void*)0)}, }; + +// PR7564 +struct GLGENH { + int : 27; + int EMHJAA : 1; +}; + +struct GLGENH ABHFBF = {1}; diff --git a/test/CodeGen/typedef-func.c b/test/CodeGen/typedef-func.c index bc08b359d70f..1467e8b1f74a 100644 --- a/test/CodeGen/typedef-func.c +++ b/test/CodeGen/typedef-func.c @@ -2,7 +2,7 @@ // PR2414 struct mad_frame{}; -enum mad_flow {}; +enum mad_flow {ont}; typedef enum mad_flow filter_func_t(void *, struct mad_frame *); diff --git a/test/CodeGen/volatile.c b/test/CodeGen/volatile.c index db87a375152a..1a996defcf01 100644 --- a/test/CodeGen/volatile.c +++ b/test/CodeGen/volatile.c @@ -1,8 +1,8 @@ // RUN: %clang_cc1 -emit-llvm < %s -o %t -// RUN: grep volatile %t | count 29 +// RUN: grep volatile %t | count 28 // RUN: grep memcpy %t | count 7 -// The number 29 comes from the current codegen for volatile loads; +// The number 28 comes from the current codegen for volatile loads; // if this number changes, it's not necessarily something wrong, but // 
something has changed to affect volatile load/store codegen
@@ -64,7 +64,7 @@ int main() {
   i=vV[3];
   i=VE.yx[1];
   i=vVE.zy[1];
-  i = aggFct().x;
+  i = aggFct().x; // Note: not volatile
   i=vtS;
diff --git a/test/CodeGen/x86_64-arguments.c b/test/CodeGen/x86_64-arguments.c
index 47b2eb1585e2..cc318dc749b3 100644
--- a/test/CodeGen/x86_64-arguments.c
+++ b/test/CodeGen/x86_64-arguments.c
@@ -45,7 +45,7 @@ void f7(e7 a0) {
 // Test merging/passing of upper eightbyte with X87 class.
 //
 // CHECK: define %0 @f8_1()
-// CHECK: define void @f8_2(%0)
+// CHECK: define void @f8_2(i64 %a0.coerce0, double %a0.coerce1)
 union u8 {
   long double a;
   int b;
@@ -56,7 +56,7 @@ void f8_2(union u8 a0) {}
 // CHECK: define i64 @f9()
 struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }
 
-// CHECK: define void @f10(i64)
+// CHECK: define void @f10(i64 %a0.coerce)
 struct s10 { int a; int b; int : 0; };
 void f10(struct s10 a0) {}
 
@@ -64,14 +64,14 @@ void f10(struct s10 a0) {}
 union { long double a; float b; } f11() { while (1) {} }
 
 // CHECK: define i64 @f12_0()
-// CHECK: define void @f12_1(i64)
+// CHECK: define void @f12_1(i64 %a0.coerce)
 struct s12 { int a __attribute__((aligned(16))); };
 struct s12 f12_0(void) { while (1) {} }
 void f12_1(struct s12 a0) {}
 
 // Check that sret parameter is accounted for when checking available integer
 // registers.
-// CHECK: define void @f13(%struct.s13_0* sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, %struct.s13_1* byval %e, i32 %f)
+// CHECK: define void @f13(%struct.s13_0* sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, {{.*}}* byval %e, i32 %f)
 struct s13_0 { long long f0[3]; };
 struct s13_1 { long long f0[2]; };
@@ -92,10 +92,10 @@ void f16(float a, float b, float c, float d, float e, float f, float g, float h,
 void f17(float a, float b, float c, float d, float e, float f, float g, float h,
          long double X) {}
 
-// Check for valid coercion.
-// CHECK: [[f18_t0:%.*]] = bitcast i64* {{.*}} to %struct.f18_s0*
-// CHECK: [[f18_t1:%.*]] = load %struct.f18_s0* [[f18_t0]], align 1
-// CHECK: store %struct.f18_s0 [[f18_t1]], %struct.f18_s0* %f18_arg1
+// Check for valid coercion. The struct should be passed/returned as i32, not
+// as i64 for better code quality.
+// rdar://8135035
+// CHECK: define void @f18(i32 %a, i32 %f18_arg1.coerce)
 struct f18_s0 { int f0; };
 void f18(int a, struct f18_s0 f18_arg1) { while (1) {} }
@@ -113,3 +113,21 @@ struct __attribute__((aligned(32))) s20 {
   int y;
 };
 void f20(struct s20 x) {}
+
+struct StringRef {
+  long x;
+  const char *Ptr;
+};
+
+// rdar://7375902
+// CHECK: define i8* @f21(i64 %S.coerce0, i8* %S.coerce1)
+const char *f21(struct StringRef S) { return S.x+S.Ptr; }
+
+// PR7567
+typedef __attribute__ ((aligned(16))) struct f22s { unsigned long long x[2]; } L;
+void f22(L x, L y) { }
+// CHECK: @f22
+// CHECK: %x = alloca{{.*}}, align 16
+// CHECK: %y = alloca{{.*}}, align 16
+
+
