Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/2004-02-20-Builtins.c  5
-rw-r--r--  test/CodeGen/2005-07-20-SqrtNoErrno.c  10
-rw-r--r--  test/CodeGen/2009-10-20-GlobalDebug.c  4
-rw-r--r--  test/CodeGen/2010-08-10-DbgConstant.c  3
-rw-r--r--  test/CodeGen/Inputs/sanitizer-special-case-list.sanitized.txt  4
-rw-r--r--  test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized1.txt  2
-rw-r--r--  test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized2.txt  4
-rw-r--r--  test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized3.txt  4
-rw-r--r--  test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized4.txt  4
-rw-r--r--  test/CodeGen/adc-builtins.c  4
-rw-r--r--  test/CodeGen/address-safety-attr-kasan-hwasan.cpp  53
-rw-r--r--  test/CodeGen/arm-metadata.c  2
-rw-r--r--  test/CodeGen/arm64-microsoft-intrinsics.c  26
-rw-r--r--  test/CodeGen/attr-availability.c  6
-rw-r--r--  test/CodeGen/attr-mprefer-vector-width.c  14
-rw-r--r--  test/CodeGen/attr-target-x86.c  25
-rw-r--r--  test/CodeGen/avx-builtins.c  6
-rw-r--r--  test/CodeGen/avx2-builtins.c  32
-rw-r--r--  test/CodeGen/avx512-reduceMinMaxIntrin.c  2852
-rw-r--r--  test/CodeGen/avx512bw-builtins.c  330
-rw-r--r--  test/CodeGen/avx512cdintrin.c  38
-rw-r--r--  test/CodeGen/avx512dq-builtins.c  16
-rw-r--r--  test/CodeGen/avx512f-builtins.c  437
-rw-r--r--  test/CodeGen/avx512ifmavl-builtins.c  4
-rw-r--r--  test/CodeGen/avx512vl-builtins.c  205
-rw-r--r--  test/CodeGen/avx512vlbw-builtins.c  347
-rw-r--r--  test/CodeGen/avx512vlcd-builtins.c  56
-rw-r--r--  test/CodeGen/avx512vldq-builtins.c  24
-rw-r--r--  test/CodeGen/avx512vpopcntdqvlintrin.c  73
-rw-r--r--  test/CodeGen/blocks-opencl.cl  17
-rw-r--r--  test/CodeGen/bounds-checking.c  4
-rw-r--r--  test/CodeGen/builtin-clflushopt.c  6
-rw-r--r--  test/CodeGen/builtin-clwb.c  9
-rw-r--r--  test/CodeGen/builtin-clzero.c  4
-rw-r--r--  test/CodeGen/builtin-cpu-is.c  53
-rw-r--r--  test/CodeGen/builtin-cpu-supports.c  4
-rw-r--r--  test/CodeGen/builtin-sqrt.c  15
-rw-r--r--  test/CodeGen/builtins-hexagon.c  4158
-rw-r--r--  test/CodeGen/builtins-nvptx-ptx50.cu  23
-rw-r--r--  test/CodeGen/builtins-nvptx-ptx60.cu  97
-rw-r--r--  test/CodeGen/builtins-nvptx-sm_70.cu  166
-rw-r--r--  test/CodeGen/builtins-nvptx.c  33
-rw-r--r--  test/CodeGen/builtins-overflow.c  119
-rw-r--r--  test/CodeGen/builtins-x86.c  21
-rw-r--r--  test/CodeGen/builtins.c  536
-rw-r--r--  test/CodeGen/catch-undef-behavior.c  3
-rw-r--r--  test/CodeGen/cetintrin.c  84
-rw-r--r--  test/CodeGen/cfi-icall-cross-dso.c  10
-rw-r--r--  test/CodeGen/cfi-icall-generalize.c  19
-rw-r--r--  test/CodeGen/cfi-icall.c  10
-rw-r--r--  test/CodeGen/cfi-unrelated-cast.cpp  37
-rw-r--r--  test/CodeGen/complex-builtins.c  206
-rw-r--r--  test/CodeGen/complex-libcalls.c  208
-rw-r--r--  test/CodeGen/darwin-ppc-varargs.c  28
-rw-r--r--  test/CodeGen/debug-info-attributed-stmt.c  12
-rw-r--r--  test/CodeGen/debug-info-block-vars.c  20
-rw-r--r--  test/CodeGen/debug-info-global-constant.c  3
-rw-r--r--  test/CodeGen/debug-info-lto.c  4
-rw-r--r--  test/CodeGen/debug-info-preprocessed-file.i  11
-rw-r--r--  test/CodeGen/debug-info-static-const-fp.c  11
-rw-r--r--  test/CodeGen/debug-info-static.c  2
-rw-r--r--  test/CodeGen/debug-info-vla.c  3
-rw-r--r--  test/CodeGen/finite-math.c  2
-rw-r--r--  test/CodeGen/fma-builtins.c  104
-rw-r--r--  test/CodeGen/fma4-builtins.c  76
-rw-r--r--  test/CodeGen/fp16-ops.c  55
-rw-r--r--  test/CodeGen/fp16vec-ops.c  163
-rw-r--r--  test/CodeGen/function-attributes.c  1
-rw-r--r--  test/CodeGen/hexagon-inline-asm.c  8
-rw-r--r--  test/CodeGen/instrument-functions.c  30
-rw-r--r--  test/CodeGen/libcall-declarations.c  600
-rw-r--r--  test/CodeGen/libcalls.c  51
-rw-r--r--  test/CodeGen/linux-arm-atomic.c  1
-rw-r--r--  test/CodeGen/long-call-attr.c  21
-rw-r--r--  test/CodeGen/mangle-blocks.c  6
-rw-r--r--  test/CodeGen/math-builtins.c  578
-rw-r--r--  test/CodeGen/math-libcalls.c  547
-rw-r--r--  test/CodeGen/mcount.c  8
-rw-r--r--  test/CodeGen/mozilla-ms-inline-asm.c  2
-rw-r--r--  test/CodeGen/ms-annotation.c  26
-rw-r--r--  test/CodeGen/ms-inline-asm-64.c  8
-rw-r--r--  test/CodeGen/ms-inline-asm-enums.cpp  55
-rw-r--r--  test/CodeGen/ms-inline-asm-variables.c  35
-rw-r--r--  test/CodeGen/ms-inline-asm.c  183
-rw-r--r--  test/CodeGen/ms-inline-asm.cpp  22
-rw-r--r--  test/CodeGen/ms-intrinsics.c  23
-rw-r--r--  test/CodeGen/nobuiltin.c  4
-rw-r--r--  test/CodeGen/noplt.c  9
-rw-r--r--  test/CodeGen/nullptr-arithmetic.c  47
-rw-r--r--  test/CodeGen/pascal-wchar-string.c  2
-rw-r--r--  test/CodeGen/ppc-vector-compare.cc  34
-rw-r--r--  test/CodeGen/pr34021.c  25
-rw-r--r--  test/CodeGen/pragma-comment.c  1
-rw-r--r--  test/CodeGen/preserve-call-conv.c  3
-rw-r--r--  test/CodeGen/profile-sample-accurate.c  7
-rw-r--r--  test/CodeGen/push-hidden-visibility-subclass.cpp  20
-rw-r--r--  test/CodeGen/sanitizer-special-case-list.c  26
-rw-r--r--  test/CodeGen/sse2-builtins.c  16
-rw-r--r--  test/CodeGen/ssse3-builtins.c  12
-rw-r--r--  test/CodeGen/string-literal-short-wstring.c  4
-rw-r--r--  test/CodeGen/string-literal-unicode-conversion.c  2
-rw-r--r--  test/CodeGen/target-builtin-noerror.c  38
-rw-r--r--  test/CodeGen/target-data.c  4
-rw-r--r--  test/CodeGen/tbaa-array.cpp  18
-rw-r--r--  test/CodeGen/tbaa-cast.cpp  23
-rw-r--r--  test/CodeGen/tbaa-for-vptr.cpp  6
-rw-r--r--  test/CodeGen/tbaa-reference.cpp  37
-rw-r--r--  test/CodeGen/tbm-builtins.c  96
-rw-r--r--  test/CodeGen/thinlto-debug-pm.c  10
-rw-r--r--  test/CodeGen/thinlto-emit-llvm.c  2
-rw-r--r--  test/CodeGen/ubsan-builtin-checks.c  44
-rw-r--r--  test/CodeGen/ubsan-pass-object-size.c  68
-rw-r--r--  test/CodeGen/unsigned-overflow-minimal.c  21
-rw-r--r--  test/CodeGen/verify-debuginfo.ll  17
-rw-r--r--  test/CodeGen/wchar-size.c  2
-rw-r--r--  test/CodeGen/x86-GCC-inline-asm-Y-constraints.c  68
-rw-r--r--  test/CodeGen/x86_32-xsave.c  60
-rw-r--r--  test/CodeGen/x86_64-instrument-functions.c  38
-rw-r--r--  test/CodeGen/x86_64-xsave.c  120
-rw-r--r--  test/CodeGen/xray-always-emit-customevent.cpp  10
120 files changed, 10535 insertions, 3490 deletions
diff --git a/test/CodeGen/2004-02-20-Builtins.c b/test/CodeGen/2004-02-20-Builtins.c
index 9be0523b4afd..13f970127d60 100644
--- a/test/CodeGen/2004-02-20-Builtins.c
+++ b/test/CodeGen/2004-02-20-Builtins.c
@@ -1,5 +1,8 @@
-// RUN: %clang_cc1 %s -emit-llvm -o - | not grep builtin
+// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
double sqrt(double x);
+
+// CHECK-LABEL: @zsqrtxxx
+// CHECK-NOT: builtin
void zsqrtxxx(float num) {
num = sqrt(num);
}
diff --git a/test/CodeGen/2005-07-20-SqrtNoErrno.c b/test/CodeGen/2005-07-20-SqrtNoErrno.c
deleted file mode 100644
index 96761e4cfb2e..000000000000
--- a/test/CodeGen/2005-07-20-SqrtNoErrno.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - | FileCheck %s
-// llvm.sqrt has undefined behavior on negative inputs, so it is
-// inappropriate to translate C/C++ sqrt to this.
-float sqrtf(float x);
-float foo(float X) {
- // CHECK: foo
- // CHECK: call float @sqrtf(float %
- // Check that this is marked readonly when errno is ignored.
- return sqrtf(X);
-}
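The deleted test above encoded the reason C's sqrtf cannot be lowered to llvm.sqrt: per the comment being removed, the intrinsic (as documented at the time) had undefined behavior for negative inputs, while the libm call must return NaN and set errno to EDOM. A minimal sketch of the observable difference, assuming nothing beyond standard libm:

#include <errno.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  volatile float x = -1.0f;   /* volatile so the call is not constant-folded */
  errno = 0;
  float r = sqrtf(x);         /* libm: returns NaN and sets errno to EDOM */
  printf("r=%f errno=%d\n", r, errno);
  /* Lowering this call to llvm.sqrt would lose the errno side effect, which
     is why codegen must keep it as a call to @sqrtf when errno matters. */
  return 0;
}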
diff --git a/test/CodeGen/2009-10-20-GlobalDebug.c b/test/CodeGen/2009-10-20-GlobalDebug.c
index 0d7c759f905e..c8c247f6b041 100644
--- a/test/CodeGen/2009-10-20-GlobalDebug.c
+++ b/test/CodeGen/2009-10-20-GlobalDebug.c
@@ -10,11 +10,11 @@ int main() {
return 0;
}
-// CHECK: [[L]] = !DIGlobalVariableExpression(var: [[LV:.*]])
+// CHECK: [[L]] = !DIGlobalVariableExpression(var: [[LV:.*]], expr: !DIExpression())
// CHECK: [[LV]] = distinct !DIGlobalVariable(name: "localstatic"
// CHECK-NOT: linkageName:
// CHECK-SAME: line: 9,
-// CHECK: [[G]] = !DIGlobalVariableExpression(var: [[GV:.*]])
+// CHECK: [[G]] = !DIGlobalVariableExpression(var: [[GV:.*]], expr: !DIExpression())
// CHECK: [[GV]] = distinct !DIGlobalVariable(name: "global"
// CHECK-NOT: linkageName:
// CHECK-SAME: line: 7,
diff --git a/test/CodeGen/2010-08-10-DbgConstant.c b/test/CodeGen/2010-08-10-DbgConstant.c
index 68947edd526a..ad9d56618687 100644
--- a/test/CodeGen/2010-08-10-DbgConstant.c
+++ b/test/CodeGen/2010-08-10-DbgConstant.c
@@ -1,6 +1,5 @@
// RUN: %clang_cc1 -S -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
-// CHECK: !DIGlobalVariableExpression(var: [[VAR:.*]], expr: [[EXPR:![0-9]+]])
-// CHECK: [[EXPR]] = !DIExpression(DW_OP_constu, 201, DW_OP_stack_value)
+// CHECK: !DIGlobalVariableExpression(var: [[VAR:.*]], expr: !DIExpression(DW_OP_constu, 201, DW_OP_stack_value))
static const unsigned int ro = 201;
void bar(int);
diff --git a/test/CodeGen/Inputs/sanitizer-special-case-list.sanitized.txt b/test/CodeGen/Inputs/sanitizer-special-case-list.sanitized.txt
new file mode 100644
index 000000000000..a2afde027b45
--- /dev/null
+++ b/test/CodeGen/Inputs/sanitizer-special-case-list.sanitized.txt
@@ -0,0 +1,4 @@
+[unsigned-integer-overflow]
+fun:*cfi*
+[cfi]
+fun:*overflow*
diff --git a/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized1.txt b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized1.txt
new file mode 100644
index 000000000000..45ad57bbebff
--- /dev/null
+++ b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized1.txt
@@ -0,0 +1,2 @@
+fun:*cfi*
+fun:*overflow*
diff --git a/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized2.txt b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized2.txt
new file mode 100644
index 000000000000..375b246f16c5
--- /dev/null
+++ b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized2.txt
@@ -0,0 +1,4 @@
+[cfi]
+fun:*cfi*
+[unsigned-integer-overflow]
+fun:*overflow*
diff --git a/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized3.txt b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized3.txt
new file mode 100644
index 000000000000..b038fee52b20
--- /dev/null
+++ b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized3.txt
@@ -0,0 +1,4 @@
+[cfi-icall]
+fun:*cfi*
+[unsigned-integer-overflow]
+fun:*overflow*
diff --git a/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized4.txt b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized4.txt
new file mode 100644
index 000000000000..b31747ec3e10
--- /dev/null
+++ b/test/CodeGen/Inputs/sanitizer-special-case-list.unsanitized4.txt
@@ -0,0 +1,4 @@
+[c*]
+fun:*cfi*
+[u*]
+fun:*overflow*
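The four input files above exercise the sectioned special-case-list format: a [section] header names the sanitizer(s) it applies to (globs such as [c*] are allowed), and each fun: glob beneath it exempts matching functions from that sanitizer only. A hedged sketch of how such a list is consumed, modeled on the sanitizer-special-case-list.c test added later in this diff (its exact RUN lines may differ):

// RUN: %clang_cc1 -fsanitize=unsigned-integer-overflow,cfi-icall \
// RUN:   -fsanitize-blacklist=%S/Inputs/sanitizer-special-case-list.sanitized.txt \
// RUN:   -emit-llvm %s -o - | FileCheck %s

// "overflow" matches fun:*overflow*, but only under the [cfi] section, so the
// unsigned-integer-overflow instrumentation is still emitted here.
unsigned overflow(unsigned a, unsigned b) { return a + b; }
// CHECK-LABEL: @overflow
// CHECK: @llvm.uadd.with.overflow.i32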
diff --git a/test/CodeGen/adc-builtins.c b/test/CodeGen/adc-builtins.c
index 0d8d6fa03476..d41fa8f446e6 100644
--- a/test/CodeGen/adc-builtins.c
+++ b/test/CodeGen/adc-builtins.c
@@ -1,6 +1,4 @@
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
-
-#define __MM_MALLOC_H
+// RUN: %clang_cc1 -ffreestanding -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
#include <x86intrin.h>
diff --git a/test/CodeGen/address-safety-attr-kasan-hwasan.cpp b/test/CodeGen/address-safety-attr-kasan-hwasan.cpp
new file mode 100644
index 000000000000..7a84b798e4b9
--- /dev/null
+++ b/test/CodeGen/address-safety-attr-kasan-hwasan.cpp
@@ -0,0 +1,53 @@
+// Make sure the sanitize_address attribute is emitted when using both ASan and KASan.
+// Also document that __attribute__((no_sanitize_address)) doesn't disable KASan instrumentation.
+
+/// RUN: %clang_cc1 -triple i386-unknown-linux -disable-O0-optnone -emit-llvm -o - %s | FileCheck -check-prefix=CHECK-NOASAN %s
+/// RUN: %clang_cc1 -triple i386-unknown-linux -fsanitize=address -disable-O0-optnone -emit-llvm -o - %s | FileCheck -check-prefix=CHECK-ASAN %s
+/// RUN: %clang_cc1 -triple i386-unknown-linux -fsanitize=kernel-address -disable-O0-optnone -emit-llvm -o - %s | FileCheck -check-prefix=CHECK-KASAN %s
+/// RUN: %clang_cc1 -triple i386-unknown-linux -fsanitize=hwaddress -disable-O0-optnone -emit-llvm -o - %s | FileCheck -check-prefix=CHECK-HWASAN %s
+
+int HasSanitizeAddress() {
+ return 1;
+}
+// CHECK-NOASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-ASAN: Function Attrs: noinline nounwind sanitize_address
+// CHECK-KASAN: Function Attrs: noinline nounwind sanitize_address
+// CHECK-HWASAN: Function Attrs: noinline nounwind sanitize_hwaddress
+
+__attribute__((no_sanitize("address")))
+int NoSanitizeQuoteAddress() {
+ return 0;
+}
+// CHECK-NOASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-ASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-KASAN: {{Function Attrs: noinline nounwind sanitize_address$}}
+// CHECK-HWASAN: {{Function Attrs: noinline nounwind sanitize_hwaddress$}}
+
+__attribute__((no_sanitize_address))
+int NoSanitizeAddress() {
+ return 0;
+}
+// CHECK-NOASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-ASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-KASAN: {{Function Attrs: noinline nounwind sanitize_address$}}
+// CHECK-HWASAN: {{Function Attrs: noinline nounwind sanitize_hwaddress$}}
+
+__attribute__((no_sanitize("kernel-address")))
+int NoSanitizeKernelAddress() {
+ return 0;
+}
+
+// CHECK-NOASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-ASAN: {{Function Attrs: noinline nounwind sanitize_address$}}
+// CHECK-KASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-HWASAN: {{Function Attrs: noinline nounwind sanitize_hwaddress$}}
+
+__attribute__((no_sanitize("hwaddress")))
+int NoSanitizeHWAddress() {
+ return 0;
+}
+
+// CHECK-NOASAN: {{Function Attrs: noinline nounwind$}}
+// CHECK-ASAN: {{Function Attrs: noinline nounwind sanitize_address$}}
+// CHECK-KASAN: {{Function Attrs: noinline nounwind sanitize_address$}}
+// CHECK-HWASAN: {{Function Attrs: noinline nounwind$}}
diff --git a/test/CodeGen/arm-metadata.c b/test/CodeGen/arm-metadata.c
index 1f7756cc8d48..4f3e2dba219b 100644
--- a/test/CodeGen/arm-metadata.c
+++ b/test/CodeGen/arm-metadata.c
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -triple armv7a-linux-gnueabi -emit-llvm -o - %s | FileCheck -check-prefix=DEFAULT %s
// RUN: %clang_cc1 -triple armv7a-linux-gnueabi -emit-llvm -o - %s -fshort-enums | FileCheck -check-prefix=SHORT-ENUM %s
-// RUN: %clang_cc1 -triple armv7a-linux-gnueabi -emit-llvm -o - %s -fshort-wchar | FileCheck -check-prefix=SHORT-WCHAR %s
+// RUN: %clang_cc1 -triple armv7a-linux-gnueabi -emit-llvm -o - %s -fwchar-type=short -fno-signed-wchar | FileCheck -check-prefix=SHORT-WCHAR %s
// DEFAULT: !{{[0-9]+}} = !{i32 1, !"wchar_size", i32 4}
// DEFAULT: !{{[0-9]+}} = !{i32 1, !"min_enum_size", i32 4}
diff --git a/test/CodeGen/arm64-microsoft-intrinsics.c b/test/CodeGen/arm64-microsoft-intrinsics.c
new file mode 100644
index 000000000000..ff802e7f9b85
--- /dev/null
+++ b/test/CodeGen/arm64-microsoft-intrinsics.c
@@ -0,0 +1,26 @@
+// RUN: %clang_cc1 -triple arm64-windows -fms-compatibility -emit-llvm -o - %s \
+// RUN: | FileCheck %s -check-prefix CHECK-MSVC
+
+// RUN: not %clang_cc1 -triple arm64-linux -Werror -S -o /dev/null %s 2>&1 \
+// RUN: | FileCheck %s -check-prefix CHECK-LINUX
+
+void check__dmb(void) {
+ __dmb(0);
+}
+
+// CHECK-MSVC: @llvm.aarch64.dmb(i32 0)
+// CHECK-LINUX: error: implicit declaration of function '__dmb'
+
+void check__dsb(void) {
+ __dsb(0);
+}
+
+// CHECK-MSVC: @llvm.aarch64.dsb(i32 0)
+// CHECK-LINUX: error: implicit declaration of function '__dsb'
+
+void check__isb(void) {
+ __isb(0);
+}
+
+// CHECK-MSVC: @llvm.aarch64.isb(i32 0)
+// CHECK-LINUX: error: implicit declaration of function '__isb'
diff --git a/test/CodeGen/attr-availability.c b/test/CodeGen/attr-availability.c
index ccbbb62f8c1f..87e137cfe965 100644
--- a/test/CodeGen/attr-availability.c
+++ b/test/CodeGen/attr-availability.c
@@ -8,9 +8,9 @@
void f2();
void f2() { }
-// CHECK-10_4-LABEL: define void @f3
-// CHECK-10_5-LABEL: define void @f3
-// CHECK-10_6-LABEL: define void @f3
+// CHECK-10_4-LABEL: define hidden void @f3
+// CHECK-10_5-LABEL: define hidden void @f3
+// CHECK-10_6-LABEL: define hidden void @f3
void f3() __attribute__((availability(macosx,introduced=10.5)));
void f3() { }
diff --git a/test/CodeGen/attr-mprefer-vector-width.c b/test/CodeGen/attr-mprefer-vector-width.c
new file mode 100644
index 000000000000..30edba5852f9
--- /dev/null
+++ b/test/CodeGen/attr-mprefer-vector-width.c
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -mprefer-vector-width=128 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK128
+// RUN: %clang_cc1 -mprefer-vector-width=256 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK256
+// RUN: %clang_cc1 -mprefer-vector-width=none -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECKNONE
+
+int baz(int a) { return 4; }
+
+// CHECK128: baz{{.*}} #0
+// CHECK128: #0 = {{.*}}"prefer-vector-width"="128"
+
+// CHECK256: baz{{.*}} #0
+// CHECK256: #0 = {{.*}}"prefer-vector-width"="256"
+
+// CHECKNONE: baz{{.*}} #0
+// CHECKNONE-NOT: #0 = {{.*}}"prefer-vector-width"="none"
diff --git a/test/CodeGen/attr-target-x86.c b/test/CodeGen/attr-target-x86.c
index f2777679aec1..6ec2d6578dc4 100644
--- a/test/CodeGen/attr-target-x86.c
+++ b/test/CodeGen/attr-target-x86.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -target-cpu x86-64 -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple i686-linux-gnu -target-cpu i686 -emit-llvm %s -o - | FileCheck %s
int baz(int a) { return 4; }
@@ -10,6 +10,7 @@ int __attribute__((target("fpmath=387"))) koala(int a) { return 4; }
int __attribute__((target("no-sse2"))) echidna(int a) { return 4; }
int __attribute__((target("sse4"))) panda(int a) { return 4; }
+int __attribute__((target("no-sse4"))) narwhal(int a) { return 4; }
int bar(int a) { return baz(a) + foo(a); }
@@ -18,7 +19,7 @@ int __attribute__((target("no-aes, arch=ivybridge"))) qax(int a) { return 4; }
int __attribute__((target("no-mmx"))) qq(int a) { return 40; }
-int __attribute__((target("arch=lakemont"))) lake(int a) { return 4; }
+int __attribute__((target("arch=lakemont,mmx"))) lake(int a) { return 4; }
// Check that we emit the additional subtarget and cpu features for foo and not for baz or bar.
// CHECK: baz{{.*}} #0
@@ -29,15 +30,17 @@ int __attribute__((target("arch=lakemont"))) lake(int a) { return 4; }
// CHECK: koala{{.*}} #0
// CHECK: echidna{{.*}} #2
// CHECK: panda{{.*}} #3
+// CHECK: narwhal{{.*}} #4
// CHECK: bar{{.*}} #0
// CHECK: qux{{.*}} #1
-// CHECK: qax{{.*}} #4
-// CHECK: qq{{.*}} #5
-// CHECK: lake{{.*}} #6
-// CHECK: #0 = {{.*}}"target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87"
+// CHECK: qax{{.*}} #5
+// CHECK: qq{{.*}} #6
+// CHECK: lake{{.*}} #7
+// CHECK: #0 = {{.*}}"target-cpu"="i686" "target-features"="+x87"
// CHECK: #1 = {{.*}}"target-cpu"="ivybridge" "target-features"="+aes,+avx,+cx16,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt"
-// CHECK: #2 = {{.*}}"target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+x87,-aes,-avx,-avx2,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vl,-avx512vpopcntdq,-f16c,-fma,-fma4,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-xop,-xsave,-xsaveopt"
-// CHECK: #3 = {{.*}}"target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87"
-// CHECK: #4 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cx16,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-aes"
-// CHECK: #5 = {{.*}}"target-cpu"="x86-64" "target-features"="+fxsr,+sse,+sse2,+x87,-3dnow,-3dnowa,-mmx"
-// CHECK: #6 = {{.*}}"target-cpu"="lakemont" "target-features"="+mmx,+sse,+sse2"
+// CHECK: #2 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-aes,-avx,-avx2,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vl,-avx512vpopcntdq,-f16c,-fma,-fma4,-pclmul,-sha,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-xop,-xsave,-xsaveopt"
+// CHECK: #3 = {{.*}}"target-cpu"="i686" "target-features"="+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87"
+// CHECK: #4 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-avx,-avx2,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vl,-avx512vpopcntdq,-f16c,-fma,-fma4,-sse4.1,-sse4.2,-xop,-xsave,-xsaveopt"
+// CHECK: #5 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cx16,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-aes"
+// CHECK: #6 = {{.*}}"target-cpu"="i686" "target-features"="+x87,-3dnow,-3dnowa,-mmx"
+// CHECK: #7 = {{.*}}"target-cpu"="lakemont" "target-features"="+mmx"
diff --git a/test/CodeGen/avx-builtins.c b/test/CodeGen/avx-builtins.c
index 31a08440d061..4e77ad166ce0 100644
--- a/test/CodeGen/avx-builtins.c
+++ b/test/CodeGen/avx-builtins.c
@@ -678,19 +678,19 @@ __m256 test_mm256_permute_ps(__m256 A) {
__m256d test_mm256_permute2f128_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_permute2f128_pd
- // CHECK: call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, i8 49)
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_permute2f128_pd(A, B, 0x31);
}
__m256 test_mm256_permute2f128_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_permute2f128_ps
- // CHECK: call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, i8 19)
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_permute2f128_ps(A, B, 0x13);
}
__m256i test_mm256_permute2f128_si256(__m256i A, __m256i B) {
// CHECK-LABEL: test_mm256_permute2f128_si256
- // CHECK: call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, i8 32)
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
return _mm256_permute2f128_si256(A, B, 0x20);
}
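In the permute2f128 hunks above, the x86-specific vperm2f128 intrinsic calls are replaced by generic shufflevector instructions, because the 8-bit immediate only ever selects 128-bit lanes. A sketch of the decoding for the non-zeroing cases, using the <4 x double> test as the worked example (the helper name is hypothetical):

/* Each nibble of imm picks a 128-bit source lane: 0-1 from A, 2-3 from B.
   Bits 3 and 7 (lane zeroing) are ignored in this sketch. */
static void perm2f128_indices(unsigned imm, int idx[4]) {
  for (int half = 0; half < 2; ++half) {
    unsigned sel = (imm >> (4 * half)) & 0x3;
    idx[2 * half + 0] = (int)(2 * sel + 0);
    idx[2 * half + 1] = (int)(2 * sel + 1);
  }
}
/* perm2f128_indices(0x31, idx) yields {2, 3, 6, 7}: lane 1 of A, then lane 1
   of B, exactly the <4 x i32> <i32 2, i32 3, i32 6, i32 7> mask checked above. */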
diff --git a/test/CodeGen/avx2-builtins.c b/test/CodeGen/avx2-builtins.c
index 10f3e715de9b..f79f60e6db78 100644
--- a/test/CodeGen/avx2-builtins.c
+++ b/test/CodeGen/avx2-builtins.c
@@ -8,19 +8,25 @@
__m256i test_mm256_abs_epi8(__m256i a) {
// CHECK-LABEL: test_mm256_abs_epi8
- // CHECK: call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %{{.*}})
+ // CHECK: [[SUB:%.*]] = sub <32 x i8> zeroinitializer, %{{.*}}
+ // CHECK: [[CMP:%.*]] = icmp sgt <32 x i8> %{{.*}}, zeroinitializer
+ // CHECK: select <32 x i1> [[CMP]], <32 x i8> %{{.*}}, <32 x i8> [[SUB]]
return _mm256_abs_epi8(a);
}
__m256i test_mm256_abs_epi16(__m256i a) {
// CHECK-LABEL: test_mm256_abs_epi16
- // CHECK: call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %{{.*}})
+ // CHECK: [[SUB:%.*]] = sub <16 x i16> zeroinitializer, %{{.*}}
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i16> %{{.*}}, zeroinitializer
+ // CHECK: select <16 x i1> [[CMP]], <16 x i16> %{{.*}}, <16 x i16> [[SUB]]
return _mm256_abs_epi16(a);
}
__m256i test_mm256_abs_epi32(__m256i a) {
// CHECK-LABEL: test_mm256_abs_epi32
- // CHECK: call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %{{.*}})
+ // CHECK: [[SUB:%.*]] = sub <8 x i32> zeroinitializer, %{{.*}}
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i32> %{{.*}}, zeroinitializer
+ // CHECK: select <8 x i1> [[CMP]], <8 x i32> %{{.*}}, <8 x i32> [[SUB]]
return _mm256_abs_epi32(a);
}
@@ -99,13 +105,25 @@ __m256i test_mm256_andnot_si256(__m256i a, __m256i b) {
__m256i test_mm256_avg_epu8(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_avg_epu8
- // CHECK: call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK-NOT: call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
+ // CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
+ // CHECK: add <32 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <32 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <32 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
return _mm256_avg_epu8(a, b);
}
__m256i test_mm256_avg_epu16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_avg_epu16
- // CHECK: call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK-NOT: call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
+ // CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
+ // CHECK: add <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <16 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <16 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <16 x i32> %{{.*}} to <16 x i16>
return _mm256_avg_epu16(a, b);
}
@@ -889,8 +907,8 @@ __m256i test_mm256_packs_epu32(__m256i a, __m256i b) {
__m256i test_mm256_permute2x128_si256(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_permute2x128_si256
- // CHECK: call <4 x i64> @llvm.x86.avx2.vperm2i128(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, i8 49)
- return _mm256_permute2x128_si256(a, b, 0x31);
+ // CHECK: shufflevector <4 x i64> zeroinitializer, <4 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ return _mm256_permute2x128_si256(a, b, 0x38);
}
__m256i test_mm256_permute4x64_epi64(__m256i a) {
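Two of the hunks above check that _mm256_avg_epu8/_mm256_avg_epu16 no longer lower to the pavg intrinsics but to a widen/add/round/shift/truncate sequence; the permute2x128 hunk follows the same lane-selection idea, except that an immediate with bit 3 set (0x38) zeroes the low lane, so the shuffle's first operand becomes zeroinitializer. A scalar model of the rounding-average expansion that the CHECK lines verify:

/* Unsigned rounding average, computed in a wider type so the carry out of
   the 8-bit addition is not lost: zext, add, add 1, lshr 1, trunc. */
static unsigned char avg_epu8_scalar(unsigned char a, unsigned char b) {
  unsigned short wa = a, wb = b;              /* zext i8 -> i16 */
  return (unsigned char)((wa + wb + 1) >> 1); /* add, +1, lshr, trunc */
}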
diff --git a/test/CodeGen/avx512-reduceMinMaxIntrin.c b/test/CodeGen/avx512-reduceMinMaxIntrin.c
index 993e2964a19b..2081cef75460 100644
--- a/test/CodeGen/avx512-reduceMinMaxIntrin.c
+++ b/test/CodeGen/avx512-reduceMinMaxIntrin.c
@@ -1,439 +1,2611 @@
-// FIXME: We should not be testing with -O2 (ie, a dependency on the entire IR optimizer).
-
-// RUN: %clang_cc1 -ffreestanding %s -O2 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror |opt -instnamer -S |FileCheck %s
+// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
+// CHECK-LABEL: define i64 @test_mm512_reduce_max_epi64(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I8_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> [[TMP4]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = icmp sgt <8 x i64> [[TMP5]], [[TMP6]]
+// CHECK: [[TMP9:%.*]] = select <8 x i1> [[TMP8]], <8 x i64> [[TMP5]], <8 x i64> [[TMP6]]
+// CHECK: store <8 x i64> [[TMP9]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <8 x i64> [[TMP10]], <8 x i64> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP12]], <8 x i64> [[TMP13]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE2_I]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP17:%.*]] = icmp sgt <8 x i64> [[TMP14]], [[TMP15]]
+// CHECK: [[TMP18:%.*]] = select <8 x i1> [[TMP17]], <8 x i64> [[TMP14]], <8 x i64> [[TMP15]]
+// CHECK: store <8 x i64> [[TMP18]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <8 x i64> [[TMP19]], <8 x i64> [[TMP20]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP21]], <8 x i64> [[TMP22]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE5_I]], <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP26:%.*]] = icmp sgt <8 x i64> [[TMP23]], [[TMP24]]
+// CHECK: [[TMP27:%.*]] = select <8 x i1> [[TMP26]], <8 x i64> [[TMP23]], <8 x i64> [[TMP24]]
+// CHECK: store <8 x i64> [[TMP27]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP28]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
long long test_mm512_reduce_max_epi64(__m512i __W){
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = icmp slt <8 x i64> %shuffle1.i, %__W
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp sgt <8 x i64> %tmp1, %shuffle3.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp sgt <8 x i64> %tmp3, %shuffle6.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp4, i32 0
- // CHECK: %.elt20.i = extractelement <8 x i64> %tmp3, i32 0
- // CHECK: %shuffle6.elt.i = extractelement <8 x i64> %tmp3, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt20.i, i64 %shuffle6.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_reduce_max_epi64(__W);
}
+// CHECK-LABEL: define i64 @test_mm512_reduce_max_epu64(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I8_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> [[TMP4]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = icmp ugt <8 x i64> [[TMP5]], [[TMP6]]
+// CHECK: [[TMP9:%.*]] = select <8 x i1> [[TMP8]], <8 x i64> [[TMP5]], <8 x i64> [[TMP6]]
+// CHECK: store <8 x i64> [[TMP9]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <8 x i64> [[TMP10]], <8 x i64> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP12]], <8 x i64> [[TMP13]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE2_I]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP17:%.*]] = icmp ugt <8 x i64> [[TMP14]], [[TMP15]]
+// CHECK: [[TMP18:%.*]] = select <8 x i1> [[TMP17]], <8 x i64> [[TMP14]], <8 x i64> [[TMP15]]
+// CHECK: store <8 x i64> [[TMP18]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <8 x i64> [[TMP19]], <8 x i64> [[TMP20]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP21]], <8 x i64> [[TMP22]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE5_I]], <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP26:%.*]] = icmp ugt <8 x i64> [[TMP23]], [[TMP24]]
+// CHECK: [[TMP27:%.*]] = select <8 x i1> [[TMP26]], <8 x i64> [[TMP23]], <8 x i64> [[TMP24]]
+// CHECK: store <8 x i64> [[TMP27]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP28]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
unsigned long long test_mm512_reduce_max_epu64(__m512i __W){
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = icmp ult <8 x i64> %shuffle1.i, %__W
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp ugt <8 x i64> %tmp1, %shuffle3.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp ugt <8 x i64> %tmp3, %shuffle6.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp4, i32 0
- // CHECK: %.elt20.i = extractelement <8 x i64> %tmp3, i32 0
- // CHECK: %shuffle6.elt.i = extractelement <8 x i64> %tmp3, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt20.i, i64 %shuffle6.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_reduce_max_epu64(__W);
}
+// CHECK-LABEL: define double @test_mm512_reduce_max_pd(<8 x double> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I8_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I9_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I10_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x double>, align 64
+// CHECK: store <8 x double> %__W, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: store <8 x double> [[TMP0]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x double> [[TMP3]], <8 x double> [[TMP4]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE_I]], <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE1_I]], <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> [[TMP5]], <8 x double> [[TMP6]], <8 x double> [[TMP7]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP8]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <8 x double> [[TMP9]], <8 x double> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP11:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x double> [[TMP11]], <8 x double> [[TMP12]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE2_I]], <8 x double>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE3_I]], <8 x double>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP16:%.*]] = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> [[TMP13]], <8 x double> [[TMP14]], <8 x double> [[TMP15]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP16]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP17:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <8 x double> [[TMP17]], <8 x double> [[TMP18]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP19:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x double> [[TMP19]], <8 x double> [[TMP20]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE5_I]], <8 x double>* [[__A_ADDR_I9_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE6_I]], <8 x double>* [[__B_ADDR_I10_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I9_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I10_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP24:%.*]] = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> [[TMP21]], <8 x double> [[TMP22]], <8 x double> [[TMP23]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP24]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x double> [[TMP25]], i32 0
+// CHECK: ret double [[VECEXT_I]]
double test_mm512_reduce_max_pd(__m512d __W){
- // CHECK: %shuffle1.i = shufflevector <8 x double> %__W, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %__W, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle3.i = shufflevector <8 x double> %tmp, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %tmp, <8 x double> %shuffle3.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle6.i = shufflevector <8 x double> %tmp1, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %tmp1, <8 x double> %shuffle6.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <8 x double> %tmp2, i32 0
- // CHECK: ret double %vecext.i
return _mm512_reduce_max_pd(__W);
}
+// CHECK-LABEL: define i64 @test_mm512_reduce_min_epi64(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I8_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> [[TMP4]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = icmp sgt <8 x i64> [[TMP5]], [[TMP6]]
+// CHECK: [[TMP9:%.*]] = select <8 x i1> [[TMP8]], <8 x i64> [[TMP5]], <8 x i64> [[TMP6]]
+// CHECK: store <8 x i64> [[TMP9]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <8 x i64> [[TMP10]], <8 x i64> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP12]], <8 x i64> [[TMP13]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE2_I]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP17:%.*]] = icmp sgt <8 x i64> [[TMP14]], [[TMP15]]
+// CHECK: [[TMP18:%.*]] = select <8 x i1> [[TMP17]], <8 x i64> [[TMP14]], <8 x i64> [[TMP15]]
+// CHECK: store <8 x i64> [[TMP18]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <8 x i64> [[TMP19]], <8 x i64> [[TMP20]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP21]], <8 x i64> [[TMP22]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE5_I]], <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP26:%.*]] = icmp sgt <8 x i64> [[TMP23]], [[TMP24]]
+// CHECK: [[TMP27:%.*]] = select <8 x i1> [[TMP26]], <8 x i64> [[TMP23]], <8 x i64> [[TMP24]]
+// CHECK: store <8 x i64> [[TMP27]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP28]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
long long test_mm512_reduce_min_epi64(__m512i __W){
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = icmp slt <8 x i64> %shuffle1.i, %__W
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp sgt <8 x i64> %tmp1, %shuffle3.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp sgt <8 x i64> %tmp3, %shuffle6.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp4, i32 0
- // CHECK: %.elt20.i = extractelement <8 x i64> %tmp3, i32 0
- // CHECK: %shuffle6.elt.i = extractelement <8 x i64> %tmp3, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt20.i, i64 %shuffle6.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_reduce_max_epi64(__W);
}
+// CHECK-LABEL: define i64 @test_mm512_reduce_min_epu64(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I8_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP3]], <8 x i64> [[TMP4]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = icmp ugt <8 x i64> [[TMP5]], [[TMP6]]
+// CHECK: [[TMP9:%.*]] = select <8 x i1> [[TMP8]], <8 x i64> [[TMP5]], <8 x i64> [[TMP6]]
+// CHECK: store <8 x i64> [[TMP9]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <8 x i64> [[TMP10]], <8 x i64> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP12]], <8 x i64> [[TMP13]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE2_I]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP17:%.*]] = icmp ugt <8 x i64> [[TMP14]], [[TMP15]]
+// CHECK: [[TMP18:%.*]] = select <8 x i1> [[TMP17]], <8 x i64> [[TMP14]], <8 x i64> [[TMP15]]
+// CHECK: store <8 x i64> [[TMP18]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <8 x i64> [[TMP19]], <8 x i64> [[TMP20]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP21]], <8 x i64> [[TMP22]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE5_I]], <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I9_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP26:%.*]] = icmp ult <8 x i64> [[TMP23]], [[TMP24]]
+// CHECK: [[TMP27:%.*]] = select <8 x i1> [[TMP26]], <8 x i64> [[TMP23]], <8 x i64> [[TMP24]]
+// CHECK: store <8 x i64> [[TMP27]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP28]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
unsigned long long test_mm512_reduce_min_epu64(__m512i __W){
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %__W, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = icmp ult <8 x i64> %shuffle1.i, %__W
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp ugt <8 x i64> %tmp1, %shuffle3.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp ugt <8 x i64> %tmp3, %shuffle6.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp4, i32 0
- // CHECK: %.elt20.i = extractelement <8 x i64> %tmp3, i32 0
- // CHECK: %shuffle6.elt.i = extractelement <8 x i64> %tmp3, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt20.i, i64 %shuffle6.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_reduce_min_epu64(__W);
}
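+// A minimal sketch (not the header's verbatim code, which routes each step
+// through the small __A/__B helper temporaries matched above) of the pattern
+// the unoptimized CHECK lines verify: split the vector into halves with
+// shufflevector, keep the elementwise unsigned minimum, and repeat until
+// lane 0 holds the result. The helper name is illustrative only.
+static __inline__ unsigned long long sketch_reduce_min_epu64(__m512i __V) {
+  __V = _mm512_min_epu64(__builtin_shufflevector(__V, __V, 0, 1, 2, 3, -1, -1, -1, -1),
+                         __builtin_shufflevector(__V, __V, 4, 5, 6, 7, -1, -1, -1, -1));
+  __V = _mm512_min_epu64(__builtin_shufflevector(__V, __V, 0, 1, -1, -1, -1, -1, -1, -1),
+                         __builtin_shufflevector(__V, __V, 2, 3, -1, -1, -1, -1, -1, -1));
+  __V = _mm512_min_epu64(__builtin_shufflevector(__V, __V, 0, -1, -1, -1, -1, -1, -1, -1),
+                         __builtin_shufflevector(__V, __V, 1, -1, -1, -1, -1, -1, -1, -1));
+  return __V[0];
+}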
+// CHECK-LABEL: define double @test_mm512_reduce_min_pd(<8 x double> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I8_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I9_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I10_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x double>, align 64
+// CHECK: store <8 x double> %__W, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: store <8 x double> [[TMP0]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x double> [[TMP3]], <8 x double> [[TMP4]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE_I]], <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE1_I]], <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> [[TMP5]], <8 x double> [[TMP6]], <8 x double> [[TMP7]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP8]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <8 x double> [[TMP9]], <8 x double> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP11:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x double> [[TMP11]], <8 x double> [[TMP12]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE2_I]], <8 x double>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE3_I]], <8 x double>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP16:%.*]] = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> [[TMP13]], <8 x double> [[TMP14]], <8 x double> [[TMP15]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP16]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP17:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <8 x double> [[TMP17]], <8 x double> [[TMP18]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP19:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x double> [[TMP19]], <8 x double> [[TMP20]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE5_I]], <8 x double>* [[__A_ADDR_I9_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE6_I]], <8 x double>* [[__B_ADDR_I10_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I9_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I10_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I8_I]], align 64
+// CHECK: [[TMP24:%.*]] = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> [[TMP21]], <8 x double> [[TMP22]], <8 x double> [[TMP23]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP24]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x double> [[TMP25]], i32 0
+// CHECK: ret double [[VECEXT_I]]
double test_mm512_reduce_min_pd(__m512d __W){
- // CHECK: %shuffle1.i = shufflevector <8 x double> %__W, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %__W, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle3.i = shufflevector <8 x double> %tmp, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %tmp, <8 x double> %shuffle3.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle6.i = shufflevector <8 x double> %tmp1, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %tmp1, <8 x double> %shuffle6.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <8 x double> %tmp2, i32 0
- // CHECK: ret double %vecext.i
return _mm512_reduce_min_pd(__W);
}
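+// The double flavour follows the same halving tree, but each combine step is
+// _mm512_min_pd, which is why the CHECK lines above match calls to
+// llvm.x86.avx512.mask.min.pd.512 rather than an icmp/select pair. A hedged
+// sketch with an illustrative name:
+static __inline__ double sketch_reduce_min_pd(__m512d __V) {
+  __V = _mm512_min_pd(__builtin_shufflevector(__V, __V, 0, 1, 2, 3, -1, -1, -1, -1),
+                      __builtin_shufflevector(__V, __V, 4, 5, 6, 7, -1, -1, -1, -1));
+  __V = _mm512_min_pd(__builtin_shufflevector(__V, __V, 0, 1, -1, -1, -1, -1, -1, -1),
+                      __builtin_shufflevector(__V, __V, 2, 3, -1, -1, -1, -1, -1, -1));
+  __V = _mm512_min_pd(__builtin_shufflevector(__V, __V, 0, -1, -1, -1, -1, -1, -1, -1),
+                      __builtin_shufflevector(__V, __V, 1, -1, -1, -1, -1, -1, -1, -1));
+  return __V[0];
+}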
+// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epi64(i8 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i8, align 1
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i8, align 1
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i8 %__M, i8* [[__M_ADDR]], align 1
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i8, i8* [[__M_ADDR]], align 1
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i8 [[TMP0]], i8* [[__M_ADDR_I]], align 1
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: store i64 -9223372036854775808, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[TMP4:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <8 x i64> undef, i64 [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <8 x i64> [[VECINIT_I_I]], i64 [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <8 x i64> [[VECINIT1_I_I]], i64 [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <8 x i64> [[VECINIT2_I_I]], i64 [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <8 x i64> [[VECINIT3_I_I]], i64 [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <8 x i64> [[VECINIT4_I_I]], i64 [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <8 x i64> [[VECINIT5_I_I]], i64 [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <8 x i64> [[VECINIT6_I_I]], i64 [[TMP11]], i32 7
+// CHECK: store <8 x i64> [[VECINIT7_I_I]], <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP13:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
+// CHECK: [[TMP14:%.*]] = select <8 x i1> [[TMP13]], <8 x i64> [[TMP3]], <8 x i64> [[TMP12]]
+// CHECK: store <8 x i64> [[TMP14]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP15]], <8 x i64> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP17:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP17]], <8 x i64> [[TMP18]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP22:%.*]] = icmp sgt <8 x i64> [[TMP19]], [[TMP20]]
+// CHECK: [[TMP23:%.*]] = select <8 x i1> [[TMP22]], <8 x i64> [[TMP19]], <8 x i64> [[TMP20]]
+// CHECK: store <8 x i64> [[TMP23]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP24]], <8 x i64> [[TMP25]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <8 x i64> [[TMP26]], <8 x i64> [[TMP27]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE4_I]], <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP31:%.*]] = icmp sgt <8 x i64> [[TMP28]], [[TMP29]]
+// CHECK: [[TMP32:%.*]] = select <8 x i1> [[TMP31]], <8 x i64> [[TMP28]], <8 x i64> [[TMP29]]
+// CHECK: store <8 x i64> [[TMP32]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP33]], <8 x i64> [[TMP34]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <8 x i64> [[TMP35]], <8 x i64> [[TMP36]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE7_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP40:%.*]] = icmp sgt <8 x i64> [[TMP37]], [[TMP38]]
+// CHECK: [[TMP41:%.*]] = select <8 x i1> [[TMP40]], <8 x i64> [[TMP37]], <8 x i64> [[TMP38]]
+// CHECK: store <8 x i64> [[TMP41]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP42]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
long long test_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __W){
- // CHECK: %tmp = bitcast i8 %__M to <8 x i1>
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> <i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808, i64 -9223372036854775808>
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp sgt <8 x i64> %tmp1, %shuffle1.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp sgt <8 x i64> %tmp3, %shuffle4.i
- // CHECK: %tmp5 = select <8 x i1> %tmp4, <8 x i64> %tmp3, <8 x i64> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <8 x i64> %tmp5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp6 = icmp sgt <8 x i64> %tmp5, %shuffle7.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp6, i32 0
- // CHECK: %.elt22.i = extractelement <8 x i64> %tmp5, i32 0
- // CHECK: %shuffle7.elt.i = extractelement <8 x i64> %tmp5, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt22.i, i64 %shuffle7.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_mask_reduce_max_epi64(__M, __W);
}
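+// The _mm512_mask_* reductions only add a prologue before the same tree:
+// lanes cleared in __M are replaced with the operation's identity so they
+// cannot affect the result. For signed max that identity is INT64_MIN, the
+// -9223372036854775808 splat built through __D_ADDR_I_I above. A hedged
+// sketch of just that prologue (illustrative name, not the header's):
+static __inline__ __m512i sketch_apply_max_epi64_identity(__mmask8 __M, __m512i __V) {
+  // Keep the lanes selected by __M; force the rest to INT64_MIN.
+  return _mm512_mask_mov_epi64(_mm512_set1_epi64(-9223372036854775807LL - 1), __M, __V);
+}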
+// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epu64(i8 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i8, align 1
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i8, align 1
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i8 %__M, i8* [[__M_ADDR]], align 1
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i8, i8* [[__M_ADDR]], align 1
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i8 [[TMP0]], i8* [[__M_ADDR_I]], align 1
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: store i64 0, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[TMP4:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <8 x i64> undef, i64 [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <8 x i64> [[VECINIT_I_I]], i64 [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <8 x i64> [[VECINIT1_I_I]], i64 [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <8 x i64> [[VECINIT2_I_I]], i64 [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <8 x i64> [[VECINIT3_I_I]], i64 [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <8 x i64> [[VECINIT4_I_I]], i64 [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <8 x i64> [[VECINIT5_I_I]], i64 [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <8 x i64> [[VECINIT6_I_I]], i64 [[TMP11]], i32 7
+// CHECK: store <8 x i64> [[VECINIT7_I_I]], <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP13:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
+// CHECK: [[TMP14:%.*]] = select <8 x i1> [[TMP13]], <8 x i64> [[TMP3]], <8 x i64> [[TMP12]]
+// CHECK: store <8 x i64> [[TMP14]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP15]], <8 x i64> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP17:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP17]], <8 x i64> [[TMP18]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP22:%.*]] = icmp ugt <8 x i64> [[TMP19]], [[TMP20]]
+// CHECK: [[TMP23:%.*]] = select <8 x i1> [[TMP22]], <8 x i64> [[TMP19]], <8 x i64> [[TMP20]]
+// CHECK: store <8 x i64> [[TMP23]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP24]], <8 x i64> [[TMP25]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <8 x i64> [[TMP26]], <8 x i64> [[TMP27]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE4_I]], <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP31:%.*]] = icmp ugt <8 x i64> [[TMP28]], [[TMP29]]
+// CHECK: [[TMP32:%.*]] = select <8 x i1> [[TMP31]], <8 x i64> [[TMP28]], <8 x i64> [[TMP29]]
+// CHECK: store <8 x i64> [[TMP32]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP33]], <8 x i64> [[TMP34]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <8 x i64> [[TMP35]], <8 x i64> [[TMP36]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE7_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP40:%.*]] = icmp ugt <8 x i64> [[TMP37]], [[TMP38]]
+// CHECK: [[TMP41:%.*]] = select <8 x i1> [[TMP40]], <8 x i64> [[TMP37]], <8 x i64> [[TMP38]]
+// CHECK: store <8 x i64> [[TMP41]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP42]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
unsigned long long test_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __W){
- // CHECK: %tmp = bitcast i8 %__M to <8 x i1>
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> zeroinitializer
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp ugt <8 x i64> %tmp1, %shuffle1.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp ugt <8 x i64> %tmp3, %shuffle4.i
- // CHECK: %tmp5 = select <8 x i1> %tmp4, <8 x i64> %tmp3, <8 x i64> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <8 x i64> %tmp5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp6 = icmp ugt <8 x i64> %tmp5, %shuffle7.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp6, i32 0
- // CHECK: %.elt22.i = extractelement <8 x i64> %tmp5, i32 0
- // CHECK: %shuffle7.elt.i = extractelement <8 x i64> %tmp5, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt22.i, i64 %shuffle7.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_mask_reduce_max_epu64(__M, __W);
}
+// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_pd(i8 zeroext %__M, <8 x double> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I9_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I10_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I11_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__W_ADDR_I_I:%.*]] = alloca double, align 8
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i8, align 1
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i8, align 1
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x double>, align 64
+// CHECK: store i8 %__M, i8* [[__M_ADDR]], align 1
+// CHECK: store <8 x double> %__W, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i8, i8* [[__M_ADDR]], align 1
+// CHECK: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: store i8 [[TMP0]], i8* [[__M_ADDR_I]], align 1
+// CHECK: store <8 x double> [[TMP1]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
+// CHECK: [[TMP3:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: store double 0x7FF0000000000000, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[TMP4:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <8 x double> undef, double [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <8 x double> [[VECINIT_I_I]], double [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <8 x double> [[VECINIT1_I_I]], double [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <8 x double> [[VECINIT2_I_I]], double [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <8 x double> [[VECINIT3_I_I]], double [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <8 x double> [[VECINIT4_I_I]], double [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <8 x double> [[VECINIT5_I_I]], double [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <8 x double> [[VECINIT6_I_I]], double [[TMP11]], i32 7
+// CHECK: store <8 x double> [[VECINIT7_I_I]], <8 x double>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[SUB_I:%.*]] = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, [[TMP12]]
+// CHECK: [[TMP13:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
+// CHECK: [[TMP14:%.*]] = select <8 x i1> [[TMP13]], <8 x double> [[TMP3]], <8 x double> [[SUB_I]]
+// CHECK: store <8 x double> [[TMP14]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x double> [[TMP15]], <8 x double> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP17:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x double> [[TMP17]], <8 x double> [[TMP18]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE_I]], <8 x double>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE1_I]], <8 x double>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP22:%.*]] = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> [[TMP19]], <8 x double> [[TMP20]], <8 x double> [[TMP21]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP22]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x double> [[TMP23]], <8 x double> [[TMP24]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP25:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <8 x double> [[TMP25]], <8 x double> [[TMP26]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE3_I]], <8 x double>* [[__A_ADDR_I10_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE4_I]], <8 x double>* [[__B_ADDR_I11_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I10_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I11_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP30:%.*]] = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> [[TMP27]], <8 x double> [[TMP28]], <8 x double> [[TMP29]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP30]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP31:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x double> [[TMP31]], <8 x double> [[TMP32]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP33:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <8 x double> [[TMP33]], <8 x double> [[TMP34]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE6_I]], <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE7_I]], <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP35:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP38:%.*]] = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> [[TMP35]], <8 x double> [[TMP36]], <8 x double> [[TMP37]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP38]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x double> [[TMP39]], i32 0
+// CHECK: [[CONV:%.*]] = fptosi double [[VECEXT_I]] to i64
+// CHECK: ret i64 [[CONV]]
long long test_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __W){
- // CHECK: %tmp = bitcast i8 %__M to <8 x i1>
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x double> %__W, <8 x double> <double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000, double 0xFFF0000000000000>
- // CHECK: %shuffle1.i = shufflevector <8 x double> %tmp1, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %tmp1, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle4.i = shufflevector <8 x double> %tmp2, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %tmp2, <8 x double> %shuffle4.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle7.i = shufflevector <8 x double> %tmp3, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = tail call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %tmp3, <8 x double> %shuffle7.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <8 x double> %tmp4, i32 0
- // CHECK: %conv = fptosi double %vecext.i to i64
- // CHECK: ret i64 %conv
return _mm512_mask_reduce_max_pd(__M, __W);
}
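+// For the floating-point max the identity is -infinity. The IR above reaches
+// it indirectly: it splats +infinity (0x7FF0000000000000) and then negates
+// the whole vector, which is the SUB_I fsub from -0.0. A hedged sketch of
+// that prologue, assuming the header negates a __builtin_inf() splat rather
+// than materializing -infinity directly:
+static __inline__ __m512d sketch_apply_max_pd_identity(__mmask8 __M, __m512d __V) {
+  // Unary minus on the vector is what produces the fsub from -0.0 above.
+  return _mm512_mask_mov_pd(-_mm512_set1_pd(__builtin_inf()), __M, __V);
+}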
+// CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epi64(i8 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i8, align 1
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i8, align 1
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i8 %__M, i8* [[__M_ADDR]], align 1
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i8, i8* [[__M_ADDR]], align 1
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i8 [[TMP0]], i8* [[__M_ADDR_I]], align 1
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: store i64 9223372036854775807, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[TMP4:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <8 x i64> undef, i64 [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <8 x i64> [[VECINIT_I_I]], i64 [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <8 x i64> [[VECINIT1_I_I]], i64 [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <8 x i64> [[VECINIT2_I_I]], i64 [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <8 x i64> [[VECINIT3_I_I]], i64 [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <8 x i64> [[VECINIT4_I_I]], i64 [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <8 x i64> [[VECINIT5_I_I]], i64 [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <8 x i64> [[VECINIT6_I_I]], i64 [[TMP11]], i32 7
+// CHECK: store <8 x i64> [[VECINIT7_I_I]], <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP13:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
+// CHECK: [[TMP14:%.*]] = select <8 x i1> [[TMP13]], <8 x i64> [[TMP3]], <8 x i64> [[TMP12]]
+// CHECK: store <8 x i64> [[TMP14]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP15]], <8 x i64> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP17:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP17]], <8 x i64> [[TMP18]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP22:%.*]] = icmp slt <8 x i64> [[TMP19]], [[TMP20]]
+// CHECK: [[TMP23:%.*]] = select <8 x i1> [[TMP22]], <8 x i64> [[TMP19]], <8 x i64> [[TMP20]]
+// CHECK: store <8 x i64> [[TMP23]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP24]], <8 x i64> [[TMP25]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <8 x i64> [[TMP26]], <8 x i64> [[TMP27]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE4_I]], <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP31:%.*]] = icmp slt <8 x i64> [[TMP28]], [[TMP29]]
+// CHECK: [[TMP32:%.*]] = select <8 x i1> [[TMP31]], <8 x i64> [[TMP28]], <8 x i64> [[TMP29]]
+// CHECK: store <8 x i64> [[TMP32]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP33]], <8 x i64> [[TMP34]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <8 x i64> [[TMP35]], <8 x i64> [[TMP36]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE7_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP40:%.*]] = icmp slt <8 x i64> [[TMP37]], [[TMP38]]
+// CHECK: [[TMP41:%.*]] = select <8 x i1> [[TMP40]], <8 x i64> [[TMP37]], <8 x i64> [[TMP38]]
+// CHECK: store <8 x i64> [[TMP41]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP42]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
long long test_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __W){
- // CHECK: %tmp = bitcast i8 %__M to <8 x i1>
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> <i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807, i64 9223372036854775807>
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp slt <8 x i64> %tmp1, %shuffle1.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp slt <8 x i64> %tmp3, %shuffle4.i
- // CHECK: %tmp5 = select <8 x i1> %tmp4, <8 x i64> %tmp3, <8 x i64> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <8 x i64> %tmp5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp6 = icmp slt <8 x i64> %tmp5, %shuffle7.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp6, i32 0
- // CHECK: %.elt22.i = extractelement <8 x i64> %tmp5, i32 0
- // CHECK: %shuffle7.elt.i = extractelement <8 x i64> %tmp5, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt22.i, i64 %shuffle7.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_mask_reduce_min_epi64(__M, __W);
}
+// CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I9_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i8, align 1
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i8, align 1
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i8 %__M, i8* [[__M_ADDR]], align 1
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i8, i8* [[__M_ADDR]], align 1
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i8 [[TMP0]], i8* [[__M_ADDR_I]], align 1
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: store i64 -1, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[TMP4:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <8 x i64> undef, i64 [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <8 x i64> [[VECINIT_I_I]], i64 [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <8 x i64> [[VECINIT1_I_I]], i64 [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <8 x i64> [[VECINIT2_I_I]], i64 [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <8 x i64> [[VECINIT3_I_I]], i64 [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <8 x i64> [[VECINIT4_I_I]], i64 [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <8 x i64> [[VECINIT5_I_I]], i64 [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load i64, i64* [[__D_ADDR_I_I]], align 8
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <8 x i64> [[VECINIT6_I_I]], i64 [[TMP11]], i32 7
+// CHECK: store <8 x i64> [[VECINIT7_I_I]], <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP13:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
+// CHECK: [[TMP14:%.*]] = select <8 x i1> [[TMP13]], <8 x i64> [[TMP3]], <8 x i64> [[TMP12]]
+// CHECK: store <8 x i64> [[TMP14]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i64> [[TMP15]], <8 x i64> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP17:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x i64> [[TMP17]], <8 x i64> [[TMP18]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP22:%.*]] = icmp ult <8 x i64> [[TMP19]], [[TMP20]]
+// CHECK: [[TMP23:%.*]] = select <8 x i1> [[TMP22]], <8 x i64> [[TMP19]], <8 x i64> [[TMP20]]
+// CHECK: store <8 x i64> [[TMP23]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x i64> [[TMP24]], <8 x i64> [[TMP25]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <8 x i64> [[TMP26]], <8 x i64> [[TMP27]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE4_I]], <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I10_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I11_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP31:%.*]] = icmp ult <8 x i64> [[TMP28]], [[TMP29]]
+// CHECK: [[TMP32:%.*]] = select <8 x i1> [[TMP31]], <8 x i64> [[TMP28]], <8 x i64> [[TMP29]]
+// CHECK: store <8 x i64> [[TMP32]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x i64> [[TMP33]], <8 x i64> [[TMP34]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <8 x i64> [[TMP35]], <8 x i64> [[TMP36]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x i64> [[SHUFFLE6_I]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[SHUFFLE7_I]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP40:%.*]] = icmp ult <8 x i64> [[TMP37]], [[TMP38]]
+// CHECK: [[TMP41:%.*]] = select <8 x i1> [[TMP40]], <8 x i64> [[TMP37]], <8 x i64> [[TMP38]]
+// CHECK: store <8 x i64> [[TMP41]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP42]], i32 0
+// CHECK: ret i64 [[VECEXT_I]]
unsigned long long test_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __W){
- // CHECK: %tmp = bitcast i8 %__M to <8 x i1>
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x i64> %__W, <8 x i64> zeroinitializer
- // CHECK: %shuffle1.i = shufflevector <8 x i64> %tmp1, <8 x i64> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = icmp ugt <8 x i64> %tmp1, %shuffle1.i
- // CHECK: %tmp3 = select <8 x i1> %tmp2, <8 x i64> %tmp1, <8 x i64> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <8 x i64> %tmp3, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = icmp ugt <8 x i64> %tmp3, %shuffle4.i
- // CHECK: %tmp5 = select <8 x i1> %tmp4, <8 x i64> %tmp3, <8 x i64> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <8 x i64> %tmp5, <8 x i64> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp6 = icmp ugt <8 x i64> %tmp5, %shuffle7.i
- // CHECK: %.elt.i = extractelement <8 x i1> %tmp6, i32 0
- // CHECK: %.elt22.i = extractelement <8 x i64> %tmp5, i32 0
- // CHECK: %shuffle7.elt.i = extractelement <8 x i64> %tmp5, i32 1
- // CHECK: %vecext.i = select i1 %.elt.i, i64 %.elt22.i, i64 %shuffle7.elt.i
- // CHECK: ret i64 %vecext.i
return _mm512_mask_reduce_min_epu64(__M, __W);
}
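+// Unsigned min is the one 64-bit case whose identity is all ones
+// (UINT64_MAX, the i64 -1 splat stored into the compound literal above),
+// since every real lane compares less than or equal to it. A hedged sketch
+// of the prologue, with an illustrative name:
+static __inline__ __m512i sketch_apply_min_epu64_identity(__mmask8 __M, __m512i __V) {
+  // Masked-off lanes become UINT64_MAX so they can never win the minimum.
+  return _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
+}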
+// CHECK-LABEL: define double @test_mm512_mask_reduce_min_pd(i8 zeroext %__M, <8 x double> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I9_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I10_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I11_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__W_ADDR_I_I:%.*]] = alloca double, align 8
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i8, align 1
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i8, align 1
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x double>, align 64
+// CHECK: store i8 %__M, i8* [[__M_ADDR]], align 1
+// CHECK: store <8 x double> %__W, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i8, i8* [[__M_ADDR]], align 1
+// CHECK: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[__W_ADDR]], align 64
+// CHECK: store i8 [[TMP0]], i8* [[__M_ADDR_I]], align 1
+// CHECK: store <8 x double> [[TMP1]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
+// CHECK: [[TMP3:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: store double 0x7FF0000000000000, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[TMP4:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <8 x double> undef, double [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <8 x double> [[VECINIT_I_I]], double [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <8 x double> [[VECINIT1_I_I]], double [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <8 x double> [[VECINIT2_I_I]], double [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <8 x double> [[VECINIT3_I_I]], double [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <8 x double> [[VECINIT4_I_I]], double [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <8 x double> [[VECINIT5_I_I]], double [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load double, double* [[__W_ADDR_I_I]], align 8
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <8 x double> [[VECINIT6_I_I]], double [[TMP11]], i32 7
+// CHECK: store <8 x double> [[VECINIT7_I_I]], <8 x double>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP13:%.*]] = bitcast i8 [[TMP2]] to <8 x i1>
+// CHECK: [[TMP14:%.*]] = select <8 x i1> [[TMP13]], <8 x double> [[TMP3]], <8 x double> [[TMP12]]
+// CHECK: store <8 x double> [[TMP14]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP16:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x double> [[TMP15]], <8 x double> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP17:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <8 x double> [[TMP17]], <8 x double> [[TMP18]], <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE_I]], <8 x double>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE1_I]], <8 x double>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP19:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP22:%.*]] = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> [[TMP19]], <8 x double> [[TMP20]], <8 x double> [[TMP21]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP22]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <8 x double> [[TMP23]], <8 x double> [[TMP24]], <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP25:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <8 x double> [[TMP25]], <8 x double> [[TMP26]], <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE3_I]], <8 x double>* [[__A_ADDR_I10_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE4_I]], <8 x double>* [[__B_ADDR_I11_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I10_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I11_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I9_I]], align 64
+// CHECK: [[TMP30:%.*]] = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> [[TMP27]], <8 x double> [[TMP28]], <8 x double> [[TMP29]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP30]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP31:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <8 x double> [[TMP31]], <8 x double> [[TMP32]], <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP33:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <8 x double> [[TMP33]], <8 x double> [[TMP34]], <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <8 x double> [[SHUFFLE6_I]], <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x double> [[SHUFFLE7_I]], <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP35:%.*]] = load <8 x double>, <8 x double>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x double>, <8 x double>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <8 x double> zeroinitializer, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <8 x double>, <8 x double>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP38:%.*]] = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> [[TMP35]], <8 x double> [[TMP36]], <8 x double> [[TMP37]], i8 -1, i32 4) #2
+// CHECK: store <8 x double> [[TMP38]], <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x double>, <8 x double>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x double> [[TMP39]], i32 0
+// CHECK: ret double [[VECEXT_I]]
double test_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __W){
- // CHECK: %tmp = bitcast i8 %__M to <8 x i1>
- // CHECK: %tmp1 = select <8 x i1> %tmp, <8 x double> %__W, <8 x double> <double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000, double 0x7FF0000000000000>
- // CHECK: %shuffle1.i = shufflevector <8 x double> %tmp1, <8 x double> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %tmp1, <8 x double> %shuffle1.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle4.i = shufflevector <8 x double> %tmp2, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %tmp2, <8 x double> %shuffle4.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %shuffle7.i = shufflevector <8 x double> %tmp3, <8 x double> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = tail call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %tmp3, <8 x double> %shuffle7.i, <8 x double> zeroinitializer, i8 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <8 x double> %tmp4, i32 0
- // CHECK: ret double %vecext.i
return _mm512_mask_reduce_min_pd(__M, __W);
}
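
For the masked double-precision minimum, the mask is applied once, up front: lanes cleared in __M are replaced with +infinity (the 0x7FF0000000000000 splat built lane by lane in the VECINIT lines), so they can never win a min comparison, and each halving round then goes through @llvm.x86.avx512.mask.min.pd.512 with an all-ones mask (i8 -1) and _MM_FROUND_CUR_DIRECTION (i32 4). A hedged scalar equivalent of those semantics, names illustrative:

#include <math.h>
#include <stdint.h>

/* Masked-off lanes become +INFINITY (0x7FF0000000000000) before the
   reduction, matching the select against the splat compound literal;
   an all-zero mask therefore yields +INFINITY. */
static double masked_reduce_min_pd(uint8_t mask, const double v[8]) {
  double m = INFINITY;
  for (int i = 0; i < 8; ++i)
    if (((mask >> i) & 1) && v[i] < m)
      m = v[i];
  return m;
}
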
+// CHECK-LABEL: define i32 @test_mm512_reduce_max_epi32(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[A_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i64> [[TMP1]] to <16 x i32>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP5:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP7:%.*]] = bitcast <8 x i64> [[TMP6]] to <16 x i32>
+// CHECK: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i64> [[TMP8]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP7]], <16 x i32> [[TMP9]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP5]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP10]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i64> [[TMP11]] to <16 x i32>
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i64> [[TMP13]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP16:%.*]] = bitcast <8 x i64> [[TMP15]] to <16 x i32>
+// CHECK: [[TMP17:%.*]] = icmp sgt <16 x i32> [[TMP12]], [[TMP14]]
+// CHECK: [[TMP18:%.*]] = select <16 x i1> [[TMP17]], <16 x i32> [[TMP12]], <16 x i32> [[TMP14]]
+// CHECK: [[TMP19:%.*]] = bitcast <16 x i32> [[TMP18]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP19]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP21:%.*]] = bitcast <8 x i64> [[TMP20]] to <16 x i32>
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = bitcast <8 x i64> [[TMP22]] to <16 x i32>
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <16 x i32> [[TMP21]], <16 x i32> [[TMP23]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP24:%.*]] = bitcast <16 x i32> [[SHUFFLE2_I]] to <8 x i64>
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = bitcast <8 x i64> [[TMP25]] to <16 x i32>
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = bitcast <8 x i64> [[TMP27]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP26]], <16 x i32> [[TMP28]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP29:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP24]], <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: store <8 x i64> [[TMP29]], <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: [[TMP31:%.*]] = bitcast <8 x i64> [[TMP30]] to <16 x i32>
+// CHECK: [[TMP32:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP33:%.*]] = bitcast <8 x i64> [[TMP32]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP35:%.*]] = bitcast <8 x i64> [[TMP34]] to <16 x i32>
+// CHECK: [[TMP36:%.*]] = icmp sgt <16 x i32> [[TMP31]], [[TMP33]]
+// CHECK: [[TMP37:%.*]] = select <16 x i1> [[TMP36]], <16 x i32> [[TMP31]], <16 x i32> [[TMP33]]
+// CHECK: [[TMP38:%.*]] = bitcast <16 x i32> [[TMP37]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP38]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP40:%.*]] = bitcast <8 x i64> [[TMP39]] to <16 x i32>
+// CHECK: [[TMP41:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = bitcast <8 x i64> [[TMP41]] to <16 x i32>
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <16 x i32> [[TMP40]], <16 x i32> [[TMP42]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP43:%.*]] = bitcast <16 x i32> [[SHUFFLE5_I]] to <8 x i64>
+// CHECK: [[TMP44:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = bitcast <8 x i64> [[TMP44]] to <16 x i32>
+// CHECK: [[TMP46:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP47:%.*]] = bitcast <8 x i64> [[TMP46]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP45]], <16 x i32> [[TMP47]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP48:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP43]], <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: store <8 x i64> [[TMP48]], <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP49:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: [[TMP50:%.*]] = bitcast <8 x i64> [[TMP49]] to <16 x i32>
+// CHECK: [[TMP51:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP52:%.*]] = bitcast <8 x i64> [[TMP51]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP53:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP54:%.*]] = bitcast <8 x i64> [[TMP53]] to <16 x i32>
+// CHECK: [[TMP55:%.*]] = icmp sgt <16 x i32> [[TMP50]], [[TMP52]]
+// CHECK: [[TMP56:%.*]] = select <16 x i1> [[TMP55]], <16 x i32> [[TMP50]], <16 x i32> [[TMP52]]
+// CHECK: [[TMP57:%.*]] = bitcast <16 x i32> [[TMP56]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP57]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP58:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP59:%.*]] = bitcast <8 x i64> [[TMP58]] to <16 x i32>
+// CHECK: [[TMP60:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP61:%.*]] = bitcast <8 x i64> [[TMP60]] to <16 x i32>
+// CHECK: [[SHUFFLE8_I:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> [[TMP61]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP62:%.*]] = bitcast <16 x i32> [[SHUFFLE8_I]] to <8 x i64>
+// CHECK: [[TMP63:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = bitcast <8 x i64> [[TMP63]] to <16 x i32>
+// CHECK: [[TMP65:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP66:%.*]] = bitcast <8 x i64> [[TMP65]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP64]], <16 x i32> [[TMP66]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP67:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP62]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[TMP67]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP68:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP69:%.*]] = bitcast <8 x i64> [[TMP68]] to <16 x i32>
+// CHECK: [[TMP70:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP71:%.*]] = bitcast <8 x i64> [[TMP70]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP72:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP73:%.*]] = bitcast <8 x i64> [[TMP72]] to <16 x i32>
+// CHECK: [[TMP74:%.*]] = icmp sgt <16 x i32> [[TMP69]], [[TMP71]]
+// CHECK: [[TMP75:%.*]] = select <16 x i1> [[TMP74]], <16 x i32> [[TMP69]], <16 x i32> [[TMP71]]
+// CHECK: [[TMP76:%.*]] = bitcast <16 x i32> [[TMP75]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP76]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP77:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP77]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
int test_mm512_reduce_max_epi32(__m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp slt <16 x i32> %shuffle1.i, %tmp
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp sgt <16 x i32> %tmp2, %shuffle3.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp sgt <16 x i32> %tmp4, %shuffle6.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle6.i
- // CHECK: %shuffle9.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp sgt <16 x i32> %tmp6, %shuffle9.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle9.i
- // CHECK: %tmp9 = bitcast <16 x i32> %tmp8 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp9, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
return _mm512_reduce_max_epi32(__W);
}
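
Clang models __m512i as <8 x i64>, which is why every <16 x i32> shuffle and signed compare above is bracketed by bitcasts, and why the final answer comes out as the low i64 truncated to i32. The intrinsic itself is a plain 16-lane signed-max reduction; a hypothetical caller, just to show the API shape (requires -mavx512f, values illustrative):

#include <immintrin.h>

int max_of_sixteen(void) {
  __m512i v = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8,
                               9, 10, 11, 12, 13, 14, 15, 16);
  return _mm512_reduce_max_epi32(v);  /* 16 */
}
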
+// CHECK-LABEL: define i32 @test_mm512_reduce_max_epu32(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[A_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i64> [[TMP1]] to <16 x i32>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP5:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP7:%.*]] = bitcast <8 x i64> [[TMP6]] to <16 x i32>
+// CHECK: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i64> [[TMP8]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP7]], <16 x i32> [[TMP9]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP5]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP10]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i64> [[TMP11]] to <16 x i32>
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i64> [[TMP13]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP16:%.*]] = bitcast <8 x i64> [[TMP15]] to <16 x i32>
+// CHECK: [[TMP17:%.*]] = icmp ugt <16 x i32> [[TMP12]], [[TMP14]]
+// CHECK: [[TMP18:%.*]] = select <16 x i1> [[TMP17]], <16 x i32> [[TMP12]], <16 x i32> [[TMP14]]
+// CHECK: [[TMP19:%.*]] = bitcast <16 x i32> [[TMP18]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP19]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP21:%.*]] = bitcast <8 x i64> [[TMP20]] to <16 x i32>
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = bitcast <8 x i64> [[TMP22]] to <16 x i32>
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <16 x i32> [[TMP21]], <16 x i32> [[TMP23]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP24:%.*]] = bitcast <16 x i32> [[SHUFFLE2_I]] to <8 x i64>
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = bitcast <8 x i64> [[TMP25]] to <16 x i32>
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = bitcast <8 x i64> [[TMP27]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP26]], <16 x i32> [[TMP28]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP29:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP24]], <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: store <8 x i64> [[TMP29]], <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: [[TMP31:%.*]] = bitcast <8 x i64> [[TMP30]] to <16 x i32>
+// CHECK: [[TMP32:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP33:%.*]] = bitcast <8 x i64> [[TMP32]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP35:%.*]] = bitcast <8 x i64> [[TMP34]] to <16 x i32>
+// CHECK: [[TMP36:%.*]] = icmp ugt <16 x i32> [[TMP31]], [[TMP33]]
+// CHECK: [[TMP37:%.*]] = select <16 x i1> [[TMP36]], <16 x i32> [[TMP31]], <16 x i32> [[TMP33]]
+// CHECK: [[TMP38:%.*]] = bitcast <16 x i32> [[TMP37]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP38]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP40:%.*]] = bitcast <8 x i64> [[TMP39]] to <16 x i32>
+// CHECK: [[TMP41:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = bitcast <8 x i64> [[TMP41]] to <16 x i32>
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <16 x i32> [[TMP40]], <16 x i32> [[TMP42]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP43:%.*]] = bitcast <16 x i32> [[SHUFFLE5_I]] to <8 x i64>
+// CHECK: [[TMP44:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = bitcast <8 x i64> [[TMP44]] to <16 x i32>
+// CHECK: [[TMP46:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP47:%.*]] = bitcast <8 x i64> [[TMP46]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP45]], <16 x i32> [[TMP47]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP48:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP43]], <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: store <8 x i64> [[TMP48]], <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP49:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: [[TMP50:%.*]] = bitcast <8 x i64> [[TMP49]] to <16 x i32>
+// CHECK: [[TMP51:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP52:%.*]] = bitcast <8 x i64> [[TMP51]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP53:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP54:%.*]] = bitcast <8 x i64> [[TMP53]] to <16 x i32>
+// CHECK: [[TMP55:%.*]] = icmp ugt <16 x i32> [[TMP50]], [[TMP52]]
+// CHECK: [[TMP56:%.*]] = select <16 x i1> [[TMP55]], <16 x i32> [[TMP50]], <16 x i32> [[TMP52]]
+// CHECK: [[TMP57:%.*]] = bitcast <16 x i32> [[TMP56]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP57]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP58:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP59:%.*]] = bitcast <8 x i64> [[TMP58]] to <16 x i32>
+// CHECK: [[TMP60:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP61:%.*]] = bitcast <8 x i64> [[TMP60]] to <16 x i32>
+// CHECK: [[SHUFFLE8_I:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> [[TMP61]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP62:%.*]] = bitcast <16 x i32> [[SHUFFLE8_I]] to <8 x i64>
+// CHECK: [[TMP63:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = bitcast <8 x i64> [[TMP63]] to <16 x i32>
+// CHECK: [[TMP65:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP66:%.*]] = bitcast <8 x i64> [[TMP65]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP64]], <16 x i32> [[TMP66]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP67:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP62]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[TMP67]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP68:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP69:%.*]] = bitcast <8 x i64> [[TMP68]] to <16 x i32>
+// CHECK: [[TMP70:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP71:%.*]] = bitcast <8 x i64> [[TMP70]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP72:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP73:%.*]] = bitcast <8 x i64> [[TMP72]] to <16 x i32>
+// CHECK: [[TMP74:%.*]] = icmp ugt <16 x i32> [[TMP69]], [[TMP71]]
+// CHECK: [[TMP75:%.*]] = select <16 x i1> [[TMP74]], <16 x i32> [[TMP69]], <16 x i32> [[TMP71]]
+// CHECK: [[TMP76:%.*]] = bitcast <16 x i32> [[TMP75]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP76]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP77:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP77]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
unsigned int test_mm512_reduce_max_epu32(__m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp ult <16 x i32> %shuffle1.i, %tmp
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp ugt <16 x i32> %tmp2, %shuffle3.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp ugt <16 x i32> %tmp4, %shuffle6.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle6.i
- // CHECK: %shuffle9.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp ugt <16 x i32> %tmp6, %shuffle9.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle9.i
- // CHECK: %tmp9 = bitcast <16 x i32> %tmp8 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp9, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
return _mm512_reduce_max_epu32(__W);
}
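
The epu32 test differs from the epi32 one only in the predicate: icmp ugt in place of icmp sgt. The distinction matters exactly when the top bit is set, since the bit pattern 0x80000000 is INT32_MIN under a signed compare but a very large value under an unsigned one. A two-line scalar contrast, names illustrative:

#include <stdint.h>

int32_t  smax(int32_t a, int32_t b)   { return a > b ? a : b; }  /* icmp sgt */
uint32_t umax(uint32_t a, uint32_t b) { return a > b ? a : b; }  /* icmp ugt */
/* smax(INT32_MIN, 1) == 1, but umax(0x80000000u, 1u) == 0x80000000u. */
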
+// CHECK-LABEL: define float @test_mm512_reduce_max_ps(<16 x float> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I17_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I18_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I19_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I14_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I15_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I16_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[A_ADDR_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <16 x float>, align 64
+// CHECK: store <16 x float> %__W, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <16 x float>, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: store <16 x float> [[TMP0]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x float> [[TMP1]], <16 x float> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x float> [[TMP3]], <16 x float> [[TMP4]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE_I]], <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE1_I]], <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP5]], <16 x float> [[TMP6]], <16 x float> [[TMP7]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP8]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <16 x float> [[TMP9]], <16 x float> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP11:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x float> [[TMP11]], <16 x float> [[TMP12]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE2_I]], <16 x float>* [[__A_ADDR_I18_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE3_I]], <16 x float>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I18_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I19_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP16:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP13]], <16 x float> [[TMP14]], <16 x float> [[TMP15]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP16]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP17:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <16 x float> [[TMP17]], <16 x float> [[TMP18]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP19:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x float> [[TMP19]], <16 x float> [[TMP20]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE5_I]], <16 x float>* [[__A_ADDR_I15_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE6_I]], <16 x float>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I15_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I16_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP24:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP21]], <16 x float> [[TMP22]], <16 x float> [[TMP23]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP24]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE8_I:%.*]] = shufflevector <16 x float> [[TMP25]], <16 x float> [[TMP26]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP27:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x float> [[TMP27]], <16 x float> [[TMP28]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE8_I]], <16 x float>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE9_I]], <16 x float>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP31:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP32:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP29]], <16 x float> [[TMP30]], <16 x float> [[TMP31]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP32]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP33:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <16 x float> [[TMP33]], i32 0
+// CHECK: ret float [[VECEXT_I]]
float test_mm512_reduce_max_ps(__m512 __W){
- // CHECK: %shuffle1.i = shufflevector <16 x float> %__W, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %__W, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle3.i = shufflevector <16 x float> %tmp, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp, <16 x float> %shuffle3.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle6.i = shufflevector <16 x float> %tmp1, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp1, <16 x float> %shuffle6.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle9.i = shufflevector <16 x float> %tmp2, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp2, <16 x float> %shuffle9.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <16 x float> %tmp3, i32 0
- // CHECK: ret float %vecext.i
return _mm512_reduce_max_ps(__W);
}
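
The float reduction does not lower to icmp/select at all; each halving round calls the target intrinsic @llvm.x86.avx512.mask.max.ps.512 with an all-ones mask (i16 -1) and _MM_FROUND_CUR_DIRECTION (i32 4), i.e. a vmaxps between the vector and its shuffled half. Under that reading, the first round is equivalent to the sketch below (helper name illustrative; requires -mavx512f):

#include <immintrin.h>

/* Bring lanes 8..15 down into the low 256 bits, then take the lane-wise
   max -- one @llvm.x86.avx512.mask.max.ps.512 round from the CHECK lines. */
static __m512 first_round_max_ps(__m512 v) {
  __m512 hi = _mm512_shuffle_f32x4(v, v, _MM_SHUFFLE(0, 0, 3, 2));
  return _mm512_max_ps(v, hi);
}
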
+// CHECK-LABEL: define i32 @test_mm512_reduce_min_epi32(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[A_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i64> [[TMP1]] to <16 x i32>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP5:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP7:%.*]] = bitcast <8 x i64> [[TMP6]] to <16 x i32>
+// CHECK: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i64> [[TMP8]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP7]], <16 x i32> [[TMP9]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP5]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP10]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i64> [[TMP11]] to <16 x i32>
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i64> [[TMP13]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP16:%.*]] = bitcast <8 x i64> [[TMP15]] to <16 x i32>
+// CHECK: [[TMP17:%.*]] = icmp slt <16 x i32> [[TMP12]], [[TMP14]]
+// CHECK: [[TMP18:%.*]] = select <16 x i1> [[TMP17]], <16 x i32> [[TMP12]], <16 x i32> [[TMP14]]
+// CHECK: [[TMP19:%.*]] = bitcast <16 x i32> [[TMP18]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP19]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP21:%.*]] = bitcast <8 x i64> [[TMP20]] to <16 x i32>
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = bitcast <8 x i64> [[TMP22]] to <16 x i32>
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <16 x i32> [[TMP21]], <16 x i32> [[TMP23]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP24:%.*]] = bitcast <16 x i32> [[SHUFFLE2_I]] to <8 x i64>
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = bitcast <8 x i64> [[TMP25]] to <16 x i32>
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = bitcast <8 x i64> [[TMP27]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP26]], <16 x i32> [[TMP28]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP29:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP24]], <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: store <8 x i64> [[TMP29]], <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: [[TMP31:%.*]] = bitcast <8 x i64> [[TMP30]] to <16 x i32>
+// CHECK: [[TMP32:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP33:%.*]] = bitcast <8 x i64> [[TMP32]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP35:%.*]] = bitcast <8 x i64> [[TMP34]] to <16 x i32>
+// CHECK: [[TMP36:%.*]] = icmp slt <16 x i32> [[TMP31]], [[TMP33]]
+// CHECK: [[TMP37:%.*]] = select <16 x i1> [[TMP36]], <16 x i32> [[TMP31]], <16 x i32> [[TMP33]]
+// CHECK: [[TMP38:%.*]] = bitcast <16 x i32> [[TMP37]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP38]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP40:%.*]] = bitcast <8 x i64> [[TMP39]] to <16 x i32>
+// CHECK: [[TMP41:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = bitcast <8 x i64> [[TMP41]] to <16 x i32>
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <16 x i32> [[TMP40]], <16 x i32> [[TMP42]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP43:%.*]] = bitcast <16 x i32> [[SHUFFLE5_I]] to <8 x i64>
+// CHECK: [[TMP44:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = bitcast <8 x i64> [[TMP44]] to <16 x i32>
+// CHECK: [[TMP46:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP47:%.*]] = bitcast <8 x i64> [[TMP46]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP45]], <16 x i32> [[TMP47]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP48:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP43]], <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: store <8 x i64> [[TMP48]], <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP49:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: [[TMP50:%.*]] = bitcast <8 x i64> [[TMP49]] to <16 x i32>
+// CHECK: [[TMP51:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP52:%.*]] = bitcast <8 x i64> [[TMP51]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP53:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP54:%.*]] = bitcast <8 x i64> [[TMP53]] to <16 x i32>
+// CHECK: [[TMP55:%.*]] = icmp slt <16 x i32> [[TMP50]], [[TMP52]]
+// CHECK: [[TMP56:%.*]] = select <16 x i1> [[TMP55]], <16 x i32> [[TMP50]], <16 x i32> [[TMP52]]
+// CHECK: [[TMP57:%.*]] = bitcast <16 x i32> [[TMP56]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP57]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP58:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP59:%.*]] = bitcast <8 x i64> [[TMP58]] to <16 x i32>
+// CHECK: [[TMP60:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP61:%.*]] = bitcast <8 x i64> [[TMP60]] to <16 x i32>
+// CHECK: [[SHUFFLE8_I:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> [[TMP61]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP62:%.*]] = bitcast <16 x i32> [[SHUFFLE8_I]] to <8 x i64>
+// CHECK: [[TMP63:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = bitcast <8 x i64> [[TMP63]] to <16 x i32>
+// CHECK: [[TMP65:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP66:%.*]] = bitcast <8 x i64> [[TMP65]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP64]], <16 x i32> [[TMP66]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP67:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP62]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[TMP67]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP68:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP69:%.*]] = bitcast <8 x i64> [[TMP68]] to <16 x i32>
+// CHECK: [[TMP70:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP71:%.*]] = bitcast <8 x i64> [[TMP70]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP72:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP73:%.*]] = bitcast <8 x i64> [[TMP72]] to <16 x i32>
+// CHECK: [[TMP74:%.*]] = icmp slt <16 x i32> [[TMP69]], [[TMP71]]
+// CHECK: [[TMP75:%.*]] = select <16 x i1> [[TMP74]], <16 x i32> [[TMP69]], <16 x i32> [[TMP71]]
+// CHECK: [[TMP76:%.*]] = bitcast <16 x i32> [[TMP75]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP76]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP77:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP77]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
int test_mm512_reduce_min_epi32(__m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp sgt <16 x i32> %shuffle1.i, %tmp
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp slt <16 x i32> %tmp2, %shuffle3.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp slt <16 x i32> %tmp4, %shuffle6.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle6.i
- // CHECK: %shuffle9.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp slt <16 x i32> %tmp6, %shuffle9.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle9.i
- // CHECK: %tmp9 = bitcast <16 x i32> %tmp8 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp9, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
return _mm512_reduce_min_epi32(__W);
}
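
The signed minimum is the same four-round ladder, now with icmp slt: 16 lanes to 8, 8 to 4, 4 to 2, 2 to 1, then extract element 0. A hedged intrinsic-level sketch of the whole ladder under that assumption (helper name illustrative; requires -mavx512f):

#include <immintrin.h>

static int reduce_min_epi32_sketch(__m512i v) {
  v = _mm512_min_epi32(v, _mm512_shuffle_i32x4(v, v, _MM_SHUFFLE(0, 0, 3, 2)));
  v = _mm512_min_epi32(v, _mm512_shuffle_i32x4(v, v, _MM_SHUFFLE(0, 0, 0, 1)));
  v = _mm512_min_epi32(v, _mm512_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 3, 2)));
  v = _mm512_min_epi32(v, _mm512_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 1)));
  return _mm512_cvtsi512_si32(v);  /* extractelement ... i32 0 */
}
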
+// CHECK-LABEL: define i32 @test_mm512_reduce_min_epu32(<8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[A_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store <8 x i64> [[TMP0]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i64> [[TMP1]] to <16 x i32>
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP2]], <16 x i32> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP5:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP7:%.*]] = bitcast <8 x i64> [[TMP6]] to <16 x i32>
+// CHECK: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i64> [[TMP8]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP7]], <16 x i32> [[TMP9]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP5]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP10]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i64> [[TMP11]] to <16 x i32>
+// CHECK: [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i64> [[TMP13]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP16:%.*]] = bitcast <8 x i64> [[TMP15]] to <16 x i32>
+// CHECK: [[TMP17:%.*]] = icmp ult <16 x i32> [[TMP12]], [[TMP14]]
+// CHECK: [[TMP18:%.*]] = select <16 x i1> [[TMP17]], <16 x i32> [[TMP12]], <16 x i32> [[TMP14]]
+// CHECK: [[TMP19:%.*]] = bitcast <16 x i32> [[TMP18]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP19]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP21:%.*]] = bitcast <8 x i64> [[TMP20]] to <16 x i32>
+// CHECK: [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = bitcast <8 x i64> [[TMP22]] to <16 x i32>
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <16 x i32> [[TMP21]], <16 x i32> [[TMP23]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP24:%.*]] = bitcast <16 x i32> [[SHUFFLE2_I]] to <8 x i64>
+// CHECK: [[TMP25:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = bitcast <8 x i64> [[TMP25]] to <16 x i32>
+// CHECK: [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = bitcast <8 x i64> [[TMP27]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP26]], <16 x i32> [[TMP28]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP29:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP24]], <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: store <8 x i64> [[TMP29]], <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I18_I]], align 64
+// CHECK: [[TMP31:%.*]] = bitcast <8 x i64> [[TMP30]] to <16 x i32>
+// CHECK: [[TMP32:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP33:%.*]] = bitcast <8 x i64> [[TMP32]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP35:%.*]] = bitcast <8 x i64> [[TMP34]] to <16 x i32>
+// CHECK: [[TMP36:%.*]] = icmp ult <16 x i32> [[TMP31]], [[TMP33]]
+// CHECK: [[TMP37:%.*]] = select <16 x i1> [[TMP36]], <16 x i32> [[TMP31]], <16 x i32> [[TMP33]]
+// CHECK: [[TMP38:%.*]] = bitcast <16 x i32> [[TMP37]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP38]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP40:%.*]] = bitcast <8 x i64> [[TMP39]] to <16 x i32>
+// CHECK: [[TMP41:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = bitcast <8 x i64> [[TMP41]] to <16 x i32>
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <16 x i32> [[TMP40]], <16 x i32> [[TMP42]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP43:%.*]] = bitcast <16 x i32> [[SHUFFLE5_I]] to <8 x i64>
+// CHECK: [[TMP44:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = bitcast <8 x i64> [[TMP44]] to <16 x i32>
+// CHECK: [[TMP46:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP47:%.*]] = bitcast <8 x i64> [[TMP46]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP45]], <16 x i32> [[TMP47]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP48:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP43]], <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: store <8 x i64> [[TMP48]], <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP49:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I15_I]], align 64
+// CHECK: [[TMP50:%.*]] = bitcast <8 x i64> [[TMP49]] to <16 x i32>
+// CHECK: [[TMP51:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP52:%.*]] = bitcast <8 x i64> [[TMP51]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP53:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP54:%.*]] = bitcast <8 x i64> [[TMP53]] to <16 x i32>
+// CHECK: [[TMP55:%.*]] = icmp ult <16 x i32> [[TMP50]], [[TMP52]]
+// CHECK: [[TMP56:%.*]] = select <16 x i1> [[TMP55]], <16 x i32> [[TMP50]], <16 x i32> [[TMP52]]
+// CHECK: [[TMP57:%.*]] = bitcast <16 x i32> [[TMP56]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP57]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP58:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP59:%.*]] = bitcast <8 x i64> [[TMP58]] to <16 x i32>
+// CHECK: [[TMP60:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP61:%.*]] = bitcast <8 x i64> [[TMP60]] to <16 x i32>
+// CHECK: [[SHUFFLE8_I:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> [[TMP61]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP62:%.*]] = bitcast <16 x i32> [[SHUFFLE8_I]] to <8 x i64>
+// CHECK: [[TMP63:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = bitcast <8 x i64> [[TMP63]] to <16 x i32>
+// CHECK: [[TMP65:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP66:%.*]] = bitcast <8 x i64> [[TMP65]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP64]], <16 x i32> [[TMP66]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP67:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP62]], <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <8 x i64> [[TMP67]], <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP68:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP69:%.*]] = bitcast <8 x i64> [[TMP68]] to <16 x i32>
+// CHECK: [[TMP70:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP71:%.*]] = bitcast <8 x i64> [[TMP70]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP72:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP73:%.*]] = bitcast <8 x i64> [[TMP72]] to <16 x i32>
+// CHECK: [[TMP74:%.*]] = icmp ult <16 x i32> [[TMP69]], [[TMP71]]
+// CHECK: [[TMP75:%.*]] = select <16 x i1> [[TMP74]], <16 x i32> [[TMP69]], <16 x i32> [[TMP71]]
+// CHECK: [[TMP76:%.*]] = bitcast <16 x i32> [[TMP75]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP76]], <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP77:%.*]] = load <8 x i64>, <8 x i64>* [[A_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP77]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
 unsigned int test_mm512_reduce_min_epu32(__m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = icmp ugt <16 x i32> %shuffle1.i, %tmp
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> %shuffle1.i
- // CHECK: %shuffle3.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp ult <16 x i32> %tmp2, %shuffle3.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle3.i
- // CHECK: %shuffle6.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp ult <16 x i32> %tmp4, %shuffle6.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle6.i
- // CHECK: %shuffle9.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp ult <16 x i32> %tmp6, %shuffle9.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle9.i
- // CHECK: %tmp9 = bitcast <16 x i32> %tmp8 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp9, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
 return _mm512_reduce_min_epu32(__W);
 }
+// CHECK-LABEL: define float @test_mm512_reduce_min_ps(<16 x float> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I17_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I18_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I19_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I14_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I15_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I16_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I11_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I12_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I13_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[A_ADDR_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__W_ADDR:%.*]] = alloca <16 x float>, align 64
+// CHECK: store <16 x float> %__W, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load <16 x float>, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: store <16 x float> [[TMP0]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x float> [[TMP1]], <16 x float> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP3:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x float> [[TMP3]], <16 x float> [[TMP4]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE_I]], <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE1_I]], <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP5:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP6:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP7:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP8:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP5]], <16 x float> [[TMP6]], <16 x float> [[TMP7]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP8]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP9:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP10:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE2_I:%.*]] = shufflevector <16 x float> [[TMP9]], <16 x float> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP11:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP12:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x float> [[TMP11]], <16 x float> [[TMP12]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE2_I]], <16 x float>* [[__A_ADDR_I18_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE3_I]], <16 x float>* [[__B_ADDR_I19_I]], align 64
+// CHECK: [[TMP13:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I18_I]], align 64
+// CHECK: [[TMP14:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I19_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP15:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I17_I]], align 64
+// CHECK: [[TMP16:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP13]], <16 x float> [[TMP14]], <16 x float> [[TMP15]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP16]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP17:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP18:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE5_I:%.*]] = shufflevector <16 x float> [[TMP17]], <16 x float> [[TMP18]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP19:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x float> [[TMP19]], <16 x float> [[TMP20]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE5_I]], <16 x float>* [[__A_ADDR_I15_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE6_I]], <16 x float>* [[__B_ADDR_I16_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I15_I]], align 64
+// CHECK: [[TMP22:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I16_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I14_I]], align 64
+// CHECK: [[TMP24:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP21]], <16 x float> [[TMP22]], <16 x float> [[TMP23]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP24]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP25:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE8_I:%.*]] = shufflevector <16 x float> [[TMP25]], <16 x float> [[TMP26]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP27:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x float> [[TMP27]], <16 x float> [[TMP28]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE8_I]], <16 x float>* [[__A_ADDR_I12_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE9_I]], <16 x float>* [[__B_ADDR_I13_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I12_I]], align 64
+// CHECK: [[TMP30:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I13_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP31:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I11_I]], align 64
+// CHECK: [[TMP32:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP29]], <16 x float> [[TMP30]], <16 x float> [[TMP31]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP32]], <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[TMP33:%.*]] = load <16 x float>, <16 x float>* [[A_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <16 x float> [[TMP33]], i32 0
+// CHECK: ret float [[VECEXT_I]]
 float test_mm512_reduce_min_ps(__m512 __W){
- // CHECK: %shuffle1.i = shufflevector <16 x float> %__W, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %__W, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle3.i = shufflevector <16 x float> %tmp, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp1 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp, <16 x float> %shuffle3.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle6.i = shufflevector <16 x float> %tmp1, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp1, <16 x float> %shuffle6.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle9.i = shufflevector <16 x float> %tmp2, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp2, <16 x float> %shuffle9.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <16 x float> %tmp3, i32 0
- // CHECK: ret float %vecext.i
 return _mm512_reduce_min_ps(__W);
 }
+// CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epi32(i16 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I20_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i16, align 2
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i16, align 2
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i16 %__M, i16* [[__M_ADDR]], align 2
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i16, i16* [[__M_ADDR]], align 2
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i16 [[TMP0]], i16* [[__M_ADDR_I]], align 2
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[__M_ADDR_I]], align 2
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: store i32 -2147483648, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[TMP5:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK: [[TMP6:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <16 x i32> [[VECINIT_I_I]], i32 [[TMP6]], i32 1
+// CHECK: [[TMP7:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <16 x i32> [[VECINIT1_I_I]], i32 [[TMP7]], i32 2
+// CHECK: [[TMP8:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <16 x i32> [[VECINIT2_I_I]], i32 [[TMP8]], i32 3
+// CHECK: [[TMP9:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <16 x i32> [[VECINIT3_I_I]], i32 [[TMP9]], i32 4
+// CHECK: [[TMP10:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <16 x i32> [[VECINIT4_I_I]], i32 [[TMP10]], i32 5
+// CHECK: [[TMP11:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <16 x i32> [[VECINIT5_I_I]], i32 [[TMP11]], i32 6
+// CHECK: [[TMP12:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <16 x i32> [[VECINIT6_I_I]], i32 [[TMP12]], i32 7
+// CHECK: [[TMP13:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT8_I_I:%.*]] = insertelement <16 x i32> [[VECINIT7_I_I]], i32 [[TMP13]], i32 8
+// CHECK: [[TMP14:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT9_I_I:%.*]] = insertelement <16 x i32> [[VECINIT8_I_I]], i32 [[TMP14]], i32 9
+// CHECK: [[TMP15:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT10_I_I:%.*]] = insertelement <16 x i32> [[VECINIT9_I_I]], i32 [[TMP15]], i32 10
+// CHECK: [[TMP16:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT11_I_I:%.*]] = insertelement <16 x i32> [[VECINIT10_I_I]], i32 [[TMP16]], i32 11
+// CHECK: [[TMP17:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT12_I_I:%.*]] = insertelement <16 x i32> [[VECINIT11_I_I]], i32 [[TMP17]], i32 12
+// CHECK: [[TMP18:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT13_I_I:%.*]] = insertelement <16 x i32> [[VECINIT12_I_I]], i32 [[TMP18]], i32 13
+// CHECK: [[TMP19:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT14_I_I:%.*]] = insertelement <16 x i32> [[VECINIT13_I_I]], i32 [[TMP19]], i32 14
+// CHECK: [[TMP20:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT15_I_I:%.*]] = insertelement <16 x i32> [[VECINIT14_I_I]], i32 [[TMP20]], i32 15
+// CHECK: store <16 x i32> [[VECINIT15_I_I]], <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <16 x i32>, <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP22:%.*]] = bitcast <16 x i32> [[TMP21]] to <8 x i64>
+// CHECK: [[TMP23:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
+// CHECK: [[TMP24:%.*]] = select <16 x i1> [[TMP23]], <16 x i32> [[TMP4]], <16 x i32> [[TMP21]]
+// CHECK: [[TMP25:%.*]] = bitcast <16 x i32> [[TMP24]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP25]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = bitcast <8 x i64> [[TMP26]] to <16 x i32>
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP29:%.*]] = bitcast <8 x i64> [[TMP28]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP27]], <16 x i32> [[TMP29]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP30:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP31:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = bitcast <8 x i64> [[TMP31]] to <16 x i32>
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = bitcast <8 x i64> [[TMP33]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP32]], <16 x i32> [[TMP34]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP30]], <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: store <8 x i64> [[TMP35]], <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: [[TMP37:%.*]] = bitcast <8 x i64> [[TMP36]] to <16 x i32>
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP39:%.*]] = bitcast <8 x i64> [[TMP38]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP40:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP41:%.*]] = bitcast <8 x i64> [[TMP40]] to <16 x i32>
+// CHECK: [[TMP42:%.*]] = icmp sgt <16 x i32> [[TMP37]], [[TMP39]]
+// CHECK: [[TMP43:%.*]] = select <16 x i1> [[TMP42]], <16 x i32> [[TMP37]], <16 x i32> [[TMP39]]
+// CHECK: [[TMP44:%.*]] = bitcast <16 x i32> [[TMP43]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP44]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP46:%.*]] = bitcast <8 x i64> [[TMP45]] to <16 x i32>
+// CHECK: [[TMP47:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP48:%.*]] = bitcast <8 x i64> [[TMP47]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP46]], <16 x i32> [[TMP48]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP49:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: [[TMP50:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP51:%.*]] = bitcast <8 x i64> [[TMP50]] to <16 x i32>
+// CHECK: [[TMP52:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP53:%.*]] = bitcast <8 x i64> [[TMP52]] to <16 x i32>
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> [[TMP53]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP54:%.*]] = bitcast <16 x i32> [[SHUFFLE4_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP49]], <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: store <8 x i64> [[TMP54]], <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP55:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: [[TMP56:%.*]] = bitcast <8 x i64> [[TMP55]] to <16 x i32>
+// CHECK: [[TMP57:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP58:%.*]] = bitcast <8 x i64> [[TMP57]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP59:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP60:%.*]] = bitcast <8 x i64> [[TMP59]] to <16 x i32>
+// CHECK: [[TMP61:%.*]] = icmp sgt <16 x i32> [[TMP56]], [[TMP58]]
+// CHECK: [[TMP62:%.*]] = select <16 x i1> [[TMP61]], <16 x i32> [[TMP56]], <16 x i32> [[TMP58]]
+// CHECK: [[TMP63:%.*]] = bitcast <16 x i32> [[TMP62]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP63]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP65:%.*]] = bitcast <8 x i64> [[TMP64]] to <16 x i32>
+// CHECK: [[TMP66:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP67:%.*]] = bitcast <8 x i64> [[TMP66]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP65]], <16 x i32> [[TMP67]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP68:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: [[TMP69:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP70:%.*]] = bitcast <8 x i64> [[TMP69]] to <16 x i32>
+// CHECK: [[TMP71:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP72:%.*]] = bitcast <8 x i64> [[TMP71]] to <16 x i32>
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <16 x i32> [[TMP70]], <16 x i32> [[TMP72]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP73:%.*]] = bitcast <16 x i32> [[SHUFFLE7_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP68]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[TMP73]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP74:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP75:%.*]] = bitcast <8 x i64> [[TMP74]] to <16 x i32>
+// CHECK: [[TMP76:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP77:%.*]] = bitcast <8 x i64> [[TMP76]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP78:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP79:%.*]] = bitcast <8 x i64> [[TMP78]] to <16 x i32>
+// CHECK: [[TMP80:%.*]] = icmp sgt <16 x i32> [[TMP75]], [[TMP77]]
+// CHECK: [[TMP81:%.*]] = select <16 x i1> [[TMP80]], <16 x i32> [[TMP75]], <16 x i32> [[TMP77]]
+// CHECK: [[TMP82:%.*]] = bitcast <16 x i32> [[TMP81]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP82]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP83:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP84:%.*]] = bitcast <8 x i64> [[TMP83]] to <16 x i32>
+// CHECK: [[TMP85:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP86:%.*]] = bitcast <8 x i64> [[TMP85]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP84]], <16 x i32> [[TMP86]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP87:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: [[TMP88:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP89:%.*]] = bitcast <8 x i64> [[TMP88]] to <16 x i32>
+// CHECK: [[TMP90:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP91:%.*]] = bitcast <8 x i64> [[TMP90]] to <16 x i32>
+// CHECK: [[SHUFFLE10_I:%.*]] = shufflevector <16 x i32> [[TMP89]], <16 x i32> [[TMP91]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP92:%.*]] = bitcast <16 x i32> [[SHUFFLE10_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP87]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP92]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP93:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP94:%.*]] = bitcast <8 x i64> [[TMP93]] to <16 x i32>
+// CHECK: [[TMP95:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP96:%.*]] = bitcast <8 x i64> [[TMP95]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP97:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP98:%.*]] = bitcast <8 x i64> [[TMP97]] to <16 x i32>
+// CHECK: [[TMP99:%.*]] = icmp sgt <16 x i32> [[TMP94]], [[TMP96]]
+// CHECK: [[TMP100:%.*]] = select <16 x i1> [[TMP99]], <16 x i32> [[TMP94]], <16 x i32> [[TMP96]]
+// CHECK: [[TMP101:%.*]] = bitcast <16 x i32> [[TMP100]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP101]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP102:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP102]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
 int test_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %tmp1 = bitcast i16 %__M to <16 x i1>
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp sgt <16 x i32> %tmp2, %shuffle1.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp sgt <16 x i32> %tmp4, %shuffle4.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp sgt <16 x i32> %tmp6, %shuffle7.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle7.i
- // CHECK: %shuffle10.i = shufflevector <16 x i32> %tmp8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp9 = icmp sgt <16 x i32> %tmp8, %shuffle10.i
- // CHECK: %tmp10 = select <16 x i1> %tmp9, <16 x i32> %tmp8, <16 x i32> %shuffle10.i
- // CHECK: %tmp11 = bitcast <16 x i32> %tmp10 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp11, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
 return _mm512_mask_reduce_max_epi32(__M, __W);
 }
+// CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epu32(i16 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I20_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i16, align 2
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i16, align 2
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i16 %__M, i16* [[__M_ADDR]], align 2
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i16, i16* [[__M_ADDR]], align 2
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i16 [[TMP0]], i16* [[__M_ADDR_I]], align 2
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[__M_ADDR_I]], align 2
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: store i32 0, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[TMP5:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK: [[TMP6:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <16 x i32> [[VECINIT_I_I]], i32 [[TMP6]], i32 1
+// CHECK: [[TMP7:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <16 x i32> [[VECINIT1_I_I]], i32 [[TMP7]], i32 2
+// CHECK: [[TMP8:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <16 x i32> [[VECINIT2_I_I]], i32 [[TMP8]], i32 3
+// CHECK: [[TMP9:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <16 x i32> [[VECINIT3_I_I]], i32 [[TMP9]], i32 4
+// CHECK: [[TMP10:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <16 x i32> [[VECINIT4_I_I]], i32 [[TMP10]], i32 5
+// CHECK: [[TMP11:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <16 x i32> [[VECINIT5_I_I]], i32 [[TMP11]], i32 6
+// CHECK: [[TMP12:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <16 x i32> [[VECINIT6_I_I]], i32 [[TMP12]], i32 7
+// CHECK: [[TMP13:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT8_I_I:%.*]] = insertelement <16 x i32> [[VECINIT7_I_I]], i32 [[TMP13]], i32 8
+// CHECK: [[TMP14:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT9_I_I:%.*]] = insertelement <16 x i32> [[VECINIT8_I_I]], i32 [[TMP14]], i32 9
+// CHECK: [[TMP15:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT10_I_I:%.*]] = insertelement <16 x i32> [[VECINIT9_I_I]], i32 [[TMP15]], i32 10
+// CHECK: [[TMP16:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT11_I_I:%.*]] = insertelement <16 x i32> [[VECINIT10_I_I]], i32 [[TMP16]], i32 11
+// CHECK: [[TMP17:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT12_I_I:%.*]] = insertelement <16 x i32> [[VECINIT11_I_I]], i32 [[TMP17]], i32 12
+// CHECK: [[TMP18:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT13_I_I:%.*]] = insertelement <16 x i32> [[VECINIT12_I_I]], i32 [[TMP18]], i32 13
+// CHECK: [[TMP19:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT14_I_I:%.*]] = insertelement <16 x i32> [[VECINIT13_I_I]], i32 [[TMP19]], i32 14
+// CHECK: [[TMP20:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT15_I_I:%.*]] = insertelement <16 x i32> [[VECINIT14_I_I]], i32 [[TMP20]], i32 15
+// CHECK: store <16 x i32> [[VECINIT15_I_I]], <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <16 x i32>, <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP22:%.*]] = bitcast <16 x i32> [[TMP21]] to <8 x i64>
+// CHECK: [[TMP23:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
+// CHECK: [[TMP24:%.*]] = select <16 x i1> [[TMP23]], <16 x i32> [[TMP4]], <16 x i32> [[TMP21]]
+// CHECK: [[TMP25:%.*]] = bitcast <16 x i32> [[TMP24]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP25]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = bitcast <8 x i64> [[TMP26]] to <16 x i32>
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP29:%.*]] = bitcast <8 x i64> [[TMP28]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP27]], <16 x i32> [[TMP29]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP30:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP31:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = bitcast <8 x i64> [[TMP31]] to <16 x i32>
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = bitcast <8 x i64> [[TMP33]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP32]], <16 x i32> [[TMP34]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP30]], <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: store <8 x i64> [[TMP35]], <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: [[TMP37:%.*]] = bitcast <8 x i64> [[TMP36]] to <16 x i32>
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP39:%.*]] = bitcast <8 x i64> [[TMP38]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP40:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP41:%.*]] = bitcast <8 x i64> [[TMP40]] to <16 x i32>
+// CHECK: [[TMP42:%.*]] = icmp ugt <16 x i32> [[TMP37]], [[TMP39]]
+// CHECK: [[TMP43:%.*]] = select <16 x i1> [[TMP42]], <16 x i32> [[TMP37]], <16 x i32> [[TMP39]]
+// CHECK: [[TMP44:%.*]] = bitcast <16 x i32> [[TMP43]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP44]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP46:%.*]] = bitcast <8 x i64> [[TMP45]] to <16 x i32>
+// CHECK: [[TMP47:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP48:%.*]] = bitcast <8 x i64> [[TMP47]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP46]], <16 x i32> [[TMP48]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP49:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: [[TMP50:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP51:%.*]] = bitcast <8 x i64> [[TMP50]] to <16 x i32>
+// CHECK: [[TMP52:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP53:%.*]] = bitcast <8 x i64> [[TMP52]] to <16 x i32>
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> [[TMP53]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP54:%.*]] = bitcast <16 x i32> [[SHUFFLE4_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP49]], <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: store <8 x i64> [[TMP54]], <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP55:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: [[TMP56:%.*]] = bitcast <8 x i64> [[TMP55]] to <16 x i32>
+// CHECK: [[TMP57:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP58:%.*]] = bitcast <8 x i64> [[TMP57]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP59:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP60:%.*]] = bitcast <8 x i64> [[TMP59]] to <16 x i32>
+// CHECK: [[TMP61:%.*]] = icmp ugt <16 x i32> [[TMP56]], [[TMP58]]
+// CHECK: [[TMP62:%.*]] = select <16 x i1> [[TMP61]], <16 x i32> [[TMP56]], <16 x i32> [[TMP58]]
+// CHECK: [[TMP63:%.*]] = bitcast <16 x i32> [[TMP62]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP63]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP65:%.*]] = bitcast <8 x i64> [[TMP64]] to <16 x i32>
+// CHECK: [[TMP66:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP67:%.*]] = bitcast <8 x i64> [[TMP66]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP65]], <16 x i32> [[TMP67]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP68:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: [[TMP69:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP70:%.*]] = bitcast <8 x i64> [[TMP69]] to <16 x i32>
+// CHECK: [[TMP71:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP72:%.*]] = bitcast <8 x i64> [[TMP71]] to <16 x i32>
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <16 x i32> [[TMP70]], <16 x i32> [[TMP72]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP73:%.*]] = bitcast <16 x i32> [[SHUFFLE7_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP68]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[TMP73]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP74:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP75:%.*]] = bitcast <8 x i64> [[TMP74]] to <16 x i32>
+// CHECK: [[TMP76:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP77:%.*]] = bitcast <8 x i64> [[TMP76]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP78:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP79:%.*]] = bitcast <8 x i64> [[TMP78]] to <16 x i32>
+// CHECK: [[TMP80:%.*]] = icmp ugt <16 x i32> [[TMP75]], [[TMP77]]
+// CHECK: [[TMP81:%.*]] = select <16 x i1> [[TMP80]], <16 x i32> [[TMP75]], <16 x i32> [[TMP77]]
+// CHECK: [[TMP82:%.*]] = bitcast <16 x i32> [[TMP81]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP82]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP83:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP84:%.*]] = bitcast <8 x i64> [[TMP83]] to <16 x i32>
+// CHECK: [[TMP85:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP86:%.*]] = bitcast <8 x i64> [[TMP85]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP84]], <16 x i32> [[TMP86]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP87:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: [[TMP88:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP89:%.*]] = bitcast <8 x i64> [[TMP88]] to <16 x i32>
+// CHECK: [[TMP90:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP91:%.*]] = bitcast <8 x i64> [[TMP90]] to <16 x i32>
+// CHECK: [[SHUFFLE10_I:%.*]] = shufflevector <16 x i32> [[TMP89]], <16 x i32> [[TMP91]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP92:%.*]] = bitcast <16 x i32> [[SHUFFLE10_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP87]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP92]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP93:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP94:%.*]] = bitcast <8 x i64> [[TMP93]] to <16 x i32>
+// CHECK: [[TMP95:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP96:%.*]] = bitcast <8 x i64> [[TMP95]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP97:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP98:%.*]] = bitcast <8 x i64> [[TMP97]] to <16 x i32>
+// CHECK: [[TMP99:%.*]] = icmp ugt <16 x i32> [[TMP94]], [[TMP96]]
+// CHECK: [[TMP100:%.*]] = select <16 x i1> [[TMP99]], <16 x i32> [[TMP94]], <16 x i32> [[TMP96]]
+// CHECK: [[TMP101:%.*]] = bitcast <16 x i32> [[TMP100]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP101]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP102:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP102]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
 unsigned int test_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %tmp1 = bitcast i16 %__M to <16 x i1>
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> zeroinitializer
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp ugt <16 x i32> %tmp2, %shuffle1.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp ugt <16 x i32> %tmp4, %shuffle4.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp ugt <16 x i32> %tmp6, %shuffle7.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle7.i
- // CHECK: %shuffle10.i = shufflevector <16 x i32> %tmp8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp9 = icmp ugt <16 x i32> %tmp8, %shuffle10.i
- // CHECK: %tmp10 = select <16 x i1> %tmp9, <16 x i32> %tmp8, <16 x i32> %shuffle10.i
- // CHECK: %tmp11 = bitcast <16 x i32> %tmp10 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp11, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
 return _mm512_mask_reduce_max_epu32(__M, __W);
 }
+// CHECK-LABEL: define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I18_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I19_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I20_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I15_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I16_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I17_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__W_ADDR_I_I:%.*]] = alloca float, align 4
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i16, align 2
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i16, align 2
+// CHECK: [[__W_ADDR:%.*]] = alloca <16 x float>, align 64
+// CHECK: store i16 %__M, i16* [[__M_ADDR]], align 2
+// CHECK: store <16 x float> %__W, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i16, i16* [[__M_ADDR]], align 2
+// CHECK: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: store i16 [[TMP0]], i16* [[__M_ADDR_I]], align 2
+// CHECK: store <16 x float> [[TMP1]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[__M_ADDR_I]], align 2
+// CHECK: [[TMP3:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: store float 0x7FF0000000000000, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[TMP4:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <16 x float> undef, float [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <16 x float> [[VECINIT_I_I]], float [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <16 x float> [[VECINIT1_I_I]], float [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <16 x float> [[VECINIT2_I_I]], float [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <16 x float> [[VECINIT3_I_I]], float [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <16 x float> [[VECINIT4_I_I]], float [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <16 x float> [[VECINIT5_I_I]], float [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <16 x float> [[VECINIT6_I_I]], float [[TMP11]], i32 7
+// CHECK: [[TMP12:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT8_I_I:%.*]] = insertelement <16 x float> [[VECINIT7_I_I]], float [[TMP12]], i32 8
+// CHECK: [[TMP13:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT9_I_I:%.*]] = insertelement <16 x float> [[VECINIT8_I_I]], float [[TMP13]], i32 9
+// CHECK: [[TMP14:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT10_I_I:%.*]] = insertelement <16 x float> [[VECINIT9_I_I]], float [[TMP14]], i32 10
+// CHECK: [[TMP15:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT11_I_I:%.*]] = insertelement <16 x float> [[VECINIT10_I_I]], float [[TMP15]], i32 11
+// CHECK: [[TMP16:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT12_I_I:%.*]] = insertelement <16 x float> [[VECINIT11_I_I]], float [[TMP16]], i32 12
+// CHECK: [[TMP17:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT13_I_I:%.*]] = insertelement <16 x float> [[VECINIT12_I_I]], float [[TMP17]], i32 13
+// CHECK: [[TMP18:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT14_I_I:%.*]] = insertelement <16 x float> [[VECINIT13_I_I]], float [[TMP18]], i32 14
+// CHECK: [[TMP19:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT15_I_I:%.*]] = insertelement <16 x float> [[VECINIT14_I_I]], float [[TMP19]], i32 15
+// CHECK: store <16 x float> [[VECINIT15_I_I]], <16 x float>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[SUB_I:%.*]] = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, [[TMP20]]
+// CHECK: [[TMP21:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
+// CHECK: [[TMP22:%.*]] = select <16 x i1> [[TMP21]], <16 x float> [[TMP3]], <16 x float> [[SUB_I]]
+// CHECK: store <16 x float> [[TMP22]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x float> [[TMP23]], <16 x float> [[TMP24]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP25:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x float> [[TMP25]], <16 x float> [[TMP26]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE_I]], <16 x float>* [[__A_ADDR_I19_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE1_I]], <16 x float>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I19_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I20_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP30:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP27]], <16 x float> [[TMP28]], <16 x float> [[TMP29]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP30]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP31:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x float> [[TMP31]], <16 x float> [[TMP32]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP33:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <16 x float> [[TMP33]], <16 x float> [[TMP34]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE3_I]], <16 x float>* [[__A_ADDR_I16_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE4_I]], <16 x float>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP35:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I16_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I17_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP38:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP35]], <16 x float> [[TMP36]], <16 x float> [[TMP37]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP38]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP40:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x float> [[TMP39]], <16 x float> [[TMP40]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP41:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <16 x float> [[TMP41]], <16 x float> [[TMP42]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE6_I]], <16 x float>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE7_I]], <16 x float>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP43:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP44:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP45:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP46:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP43]], <16 x float> [[TMP44]], <16 x float> [[TMP45]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP46]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP47:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP48:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x float> [[TMP47]], <16 x float> [[TMP48]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP49:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP50:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE10_I:%.*]] = shufflevector <16 x float> [[TMP49]], <16 x float> [[TMP50]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE9_I]], <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE10_I]], <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP51:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP52:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP53:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP54:%.*]] = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> [[TMP51]], <16 x float> [[TMP52]], <16 x float> [[TMP53]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP54]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP55:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <16 x float> [[TMP55]], i32 0
+// CHECK: ret float [[VECEXT_I]]
float test_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __W){
- // CHECK: %tmp = bitcast i16 %__M to <16 x i1>
- // CHECK: %tmp1 = select <16 x i1> %tmp, <16 x float> %__W, <16 x float> <float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000, float 0xFFF0000000000000>
- // CHECK: %shuffle1.i = shufflevector <16 x float> %tmp1, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp1, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle4.i = shufflevector <16 x float> %tmp2, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp2, <16 x float> %shuffle4.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle7.i = shufflevector <16 x float> %tmp3, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp3, <16 x float> %shuffle7.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle10.i = shufflevector <16 x float> %tmp4, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = tail call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %tmp4, <16 x float> %shuffle10.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <16 x float> %tmp5, i32 0
- // CHECK: ret float %vecext.i
return _mm512_mask_reduce_max_ps(__M, __W);
}
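// Editorial sketch: the float variant seeds masked-off lanes with -infinity;
// the new unoptimized IR above builds it by splatting +inf
// (0x7FF0000000000000) and negating the splat with fsub. A hedged scalar
// model (helper name hypothetical; the NaN behavior of vmaxps is not modeled):
static float ref_mask_reduce_max_ps(__mmask16 m, const float v[16]) {
  float acc = -__builtin_inff();           // -inf, identity for float max
  for (int i = 0; i < 16; ++i)
    if (m & (1u << i))
      acc = v[i] > acc ? v[i] : acc;       // scalar stand-in for mask.max.ps.512
  return acc;
}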
+// CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epi32(i16 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I20_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i16, align 2
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i16, align 2
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i16 %__M, i16* [[__M_ADDR]], align 2
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i16, i16* [[__M_ADDR]], align 2
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i16 [[TMP0]], i16* [[__M_ADDR_I]], align 2
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[__M_ADDR_I]], align 2
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: store i32 2147483647, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[TMP5:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK: [[TMP6:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <16 x i32> [[VECINIT_I_I]], i32 [[TMP6]], i32 1
+// CHECK: [[TMP7:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <16 x i32> [[VECINIT1_I_I]], i32 [[TMP7]], i32 2
+// CHECK: [[TMP8:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <16 x i32> [[VECINIT2_I_I]], i32 [[TMP8]], i32 3
+// CHECK: [[TMP9:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <16 x i32> [[VECINIT3_I_I]], i32 [[TMP9]], i32 4
+// CHECK: [[TMP10:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <16 x i32> [[VECINIT4_I_I]], i32 [[TMP10]], i32 5
+// CHECK: [[TMP11:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <16 x i32> [[VECINIT5_I_I]], i32 [[TMP11]], i32 6
+// CHECK: [[TMP12:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <16 x i32> [[VECINIT6_I_I]], i32 [[TMP12]], i32 7
+// CHECK: [[TMP13:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT8_I_I:%.*]] = insertelement <16 x i32> [[VECINIT7_I_I]], i32 [[TMP13]], i32 8
+// CHECK: [[TMP14:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT9_I_I:%.*]] = insertelement <16 x i32> [[VECINIT8_I_I]], i32 [[TMP14]], i32 9
+// CHECK: [[TMP15:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT10_I_I:%.*]] = insertelement <16 x i32> [[VECINIT9_I_I]], i32 [[TMP15]], i32 10
+// CHECK: [[TMP16:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT11_I_I:%.*]] = insertelement <16 x i32> [[VECINIT10_I_I]], i32 [[TMP16]], i32 11
+// CHECK: [[TMP17:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT12_I_I:%.*]] = insertelement <16 x i32> [[VECINIT11_I_I]], i32 [[TMP17]], i32 12
+// CHECK: [[TMP18:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT13_I_I:%.*]] = insertelement <16 x i32> [[VECINIT12_I_I]], i32 [[TMP18]], i32 13
+// CHECK: [[TMP19:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT14_I_I:%.*]] = insertelement <16 x i32> [[VECINIT13_I_I]], i32 [[TMP19]], i32 14
+// CHECK: [[TMP20:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT15_I_I:%.*]] = insertelement <16 x i32> [[VECINIT14_I_I]], i32 [[TMP20]], i32 15
+// CHECK: store <16 x i32> [[VECINIT15_I_I]], <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <16 x i32>, <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP22:%.*]] = bitcast <16 x i32> [[TMP21]] to <8 x i64>
+// CHECK: [[TMP23:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
+// CHECK: [[TMP24:%.*]] = select <16 x i1> [[TMP23]], <16 x i32> [[TMP4]], <16 x i32> [[TMP21]]
+// CHECK: [[TMP25:%.*]] = bitcast <16 x i32> [[TMP24]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP25]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = bitcast <8 x i64> [[TMP26]] to <16 x i32>
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP29:%.*]] = bitcast <8 x i64> [[TMP28]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP27]], <16 x i32> [[TMP29]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP30:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP31:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = bitcast <8 x i64> [[TMP31]] to <16 x i32>
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = bitcast <8 x i64> [[TMP33]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP32]], <16 x i32> [[TMP34]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP30]], <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: store <8 x i64> [[TMP35]], <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: [[TMP37:%.*]] = bitcast <8 x i64> [[TMP36]] to <16 x i32>
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP39:%.*]] = bitcast <8 x i64> [[TMP38]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP40:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP41:%.*]] = bitcast <8 x i64> [[TMP40]] to <16 x i32>
+// CHECK: [[TMP42:%.*]] = icmp slt <16 x i32> [[TMP37]], [[TMP39]]
+// CHECK: [[TMP43:%.*]] = select <16 x i1> [[TMP42]], <16 x i32> [[TMP37]], <16 x i32> [[TMP39]]
+// CHECK: [[TMP44:%.*]] = bitcast <16 x i32> [[TMP43]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP44]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP46:%.*]] = bitcast <8 x i64> [[TMP45]] to <16 x i32>
+// CHECK: [[TMP47:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP48:%.*]] = bitcast <8 x i64> [[TMP47]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP46]], <16 x i32> [[TMP48]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP49:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: [[TMP50:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP51:%.*]] = bitcast <8 x i64> [[TMP50]] to <16 x i32>
+// CHECK: [[TMP52:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP53:%.*]] = bitcast <8 x i64> [[TMP52]] to <16 x i32>
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> [[TMP53]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP54:%.*]] = bitcast <16 x i32> [[SHUFFLE4_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP49]], <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: store <8 x i64> [[TMP54]], <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP55:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: [[TMP56:%.*]] = bitcast <8 x i64> [[TMP55]] to <16 x i32>
+// CHECK: [[TMP57:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP58:%.*]] = bitcast <8 x i64> [[TMP57]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP59:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP60:%.*]] = bitcast <8 x i64> [[TMP59]] to <16 x i32>
+// CHECK: [[TMP61:%.*]] = icmp slt <16 x i32> [[TMP56]], [[TMP58]]
+// CHECK: [[TMP62:%.*]] = select <16 x i1> [[TMP61]], <16 x i32> [[TMP56]], <16 x i32> [[TMP58]]
+// CHECK: [[TMP63:%.*]] = bitcast <16 x i32> [[TMP62]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP63]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP65:%.*]] = bitcast <8 x i64> [[TMP64]] to <16 x i32>
+// CHECK: [[TMP66:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP67:%.*]] = bitcast <8 x i64> [[TMP66]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP65]], <16 x i32> [[TMP67]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP68:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: [[TMP69:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP70:%.*]] = bitcast <8 x i64> [[TMP69]] to <16 x i32>
+// CHECK: [[TMP71:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP72:%.*]] = bitcast <8 x i64> [[TMP71]] to <16 x i32>
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <16 x i32> [[TMP70]], <16 x i32> [[TMP72]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP73:%.*]] = bitcast <16 x i32> [[SHUFFLE7_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP68]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[TMP73]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP74:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP75:%.*]] = bitcast <8 x i64> [[TMP74]] to <16 x i32>
+// CHECK: [[TMP76:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP77:%.*]] = bitcast <8 x i64> [[TMP76]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP78:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP79:%.*]] = bitcast <8 x i64> [[TMP78]] to <16 x i32>
+// CHECK: [[TMP80:%.*]] = icmp slt <16 x i32> [[TMP75]], [[TMP77]]
+// CHECK: [[TMP81:%.*]] = select <16 x i1> [[TMP80]], <16 x i32> [[TMP75]], <16 x i32> [[TMP77]]
+// CHECK: [[TMP82:%.*]] = bitcast <16 x i32> [[TMP81]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP82]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP83:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP84:%.*]] = bitcast <8 x i64> [[TMP83]] to <16 x i32>
+// CHECK: [[TMP85:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP86:%.*]] = bitcast <8 x i64> [[TMP85]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP84]], <16 x i32> [[TMP86]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP87:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: [[TMP88:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP89:%.*]] = bitcast <8 x i64> [[TMP88]] to <16 x i32>
+// CHECK: [[TMP90:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP91:%.*]] = bitcast <8 x i64> [[TMP90]] to <16 x i32>
+// CHECK: [[SHUFFLE10_I:%.*]] = shufflevector <16 x i32> [[TMP89]], <16 x i32> [[TMP91]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP92:%.*]] = bitcast <16 x i32> [[SHUFFLE10_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP87]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP92]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP93:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP94:%.*]] = bitcast <8 x i64> [[TMP93]] to <16 x i32>
+// CHECK: [[TMP95:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP96:%.*]] = bitcast <8 x i64> [[TMP95]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP97:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP98:%.*]] = bitcast <8 x i64> [[TMP97]] to <16 x i32>
+// CHECK: [[TMP99:%.*]] = icmp slt <16 x i32> [[TMP94]], [[TMP96]]
+// CHECK: [[TMP100:%.*]] = select <16 x i1> [[TMP99]], <16 x i32> [[TMP94]], <16 x i32> [[TMP96]]
+// CHECK: [[TMP101:%.*]] = bitcast <16 x i32> [[TMP100]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP101]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP102:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP102]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
int test_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %tmp1 = bitcast i16 %__M to <16 x i1>
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp slt <16 x i32> %tmp2, %shuffle1.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp slt <16 x i32> %tmp4, %shuffle4.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp slt <16 x i32> %tmp6, %shuffle7.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle7.i
- // CHECK: %shuffle10.i = shufflevector <16 x i32> %tmp8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp9 = icmp slt <16 x i32> %tmp8, %shuffle10.i
- // CHECK: %tmp10 = select <16 x i1> %tmp9, <16 x i32> %tmp8, <16 x i32> %shuffle10.i
- // CHECK: %tmp11 = bitcast <16 x i32> %tmp10 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp11, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
return _mm512_mask_reduce_min_epi32(__M, __W);
}
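// Editorial sketch: same shape for signed min; the splat of 2147483647 above
// is INT_MAX, the identity that keeps masked-off lanes from winning the
// icmp slt + select steps. Scalar model (helper name hypothetical):
static int ref_mask_reduce_min_epi32(__mmask16 m, const int v[16]) {
  int acc = 2147483647;                    // INT_MAX, identity for signed min
  for (int i = 0; i < 16; ++i)
    if (m & (1u << i))
      acc = v[i] < acc ? v[i] : acc;       // scalar analogue of icmp slt + select
  return acc;
}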
+// CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I18_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I19_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I20_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I15_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I16_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I17_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i16, align 2
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i16, align 2
+// CHECK: [[__W_ADDR:%.*]] = alloca <8 x i64>, align 64
+// CHECK: store i16 %__M, i16* [[__M_ADDR]], align 2
+// CHECK: store <8 x i64> %__W, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i16, i16* [[__M_ADDR]], align 2
+// CHECK: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[__W_ADDR]], align 64
+// CHECK: store i16 [[TMP0]], i16* [[__M_ADDR_I]], align 2
+// CHECK: store <8 x i64> [[TMP1]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[__M_ADDR_I]], align 2
+// CHECK: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP3]] to <16 x i32>
+// CHECK: store i32 -1, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[TMP5:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK: [[TMP6:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <16 x i32> [[VECINIT_I_I]], i32 [[TMP6]], i32 1
+// CHECK: [[TMP7:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <16 x i32> [[VECINIT1_I_I]], i32 [[TMP7]], i32 2
+// CHECK: [[TMP8:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <16 x i32> [[VECINIT2_I_I]], i32 [[TMP8]], i32 3
+// CHECK: [[TMP9:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <16 x i32> [[VECINIT3_I_I]], i32 [[TMP9]], i32 4
+// CHECK: [[TMP10:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <16 x i32> [[VECINIT4_I_I]], i32 [[TMP10]], i32 5
+// CHECK: [[TMP11:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <16 x i32> [[VECINIT5_I_I]], i32 [[TMP11]], i32 6
+// CHECK: [[TMP12:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <16 x i32> [[VECINIT6_I_I]], i32 [[TMP12]], i32 7
+// CHECK: [[TMP13:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT8_I_I:%.*]] = insertelement <16 x i32> [[VECINIT7_I_I]], i32 [[TMP13]], i32 8
+// CHECK: [[TMP14:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT9_I_I:%.*]] = insertelement <16 x i32> [[VECINIT8_I_I]], i32 [[TMP14]], i32 9
+// CHECK: [[TMP15:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT10_I_I:%.*]] = insertelement <16 x i32> [[VECINIT9_I_I]], i32 [[TMP15]], i32 10
+// CHECK: [[TMP16:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT11_I_I:%.*]] = insertelement <16 x i32> [[VECINIT10_I_I]], i32 [[TMP16]], i32 11
+// CHECK: [[TMP17:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT12_I_I:%.*]] = insertelement <16 x i32> [[VECINIT11_I_I]], i32 [[TMP17]], i32 12
+// CHECK: [[TMP18:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT13_I_I:%.*]] = insertelement <16 x i32> [[VECINIT12_I_I]], i32 [[TMP18]], i32 13
+// CHECK: [[TMP19:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT14_I_I:%.*]] = insertelement <16 x i32> [[VECINIT13_I_I]], i32 [[TMP19]], i32 14
+// CHECK: [[TMP20:%.*]] = load i32, i32* [[__S_ADDR_I_I]], align 4
+// CHECK: [[VECINIT15_I_I:%.*]] = insertelement <16 x i32> [[VECINIT14_I_I]], i32 [[TMP20]], i32 15
+// CHECK: store <16 x i32> [[VECINIT15_I_I]], <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP21:%.*]] = load <16 x i32>, <16 x i32>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP22:%.*]] = bitcast <16 x i32> [[TMP21]] to <8 x i64>
+// CHECK: [[TMP23:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
+// CHECK: [[TMP24:%.*]] = select <16 x i1> [[TMP23]], <16 x i32> [[TMP4]], <16 x i32> [[TMP21]]
+// CHECK: [[TMP25:%.*]] = bitcast <16 x i32> [[TMP24]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP25]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP27:%.*]] = bitcast <8 x i64> [[TMP26]] to <16 x i32>
+// CHECK: [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP29:%.*]] = bitcast <8 x i64> [[TMP28]] to <16 x i32>
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i32> [[TMP27]], <16 x i32> [[TMP29]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP30:%.*]] = bitcast <16 x i32> [[SHUFFLE_I]] to <8 x i64>
+// CHECK: [[TMP31:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = bitcast <8 x i64> [[TMP31]] to <16 x i32>
+// CHECK: [[TMP33:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = bitcast <8 x i64> [[TMP33]] to <16 x i32>
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x i32> [[TMP32]], <16 x i32> [[TMP34]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP35:%.*]] = bitcast <16 x i32> [[SHUFFLE1_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP30]], <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: store <8 x i64> [[TMP35]], <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I19_I]], align 64
+// CHECK: [[TMP37:%.*]] = bitcast <8 x i64> [[TMP36]] to <16 x i32>
+// CHECK: [[TMP38:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP39:%.*]] = bitcast <8 x i64> [[TMP38]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP40:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP41:%.*]] = bitcast <8 x i64> [[TMP40]] to <16 x i32>
+// CHECK: [[TMP42:%.*]] = icmp ult <16 x i32> [[TMP37]], [[TMP39]]
+// CHECK: [[TMP43:%.*]] = select <16 x i1> [[TMP42]], <16 x i32> [[TMP37]], <16 x i32> [[TMP39]]
+// CHECK: [[TMP44:%.*]] = bitcast <16 x i32> [[TMP43]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP44]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP45:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP46:%.*]] = bitcast <8 x i64> [[TMP45]] to <16 x i32>
+// CHECK: [[TMP47:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP48:%.*]] = bitcast <8 x i64> [[TMP47]] to <16 x i32>
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x i32> [[TMP46]], <16 x i32> [[TMP48]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP49:%.*]] = bitcast <16 x i32> [[SHUFFLE3_I]] to <8 x i64>
+// CHECK: [[TMP50:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP51:%.*]] = bitcast <8 x i64> [[TMP50]] to <16 x i32>
+// CHECK: [[TMP52:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP53:%.*]] = bitcast <8 x i64> [[TMP52]] to <16 x i32>
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> [[TMP53]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP54:%.*]] = bitcast <16 x i32> [[SHUFFLE4_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP49]], <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: store <8 x i64> [[TMP54]], <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP55:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I16_I]], align 64
+// CHECK: [[TMP56:%.*]] = bitcast <8 x i64> [[TMP55]] to <16 x i32>
+// CHECK: [[TMP57:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP58:%.*]] = bitcast <8 x i64> [[TMP57]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP59:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP60:%.*]] = bitcast <8 x i64> [[TMP59]] to <16 x i32>
+// CHECK: [[TMP61:%.*]] = icmp ult <16 x i32> [[TMP56]], [[TMP58]]
+// CHECK: [[TMP62:%.*]] = select <16 x i1> [[TMP61]], <16 x i32> [[TMP56]], <16 x i32> [[TMP58]]
+// CHECK: [[TMP63:%.*]] = bitcast <16 x i32> [[TMP62]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP63]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP64:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP65:%.*]] = bitcast <8 x i64> [[TMP64]] to <16 x i32>
+// CHECK: [[TMP66:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP67:%.*]] = bitcast <8 x i64> [[TMP66]] to <16 x i32>
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x i32> [[TMP65]], <16 x i32> [[TMP67]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP68:%.*]] = bitcast <16 x i32> [[SHUFFLE6_I]] to <8 x i64>
+// CHECK: [[TMP69:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP70:%.*]] = bitcast <8 x i64> [[TMP69]] to <16 x i32>
+// CHECK: [[TMP71:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP72:%.*]] = bitcast <8 x i64> [[TMP71]] to <16 x i32>
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <16 x i32> [[TMP70]], <16 x i32> [[TMP72]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP73:%.*]] = bitcast <16 x i32> [[SHUFFLE7_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP68]], <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <8 x i64> [[TMP73]], <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP74:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP75:%.*]] = bitcast <8 x i64> [[TMP74]] to <16 x i32>
+// CHECK: [[TMP76:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP77:%.*]] = bitcast <8 x i64> [[TMP76]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP78:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP79:%.*]] = bitcast <8 x i64> [[TMP78]] to <16 x i32>
+// CHECK: [[TMP80:%.*]] = icmp ult <16 x i32> [[TMP75]], [[TMP77]]
+// CHECK: [[TMP81:%.*]] = select <16 x i1> [[TMP80]], <16 x i32> [[TMP75]], <16 x i32> [[TMP77]]
+// CHECK: [[TMP82:%.*]] = bitcast <16 x i32> [[TMP81]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP82]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP83:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP84:%.*]] = bitcast <8 x i64> [[TMP83]] to <16 x i32>
+// CHECK: [[TMP85:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP86:%.*]] = bitcast <8 x i64> [[TMP85]] to <16 x i32>
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x i32> [[TMP84]], <16 x i32> [[TMP86]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP87:%.*]] = bitcast <16 x i32> [[SHUFFLE9_I]] to <8 x i64>
+// CHECK: [[TMP88:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP89:%.*]] = bitcast <8 x i64> [[TMP88]] to <16 x i32>
+// CHECK: [[TMP90:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP91:%.*]] = bitcast <8 x i64> [[TMP90]] to <16 x i32>
+// CHECK: [[SHUFFLE10_I:%.*]] = shufflevector <16 x i32> [[TMP89]], <16 x i32> [[TMP91]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP92:%.*]] = bitcast <16 x i32> [[SHUFFLE10_I]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP87]], <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <8 x i64> [[TMP92]], <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP93:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP94:%.*]] = bitcast <8 x i64> [[TMP93]] to <16 x i32>
+// CHECK: [[TMP95:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP96:%.*]] = bitcast <8 x i64> [[TMP95]] to <16 x i32>
+// CHECK: store <8 x i64> zeroinitializer, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP97:%.*]] = load <8 x i64>, <8 x i64>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP98:%.*]] = bitcast <8 x i64> [[TMP97]] to <16 x i32>
+// CHECK: [[TMP99:%.*]] = icmp ult <16 x i32> [[TMP94]], [[TMP96]]
+// CHECK: [[TMP100:%.*]] = select <16 x i1> [[TMP99]], <16 x i32> [[TMP94]], <16 x i32> [[TMP96]]
+// CHECK: [[TMP101:%.*]] = bitcast <16 x i32> [[TMP100]] to <8 x i64>
+// CHECK: store <8 x i64> [[TMP101]], <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP102:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <8 x i64> [[TMP102]], i32 0
+// CHECK: [[CONV_I:%.*]] = trunc i64 [[VECEXT_I]] to i32
+// CHECK: ret i32 [[CONV_I]]
unsigned int test_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __W){
- // CHECK: %tmp = bitcast <8 x i64> %__W to <16 x i32>
- // CHECK: %tmp1 = bitcast i16 %__M to <16 x i1>
- // CHECK: %tmp2 = select <16 x i1> %tmp1, <16 x i32> %tmp, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
- // CHECK: %shuffle1.i = shufflevector <16 x i32> %tmp2, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = icmp ult <16 x i32> %tmp2, %shuffle1.i
- // CHECK: %tmp4 = select <16 x i1> %tmp3, <16 x i32> %tmp2, <16 x i32> %shuffle1.i
- // CHECK: %shuffle4.i = shufflevector <16 x i32> %tmp4, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = icmp ult <16 x i32> %tmp4, %shuffle4.i
- // CHECK: %tmp6 = select <16 x i1> %tmp5, <16 x i32> %tmp4, <16 x i32> %shuffle4.i
- // CHECK: %shuffle7.i = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp7 = icmp ult <16 x i32> %tmp6, %shuffle7.i
- // CHECK: %tmp8 = select <16 x i1> %tmp7, <16 x i32> %tmp6, <16 x i32> %shuffle7.i
- // CHECK: %shuffle10.i = shufflevector <16 x i32> %tmp8, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp9 = icmp ult <16 x i32> %tmp8, %shuffle10.i
- // CHECK: %tmp10 = select <16 x i1> %tmp9, <16 x i32> %tmp8, <16 x i32> %shuffle10.i
- // CHECK: %tmp11 = bitcast <16 x i32> %tmp10 to <8 x i64>
- // CHECK: %vecext.i = extractelement <8 x i64> %tmp11, i32 0
- // CHECK: %conv.i = trunc i64 %vecext.i to i32
- // CHECK: ret i32 %conv.i
return _mm512_mask_reduce_min_epu32(__M, __W);
}
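// Editorial sketch: for unsigned min the identity is the i32 -1 splat checked
// above, i.e. UINT_MAX (0xFFFFFFFF), paired with icmp ult. Scalar model
// (helper name hypothetical):
static unsigned ref_mask_reduce_min_epu32(__mmask16 m, const unsigned v[16]) {
  unsigned acc = 0xFFFFFFFFu;              // UINT_MAX, identity for unsigned min
  for (int i = 0; i < 16; ++i)
    if (m & (1u << i))
      acc = v[i] < acc ? v[i] : acc;       // scalar analogue of icmp ult + select
  return acc;
}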
+// CHECK-LABEL: define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W) #0 {
+// CHECK: [[_COMPOUNDLITERAL_I_I18_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I19_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I20_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I15_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I16_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I17_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I12_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I13_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I14_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[_COMPOUNDLITERAL_I_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__A_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__B_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__W_ADDR_I_I:%.*]] = alloca float, align 4
+// CHECK: [[_COMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__M_ADDR_I:%.*]] = alloca i16, align 2
+// CHECK: [[__V_ADDR_I:%.*]] = alloca <16 x float>, align 64
+// CHECK: [[__M_ADDR:%.*]] = alloca i16, align 2
+// CHECK: [[__W_ADDR:%.*]] = alloca <16 x float>, align 64
+// CHECK: store i16 %__M, i16* [[__M_ADDR]], align 2
+// CHECK: store <16 x float> %__W, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: [[TMP0:%.*]] = load i16, i16* [[__M_ADDR]], align 2
+// CHECK: [[TMP1:%.*]] = load <16 x float>, <16 x float>* [[__W_ADDR]], align 64
+// CHECK: store i16 [[TMP0]], i16* [[__M_ADDR_I]], align 2
+// CHECK: store <16 x float> [[TMP1]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[__M_ADDR_I]], align 2
+// CHECK: [[TMP3:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: store float 0x7FF0000000000000, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[TMP4:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT_I_I:%.*]] = insertelement <16 x float> undef, float [[TMP4]], i32 0
+// CHECK: [[TMP5:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT1_I_I:%.*]] = insertelement <16 x float> [[VECINIT_I_I]], float [[TMP5]], i32 1
+// CHECK: [[TMP6:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT2_I_I:%.*]] = insertelement <16 x float> [[VECINIT1_I_I]], float [[TMP6]], i32 2
+// CHECK: [[TMP7:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT3_I_I:%.*]] = insertelement <16 x float> [[VECINIT2_I_I]], float [[TMP7]], i32 3
+// CHECK: [[TMP8:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT4_I_I:%.*]] = insertelement <16 x float> [[VECINIT3_I_I]], float [[TMP8]], i32 4
+// CHECK: [[TMP9:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT5_I_I:%.*]] = insertelement <16 x float> [[VECINIT4_I_I]], float [[TMP9]], i32 5
+// CHECK: [[TMP10:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT6_I_I:%.*]] = insertelement <16 x float> [[VECINIT5_I_I]], float [[TMP10]], i32 6
+// CHECK: [[TMP11:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT7_I_I:%.*]] = insertelement <16 x float> [[VECINIT6_I_I]], float [[TMP11]], i32 7
+// CHECK: [[TMP12:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT8_I_I:%.*]] = insertelement <16 x float> [[VECINIT7_I_I]], float [[TMP12]], i32 8
+// CHECK: [[TMP13:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT9_I_I:%.*]] = insertelement <16 x float> [[VECINIT8_I_I]], float [[TMP13]], i32 9
+// CHECK: [[TMP14:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT10_I_I:%.*]] = insertelement <16 x float> [[VECINIT9_I_I]], float [[TMP14]], i32 10
+// CHECK: [[TMP15:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT11_I_I:%.*]] = insertelement <16 x float> [[VECINIT10_I_I]], float [[TMP15]], i32 11
+// CHECK: [[TMP16:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT12_I_I:%.*]] = insertelement <16 x float> [[VECINIT11_I_I]], float [[TMP16]], i32 12
+// CHECK: [[TMP17:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT13_I_I:%.*]] = insertelement <16 x float> [[VECINIT12_I_I]], float [[TMP17]], i32 13
+// CHECK: [[TMP18:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT14_I_I:%.*]] = insertelement <16 x float> [[VECINIT13_I_I]], float [[TMP18]], i32 14
+// CHECK: [[TMP19:%.*]] = load float, float* [[__W_ADDR_I_I]], align 4
+// CHECK: [[VECINIT15_I_I:%.*]] = insertelement <16 x float> [[VECINIT14_I_I]], float [[TMP19]], i32 15
+// CHECK: store <16 x float> [[VECINIT15_I_I]], <16 x float>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP20:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I]], align 64
+// CHECK: [[TMP21:%.*]] = bitcast i16 [[TMP2]] to <16 x i1>
+// CHECK: [[TMP22:%.*]] = select <16 x i1> [[TMP21]], <16 x float> [[TMP3]], <16 x float> [[TMP20]]
+// CHECK: store <16 x float> [[TMP22]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP23:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP24:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x float> [[TMP23]], <16 x float> [[TMP24]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP25:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP26:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE1_I:%.*]] = shufflevector <16 x float> [[TMP25]], <16 x float> [[TMP26]], <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE_I]], <16 x float>* [[__A_ADDR_I19_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE1_I]], <16 x float>* [[__B_ADDR_I20_I]], align 64
+// CHECK: [[TMP27:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I19_I]], align 64
+// CHECK: [[TMP28:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I20_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP29:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I18_I]], align 64
+// CHECK: [[TMP30:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP27]], <16 x float> [[TMP28]], <16 x float> [[TMP29]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP30]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP31:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP32:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE3_I:%.*]] = shufflevector <16 x float> [[TMP31]], <16 x float> [[TMP32]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP33:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP34:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE4_I:%.*]] = shufflevector <16 x float> [[TMP33]], <16 x float> [[TMP34]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE3_I]], <16 x float>* [[__A_ADDR_I16_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE4_I]], <16 x float>* [[__B_ADDR_I17_I]], align 64
+// CHECK: [[TMP35:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I16_I]], align 64
+// CHECK: [[TMP36:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I17_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP37:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I15_I]], align 64
+// CHECK: [[TMP38:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP35]], <16 x float> [[TMP36]], <16 x float> [[TMP37]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP38]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP39:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP40:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE6_I:%.*]] = shufflevector <16 x float> [[TMP39]], <16 x float> [[TMP40]], <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP41:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP42:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE7_I:%.*]] = shufflevector <16 x float> [[TMP41]], <16 x float> [[TMP42]], <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE6_I]], <16 x float>* [[__A_ADDR_I13_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE7_I]], <16 x float>* [[__B_ADDR_I14_I]], align 64
+// CHECK: [[TMP43:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I13_I]], align 64
+// CHECK: [[TMP44:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I14_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP45:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I12_I]], align 64
+// CHECK: [[TMP46:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP43]], <16 x float> [[TMP44]], <16 x float> [[TMP45]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP46]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP47:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP48:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE9_I:%.*]] = shufflevector <16 x float> [[TMP47]], <16 x float> [[TMP48]], <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: [[TMP49:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP50:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[SHUFFLE10_I:%.*]] = shufflevector <16 x float> [[TMP49]], <16 x float> [[TMP50]], <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+// CHECK: store <16 x float> [[SHUFFLE9_I]], <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: store <16 x float> [[SHUFFLE10_I]], <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: [[TMP51:%.*]] = load <16 x float>, <16 x float>* [[__A_ADDR_I_I]], align 64
+// CHECK: [[TMP52:%.*]] = load <16 x float>, <16 x float>* [[__B_ADDR_I_I]], align 64
+// CHECK: store <16 x float> zeroinitializer, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP53:%.*]] = load <16 x float>, <16 x float>* [[_COMPOUNDLITERAL_I_I_I]], align 64
+// CHECK: [[TMP54:%.*]] = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> [[TMP51]], <16 x float> [[TMP52]], <16 x float> [[TMP53]], i16 -1, i32 4) #2
+// CHECK: store <16 x float> [[TMP54]], <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[TMP55:%.*]] = load <16 x float>, <16 x float>* [[__V_ADDR_I]], align 64
+// CHECK: [[VECEXT_I:%.*]] = extractelement <16 x float> [[TMP55]], i32 0
+// CHECK: ret float [[VECEXT_I]]
float test_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __W) {
- // CHECK: %tmp = bitcast i16 %__M to <16 x i1>
- // CHECK: %tmp1 = select <16 x i1> %tmp, <16 x float> %__W, <16 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000, float 0x7FF0000000000000>
- // CHECK: %shuffle1.i = shufflevector <16 x float> %tmp1, <16 x float> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp2 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp1, <16 x float> %shuffle1.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle4.i = shufflevector <16 x float> %tmp2, <16 x float> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp3 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp2, <16 x float> %shuffle4.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle7.i = shufflevector <16 x float> %tmp3, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp4 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp3, <16 x float> %shuffle7.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %shuffle10.i = shufflevector <16 x float> %tmp4, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // CHECK: %tmp5 = tail call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %tmp4, <16 x float> %shuffle10.i, <16 x float> zeroinitializer, i16 -1, i32 4) #3
- // CHECK: %vecext.i = extractelement <16 x float> %tmp5, i32 0
- // CHECK: ret float %vecext.i
return _mm512_mask_reduce_min_ps(__M, __W);
}
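The -O0 CHECK sequences above trace a halving reduction: at each step the live lanes are split in two by shufflevector and recombined through @llvm.x86.avx512.mask.min.ps.512 until one element remains. A scalar sketch of that shape, assuming a plain per-lane min (reduce_min_ps_scalar is a hypothetical helper, not part of the test):

static float reduce_min_ps_scalar(float v[16]) {
  // Each round pairs lane i with lane i + width, mirroring the two
  // shufflevector halves fed to the min intrinsic in the IR above.
  for (int width = 8; width >= 1; width /= 2)
    for (int i = 0; i < width; ++i)
      v[i] = v[i] < v[i + width] ? v[i] : v[i + width];
  return v[0]; // extractelement ... i32 0
}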
diff --git a/test/CodeGen/avx512bw-builtins.c b/test/CodeGen/avx512bw-builtins.c
index 60d2423e9c34..3160a6667c00 100644
--- a/test/CodeGen/avx512bw-builtins.c
+++ b/test/CodeGen/avx512bw-builtins.c
@@ -480,32 +480,48 @@ __m512i test_mm512_mask_blend_epi16(__mmask32 __U, __m512i __A, __m512i __W) {
}
__m512i test_mm512_abs_epi8(__m512i __A) {
// CHECK-LABEL: @test_mm512_abs_epi8
- // CHECK: @llvm.x86.avx512.mask.pabs.b.512
+ // CHECK: [[SUB:%.*]] = sub <64 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <64 x i8> [[A]], zeroinitializer
+ // CHECK: select <64 x i1> [[CMP]], <64 x i8> [[A]], <64 x i8> [[SUB]]
return _mm512_abs_epi8(__A);
}
__m512i test_mm512_mask_abs_epi8(__m512i __W, __mmask64 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_abs_epi8
- // CHECK: @llvm.x86.avx512.mask.pabs.b.512
+ // CHECK: [[SUB:%.*]] = sub <64 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <64 x i8> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <64 x i1> [[CMP]], <64 x i8> [[A]], <64 x i8> [[SUB]]
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> [[SEL]], <64 x i8> %{{.*}}
return _mm512_mask_abs_epi8(__W,__U,__A);
}
__m512i test_mm512_maskz_abs_epi8(__mmask64 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_abs_epi8
- // CHECK: @llvm.x86.avx512.mask.pabs.b.512
+ // CHECK: [[SUB:%.*]] = sub <64 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <64 x i8> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <64 x i1> [[CMP]], <64 x i8> [[A]], <64 x i8> [[SUB]]
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> [[SEL]], <64 x i8> %{{.*}}
return _mm512_maskz_abs_epi8(__U,__A);
}
__m512i test_mm512_abs_epi16(__m512i __A) {
// CHECK-LABEL: @test_mm512_abs_epi16
- // CHECK: @llvm.x86.avx512.mask.pabs.w.512
+ // CHECK: [[SUB:%.*]] = sub <32 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <32 x i16> [[A]], zeroinitializer
+ // CHECK: select <32 x i1> [[CMP]], <32 x i16> [[A]], <32 x i16> [[SUB]]
return _mm512_abs_epi16(__A);
}
__m512i test_mm512_mask_abs_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_abs_epi16
- // CHECK: @llvm.x86.avx512.mask.pabs.w.512
+ // CHECK: [[SUB:%.*]] = sub <32 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <32 x i16> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <32 x i1> [[CMP]], <32 x i16> [[A]], <32 x i16> [[SUB]]
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> [[SEL]], <32 x i16> %{{.*}}
return _mm512_mask_abs_epi16(__W,__U,__A);
}
__m512i test_mm512_maskz_abs_epi16(__mmask32 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_abs_epi16
- // CHECK: @llvm.x86.avx512.mask.pabs.w.512
+ // CHECK: [[SUB:%.*]] = sub <32 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <32 x i16> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <32 x i1> [[CMP]], <32 x i16> [[A]], <32 x i16> [[SUB]]
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> [[SEL]], <32 x i16> %{{.*}}
return _mm512_maskz_abs_epi16(__U,__A);
}
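The abs tests above now match the open-coded sub/icmp/select pattern instead of the @llvm.x86.avx512.mask.pabs.* intrinsics. A one-lane sketch of the same pattern (hypothetical helper, not part of the test):

static inline signed char abs_epi8_scalar(signed char a) {
  signed char neg = (signed char)(0 - a); // sub <64 x i8> zeroinitializer, %a
  return a > 0 ? a : neg;                 // icmp sgt + select
}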
__m512i test_mm512_packs_epi32(__m512i __A, __m512i __B) {
@@ -638,32 +654,74 @@ __m512i test_mm512_maskz_adds_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
}
__m512i test_mm512_avg_epu8(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_avg_epu8
- // CHECK: @llvm.x86.avx512.mask.pavg.b.512
+ // CHECK-NOT: @llvm.x86.avx512.mask.pavg.b.512
+ // CHECK: zext <64 x i8> %{{.*}} to <64 x i16>
+ // CHECK: zext <64 x i8> %{{.*}} to <64 x i16>
+ // CHECK: add <64 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <64 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <64 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <64 x i16> %{{.*}} to <64 x i8>
return _mm512_avg_epu8(__A,__B);
}
__m512i test_mm512_mask_avg_epu8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_avg_epu8
- // CHECK: @llvm.x86.avx512.mask.pavg.b.512
+ // CHECK-NOT: @llvm.x86.avx512.mask.pavg.b.512
+ // CHECK: zext <64 x i8> %{{.*}} to <64 x i16>
+ // CHECK: zext <64 x i8> %{{.*}} to <64 x i16>
+ // CHECK: add <64 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <64 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <64 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <64 x i16> %{{.*}} to <64 x i8>
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_avg_epu8(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_avg_epu8(__mmask64 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_avg_epu8
- // CHECK: @llvm.x86.avx512.mask.pavg.b.512
+ // CHECK-NOT: @llvm.x86.avx512.mask.pavg.b.512
+ // CHECK: zext <64 x i8> %{{.*}} to <64 x i16>
+ // CHECK: zext <64 x i8> %{{.*}} to <64 x i16>
+ // CHECK: add <64 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <64 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <64 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <64 x i16> %{{.*}} to <64 x i8>
+ // CHECK: store <64 x i8> zeroinitializer
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_avg_epu8(__U,__A,__B);
}
__m512i test_mm512_avg_epu16(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_avg_epu16
- // CHECK: @llvm.x86.avx512.mask.pavg.w.512
+ // CHECK-NOT: @llvm.x86.avx512.mask.pavg.w.512
+ // CHECK: zext <32 x i16> %{{.*}} to <32 x i32>
+ // CHECK: zext <32 x i16> %{{.*}} to <32 x i32>
+ // CHECK: add <32 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <32 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <32 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <32 x i32> %{{.*}} to <32 x i16>
return _mm512_avg_epu16(__A,__B);
}
__m512i test_mm512_mask_avg_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_avg_epu16
- // CHECK: @llvm.x86.avx512.mask.pavg.w.512
+ // CHECK-NOT: @llvm.x86.avx512.mask.pavg.w.512
+ // CHECK: zext <32 x i16> %{{.*}} to <32 x i32>
+ // CHECK: zext <32 x i16> %{{.*}} to <32 x i32>
+ // CHECK: add <32 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <32 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <32 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <32 x i32> %{{.*}} to <32 x i16>
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_avg_epu16(__W,__U,__A,__B);
}
__m512i test_mm512_maskz_avg_epu16(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_avg_epu16
- // CHECK: @llvm.x86.avx512.mask.pavg.w.512
+ // CHECK-NOT: @llvm.x86.avx512.mask.pavg.w.512
+ // CHECK: zext <32 x i16> %{{.*}} to <32 x i32>
+ // CHECK: zext <32 x i16> %{{.*}} to <32 x i32>
+ // CHECK: add <32 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <32 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <32 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <32 x i32> %{{.*}} to <32 x i16>
+ // CHECK: store <32 x i16> zeroinitializer
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_avg_epu16(__U,__A,__B);
}
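The avg tests now check the widened rounding-average expansion rather than the pavg intrinsics. One byte lane of the same computation (hypothetical helper, not part of the test):

static inline unsigned char avg_epu8_scalar(unsigned char a, unsigned char b) {
  // zext both operands, add, add a rounding bit of 1, lshr by 1, trunc
  // back -- the exact <64 x i16> op sequence spelled out in the CHECK lines.
  return (unsigned char)(((unsigned)a + (unsigned)b + 1u) >> 1);
}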
__m512i test_mm512_max_epi8(__m512i __A, __m512i __B) {
@@ -1432,26 +1490,162 @@ __m512i test_mm512_maskz_mov_epi8(__mmask64 __U, __m512i __A) {
__m512i test_mm512_mask_set1_epi8(__m512i __O, __mmask64 __M, char __A) {
// CHECK-LABEL: @test_mm512_mask_set1_epi8
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.b.gpr.512
+ // CHECK: insertelement <64 x i8> undef, i8 %{{.*}}, i32 0
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 1
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 2
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 3
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 4
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 5
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 6
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 7
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 8
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 9
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 10
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 11
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 12
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 13
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 14
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 15
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 16
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 17
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 18
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 19
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 20
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 21
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 22
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 23
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 24
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 25
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 26
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 27
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 28
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 29
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 30
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 31
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 32
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 33
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 34
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 35
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 36
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 37
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 38
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 39
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 40
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 41
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 42
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 43
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 44
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 45
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 46
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 47
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 48
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 49
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 50
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 51
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 52
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 53
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 54
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 55
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 56
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 57
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 58
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 59
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 60
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 61
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 62
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 63
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_set1_epi8(__O, __M, __A);
}
__m512i test_mm512_maskz_set1_epi8(__mmask64 __M, char __A) {
// CHECK-LABEL: @test_mm512_maskz_set1_epi8
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.b.gpr.512
+ // CHECK: insertelement <64 x i8> undef, i8 %{{.*}}, i32 0
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 1
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 2
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 3
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 4
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 5
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 6
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 7
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 8
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 9
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 10
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 11
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 12
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 13
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 14
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 15
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 16
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 17
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 18
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 19
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 20
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 21
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 22
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 23
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 24
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 25
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 26
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 27
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 28
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 29
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 30
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 31
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 32
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 33
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 34
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 35
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 36
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 37
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 38
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 39
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 40
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 41
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 42
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 43
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 44
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 45
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 46
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 47
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 48
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 49
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 50
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 51
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 52
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 53
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 54
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 55
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 56
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 57
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 58
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 59
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 60
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 61
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 62
+ // CHECK: insertelement <64 x i8> %{{.*}}, i8 %{{.*}}, i32 63
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_set1_epi8(__M, __A);
}
-__mmask64 test_mm512_kunpackd(__mmask64 __A, __mmask64 __B) {
+__mmask64 test_mm512_kunpackd(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kunpackd
- // CHECK: @llvm.x86.avx512.kunpck.dq
- return _mm512_kunpackd(__A, __B);
+ // CHECK: bitcast <64 x i1> %{{.*}} to i64
+ // CHECK: bitcast <64 x i1> %{{.*}} to i64
+ // CHECK: and i64 %{{.*}}, 4294967295
+ // CHECK: shl i64 %{{.*}}, 32
+ // CHECK: or i64 %{{.*}}, %{{.*}}
+ // CHECK: bitcast i64 %{{.*}} to <64 x i1>
+ return _mm512_mask_cmpneq_epu8_mask(_mm512_kunpackd(_mm512_cmpneq_epu8_mask(__B, __A), _mm512_cmpneq_epu8_mask(__C, __D)), __E, __F);
}
-__mmask32 test_mm512_kunpackw(__mmask32 __A, __mmask32 __B) {
+__mmask32 test_mm512_kunpackw(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kunpackw
- // CHECK: @llvm.x86.avx512.kunpck.wd
- return _mm512_kunpackw(__A, __B);
+ // CHECK: bitcast <32 x i1> %{{.*}} to i32
+ // CHECK: bitcast <32 x i1> %{{.*}} to i32
+ // CHECK: and i32 %{{.*}}, 65535
+ // CHECK: shl i32 %{{.*}}, 16
+ // CHECK: or i32 %{{.*}}, %{{.*}}
+ // CHECK: bitcast i32 %{{.*}} to <32 x i1>
+ return _mm512_mask_cmpneq_epu16_mask(_mm512_kunpackw(_mm512_cmpneq_epu16_mask(__B, __A), _mm512_cmpneq_epu16_mask(__C, __D)), __E, __F);
}
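kunpackd/kunpackw now lower to plain mask/shift/or on the underlying integer. A sketch of the i64 case, assuming Intel's documented operand order (the low half of the first argument lands in the high half of the result; hypothetical helper, not part of the test):

static inline unsigned long long kunpackd_scalar(unsigned long long a,
                                                 unsigned long long b) {
  // and i64 ..., 4294967295 / shl i64 ..., 32 / or i64 ..., ...
  return ((a & 0xFFFFFFFFULL) << 32) | (b & 0xFFFFFFFFULL);
}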
__m512i test_mm512_mask_loadu_epi16(__m512i __W, __mmask32 __U, void const *__P) {
@@ -1484,7 +1678,8 @@ void test_mm512_mask_storeu_epi16(void *__P, __mmask32 __U, __m512i __A) {
}
__mmask64 test_mm512_test_epi8_mask(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_test_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestm.b.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
return _mm512_test_epi8_mask(__A, __B);
}
@@ -1495,43 +1690,54 @@ void test_mm512_mask_storeu_epi8(void *__P, __mmask64 __U, __m512i __A) {
}
__mmask64 test_mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_test_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestm.b.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <64 x i8> %{{.*}}, %{{.*}}
+ // CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_test_epi8_mask(__U, __A, __B);
}
__mmask32 test_mm512_test_epi16_mask(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_test_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestm.w.
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
return _mm512_test_epi16_mask(__A, __B);
}
__mmask32 test_mm512_mask_test_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_test_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestm.w.
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <32 x i16> %{{.*}}, %{{.*}}
+ // CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_test_epi16_mask(__U, __A, __B);
}
__mmask64 test_mm512_testn_epi8_mask(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_testn_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestnm.b.
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
return _mm512_testn_epi8_mask(__A, __B);
}
__mmask64 test_mm512_mask_testn_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_testn_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestnm.b.
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <64 x i8> %{{.*}}, %{{.*}}
+ // CHECK: and <64 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_testn_epi8_mask(__U, __A, __B);
}
__mmask32 test_mm512_testn_epi16_mask(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_testn_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestnm.w.
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
return _mm512_testn_epi16_mask(__A, __B);
}
__mmask32 test_mm512_mask_testn_epi16_mask(__mmask32 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_testn_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestnm.w.
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <32 x i16> %{{.*}}, %{{.*}}
+ // CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_testn_epi16_mask(__U, __A, __B);
}
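Every ptestm/ptestnm test above now checks an and of the two sources followed by an integer compare against zero; the extra and <N x i1> in the masked variants folds in the caller-supplied mask. One-lane sketch (hypothetical helpers, not part of the test):

static inline int test_lane(unsigned short a, unsigned short b) {
  return (a & b) != 0; // test_*:  and + icmp ne
}
static inline int testn_lane(unsigned short a, unsigned short b) {
  return (a & b) == 0; // testn_*: and + icmp eq
}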
@@ -1597,13 +1803,77 @@ __m512i test_mm512_maskz_broadcastw_epi16(__mmask32 __M, __m128i __A) {
__m512i test_mm512_mask_set1_epi16(__m512i __O, __mmask32 __M, short __A) {
// CHECK-LABEL: @test_mm512_mask_set1_epi16
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.w.gpr.512
+ // CHECK: insertelement <32 x i16> undef, i16 %{{.*}}, i32 0
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 1
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 2
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 3
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 4
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 5
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 6
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 7
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 8
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 9
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 10
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 11
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 12
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 13
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 14
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 15
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 16
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 17
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 18
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 19
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 20
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 21
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 22
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 23
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 24
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 25
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 26
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 27
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 28
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 29
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 30
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 31
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_set1_epi16(__O, __M, __A);
}
__m512i test_mm512_maskz_set1_epi16(__mmask32 __M, short __A) {
// CHECK-LABEL: @test_mm512_maskz_set1_epi16
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.w.gpr.512
+ // CHECK: insertelement <32 x i16> undef, i16 %{{.*}}, i32 0
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 1
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 2
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 3
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 4
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 5
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 6
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 7
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 8
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 9
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 10
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 11
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 12
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 13
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 14
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 15
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 16
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 17
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 18
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 19
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 20
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 21
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 22
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 23
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 24
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 25
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 26
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 27
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 28
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 29
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 30
+ // CHECK: insertelement <32 x i16> %{{.*}}, i16 %{{.*}}, i32 31
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_set1_epi16(__M, __A);
}
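The masked set1 forms now build the splat through an explicit insertelement chain and then blend under the mask with a select. Scalar sketch of the masked splat (hypothetical helper, not part of the test):

static inline void mask_set1_epi16_scalar(short dst[32], unsigned mask, short a) {
  for (int i = 0; i < 32; ++i) // the insertelement chain materializes splat(a)
    if ((mask >> i) & 1u)      // select <32 x i1> keeps the old lane otherwise
      dst[i] = a;
}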
__m512i test_mm512_permutexvar_epi16(__m512i __A, __m512i __B) {
diff --git a/test/CodeGen/avx512cdintrin.c b/test/CodeGen/avx512cdintrin.c
index a28601895be1..e01d277be9ff 100644
--- a/test/CodeGen/avx512cdintrin.c
+++ b/test/CodeGen/avx512cdintrin.c
@@ -68,14 +68,40 @@ __m512i test_mm512_maskz_lzcnt_epi64(__mmask8 __U, __m512i __A) {
return _mm512_maskz_lzcnt_epi64(__U,__A);
}
-__m512i test_mm512_broadcastmb_epi64(__mmask8 __A) {
+__m512i test_mm512_broadcastmb_epi64(__m512i a, __m512i b) {
// CHECK-LABEL: @test_mm512_broadcastmb_epi64
- // CHECK: @llvm.x86.avx512.broadcastmb.512
- return _mm512_broadcastmb_epi64(__A);
+ // CHECK: icmp eq <8 x i64> %{{.*}}, %{{.*}}
+ // CHECK: zext i8 %{{.*}} to i64
+ // CHECK: insertelement <8 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 7
+ return _mm512_broadcastmb_epi64(_mm512_cmpeq_epu64_mask(a, b));
}
-__m512i test_mm512_broadcastmw_epi32(__mmask16 __A) {
+__m512i test_mm512_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK-LABEL: @test_mm512_broadcastmw_epi32
- // CHECK: @llvm.x86.avx512.broadcastmw.512
- return _mm512_broadcastmw_epi32(__A);
+ // CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: zext i16 %{{.*}} to i32
+ // CHECK: insertelement <16 x i32> undef, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
+ return _mm512_broadcastmw_epi32(_mm512_cmpeq_epi32_mask(a, b));
}
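broadcastmb/broadcastmw now test the generic lowering: the compare mask is zero-extended to the element width and splatted across the vector. Sketch of the epi64 case (hypothetical helper, not part of the test):

static inline void broadcastmb_scalar(unsigned char m, unsigned long long dst[8]) {
  unsigned long long v = (unsigned long long)m; // zext i8 %{{.*}} to i64
  for (int i = 0; i < 8; ++i)
    dst[i] = v;                                 // the insertelement splat
}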
diff --git a/test/CodeGen/avx512dq-builtins.c b/test/CodeGen/avx512dq-builtins.c
index ca8566c5979a..1b21ca3c4302 100644
--- a/test/CodeGen/avx512dq-builtins.c
+++ b/test/CodeGen/avx512dq-builtins.c
@@ -949,19 +949,21 @@ __mmask8 test_mm512_movepi64_mask(__m512i __A) {
__m512 test_mm512_broadcast_f32x2(__m128 __A) {
// CHECK-LABEL: @test_mm512_broadcast_f32x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x2
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_f32x2(__A);
}
__m512 test_mm512_mask_broadcast_f32x2(__m512 __O, __mmask16 __M, __m128 __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_f32x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x2
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcast_f32x2(__O, __M, __A);
}
__m512 test_mm512_maskz_broadcast_f32x2(__mmask16 __M, __m128 __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_f32x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x2
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcast_f32x2(__M, __A);
}
@@ -1007,19 +1009,21 @@ __m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
__m512i test_mm512_broadcast_i32x2(__m128i __A) {
// CHECK-LABEL: @test_mm512_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm512_broadcast_i32x2(__A);
}
__m512i test_mm512_mask_broadcast_i32x2(__m512i __O, __mmask16 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_mask_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcast_i32x2(__O, __M, __A);
}
__m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
// CHECK-LABEL: @test_mm512_maskz_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_broadcast_i32x2(__M, __A);
}
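broadcast_{f,i}32x2 is now a single shufflevector whose repeating <0,1,0,1,...> mask tiles elements {0,1} of the source across all sixteen lanes. Index sketch (hypothetical helper, not part of the test):

static inline void broadcast_32x2_scalar(const int src[4], int dst[16]) {
  for (int i = 0; i < 16; ++i)
    dst[i] = src[i & 1]; // even lanes take element 0, odd lanes element 1
}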
diff --git a/test/CodeGen/avx512f-builtins.c b/test/CodeGen/avx512f-builtins.c
index 1ce09df7caf7..ce831d690ee7 100644
--- a/test/CodeGen/avx512f-builtins.c
+++ b/test/CodeGen/avx512f-builtins.c
@@ -1,9 +1,5 @@
// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck %s
-// FIXME: It's wrong to check LLVM IR transformations from clang. This run should be removed and tests added to the appropriate LLVM pass.
-
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -O2 -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=O2
-
#include <immintrin.h>
__m512d test_mm512_sqrt_pd(__m512d a)
@@ -389,7 +385,9 @@ __m512d test_mm512_set1_pd(double d)
__mmask16 test_mm512_knot(__mmask16 a)
{
// CHECK-LABEL: @test_mm512_knot
- // CHECK: @llvm.x86.avx512.knot.w
+ // CHECK: [[IN:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[NOT:%.*]] = xor <16 x i1> [[IN]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+ // CHECK: bitcast <16 x i1> [[NOT]] to i16
return _mm512_knot(a);
}
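knot reduces to a bitcast to <16 x i1>, an xor with all-true, and a bitcast back -- ordinary bitwise NOT on the i16 mask. Scalar equivalent (hypothetical helper, not part of the test):

static inline unsigned short knot16_scalar(unsigned short a) {
  return (unsigned short)~a; // xor <16 x i1> [[IN]], splat(true)
}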
@@ -3861,39 +3859,48 @@ __m512i test_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i _
}
__mmask16 test_mm512_testn_epi32_mask(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_testn_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestnm.d.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
return _mm512_testn_epi32_mask(__A, __B);
}
__mmask16 test_mm512_mask_testn_epi32_mask(__mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_testn_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestnm.d.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_testn_epi32_mask(__U, __A, __B);
}
__mmask8 test_mm512_testn_epi64_mask(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_testn_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestnm.q.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <8 x i64> %{{.*}}, %{{.*}}
return _mm512_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm512_mask_testn_epi64_mask(__mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_testn_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestnm.q.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <8 x i64> %{{.*}}, %{{.*}}
+ // CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_testn_epi64_mask(__U, __A, __B);
}
__mmask16 test_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
{
// CHECK-LABEL: @test_mm512_mask_test_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestm.d.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <16 x i32> %{{.*}}, %{{.*}}
return _mm512_mask_test_epi32_mask (__U,__A,__B);
}
__mmask8 test_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
{
// CHECK-LABEL: @test_mm512_mask_test_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestm.q.512
+ // CHECK: and <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <8 x i64> %{{.*}}, %{{.*}}
+ // CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return _mm512_mask_test_epi64_mask (__U,__A,__B);
}
@@ -4488,73 +4495,81 @@ __m512i test_mm512_maskz_ternarylogic_epi64(__mmask8 __U, __m512i __A, __m512i _
__m512 test_mm512_shuffle_f32x4(__m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_shuffle_f32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.f32x4
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
return _mm512_shuffle_f32x4(__A, __B, 4);
}
__m512 test_mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_f32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.f32x4
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_shuffle_f32x4(__W, __U, __A, __B, 4);
}
__m512 test_mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_f32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.f32x4
+ // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_shuffle_f32x4(__U, __A, __B, 4);
}
__m512d test_mm512_shuffle_f64x2(__m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_shuffle_f64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.f64x2
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
return _mm512_shuffle_f64x2(__A, __B, 4);
}
__m512d test_mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_f64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.f64x2
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_shuffle_f64x2(__W, __U, __A, __B, 4);
}
__m512d test_mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_f64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.f64x2
+ // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_shuffle_f64x2(__U, __A, __B, 4);
}
__m512i test_mm512_shuffle_i32x4(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_shuffle_i32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.i32x4
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
return _mm512_shuffle_i32x4(__A, __B, 4);
}
__m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_i32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.i32x4
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_shuffle_i32x4(__W, __U, __A, __B, 4);
}
__m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_i32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.i32x4
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_shuffle_i32x4(__U, __A, __B, 4);
}
__m512i test_mm512_shuffle_i64x2(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_shuffle_i64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.i64x2
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
return _mm512_shuffle_i64x2(__A, __B, 4);
}
__m512i test_mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_i64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.i64x2
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_shuffle_i64x2(__W, __U, __A, __B, 4);
}
__m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_i64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.i64x2
+ // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_shuffle_i64x2(__U, __A, __B, 4);
}
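All the shuffle_{f32x4,f64x2,i32x4,i64x2} variants now check a shufflevector whose mask is decoded from the immediate: each 2-bit field selects a 128-bit lane, and the upper two fields read from the second source. A decode sketch for the 32x4 layout that reproduces the <16 x i32> mask above for imm = 4 (hypothetical helper, not part of the test):

static void shuffle_x4_mask(int imm, int mask_out[16]) {
  for (int field = 0; field < 4; ++field) {
    int lane = (imm >> (2 * field)) & 3;        // 2-bit 128-bit-lane selector
    int base = (field < 2 ? 0 : 16) + 4 * lane; // fields 2-3 index source two
    for (int j = 0; j < 4; ++j)
      mask_out[4 * field + j] = base + j;       // imm = 4 -> 0-7, 16-19, 16-19
  }
}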
@@ -6198,22 +6213,38 @@ __m512i test_mm512_mask_permutexvar_epi32(__m512i __W, __mmask16 __M, __m512i __
return _mm512_mask_permutexvar_epi32(__W, __M, __X, __Y);
}
-__mmask16 test_mm512_kand(__mmask16 __A, __mmask16 __B) {
+__mmask16 test_mm512_kand(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kand
- // CHECK: @llvm.x86.avx512.kand.w
- return _mm512_kand(__A, __B);
+ // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RES:%.*]] = and <16 x i1> [[LHS]], [[RHS]]
+ // CHECK: bitcast <16 x i1> [[RES]] to i16
+ return _mm512_mask_cmpneq_epu32_mask(_mm512_kand(_mm512_cmpneq_epu32_mask(__A, __B),
+ _mm512_cmpneq_epu32_mask(__C, __D)),
+ __E, __F);
}
-__mmask16 test_mm512_kandn(__mmask16 __A, __mmask16 __B) {
+__mmask16 test_mm512_kandn(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kandn
- // CHECK: @llvm.x86.avx512.kandn.w
- return _mm512_kandn(__A, __B);
+ // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[NOT:%.*]] = xor <16 x i1> [[LHS]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+ // CHECK: [[RES:%.*]] = and <16 x i1> [[NOT]], [[RHS]]
+ // CHECK: bitcast <16 x i1> [[RES]] to i16
+ return _mm512_mask_cmpneq_epu32_mask(_mm512_kandn(_mm512_cmpneq_epu32_mask(__A, __B),
+ _mm512_cmpneq_epu32_mask(__C, __D)),
+ __E, __F);
}
-__mmask16 test_mm512_kor(__mmask16 __A, __mmask16 __B) {
+__mmask16 test_mm512_kor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kor
- // CHECK: @llvm.x86.avx512.kor.w
- return _mm512_kor(__A, __B);
+ // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RES:%.*]] = or <16 x i1> [[LHS]], [[RHS]]
+ // CHECK: bitcast <16 x i1> [[RES]] to i16
+ return _mm512_mask_cmpneq_epu32_mask(_mm512_kor(_mm512_cmpneq_epu32_mask(__A, __B),
+ _mm512_cmpneq_epu32_mask(__C, __D)),
+ __E, __F);
}
int test_mm512_kortestc(__mmask16 __A, __mmask16 __B) {
@@ -6228,22 +6259,40 @@ int test_mm512_kortestz(__mmask16 __A, __mmask16 __B) {
return _mm512_kortestz(__A, __B);
}
-__mmask16 test_mm512_kunpackb(__mmask16 __A, __mmask16 __B) {
+__mmask16 test_mm512_kunpackb(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kunpackb
- // CHECK: @llvm.x86.avx512.kunpck.bw
- return _mm512_kunpackb(__A, __B);
-}
-
-__mmask16 test_mm512_kxnor(__mmask16 __A, __mmask16 __B) {
+ // CHECK: bitcast <16 x i1> %{{.*}} to i16
+ // CHECK: bitcast <16 x i1> %{{.*}} to i16
+ // CHECK: and i32 %{{.*}}, 255
+ // CHECK: shl i32 %{{.*}}, 8
+ // CHECK: or i32 %{{.*}}, %{{.*}}
+ // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+ return _mm512_mask_cmpneq_epu32_mask(_mm512_kunpackb(_mm512_cmpneq_epu32_mask(__A, __B),
+ _mm512_cmpneq_epu32_mask(__C, __D)),
+ __E, __F);
+}
+
+__mmask16 test_mm512_kxnor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kxnor
- // CHECK: @llvm.x86.avx512.kxnor.w
- return _mm512_kxnor(__A, __B);
+ // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[NOT:%.*]] = xor <16 x i1> [[LHS]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+ // CHECK: [[RES:%.*]] = xor <16 x i1> [[NOT]], [[RHS]]
+ // CHECK: bitcast <16 x i1> [[RES]] to i16
+ return _mm512_mask_cmpneq_epu32_mask(_mm512_kxnor(_mm512_cmpneq_epu32_mask(__A, __B),
+ _mm512_cmpneq_epu32_mask(__C, __D)),
+ __E, __F);
}
-__mmask16 test_mm512_kxor(__mmask16 __A, __mmask16 __B) {
+__mmask16 test_mm512_kxor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
// CHECK-LABEL: @test_mm512_kxor
- // CHECK: @llvm.x86.avx512.kxor.w
- return _mm512_kxor(__A, __B);
+ // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: [[RES:%.*]] = xor <16 x i1> [[LHS]], [[RHS]]
+ // CHECK: bitcast <16 x i1> [[RES]] to i16
+ return _mm512_mask_cmpneq_epu32_mask(_mm512_kxor(_mm512_cmpneq_epu32_mask(__A, __B),
+ _mm512_cmpneq_epu32_mask(__C, __D)),
+ __E, __F);
}
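kand/kandn/kor/kxnor/kxor all reduce to bitcasts around ordinary <16 x i1> bitwise ops, i.e. plain logic on the i16 mask. Scalar equivalents (hypothetical helpers, not part of the test):

static inline unsigned short kand16 (unsigned short a, unsigned short b) { return (unsigned short)(a & b); }
static inline unsigned short kandn16(unsigned short a, unsigned short b) { return (unsigned short)(~a & b); }
static inline unsigned short kxnor16(unsigned short a, unsigned short b) { return (unsigned short)~(a ^ b); } // xor with all-true, then xor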
void test_mm512_stream_si512(__m512i * __P, __m512i __A) {
@@ -6258,6 +6307,12 @@ __m512i test_mm512_stream_load_si512(void *__P) {
return _mm512_stream_load_si512(__P);
}
+__m512i test_mm512_stream_load_si512_const(void const *__P) {
+ // CHECK-LABEL: @test_mm512_stream_load_si512_const
+ // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 64, !nontemporal
+ return _mm512_stream_load_si512(__P);
+}
+
void test_mm512_stream_pd(double *__P, __m512d __A) {
// CHECK-LABEL: @test_mm512_stream_pd
// CHECK: store <8 x double> %{{.*}}, <8 x double>* %{{.*}}, align 64, !nontemporal
@@ -7722,11 +7777,51 @@ __m512i test_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
__m512i test_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
{
- //CHECK-LABEL: @test_mm512_mask_set1_epi32
- //CHECK: @llvm.x86.avx512.mask.pbroadcast.d.gpr.512
+ // CHECK-LABEL: @test_mm512_mask_set1_epi32
+ // CHECK: insertelement <16 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 4
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 5
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 6
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 7
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 8
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 9
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 10
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 11
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 12
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 13
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 14
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 15
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_set1_epi32 ( __O, __M, __A);
}
+__m512i test_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
+{
+ // CHECK-LABEL: @test_mm512_maskz_set1_epi32
+ // CHECK: insertelement <16 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 4
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 5
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 6
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 7
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 8
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 9
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 10
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 11
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 12
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 13
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 14
+ // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}, i32 15
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
+ return _mm512_maskz_set1_epi32(__M, __A);
+}
+
+
__m512i test_mm512_set_epi8(char e63, char e62, char e61, char e60, char e59,
char e58, char e57, char e56, char e55, char e54, char e53, char e52,
char e51, char e50, char e49, char e48, char e47, char e46, char e45,
@@ -7861,21 +7956,21 @@ __m512i test_mm512_set_epi32 (int __A, int __B, int __C, int __D,
{
//CHECK-LABEL: @test_mm512_set_epi32
//CHECK: insertelement{{.*}}i32 0
- //CHECK: insertelement{{.*}}i32 1
- //CHECK: insertelement{{.*}}i32 2
- //CHECK: insertelement{{.*}}i32 3
- //CHECK: insertelement{{.*}}i32 4
- //CHECK: insertelement{{.*}}i32 5
- //CHECK: insertelement{{.*}}i32 6
- //CHECK: insertelement{{.*}}i32 7
- //CHECK: insertelement{{.*}}i32 8
- //CHECK: insertelement{{.*}}i32 9
- //CHECK: insertelement{{.*}}i32 10
- //CHECK: insertelement{{.*}}i32 11
- //CHECK: insertelement{{.*}}i32 12
- //CHECK: insertelement{{.*}}i32 13
- //CHECK: insertelement{{.*}}i32 14
- //CHECK: insertelement{{.*}}i32 15
+ //CHECK: insertelement{{.*}}i32 1
+ //CHECK: insertelement{{.*}}i32 2
+ //CHECK: insertelement{{.*}}i32 3
+ //CHECK: insertelement{{.*}}i32 4
+ //CHECK: insertelement{{.*}}i32 5
+ //CHECK: insertelement{{.*}}i32 6
+ //CHECK: insertelement{{.*}}i32 7
+ //CHECK: insertelement{{.*}}i32 8
+ //CHECK: insertelement{{.*}}i32 9
+ //CHECK: insertelement{{.*}}i32 10
+ //CHECK: insertelement{{.*}}i32 11
+ //CHECK: insertelement{{.*}}i32 12
+ //CHECK: insertelement{{.*}}i32 13
+ //CHECK: insertelement{{.*}}i32 14
+ //CHECK: insertelement{{.*}}i32 15
return _mm512_set_epi32( __A, __B, __C, __D,__E, __F, __G, __H,
__I, __J, __K, __L,__M, __N, __O, __P);
}
@@ -7885,39 +7980,39 @@ __m512i test_mm512_setr_epi32 (int __A, int __B, int __C, int __D,
int __I, int __J, int __K, int __L,
int __M, int __N, int __O, int __P)
{
- //CHECK-LABEL: @test_mm512_setr_epi32
- //CHECK: load{{.*}}%__P.addr, align 4
- //CHECK: load{{.*}}%__O.addr, align 4
- //CHECK: load{{.*}}%__N.addr, align 4
- //CHECK: load{{.*}}%__M.addr, align 4
- //CHECK: load{{.*}}%__L.addr, align 4
- //CHECK: load{{.*}}%__K.addr, align 4
- //CHECK: load{{.*}}%__J.addr, align 4
- //CHECK: load{{.*}}%__I.addr, align 4
- //CHECK: load{{.*}}%__H.addr, align 4
- //CHECK: load{{.*}}%__G.addr, align 4
- //CHECK: load{{.*}}%__F.addr, align 4
- //CHECK: load{{.*}}%__E.addr, align 4
- //CHECK: load{{.*}}%__D.addr, align 4
- //CHECK: load{{.*}}%__C.addr, align 4
- //CHECK: load{{.*}}%__B.addr, align 4
- //CHECK: load{{.*}}%__A.addr, align 4
- //CHECK: insertelement{{.*}}i32 0
- //CHECK: insertelement{{.*}}i32 1
- //CHECK: insertelement{{.*}}i32 2
- //CHECK: insertelement{{.*}}i32 3
- //CHECK: insertelement{{.*}}i32 4
- //CHECK: insertelement{{.*}}i32 5
- //CHECK: insertelement{{.*}}i32 6
- //CHECK: insertelement{{.*}}i32 7
- //CHECK: insertelement{{.*}}i32 8
- //CHECK: insertelement{{.*}}i32 9
- //CHECK: insertelement{{.*}}i32 10
- //CHECK: insertelement{{.*}}i32 11
- //CHECK: insertelement{{.*}}i32 12
- //CHECK: insertelement{{.*}}i32 13
- //CHECK: insertelement{{.*}}i32 14
- //CHECK: insertelement{{.*}}i32 15
+ //CHECK-LABEL: @test_mm512_setr_epi32
+ //CHECK: load{{.*}}%__P.addr, align 4
+ //CHECK: load{{.*}}%__O.addr, align 4
+ //CHECK: load{{.*}}%__N.addr, align 4
+ //CHECK: load{{.*}}%__M.addr, align 4
+ //CHECK: load{{.*}}%__L.addr, align 4
+ //CHECK: load{{.*}}%__K.addr, align 4
+ //CHECK: load{{.*}}%__J.addr, align 4
+ //CHECK: load{{.*}}%__I.addr, align 4
+ //CHECK: load{{.*}}%__H.addr, align 4
+ //CHECK: load{{.*}}%__G.addr, align 4
+ //CHECK: load{{.*}}%__F.addr, align 4
+ //CHECK: load{{.*}}%__E.addr, align 4
+ //CHECK: load{{.*}}%__D.addr, align 4
+ //CHECK: load{{.*}}%__C.addr, align 4
+ //CHECK: load{{.*}}%__B.addr, align 4
+ //CHECK: load{{.*}}%__A.addr, align 4
+ //CHECK: insertelement{{.*}}i32 0
+ //CHECK: insertelement{{.*}}i32 1
+ //CHECK: insertelement{{.*}}i32 2
+ //CHECK: insertelement{{.*}}i32 3
+ //CHECK: insertelement{{.*}}i32 4
+ //CHECK: insertelement{{.*}}i32 5
+ //CHECK: insertelement{{.*}}i32 6
+ //CHECK: insertelement{{.*}}i32 7
+ //CHECK: insertelement{{.*}}i32 8
+ //CHECK: insertelement{{.*}}i32 9
+ //CHECK: insertelement{{.*}}i32 10
+ //CHECK: insertelement{{.*}}i32 11
+ //CHECK: insertelement{{.*}}i32 12
+ //CHECK: insertelement{{.*}}i32 13
+ //CHECK: insertelement{{.*}}i32 14
+ //CHECK: insertelement{{.*}}i32 15
return _mm512_setr_epi32( __A, __B, __C, __D,__E, __F, __G, __H,
__I, __J, __K, __L,__M, __N, __O, __P);
}
@@ -7925,19 +8020,36 @@ __m512i test_mm512_setr_epi32 (int __A, int __B, int __C, int __D,
#ifdef __x86_64__
__m512i test_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
{
- //CHECK-LABEL: @test_mm512_mask_set1_epi64
- //CHECK: @llvm.x86.avx512.mask.pbroadcast.q.gpr.512
+ // CHECK-LABEL: @test_mm512_mask_set1_epi64
+ // CHECK: insertelement <8 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 7
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_set1_epi64 (__O, __M, __A);
}
__m512i test_mm512_maskz_set1_epi64 (__mmask8 __M, long long __A)
{
- //CHECK-LABEL: @test_mm512_maskz_set1_epi64
- //CHECK: @llvm.x86.avx512.mask.pbroadcast.q.gpr.512
+ // CHECK-LABEL: @test_mm512_maskz_set1_epi64
+ // CHECK: insertelement <8 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 7
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_set1_epi64 (__M, __A);
}
#endif
+
__m512i test_mm512_set_epi64 (long long __A, long long __B, long long __C,
long long __D, long long __E, long long __F,
long long __G, long long __H)
@@ -8045,28 +8157,40 @@ __m512 test_mm512_set_ps (float __A, float __B, float __C, float __D,
__m512i test_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
// CHECK-LABEL: @test_mm512_mask_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.512
+ // CHECK: [[SUB:%.*]] = sub <8 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i64> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i64> [[A]], <8 x i64> [[SUB]]
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> [[SEL]], <8 x i64> %{{.*}}
return _mm512_mask_abs_epi64 (__W,__U,__A);
}
__m512i test_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
{
// CHECK-LABEL: @test_mm512_maskz_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.512
+ // CHECK: [[SUB:%.*]] = sub <8 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i64> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i64> [[A]], <8 x i64> [[SUB]]
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> [[SEL]], <8 x i64> %{{.*}}
return _mm512_maskz_abs_epi64 (__U,__A);
}
__m512i test_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
// CHECK-LABEL: @test_mm512_mask_abs_epi32
- // CHECK: @llvm.x86.avx512.mask.pabs.d.512
+ // CHECK: [[SUB:%.*]] = sub <16 x i32> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i32> [[A]], <16 x i32> [[SUB]]
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[SEL]], <16 x i32> %{{.*}}
return _mm512_mask_abs_epi32 (__W,__U,__A);
}
__m512i test_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
{
// CHECK-LABEL: @test_mm512_maskz_abs_epi32
- // CHECK: @llvm.x86.avx512.mask.pabs.d.512
+ // CHECK: [[SUB:%.*]] = sub <16 x i32> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i32> [[A]], <16 x i32> [[SUB]]
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[SEL]], <16 x i32> %{{.*}}
return _mm512_maskz_abs_epi32 (__U,__A);
}
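// A minimal scalar sketch (illustrative, not part of the test) of the
// generic absolute-value pattern the rewritten CHECK lines match:
// select(x > 0, x, 0 - x), applied per element.
#include <stdint.h>
static void abs_epi32_ref(int32_t *dst, const int32_t *src, int n) {
  for (int i = 0; i < n; ++i) {
    int32_t neg = 0 - src[i];            // sub <N x i32> zeroinitializer, %A
    dst[i] = src[i] > 0 ? src[i] : neg;  // icmp sgt ... + select
  }
}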
@@ -8234,138 +8358,95 @@ __m512d test_mm512_setzero_pd()
__mmask16 test_mm512_int2mask(int __a)
{
- // O2-LABEL: test_mm512_int2mask
- // O2: trunc i32 %__a to i16
+ // CHECK-LABEL: test_mm512_int2mask
+ // CHECK: trunc i32 %{{.*}} to i16
return _mm512_int2mask(__a);
}
int test_mm512_mask2int(__mmask16 __a)
{
- // O2-LABEL: test_mm512_mask2int
- // O2: zext i16 %__a to i32
+ // CHECK-LABEL: test_mm512_mask2int
+ // CHECK: zext i16 %{{.*}} to i32
return _mm512_mask2int(__a);
}
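// At -O0 these conversions are a plain integer truncate and zero-extend,
// which is all the new CHECK lines assert; a trivial reference model:
#include <stdint.h>
static uint16_t int2mask_ref(int a)      { return (uint16_t)a; }  // trunc i32 to i16
static int      mask2int_ref(uint16_t m) { return (int)m; }       // zext i16 to i32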
__m128 test_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- // O2-LABEL: @test_mm_mask_move_ss
- // O2: %[[M:.*]] = and i8 %__U, 1
- // O2: %[[M2:.*]] = icmp
- // O2: %[[ELM1:.*]] = extractelement <4 x float>
- // O2: %[[ELM2:.*]] = extractelement <4 x float>
- // O2: %[[SEL:.*]] = select i1 %[[M2]]
- // O2: %[[RES:.*]] = insertelement <4 x float> %__A, float %[[SEL]], i32 0
- // O2: ret <4 x float> %[[RES]]
+ // CHECK-LABEL: @test_mm_mask_move_ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i32 0
+ // CHECK: extractelement <4 x float> %{{.*}}, i32 0
+ // CHECK: phi float [ %{{.*}}, %{{.*}} ], [ %{{.*}}, %{{.*}} ]
+ // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 0
return _mm_mask_move_ss ( __W, __U, __A, __B);
}
__m128 test_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
- // O2-LABEL: @test_mm_maskz_move_ss
- // O2: %[[M:.*]] = and i8 %__U, 1
- // O2: %[[M2:.*]] = icmp
- // O2: %[[ELM1:.*]] = extractelement <4 x float> %__B, i32 0
- // O2: %[[SEL:.*]] = select i1 %[[M2]]
- // O2: %[[RES:.*]] = insertelement <4 x float> %__A, float %[[SEL]], i32 0
- // O2: ret <4 x float> %[[RES]]
+ // CHECK-LABEL: @test_mm_maskz_move_ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i32 0
+ // CHECK: phi float [ %{{.*}}, %{{.*}} ], [ 0.000000e+00, %{{.*}} ]
+ // CHECK: insertelement <4 x float> %{{.*}}, float %{{.*}}, i32 0
return _mm_maskz_move_ss (__U, __A, __B);
}
__m128d test_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- // O2-LABEL: @test_mm_mask_move_sd
- // O2: %[[M:.*]] = and i8 %__U, 1
- // O2: %[[M2:.*]] = icmp
- // O2: %[[ELM1:.*]] = extractelement <2 x double>
- // O2: %[[ELM2:.*]] = extractelement <2 x double>
- // O2: %[[SEL:.*]] = select i1 %[[M2]]
- // O2: %[[RES:.*]] = insertelement <2 x double> %__A, double %[[SEL]], i32 0
- // O2: ret <2 x double> %[[RES]]
+ // CHECK-LABEL: @test_mm_mask_move_sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i32 0
+ // CHECK: extractelement <2 x double> %{{.*}}, i32 0
+ // CHECK: phi double [ %{{.*}}, %{{.*}} ], [ %{{.*}}, %{{.*}} ]
+ // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 0
return _mm_mask_move_sd ( __W, __U, __A, __B);
}
__m128d test_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
- // O2-LABEL: @test_mm_maskz_move_sd
- // O2: %[[M:.*]] = and i8 %__U, 1
- // O2: %[[M2:.*]] = icmp
- // O2: %[[ELM1:.*]] = extractelement <2 x double> %__B, i32 0
- // O2: %[[SEL:.*]] = select i1 %[[M2]]
- // O2: %[[RES:.*]] = insertelement <2 x double> %__A, double %[[SEL]], i32 0
- // O2: ret <2 x double> %[[RES]]
+ // CHECK-LABEL: @test_mm_maskz_move_sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i32 0
+ // CHECK: phi double [ %{{.*}}, %{{.*}} ], [ 0.000000e+00, %{{.*}} ]
+ // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 0
return _mm_maskz_move_sd (__U, __A, __B);
}
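// Reference model for _mm_mask_move_ss, reconstructed from the O2 checks
// replaced above: lane 0 is selected by mask bit 0, the upper lanes come
// from __A. A sketch of the semantics, not the header implementation.
#include <immintrin.h>
static __m128 mask_move_ss_ref(__m128 w, __mmask8 u, __m128 a, __m128 b) {
  float lane0 = (u & 1) ? ((float *)&b)[0] : ((float *)&w)[0];
  __m128 r = a;              // dst[127:32] := __A[127:32]
  ((float *)&r)[0] = lane0;  // dst[31:0]   := __U[0] ? __B[0] : __W[0]
  return r;
}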
void test_mm_mask_store_ss(float * __P, __mmask8 __U, __m128 __A)
{
- // O2-LABEL: @test_mm_mask_store_ss
- // O2: %[[CAST:.*]] = bitcast float* %__P to <16 x float>*
- // O2: %[[SHUFFLE:.*]] = shufflevector <4 x float> %__A, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // O2: %[[MASK1:.*]] = and i8 %__U, 1
- // O2: %[[MASK2:.*]] = zext i8 %[[MASK1]] to i16
- // O2: %[[MASK3:.*]] = bitcast i16 %[[MASK2]] to <16 x i1>
- // O2: tail call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %[[SHUFFLE]], <16 x float>* %[[CAST]], i32 16, <16 x i1> %[[MASK3]])
+ // CHECK-LABEL: @test_mm_mask_store_ss
+ // CHECK: call void @llvm.masked.store.v16f32.p0v16f32(
_mm_mask_store_ss(__P, __U, __A);
}
void test_mm_mask_store_sd(double * __P, __mmask8 __U, __m128d __A)
{
- // O2-LABEL: @test_mm_mask_store_sd
- // O2: %[[CAST:.*]] = bitcast double* %__P to <8 x double>*
- // O2: %[[SHUFFLE:.*]] = shufflevector <2 x double> %__A, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // O2: %[[MASK1:.*]] = and i8 %__U, 1
- // O2: %[[MASK2:.*]] = bitcast i8 %[[MASK1]] to <8 x i1>
- // O2: tail call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %[[SHUFFLE]], <8 x double>* %[[CAST]], i32 16, <8 x i1> %[[MASK2]])
+ // CHECK-LABEL: @test_mm_mask_store_sd
+ // CHECK: call void @llvm.masked.store.v8f64.p0v8f64(
_mm_mask_store_sd(__P, __U, __A);
}
__m128 test_mm_mask_load_ss(__m128 __A, __mmask8 __U, const float* __W)
{
- // O2-LABEL: @test_mm_mask_load_ss
- // O2: %[[SHUF:.*]] = shufflevector <4 x float> %__A, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
- // O2: %[[PTR:.*]] = bitcast float* %__W to <16 x float>*
- // O2: %[[SHUF2:.*]] = shufflevector <4 x float> %[[SHUF]], <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // O2: %[[AND:.*]] = and i8 %__U, 1
- // O2: %[[MASK:.*]] = zext i8 %[[AND]] to i16
- // O2: %[[MASK2:.*]] = bitcast i16 %[[MASK]] to <16 x i1>
- // O2: %[[RES:.*]] = tail call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %[[PTR]], i32 16, <16 x i1> %[[MASK2]], <16 x float> %[[SHUF2]])
- // O2: shufflevector <16 x float> %[[RES]], <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK-LABEL: @test_mm_mask_load_ss
+ // CHECK: call <16 x float> @llvm.masked.load.v16f32.p0v16f32(
return _mm_mask_load_ss(__A, __U, __W);
}
__m128 test_mm_maskz_load_ss (__mmask8 __U, const float * __W)
{
- // O2-LABEL: @test_mm_maskz_load_ss
- // O2: %[[PTR:.*]] = bitcast float* %__W to <16 x float>*
- // O2: %[[AND:.*]] = and i8 %__U, 1
- // O2: %[[MASK:.*]] = zext i8 %[[AND]] to i16
- // O2: %[[MASK2:.*]] = bitcast i16 %[[MASK]] to <16 x i1>
- // O2: %[[RES:.*]] = tail call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %[[PTR]], i32 16, <16 x i1> %[[MASK2]], <16 x float> zeroinitializer)
- // O2: shufflevector <16 x float> %[[RES]], <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK-LABEL: @test_mm_maskz_load_ss
+ // CHECK: call <16 x float> @llvm.masked.load.v16f32.p0v16f32(
return _mm_maskz_load_ss (__U, __W);
}
__m128d test_mm_mask_load_sd (__m128d __A, __mmask8 __U, const double * __W)
{
- // O2-LABEL: @test_mm_mask_load_sd
- // O2: %[[SHUF:.*]] = insertelement <2 x double> %__A, double 0.000000e+00, i32 1
- // O2: %[[PTR:.*]] = bitcast double* %__W to <8 x double>*
- // O2: %[[SHUF2:.*]] = shufflevector <2 x double> %[[SHUF]], <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- // O2: %[[AND:.*]] = and i8 %__U, 1
- // O2: %[[MASK:.*]] = bitcast i8 %[[AND]] to <8 x i1>
- // O2: %[[RES:.*]] = tail call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %[[PTR]], i32 16, <8 x i1> %[[MASK]], <8 x double> %[[SHUF2]])
- // O2: shufflevector <8 x double> %[[RES]], <8 x double> undef, <2 x i32> <i32 0, i32 1>
+ // CHECK-LABEL: @test_mm_mask_load_sd
+ // CHECK: call <8 x double> @llvm.masked.load.v8f64.p0v8f64(
return _mm_mask_load_sd (__A, __U, __W);
}
__m128d test_mm_maskz_load_sd (__mmask8 __U, const double * __W)
{
- // O2-LABEL: @test_mm_maskz_load_sd
- // O2: %[[PTR:.*]] = bitcast double* %__W to <8 x double>*
- // O2: %[[AND:.*]] = and i8 %__U, 1
- // O2: %[[MASK:.*]] = bitcast i8 %[[AND]] to <8 x i1>
- // O2: %[[RES:.*]] = tail call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %[[PTR]], i32 16, <8 x i1> %[[MASK]], <8 x double> zeroinitializer)
- // O2: shufflevector <8 x double> %[[RES]], <8 x double> undef, <2 x i32> <i32 0, i32 1>
+ // CHECK-LABEL: @test_mm_maskz_load_sd
+ // CHECK: call <8 x double> @llvm.masked.load.v8f64.p0v8f64(
return _mm_maskz_load_sd (__U, __W);
}
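// Reference model for _mm_mask_load_ss, derived from the O2 checks deleted
// above: the merge source keeps lane 0 of __A and zeroes the upper lanes,
// so the result is {k[0] ? *mem : A[0], 0, 0, 0}. A sketch under that
// reading, not the header implementation.
#include <immintrin.h>
static __m128 mask_load_ss_ref(__m128 a, __mmask8 k, const float *mem) {
  float lane0 = (k & 1) ? *mem : ((float *)&a)[0];
  return _mm_set_ps(0.0f, 0.0f, 0.0f, lane0);  // upper three lanes zeroed
}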
diff --git a/test/CodeGen/avx512ifmavl-builtins.c b/test/CodeGen/avx512ifmavl-builtins.c
index c59af0ec6d06..4aeec336ad94 100644
--- a/test/CodeGen/avx512ifmavl-builtins.c
+++ b/test/CodeGen/avx512ifmavl-builtins.c
@@ -1,6 +1,4 @@
-// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
-
-#define __MM_MALLOC_H
+// RUN: %clang_cc1 %s -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <immintrin.h>
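// The -ffreestanding flag keeps <immintrin.h> from pulling in the hosted
// <mm_malloc.h>/<stdlib.h> path, so the "#define __MM_MALLOC_H" suppression
// hack is no longer needed (assumed rationale; the RUN line above is the
// authoritative change).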
diff --git a/test/CodeGen/avx512vl-builtins.c b/test/CodeGen/avx512vl-builtins.c
index c64b7bcec23e..b4fc86da704b 100644
--- a/test/CodeGen/avx512vl-builtins.c
+++ b/test/CodeGen/avx512vl-builtins.c
@@ -2502,56 +2502,82 @@ __m256 test_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
}
__m128i test_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_abs_epi32
- // CHECK: @llvm.x86.ssse3.pabs.d.128
- // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <4 x i32> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[A]], <4 x i32> [[SUB]]
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[SEL]], <4 x i32> %{{.*}}
return _mm_mask_abs_epi32(__W,__U,__A);
}
__m128i test_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_abs_epi32
- // CHECK: @llvm.x86.ssse3.pabs.d.128
- // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <4 x i32> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[A]], <4 x i32> [[SUB]]
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[SEL]], <4 x i32> %{{.*}}
return _mm_maskz_abs_epi32(__U,__A);
}
__m256i test_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_abs_epi32
- // CHECK: @llvm.x86.avx2.pabs.d
- // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <8 x i32> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i32> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[A]], <8 x i32> [[SUB]]
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> [[SEL]], <8 x i32> %{{.*}}
return _mm256_mask_abs_epi32(__W,__U,__A);
}
__m256i test_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_abs_epi32
- // CHECK: @llvm.x86.avx2.pabs.d
- // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <8 x i32> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i32> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[A]], <8 x i32> [[SUB]]
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> [[SEL]], <8 x i32> %{{.*}}
return _mm256_maskz_abs_epi32(__U,__A);
}
__m128i test_mm_abs_epi64(__m128i __A) {
// CHECK-LABEL: @test_mm_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.128
+ // CHECK: [[SUB:%.*]] = sub <2 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <2 x i64> [[A]], zeroinitializer
+ // CHECK: select <2 x i1> [[CMP]], <2 x i64> [[A]], <2 x i64> [[SUB]]
return _mm_abs_epi64(__A);
}
__m128i test_mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.128
+ // CHECK: [[SUB:%.*]] = sub <2 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <2 x i64> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A]], <2 x i64> [[SUB]]
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> [[SEL]], <2 x i64> %{{.*}}
return _mm_mask_abs_epi64(__W,__U,__A);
}
__m128i test_mm_maskz_abs_epi64(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.128
+ // CHECK: [[SUB:%.*]] = sub <2 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <2 x i64> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A]], <2 x i64> [[SUB]]
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> [[SEL]], <2 x i64> %{{.*}}
return _mm_maskz_abs_epi64(__U,__A);
}
__m256i test_mm256_abs_epi64(__m256i __A) {
// CHECK-LABEL: @test_mm256_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.256
+ // CHECK: [[SUB:%.*]] = sub <4 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <4 x i64> [[A]], zeroinitializer
+ // CHECK: select <4 x i1> [[CMP]], <4 x i64> [[A]], <4 x i64> [[SUB]]
return _mm256_abs_epi64(__A);
}
__m256i test_mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.256
+ // CHECK: [[SUB:%.*]] = sub <4 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <4 x i64> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i64> [[A]], <4 x i64> [[SUB]]
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> [[SEL]], <4 x i64> %{{.*}}
return _mm256_mask_abs_epi64(__W,__U,__A);
}
__m256i test_mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_abs_epi64
- // CHECK: @llvm.x86.avx512.mask.pabs.q.256
+ // CHECK: [[SUB:%.*]] = sub <4 x i64> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <4 x i64> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i64> [[A]], <4 x i64> [[SUB]]
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> [[SEL]], <4 x i64> %{{.*}}
return _mm256_maskz_abs_epi64(__U,__A);
}
__m128i test_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
@@ -4486,50 +4512,92 @@ __m256d test_mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A) {
__m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
// CHECK-LABEL: @test_mm_mask_set1_epi32
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.d.gpr.128
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_set1_epi32(__O, __M, 5);
}
__m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
// CHECK-LABEL: @test_mm_maskz_set1_epi32
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.d.gpr.128
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_set1_epi32(__M, 5);
}
__m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
// CHECK-LABEL: @test_mm256_mask_set1_epi32
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.d.gpr.256
+ // CHECK: insertelement <8 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 7
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_set1_epi32(__O, __M, 5);
}
__m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
// CHECK-LABEL: @test_mm256_maskz_set1_epi32
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.d.gpr.256
+ // CHECK: insertelement <8 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 7
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_set1_epi32(__M, 5);
}
#ifdef __x86_64__
__m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
// CHECK-LABEL: @test_mm_mask_set1_epi64
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.q.gpr.128
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_set1_epi64(__O, __M, __A);
}
__m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
// CHECK-LABEL: @test_mm_maskz_set1_epi64
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.q.gpr.128
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <2 x i32> <i32 0, i32 1>
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_set1_epi64(__M, __A);
}
__m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
// CHECK-LABEL: @test_mm256_mask_set1_epi64
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.q.gpr.256
+ // CHECK: insertelement <4 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 3
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_set1_epi64(__O, __M, __A);
}
__m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
// CHECK-LABEL: @test_mm256_maskz_set1_epi64
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.q.gpr.256
+ // CHECK: insertelement <4 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 3
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_set1_epi64(__M, __A);
}
#endif
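// A scalar sketch of the masked broadcast the new CHECK lines match: splat
// the scalar through an insertelement chain, then merge with the passthrough
// under the mask bits. Names and the 8-element width are illustrative.
#include <stdint.h>
static void mask_set1_epi32_ref(int32_t dst[8], const int32_t src[8],
                                uint8_t m, int32_t a) {
  for (int i = 0; i < 8; ++i)              // insertelement ..., i32 i
    dst[i] = ((m >> i) & 1) ? a : src[i];  // select <8 x i1>
}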
@@ -5120,99 +5188,124 @@ __m256 test_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C) {
__mmask8 test_mm_test_epi32_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_test_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestm.d.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
return _mm_test_epi32_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_test_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestm.d.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <4 x i32> %{{.*}}, %{{.*}}
+ // CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return _mm_mask_test_epi32_mask(__U, __A, __B);
}
__mmask8 test_mm256_test_epi32_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_test_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestm.d.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
return _mm256_test_epi32_mask(__A, __B);
}
__mmask8 test_mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_test_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestm.d.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <8 x i32> %{{.*}}, %{{.*}}
+ // CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_test_epi32_mask(__U, __A, __B);
}
__mmask8 test_mm_test_epi64_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_test_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestm.q.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
return _mm_test_epi64_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_test_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestm.q.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return _mm_mask_test_epi64_mask(__U, __A, __B);
}
__mmask8 test_mm256_test_epi64_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_test_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestm.q.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
return _mm256_test_epi64_mask(__A, __B);
}
__mmask8 test_mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_test_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestm.q.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_test_epi64_mask(__U, __A, __B);
}
__mmask8 test_mm_testn_epi32_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_testn_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestnm.d.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
return _mm_testn_epi32_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_testn_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestnm.d.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ // CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return _mm_mask_testn_epi32_mask(__U, __A, __B);
}
__mmask8 test_mm256_testn_epi32_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_testn_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestnm.d.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
return _mm256_testn_epi32_mask(__A, __B);
}
__mmask8 test_mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_testn_epi32_mask
- // CHECK: @llvm.x86.avx512.ptestnm.d.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <8 x i32> %{{.*}}, %{{.*}}
+ // CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_testn_epi32_mask(__U, __A, __B);
}
__mmask8 test_mm_testn_epi64_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_testn_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestnm.q.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
return _mm_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_testn_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestnm.q.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: and <2 x i1> %{{.*}}, %{{.*}}
return _mm_mask_testn_epi64_mask(__U, __A, __B);
}
__mmask8 test_mm256_testn_epi64_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_testn_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestnm.q.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
return _mm256_testn_epi64_mask(__A, __B);
}
__mmask8 test_mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_testn_epi64_mask
- // CHECK: @llvm.x86.avx512.ptestnm.q.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: and <4 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_testn_epi64_mask(__U, __A, __B);
}
+
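// A scalar model of the ptestm/ptestnm lowering checked above: AND the
// operands, compare each element against zero (ne for test, eq for testn),
// then AND with the incoming mask in the masked variants. Illustrative only.
#include <stdint.h>
static uint8_t test_epi32_mask_ref(const int32_t a[4], const int32_t b[4],
                                   uint8_t u /* 0xFF when unmasked */) {
  uint8_t m = 0;
  for (int i = 0; i < 4; ++i)
    if (a[i] & b[i])             // and <2 x i64> + icmp ne <4 x i32>
      m |= (uint8_t)1 << i;
  return m & u;                  // and <4 x i1> with __U
}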
__m128i test_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -5534,73 +5627,85 @@ __m256i test_mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i _
}
__m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_shuffle_f32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.f32x4
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_shuffle_f32x4(__A, __B, 3);
}
__m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_f32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.f32x4
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_shuffle_f32x4(__W, __U, __A, __B, 3);
}
__m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_f32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.f32x4
+ // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_shuffle_f32x4(__U, __A, __B, 3);
}
__m256d test_mm256_shuffle_f64x2(__m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_shuffle_f64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.f64x2
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_f64x2(__A, __B, 3);
}
__m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_f64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.f64x2
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_shuffle_f64x2(__W, __U, __A, __B, 3);
}
__m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_f64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.f64x2
+ // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_shuffle_f64x2(__U, __A, __B, 3);
}
__m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_shuffle_i32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.i32x4
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_i32x4(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_i32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.i32x4
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_i32x4
- // CHECK: @llvm.x86.avx512.mask.shuf.i32x4
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 3);
}
__m256i test_mm256_shuffle_i64x2(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_shuffle_i64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.i64x2
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
return _mm256_shuffle_i64x2(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_i64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.i64x2
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_shuffle_i64x2(__W, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_i64x2
- // CHECK: @llvm.x86.avx512.mask.shuf.i64x2
+ // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_shuffle_i64x2(__U, __A, __B, 3);
}
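// What the imm=3 shufflevector checks above encode: both 128-bit halves of
// the result come from the high halves of the inputs. A reference model over
// float lanes; the name is illustrative.
static void shuffle_f32x4_imm3_ref(float dst[8], const float a[8],
                                   const float b[8]) {
  for (int i = 0; i < 4; ++i) {
    dst[i]     = a[4 + i];  // mask elements <i32 4..7>:   high half of A
    dst[4 + i] = b[4 + i];  // mask elements <i32 12..15>: high half of B
  }
}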
diff --git a/test/CodeGen/avx512vlbw-builtins.c b/test/CodeGen/avx512vlbw-builtins.c
index 5a7283608bc3..23fbd4026aaa 100644
--- a/test/CodeGen/avx512vlbw-builtins.c
+++ b/test/CodeGen/avx512vlbw-builtins.c
@@ -898,57 +898,73 @@ __m256i test_mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) {
__m128i test_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_abs_epi8
- // CHECK: @llvm.x86.ssse3.pabs.b
- // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i8> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[A]], <16 x i8> [[SUB]]
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> [[SEL]], <16 x i8> %{{.*}}
return _mm_mask_abs_epi8(__W,__U,__A);
}
__m128i test_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_abs_epi8
- // CHECK: @llvm.x86.ssse3.pabs.b
- // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i8> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[A]], <16 x i8> [[SUB]]
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> [[SEL]], <16 x i8> %{{.*}}
return _mm_maskz_abs_epi8(__U,__A);
}
__m256i test_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_abs_epi8
- // CHECK: @llvm.x86.avx2.pabs.b
- // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <32 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <32 x i8> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[A]], <32 x i8> [[SUB]]
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> [[SEL]], <32 x i8> %{{.*}}
return _mm256_mask_abs_epi8(__W,__U,__A);
}
__m256i test_mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_abs_epi8
- // CHECK: @llvm.x86.avx2.pabs.b
- // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <32 x i8> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <32 x i8> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[A]], <32 x i8> [[SUB]]
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> [[SEL]], <32 x i8> %{{.*}}
return _mm256_maskz_abs_epi8(__U,__A);
}
__m128i test_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_abs_epi16
- // CHECK: @llvm.x86.ssse3.pabs.w
- // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i16> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[A]], <8 x i16> [[SUB]]
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> [[SEL]], <8 x i16> %{{.*}}
return _mm_mask_abs_epi16(__W,__U,__A);
}
__m128i test_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_abs_epi16
- // CHECK: @llvm.x86.ssse3.pabs.w
- // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <8 x i16> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[A]], <8 x i16> [[SUB]]
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> [[SEL]], <8 x i16> %{{.*}}
return _mm_maskz_abs_epi16(__U,__A);
}
__m256i test_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_abs_epi16
- // CHECK: @llvm.x86.avx2.pabs.w
- // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <16 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i16> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[A]], <16 x i16> [[SUB]]
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> [[SEL]], <16 x i16> %{{.*}}
return _mm256_mask_abs_epi16(__W,__U,__A);
}
__m256i test_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_abs_epi16
- // CHECK: @llvm.x86.avx2.pabs.w
- // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+ // CHECK: [[SUB:%.*]] = sub <16 x i16> zeroinitializer, [[A:%.*]]
+ // CHECK: [[CMP:%.*]] = icmp sgt <16 x i16> [[A]], zeroinitializer
+ // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[A]], <16 x i16> [[SUB]]
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> [[SEL]], <16 x i16> %{{.*}}
return _mm256_maskz_abs_epi16(__U,__A);
}
@@ -1155,49 +1171,101 @@ __m256i test_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
}
__m128i test_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_avg_epu8
- // CHECK: @llvm.x86.sse2.pavg.b
+ // CHECK-NOT: @llvm.x86.sse2.pavg.b
+ // CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
+ // CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
+ // CHECK: add <16 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <16 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <16 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_avg_epu8(__W,__U,__A,__B);
}
__m128i test_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_maskz_avg_epu8
- // CHECK: @llvm.x86.sse2.pavg.b
+ // CHECK-NOT: @llvm.x86.sse2.pavg.b
+ // CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
+ // CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
+ // CHECK: add <16 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <16 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <16 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
+ // CHECK: store <2 x i64> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_avg_epu8(__U,__A,__B);
}
__m256i test_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_avg_epu8
- // CHECK: @llvm.x86.avx2.pavg.b
+ // CHECK-NOT: @llvm.x86.avx2.pavg.b
+ // CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
+ // CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
+ // CHECK: add <32 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <32 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <32 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_avg_epu8(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_avg_epu8
- // CHECK: @llvm.x86.avx2.pavg.b
+ // CHECK-NOT: @llvm.x86.avx2.pavg.b
+ // CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
+ // CHECK: zext <32 x i8> %{{.*}} to <32 x i16>
+ // CHECK: add <32 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <32 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <32 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
+ // CHECK: store <4 x i64> zeroinitializer
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_avg_epu8(__U,__A,__B);
}
__m128i test_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_avg_epu16
- // CHECK: @llvm.x86.sse2.pavg.w
+ // CHECK-NOT: @llvm.x86.sse2.pavg.w
+ // CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
+ // CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
+ // CHECK: add <8 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <8 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <8 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_avg_epu16(__W,__U,__A,__B);
}
__m128i test_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_maskz_avg_epu16
- // CHECK: @llvm.x86.sse2.pavg.w
+ // CHECK-NOT: @llvm.x86.sse2.pavg.w
+ // CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
+ // CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
+ // CHECK: add <8 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <8 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <8 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
+ // CHECK: store <2 x i64> zeroinitializer
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_avg_epu16(__U,__A,__B);
}
__m256i test_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_avg_epu16
- // CHECK: @llvm.x86.avx2.pavg.w
+ // CHECK-NOT: @llvm.x86.avx2.pavg.w
+ // CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
+ // CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
+ // CHECK: add <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <16 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <16 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <16 x i32> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_avg_epu16(__W,__U,__A,__B);
}
__m256i test_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_avg_epu16
- // CHECK: @llvm.x86.avx2.pavg.w
+ // CHECK-NOT: @llvm.x86.avx2.pavg.w
+ // CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
+ // CHECK: zext <16 x i16> %{{.*}} to <16 x i32>
+ // CHECK: add <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <16 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <16 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <16 x i32> %{{.*}} to <16 x i16>
+ // CHECK: store <4 x i64> zeroinitializer
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_avg_epu16(__U,__A,__B);
}
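// A worked model of the pavg lowering checked above: widen, add, add the
// rounding bit, shift right by one, truncate -- i.e. (a + b + 1) >> 1
// computed without intermediate overflow. Illustrative scalar form.
#include <stdint.h>
static uint8_t avg_epu8_ref(uint8_t a, uint8_t b) {
  uint16_t w = (uint16_t)a + (uint16_t)b + 1;  // zext + add + add <i16 1, ...>
  return (uint8_t)(w >> 1);                    // lshr + trunc
}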
@@ -2413,97 +2481,121 @@ void test_mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) {
}
__mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_test_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestm.b.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
return _mm_test_epi8_mask(__A, __B);
}
__mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_test_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestm.b.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <16 x i8> %{{.*}}, %{{.*}}
+ // CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm_mask_test_epi8_mask(__U, __A, __B);
}
__mmask32 test_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_test_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestm.b.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
return _mm256_test_epi8_mask(__A, __B);
}
__mmask32 test_mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_test_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestm.b.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
+ // CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_test_epi8_mask(__U, __A, __B);
}
__mmask8 test_mm_test_epi16_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_test_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestm.w.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
return _mm_test_epi16_mask(__A, __B);
}
__mmask8 test_mm_mask_test_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_test_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestm.w.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <8 x i16> %{{.*}}, %{{.*}}
+ // CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return _mm_mask_test_epi16_mask(__U, __A, __B);
}
__mmask16 test_mm256_test_epi16_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_test_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestm.w.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
return _mm256_test_epi16_mask(__A, __B);
}
__mmask16 test_mm256_mask_test_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_test_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestm.w.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp ne <16 x i16> %{{.*}}, %{{.*}}
+ // CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_test_epi16_mask(__U, __A, __B);
}
__mmask16 test_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_testn_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestnm.b.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
return _mm_testn_epi8_mask(__A, __B);
}
__mmask16 test_mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_testn_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestnm.b.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
+ // CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm_mask_testn_epi8_mask(__U, __A, __B);
}
__mmask32 test_mm256_testn_epi8_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_testn_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestnm.b.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
return _mm256_testn_epi8_mask(__A, __B);
}
__mmask32 test_mm256_mask_testn_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_testn_epi8_mask
- // CHECK: @llvm.x86.avx512.ptestnm.b.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <32 x i8> %{{.*}}, %{{.*}}
+ // CHECK: and <32 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_testn_epi8_mask(__U, __A, __B);
}
__mmask8 test_mm_testn_epi16_mask(__m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_testn_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestnm.w.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
return _mm_testn_epi16_mask(__A, __B);
}
__mmask8 test_mm_mask_testn_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: @test_mm_mask_testn_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestnm.w.128
+ // CHECK: and <2 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}}
+ // CHECK: and <8 x i1> %{{.*}}, %{{.*}}
return _mm_mask_testn_epi16_mask(__U, __A, __B);
}
__mmask16 test_mm256_testn_epi16_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_testn_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestnm.w.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
return _mm256_testn_epi16_mask(__A, __B);
}
__mmask16 test_mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_testn_epi16_mask
- // CHECK: @llvm.x86.avx512.ptestnm.w.256
+ // CHECK: and <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: icmp eq <16 x i16> %{{.*}}, %{{.*}}
+ // CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm256_mask_testn_epi16_mask(__U, __A, __B);
}
@@ -2602,28 +2694,195 @@ __m256i test_mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_broadcastw_epi16(__M, __A);
}
+__m128i test_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A){
+ // CHECK-LABEL: @test_mm_mask_set1_epi8
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 0
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 1
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 2
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 3
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 4
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 5
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 6
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 7
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 8
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 9
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 10
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 11
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 12
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 13
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 14
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 15
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+ return _mm_mask_set1_epi8(__O, __M, __A);
+}
+__m128i test_mm_maskz_set1_epi8 ( __mmask16 __M, char __A){
+ // CHECK-LABEL: @test_mm_maskz_set1_epi8
+ // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 0
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 1
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 2
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 3
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 4
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 5
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 6
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 7
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 8
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 9
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 10
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 11
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 12
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 13
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 14
+ // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 15
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+ return _mm_maskz_set1_epi8( __M, __A);
+}
+
+__m256i test_mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) {
+ // CHECK-LABEL: @test_mm256_mask_set1_epi8
+ // CHECK: insertelement <32 x i8> undef, i8 %{{.*}}, i32 0
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 1
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 2
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 3
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 4
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 5
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 6
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 7
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 8
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 9
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 10
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 11
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 12
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 13
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 14
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 15
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 16
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 17
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 18
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 19
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 20
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 21
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 22
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 23
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 24
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 25
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 26
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 27
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 28
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 29
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 30
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 31
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+ return _mm256_mask_set1_epi8(__O, __M, __A);
+}
+
+__m256i test_mm256_maskz_set1_epi8(__mmask32 __M, char __A) {
+ // CHECK-LABEL: @test_mm256_maskz_set1_epi8
+ // CHECK: insertelement <32 x i8> undef, i8 %{{.*}}, i32 0
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 1
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 2
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 3
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 4
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 5
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 6
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 7
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 8
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 9
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 10
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 11
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 12
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 13
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 14
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 15
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 16
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 17
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 18
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 19
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 20
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 21
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 22
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 23
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 24
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 25
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 26
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 27
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 28
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 29
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 30
+ // CHECK: insertelement <32 x i8> %{{.*}}, i8 %{{.*}}, i32 31
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+ return _mm256_maskz_set1_epi8(__M, __A);
+}
+
__m256i test_mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) {
// CHECK-LABEL: @test_mm256_mask_set1_epi16
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.w.gpr.256
+ // CHECK: insertelement <16 x i16> undef, i16 %{{.*}}, i32 0
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 1
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 2
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 3
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 4
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 5
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 6
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 7
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 8
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 9
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 10
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 11
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 12
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 13
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 14
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 15
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_set1_epi16(__O, __M, __A);
}
__m256i test_mm256_maskz_set1_epi16(__mmask16 __M, short __A) {
// CHECK-LABEL: @test_mm256_maskz_set1_epi16
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.w.gpr.256
+ // CHECK: insertelement <16 x i16> undef, i16 %{{.*}}, i32 0
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 1
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 2
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 3
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 4
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 5
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 6
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 7
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 8
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 9
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 10
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 11
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 12
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 13
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 14
+ // CHECK: insertelement <16 x i16> %{{.*}}, i16 %{{.*}}, i32 15
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_set1_epi16(__M, __A);
}
__m128i test_mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) {
// CHECK-LABEL: @test_mm_mask_set1_epi16
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.w.gpr.128
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 7
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_set1_epi16(__O, __M, __A);
}
__m128i test_mm_maskz_set1_epi16(__mmask8 __M, short __A) {
// CHECK-LABEL: @test_mm_maskz_set1_epi16
- // CHECK: @llvm.x86.avx512.mask.pbroadcast.w.gpr.128
+ // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 7
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_set1_epi16(__M, __A);
}
__m128i test_mm_permutexvar_epi16(__m128i __A, __m128i __B) {
diff --git a/test/CodeGen/avx512vlcd-builtins.c b/test/CodeGen/avx512vlcd-builtins.c
index 643f24f1d22c..376a342f76ee 100644
--- a/test/CodeGen/avx512vlcd-builtins.c
+++ b/test/CodeGen/avx512vlcd-builtins.c
@@ -3,28 +3,56 @@
#include <immintrin.h>
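+// broadcastmb/broadcastmw zero-extend the compare mask to the element width
+// and splat it across every element of the destination.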
-__m128i test_mm_broadcastmb_epi64(__mmask8 __A) {
+__m128i test_mm_broadcastmb_epi64(__m128i a, __m128i b) {
// CHECK-LABEL: @test_mm_broadcastmb_epi64
- // CHECK: @llvm.x86.avx512.broadcastmb.128
- return _mm_broadcastmb_epi64(__A);
+ // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}}
+ // CHECK: shufflevector <4 x i1> %{{.*}}, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: bitcast <8 x i1> %{{.*}} to i8
+ // CHECK: zext i8 %{{.*}} to i64
+ // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ return _mm_broadcastmb_epi64(_mm_cmpeq_epi32_mask(a, b));
}
-__m256i test_mm256_broadcastmb_epi64(__mmask8 __A) {
+__m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: @test_mm256_broadcastmb_epi64
- // CHECK: @llvm.x86.avx512.broadcastmb.256
- return _mm256_broadcastmb_epi64(__A);
-}
-
-__m128i test_mm_broadcastmw_epi32(__mmask16 __A) {
+ // CHECK: icmp eq <4 x i64> %{{.*}}, %{{.*}}
+ // CHECK: shufflevector <4 x i1> %{{.*}}, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: bitcast <8 x i1> %{{.*}} to i8
+ // CHECK: zext i8 %{{.*}} to i64
+ // CHECK: insertelement <4 x i64> undef, i64 %{{.*}}, i32 0
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 2
+ // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 3
+ return _mm256_broadcastmb_epi64(_mm256_cmpeq_epi64_mask(a, b));
+}
+
+__m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK-LABEL: @test_mm_broadcastmw_epi32
- // CHECK: @llvm.x86.avx512.broadcastmw.128
- return _mm_broadcastmw_epi32(__A);
+ // CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: bitcast <16 x i1> %{{.*}} to i16
+ // CHECK: zext i16 %{{.*}} to i32
+ // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ return _mm_broadcastmw_epi32(_mm512_cmpeq_epi32_mask(a, b));
}
-__m256i test_mm256_broadcastmw_epi32(__mmask16 __A) {
+__m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK-LABEL: @test_mm256_broadcastmw_epi32
- // CHECK: @llvm.x86.avx512.broadcastmw.256
- return _mm256_broadcastmw_epi32(__A);
+ // CHECK: icmp eq <16 x i32> %{{.*}}, %{{.*}}
+ // CHECK: bitcast <16 x i1> %{{.*}} to i16
+ // CHECK: zext i16 %{{.*}} to i32
+ // CHECK: insertelement <8 x i32> undef, i32 %{{.*}}, i32 0
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 1
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 2
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 3
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 4
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 5
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 6
+ // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 7
+ return _mm256_broadcastmw_epi32(_mm512_cmpeq_epi32_mask(a, b));
}
__m128i test_mm_conflict_epi64(__m128i __A) {
diff --git a/test/CodeGen/avx512vldq-builtins.c b/test/CodeGen/avx512vldq-builtins.c
index b18c811f845b..3ca4b2135ea7 100644
--- a/test/CodeGen/avx512vldq-builtins.c
+++ b/test/CodeGen/avx512vldq-builtins.c
@@ -909,19 +909,21 @@ __mmask8 test_mm256_movepi64_mask(__m256i __A) {
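+// The 32x2 broadcasts repeat the low two elements, so they lower to a
+// shufflevector with a <0,1,0,1,...> mask plus a select for the masked forms.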
__m256 test_mm256_broadcast_f32x2(__m128 __A) {
// CHECK-LABEL: @test_mm256_broadcast_f32x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x2
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcast_f32x2(__A);
}
__m256 test_mm256_mask_broadcast_f32x2(__m256 __O, __mmask8 __M, __m128 __A) {
// CHECK-LABEL: @test_mm256_mask_broadcast_f32x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x2
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_broadcast_f32x2(__O, __M, __A);
}
__m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
// CHECK-LABEL: @test_mm256_maskz_broadcast_f32x2
- // CHECK: @llvm.x86.avx512.mask.broadcastf32x2
+ // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_broadcast_f32x2(__M, __A);
}
@@ -947,37 +949,41 @@ __m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
__m128i test_mm_broadcast_i32x2(__m128i __A) {
// CHECK-LABEL: @test_mm_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
return _mm_broadcast_i32x2(__A);
}
__m128i test_mm_mask_broadcast_i32x2(__m128i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_broadcast_i32x2(__O, __M, __A);
}
__m128i test_mm_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_broadcast_i32x2(__M, __A);
}
__m256i test_mm256_broadcast_i32x2(__m128i __A) {
// CHECK-LABEL: @test_mm256_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcast_i32x2(__A);
}
__m256i test_mm256_mask_broadcast_i32x2(__m256i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: @test_mm256_mask_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_broadcast_i32x2(__O, __M, __A);
}
__m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
// CHECK-LABEL: @test_mm256_maskz_broadcast_i32x2
- // CHECK: @llvm.x86.avx512.mask.broadcasti32x2
+ // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_broadcast_i32x2(__M, __A);
}
diff --git a/test/CodeGen/avx512vpopcntdqvlintrin.c b/test/CodeGen/avx512vpopcntdqvlintrin.c
new file mode 100644
index 000000000000..010cb6b4f344
--- /dev/null
+++ b/test/CodeGen/avx512vpopcntdqvlintrin.c
@@ -0,0 +1,73 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512vpopcntdq -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
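+// Each popcnt intrinsic lowers to llvm.ctpop; the mask/maskz forms add a
+// select between the ctpop result and the passthru or zero vector.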
+__m128i test_mm_popcnt_epi64(__m128i __A) {
+ // CHECK-LABEL: @test_mm_popcnt_epi64
+ // CHECK: @llvm.ctpop.v2i64
+ return _mm_popcnt_epi64(__A);
+}
+__m128i test_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_mask_popcnt_epi64
+ // CHECK: @llvm.ctpop.v2i64
+ // CHECK: select <2 x i1> %{{.+}}, <2 x i64> %{{[0-9]+}}, <2 x i64> {{.*}}
+ return _mm_mask_popcnt_epi64(__W, __U, __A);
+}
+__m128i test_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_maskz_popcnt_epi64
+ // CHECK: @llvm.ctpop.v2i64
+ // CHECK: select <2 x i1> %{{.+}}, <2 x i64> %{{[0-9]+}}, <2 x i64> {{.*}}
+ return _mm_maskz_popcnt_epi64(__U, __A);
+}
+__m128i test_mm_popcnt_epi32(__m128i __A) {
+ // CHECK-LABEL: @test_mm_popcnt_epi32
+ // CHECK: @llvm.ctpop.v4i32
+ return _mm_popcnt_epi32(__A);
+}
+__m128i test_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_mask_popcnt_epi32
+ // CHECK: @llvm.ctpop.v4i32
+ // CHECK: select <4 x i1> %{{.+}}, <4 x i32> %{{[0-9]+}}, <4 x i32> {{.*}}
+ return _mm_mask_popcnt_epi32(__W, __U, __A);
+}
+__m128i test_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) {
+ // CHECK-LABEL: @test_mm_maskz_popcnt_epi32
+ // CHECK: @llvm.ctpop.v4i32
+ // CHECK: select <4 x i1> %{{.+}}, <4 x i32> %{{[0-9]+}}, <4 x i32> {{.*}}
+ return _mm_maskz_popcnt_epi32(__U, __A);
+}
+
+__m256i test_mm256_popcnt_epi64(__m256i __A) {
+ // CHECK-LABEL: @test_mm256_popcnt_epi64
+ // CHECK: @llvm.ctpop.v4i64
+ return _mm256_popcnt_epi64(__A);
+}
+__m256i test_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_mask_popcnt_epi64
+ // CHECK: @llvm.ctpop.v4i64
+ // CHECK: select <4 x i1> %{{.+}}, <4 x i64> %{{[0-9]+}}, <4 x i64> {{.*}}
+ return _mm256_mask_popcnt_epi64(__W, __U, __A);
+}
+__m256i test_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_maskz_popcnt_epi64
+ // CHECK: @llvm.ctpop.v4i64
+ // CHECK: select <4 x i1> %{{.+}}, <4 x i64> %{{[0-9]+}}, <4 x i64> {{.*}}
+ return _mm256_maskz_popcnt_epi64(__U, __A);
+}
+__m256i test_mm256_popcnt_epi32(__m256i __A) {
+ // CHECK-LABEL: @test_mm256_popcnt_epi32
+ // CHECK: @llvm.ctpop.v8i32
+ return _mm256_popcnt_epi32(__A);
+}
+__m256i test_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_mask_popcnt_epi32
+ // CHECK: @llvm.ctpop.v8i32
+ // CHECK: select <8 x i1> %{{.+}}, <8 x i32> %{{[0-9]+}}, <8 x i32> {{.*}}
+ return _mm256_mask_popcnt_epi32(__W, __U, __A);
+}
+__m256i test_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) {
+ // CHECK-LABEL: @test_mm256_maskz_popcnt_epi32
+ // CHECK: @llvm.ctpop.v8i32
+ // CHECK: select <8 x i1> %{{.+}}, <8 x i32> %{{[0-9]+}}, <8 x i32> {{.*}}
+ return _mm256_maskz_popcnt_epi32(__U, __A);
+}
diff --git a/test/CodeGen/blocks-opencl.cl b/test/CodeGen/blocks-opencl.cl
deleted file mode 100644
index 12513331e9d4..000000000000
--- a/test/CodeGen/blocks-opencl.cl
+++ /dev/null
@@ -1,17 +0,0 @@
-// RUN: %clang_cc1 -O0 %s -ffake-address-space-map -emit-llvm -o - -fblocks -triple x86_64-unknown-unknown | FileCheck %s
-// This used to crash due to trying to generate a bitcast from a cstring
-// in the constant address space to i8* in AS0.
-
-void dummy(float (^const op)(float)) {
-}
-
-// CHECK: i8 addrspace(2)* getelementptr inbounds ([9 x i8], [9 x i8] addrspace(2)* @.str, i32 0, i32 0)
-
-kernel void test_block()
-{
- float (^const X)(float) = ^(float x) {
- return x + 42.0f;
- };
- dummy(X);
-}
-
diff --git a/test/CodeGen/bounds-checking.c b/test/CodeGen/bounds-checking.c
index 90b7f0f6523e..2e6a08650dd9 100644
--- a/test/CodeGen/bounds-checking.c
+++ b/test/CodeGen/bounds-checking.c
@@ -1,5 +1,9 @@
// RUN: %clang_cc1 -fsanitize=local-bounds -emit-llvm -triple x86_64-apple-darwin10 %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fsanitize=local-bounds -fexperimental-new-pass-manager -emit-llvm -triple x86_64-apple-darwin10 %s -o - | FileCheck %s
// RUN: %clang_cc1 -fsanitize=array-bounds -O -fsanitize-trap=array-bounds -emit-llvm -triple x86_64-apple-darwin10 -DNO_DYNAMIC %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fsanitize=array-bounds -O -fsanitize-trap=array-bounds -fexperimental-new-pass-manager -emit-llvm -triple x86_64-apple-darwin10 -DNO_DYNAMIC %s -o - | FileCheck %s
+//
+// REQUIRES: x86-registered-target
// CHECK-LABEL: @f
double f(int b, int i) {
diff --git a/test/CodeGen/builtin-clflushopt.c b/test/CodeGen/builtin-clflushopt.c
index 93861164c4a8..f82ac4638f40 100644
--- a/test/CodeGen/builtin-clflushopt.c
+++ b/test/CodeGen/builtin-clflushopt.c
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin -target-feature +clflushopt -emit-llvm -o - -Wall -Werror | FileCheck %s
-#define __MM_MALLOC_H
+// RUN: %clang_cc1 %s -ffreestanding -triple=x86_64-apple-darwin -target-feature +clflushopt -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <x86intrin.h>
-#include <immintrin.h>
void test_mm_clflushopt(char * __m) {
//CHECK-LABEL: @test_mm_clflushopt
//CHECK: @llvm.x86.clflushopt
diff --git a/test/CodeGen/builtin-clwb.c b/test/CodeGen/builtin-clwb.c
new file mode 100644
index 000000000000..96d00a6a7ca1
--- /dev/null
+++ b/test/CodeGen/builtin-clwb.c
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 %s -ffreestanding -triple=x86_64-apple-darwin -target-feature +clwb -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <x86intrin.h>
+
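+// CLWB writes a cache line back without modifying the pointed-to data, so
+// the intrinsic takes a const void *.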
+void test_mm_clwb(const void *__m) {
+ //CHECK-LABEL: @test_mm_clwb
+ //CHECK: @llvm.x86.clwb
+ _mm_clwb(__m);
+}
diff --git a/test/CodeGen/builtin-clzero.c b/test/CodeGen/builtin-clzero.c
index c9960ced12ec..f9ecb9ddb526 100644
--- a/test/CodeGen/builtin-clzero.c
+++ b/test/CodeGen/builtin-clzero.c
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin -target-feature +clzero -emit-llvm -o - -Wall -Werror | FileCheck %s
-#define __MM_MALLOC_H
+// RUN: %clang_cc1 %s -ffreestanding -triple=x86_64-apple-darwin -target-feature +clzero -emit-llvm -o - -Wall -Werror | FileCheck %s
#include <x86intrin.h>
+
void test_mm_clzero(void * __m) {
//CHECK-LABEL: @test_mm_clzero
//CHECK: @llvm.x86.clzero
diff --git a/test/CodeGen/builtin-cpu-is.c b/test/CodeGen/builtin-cpu-is.c
new file mode 100644
index 000000000000..f2a5f54a0c80
--- /dev/null
+++ b/test/CodeGen/builtin-cpu-is.c
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm < %s | FileCheck %s
+
+// Test that the structure definition, the GEP offsets, the name of the
+// global, the bit grab, and the icmp are all correct.
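+// __cpu_model is { i32 vendor, i32 type, i32 subtype, [1 x i32] features }:
+// "intel"/"amd" test the vendor field, names like "atom" test the type field,
+// and subtypes like "nehalem" or "barcelona" test the subtype field.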
+extern void a(const char *);
+
+void intel() {
+ if (__builtin_cpu_is("intel"))
+ a("intel");
+
+ // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 0)
+ // CHECK: = icmp eq i32 [[LOAD]], 1
+}
+
+void amd() {
+ if (__builtin_cpu_is("amd"))
+ a("amd");
+
+ // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 0)
+ // CHECK: = icmp eq i32 [[LOAD]], 2
+}
+
+void atom() {
+ if (__builtin_cpu_is("atom"))
+ a("atom");
+
+ // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 1)
+ // CHECK: = icmp eq i32 [[LOAD]], 1
+}
+
+void amdfam10h() {
+ if (__builtin_cpu_is("amdfam10h"))
+ a("amdfam10h");
+
+ // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 1)
+ // CHECK: = icmp eq i32 [[LOAD]], 4
+}
+
+void barcelona() {
+ if (__builtin_cpu_is("barcelona"))
+ a("barcelona");
+
+ // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 2)
+ // CHECK: = icmp eq i32 [[LOAD]], 4
+}
+
+void nehalem() {
+ if (__builtin_cpu_is("nehalem"))
+ a("nehalem");
+
+ // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 2)
+ // CHECK: = icmp eq i32 [[LOAD]], 1
+}
diff --git a/test/CodeGen/builtin-cpu-supports.c b/test/CodeGen/builtin-cpu-supports.c
index 96813923f279..b70f4aca9d43 100644
--- a/test/CodeGen/builtin-cpu-supports.c
+++ b/test/CodeGen/builtin-cpu-supports.c
@@ -5,6 +5,10 @@
extern void a(const char *);
int main() {
+ __builtin_cpu_init();
+
+ // CHECK: call void @__cpu_indicator_init
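+ // __builtin_cpu_init lowers to a call to __cpu_indicator_init, which
+ // populates the __cpu_model global that the later checks read.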
+
if (__builtin_cpu_supports("sse4.2"))
a("sse4.2");
diff --git a/test/CodeGen/builtin-sqrt.c b/test/CodeGen/builtin-sqrt.c
new file mode 100644
index 000000000000..5b275bf9066d
--- /dev/null
+++ b/test/CodeGen/builtin-sqrt.c
@@ -0,0 +1,15 @@
+// RUN: %clang_cc1 -fmath-errno -triple x86_64-apple-darwin %s -emit-llvm -o - | FileCheck %s --check-prefix=HAS_ERRNO
+// RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - | FileCheck %s --check-prefix=NO_ERRNO
+
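+// With -fmath-errno, sqrtf must stay a libcall that is not marked readnone,
+// because it may set errno; without errno it lowers to llvm.sqrt.f32.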
+float foo(float X) {
+ // HAS_ERRNO: call float @sqrtf(float
+ // NO_ERRNO: call float @llvm.sqrt.f32(float
+ return __builtin_sqrtf(X);
+}
+
+// HAS_ERRNO: declare float @sqrtf(float) [[ATTR:#[0-9]+]]
+// HAS_ERRNO-NOT: attributes [[ATTR]] = {{{.*}} readnone
+
+// NO_ERRNO: declare float @llvm.sqrt.f32(float) [[ATTR:#[0-9]+]]
+// NO_ERRNO: attributes [[ATTR]] = { nounwind readnone {{.*}}}
+
diff --git a/test/CodeGen/builtins-hexagon.c b/test/CodeGen/builtins-hexagon.c
index f9f5d495d02a..22835a23f292 100644
--- a/test/CodeGen/builtins-hexagon.c
+++ b/test/CodeGen/builtins-hexagon.c
@@ -1,2977 +1,3371 @@
// REQUIRES: hexagon-registered-target
// RUN: %clang_cc1 -triple hexagon-unknown-elf -emit-llvm %s -o - | FileCheck %s
-void foo() {
- int v16 __attribute__((__vector_size__(64)));
- int v32 __attribute__((__vector_size__(128)));
- int v64 __attribute__((__vector_size__(256)));
+void test() {
+ int v64 __attribute__((__vector_size__(64)));
+ int v128 __attribute__((__vector_size__(128)));
+ int v256 __attribute__((__vector_size__(256)));
- // The circ/brev intrinsics do not have _HEXAGON_ in the name.
- __builtin_brev_ldb(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.ldb
- __builtin_brev_ldd(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.ldd
- __builtin_brev_ldh(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.ldh
- __builtin_brev_ldub(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.ldub
- __builtin_brev_lduh(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.lduh
- __builtin_brev_ldw(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.ldw
- __builtin_brev_stb(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.stb
- __builtin_brev_std(0, 0LL, 0);
- // CHECK: @llvm.hexagon.brev.std
- __builtin_brev_sth(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.sth
- __builtin_brev_sthhi(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.sthhi
- __builtin_brev_stw(0, 0, 0);
- // CHECK: @llvm.hexagon.brev.stw
- __builtin_circ_ldb(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.ldb
- __builtin_circ_ldd(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.ldd
- __builtin_circ_ldh(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.ldh
- __builtin_circ_ldub(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.ldub
- __builtin_circ_lduh(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.lduh
- __builtin_circ_ldw(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.ldw
- __builtin_circ_stb(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.stb
- __builtin_circ_std(0, 0LL, 0, 0);
- // CHECK: llvm.hexagon.circ.std
- __builtin_circ_sth(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.sth
- __builtin_circ_sthhi(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.sthhi
- __builtin_circ_stw(0, 0, 0, 0);
- // CHECK: llvm.hexagon.circ.stw
-
- __builtin_HEXAGON_A2_abs(0);
// CHECK: @llvm.hexagon.A2.abs
- __builtin_HEXAGON_A2_absp(0);
+ __builtin_HEXAGON_A2_abs(0);
// CHECK: @llvm.hexagon.A2.absp
- __builtin_HEXAGON_A2_abssat(0);
+ __builtin_HEXAGON_A2_absp(0);
// CHECK: @llvm.hexagon.A2.abssat
- __builtin_HEXAGON_A2_add(0, 0);
+ __builtin_HEXAGON_A2_abssat(0);
// CHECK: @llvm.hexagon.A2.add
- __builtin_HEXAGON_A2_addh_h16_hh(0, 0);
+ __builtin_HEXAGON_A2_add(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.hh
- __builtin_HEXAGON_A2_addh_h16_hl(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_hh(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.hl
- __builtin_HEXAGON_A2_addh_h16_lh(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_hl(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.lh
- __builtin_HEXAGON_A2_addh_h16_ll(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_lh(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.ll
- __builtin_HEXAGON_A2_addh_h16_sat_hh(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_ll(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.sat.hh
- __builtin_HEXAGON_A2_addh_h16_sat_hl(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_sat_hh(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.sat.hl
- __builtin_HEXAGON_A2_addh_h16_sat_lh(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_sat_hl(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.sat.lh
- __builtin_HEXAGON_A2_addh_h16_sat_ll(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_sat_lh(0, 0);
// CHECK: @llvm.hexagon.A2.addh.h16.sat.ll
- __builtin_HEXAGON_A2_addh_l16_hl(0, 0);
+ __builtin_HEXAGON_A2_addh_h16_sat_ll(0, 0);
// CHECK: @llvm.hexagon.A2.addh.l16.hl
- __builtin_HEXAGON_A2_addh_l16_ll(0, 0);
+ __builtin_HEXAGON_A2_addh_l16_hl(0, 0);
// CHECK: @llvm.hexagon.A2.addh.l16.ll
- __builtin_HEXAGON_A2_addh_l16_sat_hl(0, 0);
+ __builtin_HEXAGON_A2_addh_l16_ll(0, 0);
// CHECK: @llvm.hexagon.A2.addh.l16.sat.hl
- __builtin_HEXAGON_A2_addh_l16_sat_ll(0, 0);
+ __builtin_HEXAGON_A2_addh_l16_sat_hl(0, 0);
// CHECK: @llvm.hexagon.A2.addh.l16.sat.ll
- __builtin_HEXAGON_A2_addi(0, 0);
+ __builtin_HEXAGON_A2_addh_l16_sat_ll(0, 0);
// CHECK: @llvm.hexagon.A2.addi
- __builtin_HEXAGON_A2_addp(0, 0);
+ __builtin_HEXAGON_A2_addi(0, 0);
// CHECK: @llvm.hexagon.A2.addp
- __builtin_HEXAGON_A2_addpsat(0, 0);
+ __builtin_HEXAGON_A2_addp(0, 0);
// CHECK: @llvm.hexagon.A2.addpsat
- __builtin_HEXAGON_A2_addsat(0, 0);
+ __builtin_HEXAGON_A2_addpsat(0, 0);
// CHECK: @llvm.hexagon.A2.addsat
- __builtin_HEXAGON_A2_addsp(0, 0);
+ __builtin_HEXAGON_A2_addsat(0, 0);
// CHECK: @llvm.hexagon.A2.addsp
- __builtin_HEXAGON_A2_and(0, 0);
+ __builtin_HEXAGON_A2_addsp(0, 0);
// CHECK: @llvm.hexagon.A2.and
- __builtin_HEXAGON_A2_andir(0, 0);
+ __builtin_HEXAGON_A2_and(0, 0);
// CHECK: @llvm.hexagon.A2.andir
- __builtin_HEXAGON_A2_andp(0, 0);
+ __builtin_HEXAGON_A2_andir(0, 0);
// CHECK: @llvm.hexagon.A2.andp
- __builtin_HEXAGON_A2_aslh(0);
+ __builtin_HEXAGON_A2_andp(0, 0);
// CHECK: @llvm.hexagon.A2.aslh
- __builtin_HEXAGON_A2_asrh(0);
+ __builtin_HEXAGON_A2_aslh(0);
// CHECK: @llvm.hexagon.A2.asrh
- __builtin_HEXAGON_A2_combine_hh(0, 0);
+ __builtin_HEXAGON_A2_asrh(0);
// CHECK: @llvm.hexagon.A2.combine.hh
- __builtin_HEXAGON_A2_combine_hl(0, 0);
+ __builtin_HEXAGON_A2_combine_hh(0, 0);
// CHECK: @llvm.hexagon.A2.combine.hl
- __builtin_HEXAGON_A2_combineii(0, 0);
- // CHECK: @llvm.hexagon.A2.combineii
- __builtin_HEXAGON_A2_combine_lh(0, 0);
+ __builtin_HEXAGON_A2_combine_hl(0, 0);
// CHECK: @llvm.hexagon.A2.combine.lh
- __builtin_HEXAGON_A2_combine_ll(0, 0);
+ __builtin_HEXAGON_A2_combine_lh(0, 0);
// CHECK: @llvm.hexagon.A2.combine.ll
- __builtin_HEXAGON_A2_combinew(0, 0);
+ __builtin_HEXAGON_A2_combine_ll(0, 0);
+ // CHECK: @llvm.hexagon.A2.combineii
+ __builtin_HEXAGON_A2_combineii(0, 0);
// CHECK: @llvm.hexagon.A2.combinew
- __builtin_HEXAGON_A2_max(0, 0);
+ __builtin_HEXAGON_A2_combinew(0, 0);
// CHECK: @llvm.hexagon.A2.max
- __builtin_HEXAGON_A2_maxp(0, 0);
+ __builtin_HEXAGON_A2_max(0, 0);
// CHECK: @llvm.hexagon.A2.maxp
- __builtin_HEXAGON_A2_maxu(0, 0);
+ __builtin_HEXAGON_A2_maxp(0, 0);
// CHECK: @llvm.hexagon.A2.maxu
- __builtin_HEXAGON_A2_maxup(0, 0);
+ __builtin_HEXAGON_A2_maxu(0, 0);
// CHECK: @llvm.hexagon.A2.maxup
- __builtin_HEXAGON_A2_min(0, 0);
+ __builtin_HEXAGON_A2_maxup(0, 0);
// CHECK: @llvm.hexagon.A2.min
- __builtin_HEXAGON_A2_minp(0, 0);
+ __builtin_HEXAGON_A2_min(0, 0);
// CHECK: @llvm.hexagon.A2.minp
- __builtin_HEXAGON_A2_minu(0, 0);
+ __builtin_HEXAGON_A2_minp(0, 0);
// CHECK: @llvm.hexagon.A2.minu
- __builtin_HEXAGON_A2_minup(0, 0);
+ __builtin_HEXAGON_A2_minu(0, 0);
// CHECK: @llvm.hexagon.A2.minup
- __builtin_HEXAGON_A2_neg(0);
+ __builtin_HEXAGON_A2_minup(0, 0);
// CHECK: @llvm.hexagon.A2.neg
- __builtin_HEXAGON_A2_negp(0);
+ __builtin_HEXAGON_A2_neg(0);
// CHECK: @llvm.hexagon.A2.negp
- __builtin_HEXAGON_A2_negsat(0);
+ __builtin_HEXAGON_A2_negp(0);
// CHECK: @llvm.hexagon.A2.negsat
- __builtin_HEXAGON_A2_not(0);
+ __builtin_HEXAGON_A2_negsat(0);
// CHECK: @llvm.hexagon.A2.not
- __builtin_HEXAGON_A2_notp(0);
+ __builtin_HEXAGON_A2_not(0);
// CHECK: @llvm.hexagon.A2.notp
- __builtin_HEXAGON_A2_or(0, 0);
+ __builtin_HEXAGON_A2_notp(0);
// CHECK: @llvm.hexagon.A2.or
- __builtin_HEXAGON_A2_orir(0, 0);
+ __builtin_HEXAGON_A2_or(0, 0);
// CHECK: @llvm.hexagon.A2.orir
- __builtin_HEXAGON_A2_orp(0, 0);
+ __builtin_HEXAGON_A2_orir(0, 0);
// CHECK: @llvm.hexagon.A2.orp
- __builtin_HEXAGON_A2_roundsat(0);
+ __builtin_HEXAGON_A2_orp(0, 0);
// CHECK: @llvm.hexagon.A2.roundsat
- __builtin_HEXAGON_A2_sat(0);
+ __builtin_HEXAGON_A2_roundsat(0);
// CHECK: @llvm.hexagon.A2.sat
- __builtin_HEXAGON_A2_satb(0);
+ __builtin_HEXAGON_A2_sat(0);
// CHECK: @llvm.hexagon.A2.satb
- __builtin_HEXAGON_A2_sath(0);
+ __builtin_HEXAGON_A2_satb(0);
// CHECK: @llvm.hexagon.A2.sath
- __builtin_HEXAGON_A2_satub(0);
+ __builtin_HEXAGON_A2_sath(0);
// CHECK: @llvm.hexagon.A2.satub
- __builtin_HEXAGON_A2_satuh(0);
+ __builtin_HEXAGON_A2_satub(0);
// CHECK: @llvm.hexagon.A2.satuh
- __builtin_HEXAGON_A2_sub(0, 0);
+ __builtin_HEXAGON_A2_satuh(0);
// CHECK: @llvm.hexagon.A2.sub
- __builtin_HEXAGON_A2_subh_h16_hh(0, 0);
+ __builtin_HEXAGON_A2_sub(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.hh
- __builtin_HEXAGON_A2_subh_h16_hl(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_hh(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.hl
- __builtin_HEXAGON_A2_subh_h16_lh(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_hl(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.lh
- __builtin_HEXAGON_A2_subh_h16_ll(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_lh(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.ll
- __builtin_HEXAGON_A2_subh_h16_sat_hh(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_ll(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.sat.hh
- __builtin_HEXAGON_A2_subh_h16_sat_hl(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_sat_hh(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.sat.hl
- __builtin_HEXAGON_A2_subh_h16_sat_lh(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_sat_hl(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.sat.lh
- __builtin_HEXAGON_A2_subh_h16_sat_ll(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_sat_lh(0, 0);
// CHECK: @llvm.hexagon.A2.subh.h16.sat.ll
- __builtin_HEXAGON_A2_subh_l16_hl(0, 0);
+ __builtin_HEXAGON_A2_subh_h16_sat_ll(0, 0);
// CHECK: @llvm.hexagon.A2.subh.l16.hl
- __builtin_HEXAGON_A2_subh_l16_ll(0, 0);
+ __builtin_HEXAGON_A2_subh_l16_hl(0, 0);
// CHECK: @llvm.hexagon.A2.subh.l16.ll
- __builtin_HEXAGON_A2_subh_l16_sat_hl(0, 0);
+ __builtin_HEXAGON_A2_subh_l16_ll(0, 0);
// CHECK: @llvm.hexagon.A2.subh.l16.sat.hl
- __builtin_HEXAGON_A2_subh_l16_sat_ll(0, 0);
+ __builtin_HEXAGON_A2_subh_l16_sat_hl(0, 0);
// CHECK: @llvm.hexagon.A2.subh.l16.sat.ll
- __builtin_HEXAGON_A2_subp(0, 0);
+ __builtin_HEXAGON_A2_subh_l16_sat_ll(0, 0);
// CHECK: @llvm.hexagon.A2.subp
- __builtin_HEXAGON_A2_subri(0, 0);
+ __builtin_HEXAGON_A2_subp(0, 0);
// CHECK: @llvm.hexagon.A2.subri
- __builtin_HEXAGON_A2_subsat(0, 0);
+ __builtin_HEXAGON_A2_subri(0, 0);
// CHECK: @llvm.hexagon.A2.subsat
- __builtin_HEXAGON_A2_svaddh(0, 0);
+ __builtin_HEXAGON_A2_subsat(0, 0);
// CHECK: @llvm.hexagon.A2.svaddh
- __builtin_HEXAGON_A2_svaddhs(0, 0);
+ __builtin_HEXAGON_A2_svaddh(0, 0);
// CHECK: @llvm.hexagon.A2.svaddhs
- __builtin_HEXAGON_A2_svadduhs(0, 0);
+ __builtin_HEXAGON_A2_svaddhs(0, 0);
// CHECK: @llvm.hexagon.A2.svadduhs
- __builtin_HEXAGON_A2_svavgh(0, 0);
+ __builtin_HEXAGON_A2_svadduhs(0, 0);
// CHECK: @llvm.hexagon.A2.svavgh
- __builtin_HEXAGON_A2_svavghs(0, 0);
+ __builtin_HEXAGON_A2_svavgh(0, 0);
// CHECK: @llvm.hexagon.A2.svavghs
- __builtin_HEXAGON_A2_svnavgh(0, 0);
+ __builtin_HEXAGON_A2_svavghs(0, 0);
// CHECK: @llvm.hexagon.A2.svnavgh
- __builtin_HEXAGON_A2_svsubh(0, 0);
+ __builtin_HEXAGON_A2_svnavgh(0, 0);
// CHECK: @llvm.hexagon.A2.svsubh
- __builtin_HEXAGON_A2_svsubhs(0, 0);
+ __builtin_HEXAGON_A2_svsubh(0, 0);
// CHECK: @llvm.hexagon.A2.svsubhs
- __builtin_HEXAGON_A2_svsubuhs(0, 0);
+ __builtin_HEXAGON_A2_svsubhs(0, 0);
// CHECK: @llvm.hexagon.A2.svsubuhs
- __builtin_HEXAGON_A2_swiz(0);
+ __builtin_HEXAGON_A2_svsubuhs(0, 0);
// CHECK: @llvm.hexagon.A2.swiz
- __builtin_HEXAGON_A2_sxtb(0);
+ __builtin_HEXAGON_A2_swiz(0);
// CHECK: @llvm.hexagon.A2.sxtb
- __builtin_HEXAGON_A2_sxth(0);
+ __builtin_HEXAGON_A2_sxtb(0);
// CHECK: @llvm.hexagon.A2.sxth
- __builtin_HEXAGON_A2_sxtw(0);
+ __builtin_HEXAGON_A2_sxth(0);
// CHECK: @llvm.hexagon.A2.sxtw
- __builtin_HEXAGON_A2_tfr(0);
+ __builtin_HEXAGON_A2_sxtw(0);
// CHECK: @llvm.hexagon.A2.tfr
- __builtin_HEXAGON_A2_tfrih(0, 0);
+ __builtin_HEXAGON_A2_tfr(0);
// CHECK: @llvm.hexagon.A2.tfrih
- __builtin_HEXAGON_A2_tfril(0, 0);
+ __builtin_HEXAGON_A2_tfrih(0, 0);
// CHECK: @llvm.hexagon.A2.tfril
- __builtin_HEXAGON_A2_tfrp(0);
+ __builtin_HEXAGON_A2_tfril(0, 0);
// CHECK: @llvm.hexagon.A2.tfrp
- __builtin_HEXAGON_A2_tfrpi(0);
+ __builtin_HEXAGON_A2_tfrp(0);
// CHECK: @llvm.hexagon.A2.tfrpi
- __builtin_HEXAGON_A2_tfrsi(0);
+ __builtin_HEXAGON_A2_tfrpi(0);
// CHECK: @llvm.hexagon.A2.tfrsi
- __builtin_HEXAGON_A2_vabsh(0);
+ __builtin_HEXAGON_A2_tfrsi(0);
// CHECK: @llvm.hexagon.A2.vabsh
- __builtin_HEXAGON_A2_vabshsat(0);
+ __builtin_HEXAGON_A2_vabsh(0);
// CHECK: @llvm.hexagon.A2.vabshsat
- __builtin_HEXAGON_A2_vabsw(0);
+ __builtin_HEXAGON_A2_vabshsat(0);
// CHECK: @llvm.hexagon.A2.vabsw
- __builtin_HEXAGON_A2_vabswsat(0);
+ __builtin_HEXAGON_A2_vabsw(0);
// CHECK: @llvm.hexagon.A2.vabswsat
- __builtin_HEXAGON_A2_vaddb_map(0, 0);
+ __builtin_HEXAGON_A2_vabswsat(0);
// CHECK: @llvm.hexagon.A2.vaddb.map
- __builtin_HEXAGON_A2_vaddh(0, 0);
+ __builtin_HEXAGON_A2_vaddb_map(0, 0);
// CHECK: @llvm.hexagon.A2.vaddh
- __builtin_HEXAGON_A2_vaddhs(0, 0);
+ __builtin_HEXAGON_A2_vaddh(0, 0);
// CHECK: @llvm.hexagon.A2.vaddhs
- __builtin_HEXAGON_A2_vaddub(0, 0);
+ __builtin_HEXAGON_A2_vaddhs(0, 0);
// CHECK: @llvm.hexagon.A2.vaddub
- __builtin_HEXAGON_A2_vaddubs(0, 0);
+ __builtin_HEXAGON_A2_vaddub(0, 0);
// CHECK: @llvm.hexagon.A2.vaddubs
- __builtin_HEXAGON_A2_vadduhs(0, 0);
+ __builtin_HEXAGON_A2_vaddubs(0, 0);
// CHECK: @llvm.hexagon.A2.vadduhs
- __builtin_HEXAGON_A2_vaddw(0, 0);
+ __builtin_HEXAGON_A2_vadduhs(0, 0);
// CHECK: @llvm.hexagon.A2.vaddw
- __builtin_HEXAGON_A2_vaddws(0, 0);
+ __builtin_HEXAGON_A2_vaddw(0, 0);
// CHECK: @llvm.hexagon.A2.vaddws
- __builtin_HEXAGON_A2_vavgh(0, 0);
+ __builtin_HEXAGON_A2_vaddws(0, 0);
// CHECK: @llvm.hexagon.A2.vavgh
- __builtin_HEXAGON_A2_vavghcr(0, 0);
+ __builtin_HEXAGON_A2_vavgh(0, 0);
// CHECK: @llvm.hexagon.A2.vavghcr
- __builtin_HEXAGON_A2_vavghr(0, 0);
+ __builtin_HEXAGON_A2_vavghcr(0, 0);
// CHECK: @llvm.hexagon.A2.vavghr
- __builtin_HEXAGON_A2_vavgub(0, 0);
+ __builtin_HEXAGON_A2_vavghr(0, 0);
// CHECK: @llvm.hexagon.A2.vavgub
- __builtin_HEXAGON_A2_vavgubr(0, 0);
+ __builtin_HEXAGON_A2_vavgub(0, 0);
// CHECK: @llvm.hexagon.A2.vavgubr
- __builtin_HEXAGON_A2_vavguh(0, 0);
+ __builtin_HEXAGON_A2_vavgubr(0, 0);
// CHECK: @llvm.hexagon.A2.vavguh
- __builtin_HEXAGON_A2_vavguhr(0, 0);
+ __builtin_HEXAGON_A2_vavguh(0, 0);
// CHECK: @llvm.hexagon.A2.vavguhr
- __builtin_HEXAGON_A2_vavguw(0, 0);
+ __builtin_HEXAGON_A2_vavguhr(0, 0);
// CHECK: @llvm.hexagon.A2.vavguw
- __builtin_HEXAGON_A2_vavguwr(0, 0);
+ __builtin_HEXAGON_A2_vavguw(0, 0);
// CHECK: @llvm.hexagon.A2.vavguwr
- __builtin_HEXAGON_A2_vavgw(0, 0);
+ __builtin_HEXAGON_A2_vavguwr(0, 0);
// CHECK: @llvm.hexagon.A2.vavgw
- __builtin_HEXAGON_A2_vavgwcr(0, 0);
+ __builtin_HEXAGON_A2_vavgw(0, 0);
// CHECK: @llvm.hexagon.A2.vavgwcr
- __builtin_HEXAGON_A2_vavgwr(0, 0);
+ __builtin_HEXAGON_A2_vavgwcr(0, 0);
// CHECK: @llvm.hexagon.A2.vavgwr
- __builtin_HEXAGON_A2_vcmpbeq(0, 0);
+ __builtin_HEXAGON_A2_vavgwr(0, 0);
// CHECK: @llvm.hexagon.A2.vcmpbeq
- __builtin_HEXAGON_A2_vcmpbgtu(0, 0);
+ __builtin_HEXAGON_A2_vcmpbeq(0, 0);
// CHECK: @llvm.hexagon.A2.vcmpbgtu
- __builtin_HEXAGON_A2_vcmpheq(0, 0);
+ __builtin_HEXAGON_A2_vcmpbgtu(0, 0);
// CHECK: @llvm.hexagon.A2.vcmpheq
- __builtin_HEXAGON_A2_vcmphgt(0, 0);
+ __builtin_HEXAGON_A2_vcmpheq(0, 0);
// CHECK: @llvm.hexagon.A2.vcmphgt
- __builtin_HEXAGON_A2_vcmphgtu(0, 0);
+ __builtin_HEXAGON_A2_vcmphgt(0, 0);
// CHECK: @llvm.hexagon.A2.vcmphgtu
- __builtin_HEXAGON_A2_vcmpweq(0, 0);
+ __builtin_HEXAGON_A2_vcmphgtu(0, 0);
// CHECK: @llvm.hexagon.A2.vcmpweq
- __builtin_HEXAGON_A2_vcmpwgt(0, 0);
+ __builtin_HEXAGON_A2_vcmpweq(0, 0);
// CHECK: @llvm.hexagon.A2.vcmpwgt
- __builtin_HEXAGON_A2_vcmpwgtu(0, 0);
+ __builtin_HEXAGON_A2_vcmpwgt(0, 0);
// CHECK: @llvm.hexagon.A2.vcmpwgtu
- __builtin_HEXAGON_A2_vconj(0);
+ __builtin_HEXAGON_A2_vcmpwgtu(0, 0);
// CHECK: @llvm.hexagon.A2.vconj
- __builtin_HEXAGON_A2_vmaxb(0, 0);
+ __builtin_HEXAGON_A2_vconj(0);
// CHECK: @llvm.hexagon.A2.vmaxb
- __builtin_HEXAGON_A2_vmaxh(0, 0);
+ __builtin_HEXAGON_A2_vmaxb(0, 0);
// CHECK: @llvm.hexagon.A2.vmaxh
- __builtin_HEXAGON_A2_vmaxub(0, 0);
+ __builtin_HEXAGON_A2_vmaxh(0, 0);
// CHECK: @llvm.hexagon.A2.vmaxub
- __builtin_HEXAGON_A2_vmaxuh(0, 0);
+ __builtin_HEXAGON_A2_vmaxub(0, 0);
// CHECK: @llvm.hexagon.A2.vmaxuh
- __builtin_HEXAGON_A2_vmaxuw(0, 0);
+ __builtin_HEXAGON_A2_vmaxuh(0, 0);
// CHECK: @llvm.hexagon.A2.vmaxuw
- __builtin_HEXAGON_A2_vmaxw(0, 0);
+ __builtin_HEXAGON_A2_vmaxuw(0, 0);
// CHECK: @llvm.hexagon.A2.vmaxw
- __builtin_HEXAGON_A2_vminb(0, 0);
+ __builtin_HEXAGON_A2_vmaxw(0, 0);
// CHECK: @llvm.hexagon.A2.vminb
- __builtin_HEXAGON_A2_vminh(0, 0);
+ __builtin_HEXAGON_A2_vminb(0, 0);
// CHECK: @llvm.hexagon.A2.vminh
- __builtin_HEXAGON_A2_vminub(0, 0);
+ __builtin_HEXAGON_A2_vminh(0, 0);
// CHECK: @llvm.hexagon.A2.vminub
- __builtin_HEXAGON_A2_vminuh(0, 0);
+ __builtin_HEXAGON_A2_vminub(0, 0);
// CHECK: @llvm.hexagon.A2.vminuh
- __builtin_HEXAGON_A2_vminuw(0, 0);
+ __builtin_HEXAGON_A2_vminuh(0, 0);
// CHECK: @llvm.hexagon.A2.vminuw
- __builtin_HEXAGON_A2_vminw(0, 0);
+ __builtin_HEXAGON_A2_vminuw(0, 0);
// CHECK: @llvm.hexagon.A2.vminw
- __builtin_HEXAGON_A2_vnavgh(0, 0);
+ __builtin_HEXAGON_A2_vminw(0, 0);
// CHECK: @llvm.hexagon.A2.vnavgh
- __builtin_HEXAGON_A2_vnavghcr(0, 0);
+ __builtin_HEXAGON_A2_vnavgh(0, 0);
// CHECK: @llvm.hexagon.A2.vnavghcr
- __builtin_HEXAGON_A2_vnavghr(0, 0);
+ __builtin_HEXAGON_A2_vnavghcr(0, 0);
// CHECK: @llvm.hexagon.A2.vnavghr
- __builtin_HEXAGON_A2_vnavgw(0, 0);
+ __builtin_HEXAGON_A2_vnavghr(0, 0);
// CHECK: @llvm.hexagon.A2.vnavgw
- __builtin_HEXAGON_A2_vnavgwcr(0, 0);
+ __builtin_HEXAGON_A2_vnavgw(0, 0);
// CHECK: @llvm.hexagon.A2.vnavgwcr
- __builtin_HEXAGON_A2_vnavgwr(0, 0);
+ __builtin_HEXAGON_A2_vnavgwcr(0, 0);
// CHECK: @llvm.hexagon.A2.vnavgwr
- __builtin_HEXAGON_A2_vraddub(0, 0);
+ __builtin_HEXAGON_A2_vnavgwr(0, 0);
// CHECK: @llvm.hexagon.A2.vraddub
- __builtin_HEXAGON_A2_vraddub_acc(0, 0, 0);
+ __builtin_HEXAGON_A2_vraddub(0, 0);
// CHECK: @llvm.hexagon.A2.vraddub.acc
- __builtin_HEXAGON_A2_vrsadub(0, 0);
+ __builtin_HEXAGON_A2_vraddub_acc(0, 0, 0);
// CHECK: @llvm.hexagon.A2.vrsadub
- __builtin_HEXAGON_A2_vrsadub_acc(0, 0, 0);
+ __builtin_HEXAGON_A2_vrsadub(0, 0);
// CHECK: @llvm.hexagon.A2.vrsadub.acc
- __builtin_HEXAGON_A2_vsubb_map(0, 0);
+ __builtin_HEXAGON_A2_vrsadub_acc(0, 0, 0);
// CHECK: @llvm.hexagon.A2.vsubb.map
- __builtin_HEXAGON_A2_vsubh(0, 0);
+ __builtin_HEXAGON_A2_vsubb_map(0, 0);
// CHECK: @llvm.hexagon.A2.vsubh
- __builtin_HEXAGON_A2_vsubhs(0, 0);
+ __builtin_HEXAGON_A2_vsubh(0, 0);
// CHECK: @llvm.hexagon.A2.vsubhs
- __builtin_HEXAGON_A2_vsubub(0, 0);
+ __builtin_HEXAGON_A2_vsubhs(0, 0);
// CHECK: @llvm.hexagon.A2.vsubub
- __builtin_HEXAGON_A2_vsububs(0, 0);
+ __builtin_HEXAGON_A2_vsubub(0, 0);
// CHECK: @llvm.hexagon.A2.vsububs
- __builtin_HEXAGON_A2_vsubuhs(0, 0);
+ __builtin_HEXAGON_A2_vsububs(0, 0);
// CHECK: @llvm.hexagon.A2.vsubuhs
- __builtin_HEXAGON_A2_vsubw(0, 0);
+ __builtin_HEXAGON_A2_vsubuhs(0, 0);
// CHECK: @llvm.hexagon.A2.vsubw
- __builtin_HEXAGON_A2_vsubws(0, 0);
+ __builtin_HEXAGON_A2_vsubw(0, 0);
// CHECK: @llvm.hexagon.A2.vsubws
- __builtin_HEXAGON_A2_xor(0, 0);
+ __builtin_HEXAGON_A2_vsubws(0, 0);
// CHECK: @llvm.hexagon.A2.xor
- __builtin_HEXAGON_A2_xorp(0, 0);
+ __builtin_HEXAGON_A2_xor(0, 0);
// CHECK: @llvm.hexagon.A2.xorp
- __builtin_HEXAGON_A2_zxtb(0);
+ __builtin_HEXAGON_A2_xorp(0, 0);
// CHECK: @llvm.hexagon.A2.zxtb
- __builtin_HEXAGON_A2_zxth(0);
+ __builtin_HEXAGON_A2_zxtb(0);
// CHECK: @llvm.hexagon.A2.zxth
- __builtin_HEXAGON_A4_andn(0, 0);
+ __builtin_HEXAGON_A2_zxth(0);
// CHECK: @llvm.hexagon.A4.andn
- __builtin_HEXAGON_A4_andnp(0, 0);
+ __builtin_HEXAGON_A4_andn(0, 0);
// CHECK: @llvm.hexagon.A4.andnp
- __builtin_HEXAGON_A4_bitsplit(0, 0);
+ __builtin_HEXAGON_A4_andnp(0, 0);
// CHECK: @llvm.hexagon.A4.bitsplit
- __builtin_HEXAGON_A4_bitspliti(0, 0);
+ __builtin_HEXAGON_A4_bitsplit(0, 0);
// CHECK: @llvm.hexagon.A4.bitspliti
- __builtin_HEXAGON_A4_boundscheck(0, 0);
+ __builtin_HEXAGON_A4_bitspliti(0, 0);
// CHECK: @llvm.hexagon.A4.boundscheck
- __builtin_HEXAGON_A4_cmpbeq(0, 0);
+ __builtin_HEXAGON_A4_boundscheck(0, 0);
// CHECK: @llvm.hexagon.A4.cmpbeq
- __builtin_HEXAGON_A4_cmpbeqi(0, 0);
+ __builtin_HEXAGON_A4_cmpbeq(0, 0);
// CHECK: @llvm.hexagon.A4.cmpbeqi
- __builtin_HEXAGON_A4_cmpbgt(0, 0);
+ __builtin_HEXAGON_A4_cmpbeqi(0, 0);
// CHECK: @llvm.hexagon.A4.cmpbgt
- __builtin_HEXAGON_A4_cmpbgti(0, 0);
+ __builtin_HEXAGON_A4_cmpbgt(0, 0);
// CHECK: @llvm.hexagon.A4.cmpbgti
- __builtin_HEXAGON_A4_cmpbgtu(0, 0);
+ __builtin_HEXAGON_A4_cmpbgti(0, 0);
// CHECK: @llvm.hexagon.A4.cmpbgtu
- __builtin_HEXAGON_A4_cmpbgtui(0, 0);
+ __builtin_HEXAGON_A4_cmpbgtu(0, 0);
// CHECK: @llvm.hexagon.A4.cmpbgtui
- __builtin_HEXAGON_A4_cmpheq(0, 0);
+ __builtin_HEXAGON_A4_cmpbgtui(0, 0);
// CHECK: @llvm.hexagon.A4.cmpheq
- __builtin_HEXAGON_A4_cmpheqi(0, 0);
+ __builtin_HEXAGON_A4_cmpheq(0, 0);
// CHECK: @llvm.hexagon.A4.cmpheqi
- __builtin_HEXAGON_A4_cmphgt(0, 0);
+ __builtin_HEXAGON_A4_cmpheqi(0, 0);
// CHECK: @llvm.hexagon.A4.cmphgt
- __builtin_HEXAGON_A4_cmphgti(0, 0);
+ __builtin_HEXAGON_A4_cmphgt(0, 0);
// CHECK: @llvm.hexagon.A4.cmphgti
- __builtin_HEXAGON_A4_cmphgtu(0, 0);
+ __builtin_HEXAGON_A4_cmphgti(0, 0);
// CHECK: @llvm.hexagon.A4.cmphgtu
- __builtin_HEXAGON_A4_cmphgtui(0, 0);
+ __builtin_HEXAGON_A4_cmphgtu(0, 0);
// CHECK: @llvm.hexagon.A4.cmphgtui
- __builtin_HEXAGON_A4_combineir(0, 0);
+ __builtin_HEXAGON_A4_cmphgtui(0, 0);
// CHECK: @llvm.hexagon.A4.combineir
- __builtin_HEXAGON_A4_combineri(0, 0);
+ __builtin_HEXAGON_A4_combineir(0, 0);
// CHECK: @llvm.hexagon.A4.combineri
- __builtin_HEXAGON_A4_cround_ri(0, 0);
+ __builtin_HEXAGON_A4_combineri(0, 0);
// CHECK: @llvm.hexagon.A4.cround.ri
- __builtin_HEXAGON_A4_cround_rr(0, 0);
+ __builtin_HEXAGON_A4_cround_ri(0, 0);
// CHECK: @llvm.hexagon.A4.cround.rr
- __builtin_HEXAGON_A4_modwrapu(0, 0);
+ __builtin_HEXAGON_A4_cround_rr(0, 0);
// CHECK: @llvm.hexagon.A4.modwrapu
- __builtin_HEXAGON_A4_orn(0, 0);
+ __builtin_HEXAGON_A4_modwrapu(0, 0);
// CHECK: @llvm.hexagon.A4.orn
- __builtin_HEXAGON_A4_ornp(0, 0);
+ __builtin_HEXAGON_A4_orn(0, 0);
// CHECK: @llvm.hexagon.A4.ornp
- __builtin_HEXAGON_A4_rcmpeq(0, 0);
+ __builtin_HEXAGON_A4_ornp(0, 0);
// CHECK: @llvm.hexagon.A4.rcmpeq
- __builtin_HEXAGON_A4_rcmpeqi(0, 0);
+ __builtin_HEXAGON_A4_rcmpeq(0, 0);
// CHECK: @llvm.hexagon.A4.rcmpeqi
- __builtin_HEXAGON_A4_rcmpneq(0, 0);
+ __builtin_HEXAGON_A4_rcmpeqi(0, 0);
// CHECK: @llvm.hexagon.A4.rcmpneq
- __builtin_HEXAGON_A4_rcmpneqi(0, 0);
+ __builtin_HEXAGON_A4_rcmpneq(0, 0);
// CHECK: @llvm.hexagon.A4.rcmpneqi
- __builtin_HEXAGON_A4_round_ri(0, 0);
+ __builtin_HEXAGON_A4_rcmpneqi(0, 0);
// CHECK: @llvm.hexagon.A4.round.ri
- __builtin_HEXAGON_A4_round_ri_sat(0, 0);
+ __builtin_HEXAGON_A4_round_ri(0, 0);
// CHECK: @llvm.hexagon.A4.round.ri.sat
- __builtin_HEXAGON_A4_round_rr(0, 0);
+ __builtin_HEXAGON_A4_round_ri_sat(0, 0);
// CHECK: @llvm.hexagon.A4.round.rr
- __builtin_HEXAGON_A4_round_rr_sat(0, 0);
+ __builtin_HEXAGON_A4_round_rr(0, 0);
// CHECK: @llvm.hexagon.A4.round.rr.sat
- __builtin_HEXAGON_A4_tlbmatch(0, 0);
+ __builtin_HEXAGON_A4_round_rr_sat(0, 0);
// CHECK: @llvm.hexagon.A4.tlbmatch
- __builtin_HEXAGON_A4_vcmpbeq_any(0, 0);
+ __builtin_HEXAGON_A4_tlbmatch(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpbeq.any
- __builtin_HEXAGON_A4_vcmpbeqi(0, 0);
+ __builtin_HEXAGON_A4_vcmpbeq_any(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpbeqi
- __builtin_HEXAGON_A4_vcmpbgt(0, 0);
+ __builtin_HEXAGON_A4_vcmpbeqi(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpbgt
- __builtin_HEXAGON_A4_vcmpbgti(0, 0);
+ __builtin_HEXAGON_A4_vcmpbgt(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpbgti
- __builtin_HEXAGON_A4_vcmpbgtui(0, 0);
+ __builtin_HEXAGON_A4_vcmpbgti(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpbgtui
- __builtin_HEXAGON_A4_vcmpheqi(0, 0);
+ __builtin_HEXAGON_A4_vcmpbgtui(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpheqi
- __builtin_HEXAGON_A4_vcmphgti(0, 0);
+ __builtin_HEXAGON_A4_vcmpheqi(0, 0);
// CHECK: @llvm.hexagon.A4.vcmphgti
- __builtin_HEXAGON_A4_vcmphgtui(0, 0);
+ __builtin_HEXAGON_A4_vcmphgti(0, 0);
// CHECK: @llvm.hexagon.A4.vcmphgtui
- __builtin_HEXAGON_A4_vcmpweqi(0, 0);
+ __builtin_HEXAGON_A4_vcmphgtui(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpweqi
- __builtin_HEXAGON_A4_vcmpwgti(0, 0);
+ __builtin_HEXAGON_A4_vcmpweqi(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpwgti
- __builtin_HEXAGON_A4_vcmpwgtui(0, 0);
+ __builtin_HEXAGON_A4_vcmpwgti(0, 0);
// CHECK: @llvm.hexagon.A4.vcmpwgtui
- __builtin_HEXAGON_A4_vrmaxh(0, 0, 0);
+ __builtin_HEXAGON_A4_vcmpwgtui(0, 0);
// CHECK: @llvm.hexagon.A4.vrmaxh
- __builtin_HEXAGON_A4_vrmaxuh(0, 0, 0);
+ __builtin_HEXAGON_A4_vrmaxh(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrmaxuh
- __builtin_HEXAGON_A4_vrmaxuw(0, 0, 0);
+ __builtin_HEXAGON_A4_vrmaxuh(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrmaxuw
- __builtin_HEXAGON_A4_vrmaxw(0, 0, 0);
+ __builtin_HEXAGON_A4_vrmaxuw(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrmaxw
- __builtin_HEXAGON_A4_vrminh(0, 0, 0);
+ __builtin_HEXAGON_A4_vrmaxw(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrminh
- __builtin_HEXAGON_A4_vrminuh(0, 0, 0);
+ __builtin_HEXAGON_A4_vrminh(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrminuh
- __builtin_HEXAGON_A4_vrminuw(0, 0, 0);
+ __builtin_HEXAGON_A4_vrminuh(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrminuw
- __builtin_HEXAGON_A4_vrminw(0, 0, 0);
+ __builtin_HEXAGON_A4_vrminuw(0, 0, 0);
// CHECK: @llvm.hexagon.A4.vrminw
- __builtin_HEXAGON_A5_vaddhubs(0, 0);
+ __builtin_HEXAGON_A4_vrminw(0, 0, 0);
// CHECK: @llvm.hexagon.A5.vaddhubs
- __builtin_HEXAGON_C2_all8(0);
+ __builtin_HEXAGON_A5_vaddhubs(0, 0);
+ // CHECK: @llvm.hexagon.A6.vcmpbeq.notany
+ __builtin_HEXAGON_A6_vcmpbeq_notany(0, 0);
+ // CHECK: @llvm.hexagon.A6.vcmpbeq.notany.128B
+ __builtin_HEXAGON_A6_vcmpbeq_notany_128B(0, 0);
// CHECK: @llvm.hexagon.C2.all8
- __builtin_HEXAGON_C2_and(0, 0);
+ __builtin_HEXAGON_C2_all8(0);
// CHECK: @llvm.hexagon.C2.and
- __builtin_HEXAGON_C2_andn(0, 0);
+ __builtin_HEXAGON_C2_and(0, 0);
// CHECK: @llvm.hexagon.C2.andn
- __builtin_HEXAGON_C2_any8(0);
+ __builtin_HEXAGON_C2_andn(0, 0);
// CHECK: @llvm.hexagon.C2.any8
- __builtin_HEXAGON_C2_bitsclr(0, 0);
+ __builtin_HEXAGON_C2_any8(0);
// CHECK: @llvm.hexagon.C2.bitsclr
- __builtin_HEXAGON_C2_bitsclri(0, 0);
+ __builtin_HEXAGON_C2_bitsclr(0, 0);
// CHECK: @llvm.hexagon.C2.bitsclri
- __builtin_HEXAGON_C2_bitsset(0, 0);
+ __builtin_HEXAGON_C2_bitsclri(0, 0);
// CHECK: @llvm.hexagon.C2.bitsset
- __builtin_HEXAGON_C2_cmpeq(0, 0);
+ __builtin_HEXAGON_C2_bitsset(0, 0);
// CHECK: @llvm.hexagon.C2.cmpeq
- __builtin_HEXAGON_C2_cmpeqi(0, 0);
+ __builtin_HEXAGON_C2_cmpeq(0, 0);
// CHECK: @llvm.hexagon.C2.cmpeqi
- __builtin_HEXAGON_C2_cmpeqp(0, 0);
+ __builtin_HEXAGON_C2_cmpeqi(0, 0);
// CHECK: @llvm.hexagon.C2.cmpeqp
- __builtin_HEXAGON_C2_cmpgei(0, 0);
+ __builtin_HEXAGON_C2_cmpeqp(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgei
- __builtin_HEXAGON_C2_cmpgeui(0, 0);
+ __builtin_HEXAGON_C2_cmpgei(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgeui
- __builtin_HEXAGON_C2_cmpgt(0, 0);
+ __builtin_HEXAGON_C2_cmpgeui(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgt
- __builtin_HEXAGON_C2_cmpgti(0, 0);
+ __builtin_HEXAGON_C2_cmpgt(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgti
- __builtin_HEXAGON_C2_cmpgtp(0, 0);
+ __builtin_HEXAGON_C2_cmpgti(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgtp
- __builtin_HEXAGON_C2_cmpgtu(0, 0);
+ __builtin_HEXAGON_C2_cmpgtp(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgtu
- __builtin_HEXAGON_C2_cmpgtui(0, 0);
+ __builtin_HEXAGON_C2_cmpgtu(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgtui
- __builtin_HEXAGON_C2_cmpgtup(0, 0);
+ __builtin_HEXAGON_C2_cmpgtui(0, 0);
// CHECK: @llvm.hexagon.C2.cmpgtup
- __builtin_HEXAGON_C2_cmplt(0, 0);
+ __builtin_HEXAGON_C2_cmpgtup(0, 0);
// CHECK: @llvm.hexagon.C2.cmplt
- __builtin_HEXAGON_C2_cmpltu(0, 0);
+ __builtin_HEXAGON_C2_cmplt(0, 0);
// CHECK: @llvm.hexagon.C2.cmpltu
- __builtin_HEXAGON_C2_mask(0);
+ __builtin_HEXAGON_C2_cmpltu(0, 0);
// CHECK: @llvm.hexagon.C2.mask
- __builtin_HEXAGON_C2_mux(0, 0, 0);
+ __builtin_HEXAGON_C2_mask(0);
// CHECK: @llvm.hexagon.C2.mux
- __builtin_HEXAGON_C2_muxii(0, 0, 0);
+ __builtin_HEXAGON_C2_mux(0, 0, 0);
// CHECK: @llvm.hexagon.C2.muxii
- __builtin_HEXAGON_C2_muxir(0, 0, 0);
+ __builtin_HEXAGON_C2_muxii(0, 0, 0);
// CHECK: @llvm.hexagon.C2.muxir
- __builtin_HEXAGON_C2_muxri(0, 0, 0);
+ __builtin_HEXAGON_C2_muxir(0, 0, 0);
// CHECK: @llvm.hexagon.C2.muxri
- __builtin_HEXAGON_C2_not(0);
+ __builtin_HEXAGON_C2_muxri(0, 0, 0);
// CHECK: @llvm.hexagon.C2.not
- __builtin_HEXAGON_C2_or (0, 0);
- // CHECK: @llvm.hexagon.C2.or
- __builtin_HEXAGON_C2_orn(0, 0);
+ __builtin_HEXAGON_C2_not(0);
+ // CHECK: @llvm.hexagon.C2.or
+ __builtin_HEXAGON_C2_or(0, 0);
// CHECK: @llvm.hexagon.C2.orn
- __builtin_HEXAGON_C2_pxfer_map(0);
+ __builtin_HEXAGON_C2_orn(0, 0);
// CHECK: @llvm.hexagon.C2.pxfer.map
- __builtin_HEXAGON_C2_tfrpr(0);
+ __builtin_HEXAGON_C2_pxfer_map(0);
// CHECK: @llvm.hexagon.C2.tfrpr
- __builtin_HEXAGON_C2_tfrrp(0);
+ __builtin_HEXAGON_C2_tfrpr(0);
// CHECK: @llvm.hexagon.C2.tfrrp
- __builtin_HEXAGON_C2_vitpack(0, 0);
+ __builtin_HEXAGON_C2_tfrrp(0);
// CHECK: @llvm.hexagon.C2.vitpack
- __builtin_HEXAGON_C2_vmux(0, 0, 0);
+ __builtin_HEXAGON_C2_vitpack(0, 0);
// CHECK: @llvm.hexagon.C2.vmux
- __builtin_HEXAGON_C2_xor(0, 0);
+ __builtin_HEXAGON_C2_vmux(0, 0, 0);
// CHECK: @llvm.hexagon.C2.xor
- __builtin_HEXAGON_C4_and_and(0, 0, 0);
+ __builtin_HEXAGON_C2_xor(0, 0);
// CHECK: @llvm.hexagon.C4.and.and
- __builtin_HEXAGON_C4_and_andn(0, 0, 0);
+ __builtin_HEXAGON_C4_and_and(0, 0, 0);
// CHECK: @llvm.hexagon.C4.and.andn
- __builtin_HEXAGON_C4_and_or(0, 0, 0);
+ __builtin_HEXAGON_C4_and_andn(0, 0, 0);
// CHECK: @llvm.hexagon.C4.and.or
- __builtin_HEXAGON_C4_and_orn(0, 0, 0);
+ __builtin_HEXAGON_C4_and_or(0, 0, 0);
// CHECK: @llvm.hexagon.C4.and.orn
- __builtin_HEXAGON_C4_cmplte(0, 0);
+ __builtin_HEXAGON_C4_and_orn(0, 0, 0);
// CHECK: @llvm.hexagon.C4.cmplte
- __builtin_HEXAGON_C4_cmpltei(0, 0);
+ __builtin_HEXAGON_C4_cmplte(0, 0);
// CHECK: @llvm.hexagon.C4.cmpltei
- __builtin_HEXAGON_C4_cmplteu(0, 0);
+ __builtin_HEXAGON_C4_cmpltei(0, 0);
// CHECK: @llvm.hexagon.C4.cmplteu
- __builtin_HEXAGON_C4_cmplteui(0, 0);
+ __builtin_HEXAGON_C4_cmplteu(0, 0);
// CHECK: @llvm.hexagon.C4.cmplteui
- __builtin_HEXAGON_C4_cmpneq(0, 0);
+ __builtin_HEXAGON_C4_cmplteui(0, 0);
// CHECK: @llvm.hexagon.C4.cmpneq
- __builtin_HEXAGON_C4_cmpneqi(0, 0);
+ __builtin_HEXAGON_C4_cmpneq(0, 0);
// CHECK: @llvm.hexagon.C4.cmpneqi
- __builtin_HEXAGON_C4_fastcorner9(0, 0);
+ __builtin_HEXAGON_C4_cmpneqi(0, 0);
// CHECK: @llvm.hexagon.C4.fastcorner9
- __builtin_HEXAGON_C4_fastcorner9_not(0, 0);
+ __builtin_HEXAGON_C4_fastcorner9(0, 0);
// CHECK: @llvm.hexagon.C4.fastcorner9.not
- __builtin_HEXAGON_C4_nbitsclr(0, 0);
+ __builtin_HEXAGON_C4_fastcorner9_not(0, 0);
// CHECK: @llvm.hexagon.C4.nbitsclr
- __builtin_HEXAGON_C4_nbitsclri(0, 0);
+ __builtin_HEXAGON_C4_nbitsclr(0, 0);
// CHECK: @llvm.hexagon.C4.nbitsclri
- __builtin_HEXAGON_C4_nbitsset(0, 0);
+ __builtin_HEXAGON_C4_nbitsclri(0, 0);
// CHECK: @llvm.hexagon.C4.nbitsset
- __builtin_HEXAGON_C4_or_and(0, 0, 0);
+ __builtin_HEXAGON_C4_nbitsset(0, 0);
// CHECK: @llvm.hexagon.C4.or.and
- __builtin_HEXAGON_C4_or_andn(0, 0, 0);
+ __builtin_HEXAGON_C4_or_and(0, 0, 0);
// CHECK: @llvm.hexagon.C4.or.andn
- __builtin_HEXAGON_C4_or_or(0, 0, 0);
+ __builtin_HEXAGON_C4_or_andn(0, 0, 0);
// CHECK: @llvm.hexagon.C4.or.or
- __builtin_HEXAGON_C4_or_orn(0, 0, 0);
+ __builtin_HEXAGON_C4_or_or(0, 0, 0);
// CHECK: @llvm.hexagon.C4.or.orn
- __builtin_HEXAGON_F2_conv_d2df(0);
+ __builtin_HEXAGON_C4_or_orn(0, 0, 0);
// CHECK: @llvm.hexagon.F2.conv.d2df
- __builtin_HEXAGON_F2_conv_d2sf(0);
+ __builtin_HEXAGON_F2_conv_d2df(0);
// CHECK: @llvm.hexagon.F2.conv.d2sf
- __builtin_HEXAGON_F2_conv_df2d(0.0);
+ __builtin_HEXAGON_F2_conv_d2sf(0);
// CHECK: @llvm.hexagon.F2.conv.df2d
- __builtin_HEXAGON_F2_conv_df2d_chop(0.0);
+ __builtin_HEXAGON_F2_conv_df2d(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2d.chop
- __builtin_HEXAGON_F2_conv_df2sf(0.0);
+ __builtin_HEXAGON_F2_conv_df2d_chop(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2sf
- __builtin_HEXAGON_F2_conv_df2ud(0.0);
+ __builtin_HEXAGON_F2_conv_df2sf(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2ud
- __builtin_HEXAGON_F2_conv_df2ud_chop(0.0);
+ __builtin_HEXAGON_F2_conv_df2ud(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2ud.chop
- __builtin_HEXAGON_F2_conv_df2uw(0.0);
+ __builtin_HEXAGON_F2_conv_df2ud_chop(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2uw
- __builtin_HEXAGON_F2_conv_df2uw_chop(0.0);
+ __builtin_HEXAGON_F2_conv_df2uw(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2uw.chop
- __builtin_HEXAGON_F2_conv_df2w(0.0);
+ __builtin_HEXAGON_F2_conv_df2uw_chop(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2w
- __builtin_HEXAGON_F2_conv_df2w_chop(0.0);
+ __builtin_HEXAGON_F2_conv_df2w(0.0);
// CHECK: @llvm.hexagon.F2.conv.df2w.chop
- __builtin_HEXAGON_F2_conv_sf2d(0.0f);
+ __builtin_HEXAGON_F2_conv_df2w_chop(0.0);
// CHECK: @llvm.hexagon.F2.conv.sf2d
- __builtin_HEXAGON_F2_conv_sf2d_chop(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2d(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2d.chop
- __builtin_HEXAGON_F2_conv_sf2df(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2d_chop(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2df
- __builtin_HEXAGON_F2_conv_sf2ud(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2df(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2ud
- __builtin_HEXAGON_F2_conv_sf2ud_chop(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2ud(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2ud.chop
- __builtin_HEXAGON_F2_conv_sf2uw(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2ud_chop(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2uw
- __builtin_HEXAGON_F2_conv_sf2uw_chop(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2uw(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2uw.chop
- __builtin_HEXAGON_F2_conv_sf2w(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2uw_chop(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2w
- __builtin_HEXAGON_F2_conv_sf2w_chop(0.0f);
+ __builtin_HEXAGON_F2_conv_sf2w(0.0f);
// CHECK: @llvm.hexagon.F2.conv.sf2w.chop
- __builtin_HEXAGON_F2_conv_ud2df(0);
+ __builtin_HEXAGON_F2_conv_sf2w_chop(0.0f);
// CHECK: @llvm.hexagon.F2.conv.ud2df
- __builtin_HEXAGON_F2_conv_ud2sf(0);
+ __builtin_HEXAGON_F2_conv_ud2df(0);
// CHECK: @llvm.hexagon.F2.conv.ud2sf
- __builtin_HEXAGON_F2_conv_uw2df(0);
+ __builtin_HEXAGON_F2_conv_ud2sf(0);
// CHECK: @llvm.hexagon.F2.conv.uw2df
- __builtin_HEXAGON_F2_conv_uw2sf(0);
+ __builtin_HEXAGON_F2_conv_uw2df(0);
// CHECK: @llvm.hexagon.F2.conv.uw2sf
- __builtin_HEXAGON_F2_conv_w2df(0);
+ __builtin_HEXAGON_F2_conv_uw2sf(0);
// CHECK: @llvm.hexagon.F2.conv.w2df
- __builtin_HEXAGON_F2_conv_w2sf(0);
+ __builtin_HEXAGON_F2_conv_w2df(0);
// CHECK: @llvm.hexagon.F2.conv.w2sf
- __builtin_HEXAGON_F2_dfclass(0.0, 0);
+ __builtin_HEXAGON_F2_conv_w2sf(0);
// CHECK: @llvm.hexagon.F2.dfclass
- __builtin_HEXAGON_F2_dfcmpeq(0.0, 0.0);
+ __builtin_HEXAGON_F2_dfclass(0.0, 0);
// CHECK: @llvm.hexagon.F2.dfcmpeq
- __builtin_HEXAGON_F2_dfcmpge(0.0, 0.0);
+ __builtin_HEXAGON_F2_dfcmpeq(0.0, 0.0);
// CHECK: @llvm.hexagon.F2.dfcmpge
- __builtin_HEXAGON_F2_dfcmpgt(0.0, 0.0);
+ __builtin_HEXAGON_F2_dfcmpge(0.0, 0.0);
// CHECK: @llvm.hexagon.F2.dfcmpgt
- __builtin_HEXAGON_F2_dfcmpuo(0.0, 0.0);
+ __builtin_HEXAGON_F2_dfcmpgt(0.0, 0.0);
// CHECK: @llvm.hexagon.F2.dfcmpuo
- __builtin_HEXAGON_F2_dfimm_n(0);
+ __builtin_HEXAGON_F2_dfcmpuo(0.0, 0.0);
// CHECK: @llvm.hexagon.F2.dfimm.n
- __builtin_HEXAGON_F2_dfimm_p(0);
+ __builtin_HEXAGON_F2_dfimm_n(0);
// CHECK: @llvm.hexagon.F2.dfimm.p
- __builtin_HEXAGON_F2_sfadd(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_dfimm_p(0);
// CHECK: @llvm.hexagon.F2.sfadd
- __builtin_HEXAGON_F2_sfclass(0.0f, 0);
+ __builtin_HEXAGON_F2_sfadd(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfclass
- __builtin_HEXAGON_F2_sfcmpeq(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfclass(0.0f, 0);
// CHECK: @llvm.hexagon.F2.sfcmpeq
- __builtin_HEXAGON_F2_sfcmpge(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfcmpeq(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfcmpge
- __builtin_HEXAGON_F2_sfcmpgt(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfcmpge(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfcmpgt
- __builtin_HEXAGON_F2_sfcmpuo(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfcmpgt(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfcmpuo
- __builtin_HEXAGON_F2_sffixupd(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfcmpuo(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sffixupd
- __builtin_HEXAGON_F2_sffixupn(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sffixupd(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sffixupn
- __builtin_HEXAGON_F2_sffixupr(0.0f);
+ __builtin_HEXAGON_F2_sffixupn(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sffixupr
- __builtin_HEXAGON_F2_sffma(0.0f, 0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sffixupr(0.0f);
// CHECK: @llvm.hexagon.F2.sffma
- __builtin_HEXAGON_F2_sffma_lib(0.0f, 0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sffma(0.0f, 0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sffma.lib
- __builtin_HEXAGON_F2_sffma_sc(0.0f, 0.0f, 0.0f, 0);
+ __builtin_HEXAGON_F2_sffma_lib(0.0f, 0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sffma.sc
- __builtin_HEXAGON_F2_sffms(0.0f, 0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sffma_sc(0.0f, 0.0f, 0.0f, 0);
// CHECK: @llvm.hexagon.F2.sffms
- __builtin_HEXAGON_F2_sffms_lib(0.0f, 0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sffms(0.0f, 0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sffms.lib
- __builtin_HEXAGON_F2_sfimm_n(0);
+ __builtin_HEXAGON_F2_sffms_lib(0.0f, 0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfimm.n
- __builtin_HEXAGON_F2_sfimm_p(0);
+ __builtin_HEXAGON_F2_sfimm_n(0);
// CHECK: @llvm.hexagon.F2.sfimm.p
- __builtin_HEXAGON_F2_sfmax(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfimm_p(0);
// CHECK: @llvm.hexagon.F2.sfmax
- __builtin_HEXAGON_F2_sfmin(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfmax(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfmin
- __builtin_HEXAGON_F2_sfmpy(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfmin(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfmpy
- __builtin_HEXAGON_F2_sfsub(0.0f, 0.0f);
+ __builtin_HEXAGON_F2_sfmpy(0.0f, 0.0f);
// CHECK: @llvm.hexagon.F2.sfsub
- __builtin_HEXAGON_M2_acci(0, 0, 0);
+ __builtin_HEXAGON_F2_sfsub(0.0f, 0.0f);
// CHECK: @llvm.hexagon.M2.acci
- __builtin_HEXAGON_M2_accii(0, 0, 0);
+ __builtin_HEXAGON_M2_acci(0, 0, 0);
// CHECK: @llvm.hexagon.M2.accii
- __builtin_HEXAGON_M2_cmaci_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_accii(0, 0, 0);
// CHECK: @llvm.hexagon.M2.cmaci.s0
- __builtin_HEXAGON_M2_cmacr_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_cmaci_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.cmacr.s0
- __builtin_HEXAGON_M2_cmacsc_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.cmacsc.s0
- __builtin_HEXAGON_M2_cmacsc_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.cmacsc.s1
- __builtin_HEXAGON_M2_cmacs_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_cmacr_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.cmacs.s0
- __builtin_HEXAGON_M2_cmacs_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_cmacs_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.cmacs.s1
- __builtin_HEXAGON_M2_cmpyi_s0(0, 0);
+ __builtin_HEXAGON_M2_cmacs_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.cmacsc.s0
+ __builtin_HEXAGON_M2_cmacsc_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.cmacsc.s1
+ __builtin_HEXAGON_M2_cmacsc_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.cmpyi.s0
- __builtin_HEXAGON_M2_cmpyr_s0(0, 0);
+ __builtin_HEXAGON_M2_cmpyi_s0(0, 0);
// CHECK: @llvm.hexagon.M2.cmpyr.s0
- __builtin_HEXAGON_M2_cmpyrsc_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.cmpyrsc.s0
- __builtin_HEXAGON_M2_cmpyrsc_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.cmpyrsc.s1
- __builtin_HEXAGON_M2_cmpyrs_s0(0, 0);
+ __builtin_HEXAGON_M2_cmpyr_s0(0, 0);
// CHECK: @llvm.hexagon.M2.cmpyrs.s0
- __builtin_HEXAGON_M2_cmpyrs_s1(0, 0);
+ __builtin_HEXAGON_M2_cmpyrs_s0(0, 0);
// CHECK: @llvm.hexagon.M2.cmpyrs.s1
- __builtin_HEXAGON_M2_cmpysc_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.cmpysc.s0
- __builtin_HEXAGON_M2_cmpysc_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.cmpysc.s1
- __builtin_HEXAGON_M2_cmpys_s0(0, 0);
+ __builtin_HEXAGON_M2_cmpyrs_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.cmpyrsc.s0
+ __builtin_HEXAGON_M2_cmpyrsc_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.cmpyrsc.s1
+ __builtin_HEXAGON_M2_cmpyrsc_s1(0, 0);
// CHECK: @llvm.hexagon.M2.cmpys.s0
- __builtin_HEXAGON_M2_cmpys_s1(0, 0);
+ __builtin_HEXAGON_M2_cmpys_s0(0, 0);
// CHECK: @llvm.hexagon.M2.cmpys.s1
- __builtin_HEXAGON_M2_cnacsc_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.cnacsc.s0
- __builtin_HEXAGON_M2_cnacsc_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.cnacsc.s1
- __builtin_HEXAGON_M2_cnacs_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_cmpys_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.cmpysc.s0
+ __builtin_HEXAGON_M2_cmpysc_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.cmpysc.s1
+ __builtin_HEXAGON_M2_cmpysc_s1(0, 0);
// CHECK: @llvm.hexagon.M2.cnacs.s0
- __builtin_HEXAGON_M2_cnacs_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_cnacs_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.cnacs.s1
- __builtin_HEXAGON_M2_dpmpyss_acc_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_cnacs_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.cnacsc.s0
+ __builtin_HEXAGON_M2_cnacsc_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.cnacsc.s1
+ __builtin_HEXAGON_M2_cnacsc_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyss.acc.s0
- __builtin_HEXAGON_M2_dpmpyss_nac_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_dpmpyss_acc_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyss.nac.s0
- __builtin_HEXAGON_M2_dpmpyss_rnd_s0(0, 0);
+ __builtin_HEXAGON_M2_dpmpyss_nac_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyss.rnd.s0
- __builtin_HEXAGON_M2_dpmpyss_s0(0, 0);
+ __builtin_HEXAGON_M2_dpmpyss_rnd_s0(0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyss.s0
- __builtin_HEXAGON_M2_dpmpyuu_acc_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_dpmpyss_s0(0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyuu.acc.s0
- __builtin_HEXAGON_M2_dpmpyuu_nac_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_dpmpyuu_acc_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyuu.nac.s0
- __builtin_HEXAGON_M2_dpmpyuu_s0(0, 0);
+ __builtin_HEXAGON_M2_dpmpyuu_nac_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.dpmpyuu.s0
- __builtin_HEXAGON_M2_hmmpyh_rs1(0, 0);
+ __builtin_HEXAGON_M2_dpmpyuu_s0(0, 0);
// CHECK: @llvm.hexagon.M2.hmmpyh.rs1
- __builtin_HEXAGON_M2_hmmpyh_s1(0, 0);
+ __builtin_HEXAGON_M2_hmmpyh_rs1(0, 0);
// CHECK: @llvm.hexagon.M2.hmmpyh.s1
- __builtin_HEXAGON_M2_hmmpyl_rs1(0, 0);
+ __builtin_HEXAGON_M2_hmmpyh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.hmmpyl.rs1
- __builtin_HEXAGON_M2_hmmpyl_s1(0, 0);
+ __builtin_HEXAGON_M2_hmmpyl_rs1(0, 0);
// CHECK: @llvm.hexagon.M2.hmmpyl.s1
- __builtin_HEXAGON_M2_maci(0, 0, 0);
+ __builtin_HEXAGON_M2_hmmpyl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.maci
- __builtin_HEXAGON_M2_macsin(0, 0, 0);
+ __builtin_HEXAGON_M2_maci(0, 0, 0);
// CHECK: @llvm.hexagon.M2.macsin
- __builtin_HEXAGON_M2_macsip(0, 0, 0);
+ __builtin_HEXAGON_M2_macsin(0, 0, 0);
// CHECK: @llvm.hexagon.M2.macsip
- __builtin_HEXAGON_M2_mmachs_rs0(0, 0, 0);
+ __builtin_HEXAGON_M2_macsip(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmachs.rs0
- __builtin_HEXAGON_M2_mmachs_rs1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmachs_rs0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmachs.rs1
- __builtin_HEXAGON_M2_mmachs_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmachs_rs1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmachs.s0
- __builtin_HEXAGON_M2_mmachs_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmachs_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmachs.s1
- __builtin_HEXAGON_M2_mmacls_rs0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmachs_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacls.rs0
- __builtin_HEXAGON_M2_mmacls_rs1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacls_rs0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacls.rs1
- __builtin_HEXAGON_M2_mmacls_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacls_rs1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacls.s0
- __builtin_HEXAGON_M2_mmacls_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacls_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacls.s1
- __builtin_HEXAGON_M2_mmacuhs_rs0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacls_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacuhs.rs0
- __builtin_HEXAGON_M2_mmacuhs_rs1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacuhs_rs0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacuhs.rs1
- __builtin_HEXAGON_M2_mmacuhs_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacuhs_rs1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacuhs.s0
- __builtin_HEXAGON_M2_mmacuhs_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacuhs_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmacuhs.s1
- __builtin_HEXAGON_M2_mmaculs_rs0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmacuhs_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmaculs.rs0
- __builtin_HEXAGON_M2_mmaculs_rs1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmaculs_rs0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmaculs.rs1
- __builtin_HEXAGON_M2_mmaculs_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmaculs_rs1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmaculs.s0
- __builtin_HEXAGON_M2_mmaculs_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mmaculs_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmaculs.s1
- __builtin_HEXAGON_M2_mmpyh_rs0(0, 0);
+ __builtin_HEXAGON_M2_mmaculs_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mmpyh.rs0
- __builtin_HEXAGON_M2_mmpyh_rs1(0, 0);
+ __builtin_HEXAGON_M2_mmpyh_rs0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyh.rs1
- __builtin_HEXAGON_M2_mmpyh_s0(0, 0);
+ __builtin_HEXAGON_M2_mmpyh_rs1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyh.s0
- __builtin_HEXAGON_M2_mmpyh_s1(0, 0);
+ __builtin_HEXAGON_M2_mmpyh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyh.s1
- __builtin_HEXAGON_M2_mmpyl_rs0(0, 0);
+ __builtin_HEXAGON_M2_mmpyh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyl.rs0
- __builtin_HEXAGON_M2_mmpyl_rs1(0, 0);
+ __builtin_HEXAGON_M2_mmpyl_rs0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyl.rs1
- __builtin_HEXAGON_M2_mmpyl_s0(0, 0);
+ __builtin_HEXAGON_M2_mmpyl_rs1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyl.s0
- __builtin_HEXAGON_M2_mmpyl_s1(0, 0);
+ __builtin_HEXAGON_M2_mmpyl_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyl.s1
- __builtin_HEXAGON_M2_mmpyuh_rs0(0, 0);
+ __builtin_HEXAGON_M2_mmpyl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyuh.rs0
- __builtin_HEXAGON_M2_mmpyuh_rs1(0, 0);
+ __builtin_HEXAGON_M2_mmpyuh_rs0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyuh.rs1
- __builtin_HEXAGON_M2_mmpyuh_s0(0, 0);
+ __builtin_HEXAGON_M2_mmpyuh_rs1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyuh.s0
- __builtin_HEXAGON_M2_mmpyuh_s1(0, 0);
+ __builtin_HEXAGON_M2_mmpyuh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyuh.s1
- __builtin_HEXAGON_M2_mmpyul_rs0(0, 0);
+ __builtin_HEXAGON_M2_mmpyuh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyul.rs0
- __builtin_HEXAGON_M2_mmpyul_rs1(0, 0);
+ __builtin_HEXAGON_M2_mmpyul_rs0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyul.rs1
- __builtin_HEXAGON_M2_mmpyul_s0(0, 0);
+ __builtin_HEXAGON_M2_mmpyul_rs1(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyul.s0
- __builtin_HEXAGON_M2_mmpyul_s1(0, 0);
+ __builtin_HEXAGON_M2_mmpyul_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mmpyul.s1
- __builtin_HEXAGON_M2_mpy_acc_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mmpyul_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.hh.s0
- __builtin_HEXAGON_M2_mpy_acc_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.hh.s1
- __builtin_HEXAGON_M2_mpy_acc_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.hl.s0
- __builtin_HEXAGON_M2_mpy_acc_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.hl.s1
- __builtin_HEXAGON_M2_mpy_acc_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.lh.s0
- __builtin_HEXAGON_M2_mpy_acc_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.lh.s1
- __builtin_HEXAGON_M2_mpy_acc_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.ll.s0
- __builtin_HEXAGON_M2_mpy_acc_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.ll.s1
- __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_ll_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.hh.s0
- __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.hh.s1
- __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.hl.s0
- __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.hl.s1
- __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.lh.s0
- __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.lh.s1
- __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.ll.s0
- __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.acc.sat.ll.s1
- __builtin_HEXAGON_M2_mpyd_acc_hh_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.hh.s0
- __builtin_HEXAGON_M2_mpyd_acc_hh_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.hh.s1
- __builtin_HEXAGON_M2_mpyd_acc_hl_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.hl.s0
- __builtin_HEXAGON_M2_mpyd_acc_hl_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.hl.s1
- __builtin_HEXAGON_M2_mpyd_acc_lh_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.lh.s0
- __builtin_HEXAGON_M2_mpyd_acc_lh_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.lh.s1
- __builtin_HEXAGON_M2_mpyd_acc_ll_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.ll.s0
- __builtin_HEXAGON_M2_mpyd_acc_ll_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.acc.ll.s1
- __builtin_HEXAGON_M2_mpyd_hh_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.hh.s0
- __builtin_HEXAGON_M2_mpyd_hh_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.hh.s1
- __builtin_HEXAGON_M2_mpyd_hl_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.hl.s0
- __builtin_HEXAGON_M2_mpyd_hl_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.hl.s1
- __builtin_HEXAGON_M2_mpyd_lh_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.lh.s0
- __builtin_HEXAGON_M2_mpyd_lh_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.lh.s1
- __builtin_HEXAGON_M2_mpyd_ll_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.ll.s0
- __builtin_HEXAGON_M2_mpyd_ll_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.ll.s1
- __builtin_HEXAGON_M2_mpyd_nac_hh_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.hh.s0
- __builtin_HEXAGON_M2_mpyd_nac_hh_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.hh.s1
- __builtin_HEXAGON_M2_mpyd_nac_hl_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.hl.s0
- __builtin_HEXAGON_M2_mpyd_nac_hl_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.hl.s1
- __builtin_HEXAGON_M2_mpyd_nac_lh_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.lh.s0
- __builtin_HEXAGON_M2_mpyd_nac_lh_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.lh.s1
- __builtin_HEXAGON_M2_mpyd_nac_ll_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.ll.s0
- __builtin_HEXAGON_M2_mpyd_nac_ll_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.nac.ll.s1
- __builtin_HEXAGON_M2_mpyd_rnd_hh_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.hh.s0
- __builtin_HEXAGON_M2_mpyd_rnd_hh_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.hh.s1
- __builtin_HEXAGON_M2_mpyd_rnd_hl_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.hl.s0
- __builtin_HEXAGON_M2_mpyd_rnd_hl_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.hl.s1
- __builtin_HEXAGON_M2_mpyd_rnd_lh_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.lh.s0
- __builtin_HEXAGON_M2_mpyd_rnd_lh_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.lh.s1
- __builtin_HEXAGON_M2_mpyd_rnd_ll_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.ll.s0
- __builtin_HEXAGON_M2_mpyd_rnd_ll_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyd.rnd.ll.s1
- __builtin_HEXAGON_M2_mpy_hh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.hh.s0
- __builtin_HEXAGON_M2_mpy_hh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_hh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.hh.s1
- __builtin_HEXAGON_M2_mpy_hl_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_hh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.hl.s0
- __builtin_HEXAGON_M2_mpy_hl_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_hl_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.hl.s1
- __builtin_HEXAGON_M2_mpyi(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyi
- __builtin_HEXAGON_M2_mpy_lh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_hl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.lh.s0
- __builtin_HEXAGON_M2_mpy_lh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_lh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.lh.s1
- __builtin_HEXAGON_M2_mpy_ll_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_lh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.ll.s0
- __builtin_HEXAGON_M2_mpy_ll_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_ll_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.ll.s1
- __builtin_HEXAGON_M2_mpy_nac_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_ll_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.hh.s0
- __builtin_HEXAGON_M2_mpy_nac_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.hh.s1
- __builtin_HEXAGON_M2_mpy_nac_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.hl.s0
- __builtin_HEXAGON_M2_mpy_nac_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.hl.s1
- __builtin_HEXAGON_M2_mpy_nac_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.lh.s0
- __builtin_HEXAGON_M2_mpy_nac_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.lh.s1
- __builtin_HEXAGON_M2_mpy_nac_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.ll.s0
- __builtin_HEXAGON_M2_mpy_nac_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.ll.s1
- __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_ll_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.hh.s0
- __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.hh.s1
- __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.hl.s0
- __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.hl.s1
- __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.lh.s0
- __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.lh.s1
- __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.ll.s0
- __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.nac.sat.ll.s1
- __builtin_HEXAGON_M2_mpy_rnd_hh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.hh.s0
- __builtin_HEXAGON_M2_mpy_rnd_hh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_hh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.hh.s1
- __builtin_HEXAGON_M2_mpy_rnd_hl_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_hh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.hl.s0
- __builtin_HEXAGON_M2_mpy_rnd_hl_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_hl_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.hl.s1
- __builtin_HEXAGON_M2_mpy_rnd_lh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_hl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.lh.s0
- __builtin_HEXAGON_M2_mpy_rnd_lh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_lh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.lh.s1
- __builtin_HEXAGON_M2_mpy_rnd_ll_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_lh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.ll.s0
- __builtin_HEXAGON_M2_mpy_rnd_ll_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_ll_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.rnd.ll.s1
- __builtin_HEXAGON_M2_mpy_sat_hh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_rnd_ll_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.hh.s0
- __builtin_HEXAGON_M2_mpy_sat_hh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_hh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.hh.s1
- __builtin_HEXAGON_M2_mpy_sat_hl_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_hh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.hl.s0
- __builtin_HEXAGON_M2_mpy_sat_hl_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_hl_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.hl.s1
- __builtin_HEXAGON_M2_mpy_sat_lh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_hl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.lh.s0
- __builtin_HEXAGON_M2_mpy_sat_lh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_lh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.lh.s1
- __builtin_HEXAGON_M2_mpy_sat_ll_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_lh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.ll.s0
- __builtin_HEXAGON_M2_mpy_sat_ll_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_ll_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.ll.s1
- __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_ll_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.hh.s0
- __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.hh.s1
- __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.hl.s0
- __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.hl.s1
- __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.lh.s0
- __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.lh.s1
- __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.ll.s0
- __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpy.sat.rnd.ll.s1
- __builtin_HEXAGON_M2_mpysmi(0, 0);
+ __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpy.up
+ __builtin_HEXAGON_M2_mpy_up(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpy.up.s1
+ __builtin_HEXAGON_M2_mpy_up_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpy.up.s1.sat
+ __builtin_HEXAGON_M2_mpy_up_s1_sat(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.hh.s0
+ __builtin_HEXAGON_M2_mpyd_acc_hh_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.hh.s1
+ __builtin_HEXAGON_M2_mpyd_acc_hh_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.hl.s0
+ __builtin_HEXAGON_M2_mpyd_acc_hl_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.hl.s1
+ __builtin_HEXAGON_M2_mpyd_acc_hl_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.lh.s0
+ __builtin_HEXAGON_M2_mpyd_acc_lh_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.lh.s1
+ __builtin_HEXAGON_M2_mpyd_acc_lh_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.ll.s0
+ __builtin_HEXAGON_M2_mpyd_acc_ll_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.acc.ll.s1
+ __builtin_HEXAGON_M2_mpyd_acc_ll_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.hh.s0
+ __builtin_HEXAGON_M2_mpyd_hh_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.hh.s1
+ __builtin_HEXAGON_M2_mpyd_hh_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.hl.s0
+ __builtin_HEXAGON_M2_mpyd_hl_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.hl.s1
+ __builtin_HEXAGON_M2_mpyd_hl_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.lh.s0
+ __builtin_HEXAGON_M2_mpyd_lh_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.lh.s1
+ __builtin_HEXAGON_M2_mpyd_lh_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.ll.s0
+ __builtin_HEXAGON_M2_mpyd_ll_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.ll.s1
+ __builtin_HEXAGON_M2_mpyd_ll_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.hh.s0
+ __builtin_HEXAGON_M2_mpyd_nac_hh_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.hh.s1
+ __builtin_HEXAGON_M2_mpyd_nac_hh_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.hl.s0
+ __builtin_HEXAGON_M2_mpyd_nac_hl_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.hl.s1
+ __builtin_HEXAGON_M2_mpyd_nac_hl_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.lh.s0
+ __builtin_HEXAGON_M2_mpyd_nac_lh_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.lh.s1
+ __builtin_HEXAGON_M2_mpyd_nac_lh_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.ll.s0
+ __builtin_HEXAGON_M2_mpyd_nac_ll_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.nac.ll.s1
+ __builtin_HEXAGON_M2_mpyd_nac_ll_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.hh.s0
+ __builtin_HEXAGON_M2_mpyd_rnd_hh_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.hh.s1
+ __builtin_HEXAGON_M2_mpyd_rnd_hh_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.hl.s0
+ __builtin_HEXAGON_M2_mpyd_rnd_hl_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.hl.s1
+ __builtin_HEXAGON_M2_mpyd_rnd_hl_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.lh.s0
+ __builtin_HEXAGON_M2_mpyd_rnd_lh_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.lh.s1
+ __builtin_HEXAGON_M2_mpyd_rnd_lh_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.ll.s0
+ __builtin_HEXAGON_M2_mpyd_rnd_ll_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyd.rnd.ll.s1
+ __builtin_HEXAGON_M2_mpyd_rnd_ll_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyi
+ __builtin_HEXAGON_M2_mpyi(0, 0);
// CHECK: @llvm.hexagon.M2.mpysmi
- __builtin_HEXAGON_M2_mpysu_up(0, 0);
+ __builtin_HEXAGON_M2_mpysmi(0, 0);
// CHECK: @llvm.hexagon.M2.mpysu.up
- __builtin_HEXAGON_M2_mpyu_acc_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpysu_up(0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.hh.s0
- __builtin_HEXAGON_M2_mpyu_acc_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.hh.s1
- __builtin_HEXAGON_M2_mpyu_acc_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.hl.s0
- __builtin_HEXAGON_M2_mpyu_acc_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.hl.s1
- __builtin_HEXAGON_M2_mpyu_acc_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.lh.s0
- __builtin_HEXAGON_M2_mpyu_acc_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.lh.s1
- __builtin_HEXAGON_M2_mpyu_acc_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.ll.s0
- __builtin_HEXAGON_M2_mpyu_acc_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyu.acc.ll.s1
- __builtin_HEXAGON_M2_mpyud_acc_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyu_acc_ll_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.hh.s0
+ __builtin_HEXAGON_M2_mpyu_hh_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.hh.s1
+ __builtin_HEXAGON_M2_mpyu_hh_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.hl.s0
+ __builtin_HEXAGON_M2_mpyu_hl_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.hl.s1
+ __builtin_HEXAGON_M2_mpyu_hl_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.lh.s0
+ __builtin_HEXAGON_M2_mpyu_lh_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.lh.s1
+ __builtin_HEXAGON_M2_mpyu_lh_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.ll.s0
+ __builtin_HEXAGON_M2_mpyu_ll_s0(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.ll.s1
+ __builtin_HEXAGON_M2_mpyu_ll_s1(0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.hh.s0
+ __builtin_HEXAGON_M2_mpyu_nac_hh_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.hh.s1
+ __builtin_HEXAGON_M2_mpyu_nac_hh_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.hl.s0
+ __builtin_HEXAGON_M2_mpyu_nac_hl_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.hl.s1
+ __builtin_HEXAGON_M2_mpyu_nac_hl_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.lh.s0
+ __builtin_HEXAGON_M2_mpyu_nac_lh_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.lh.s1
+ __builtin_HEXAGON_M2_mpyu_nac_lh_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.ll.s0
+ __builtin_HEXAGON_M2_mpyu_nac_ll_s0(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.nac.ll.s1
+ __builtin_HEXAGON_M2_mpyu_nac_ll_s1(0, 0, 0);
+ // CHECK: @llvm.hexagon.M2.mpyu.up
+ __builtin_HEXAGON_M2_mpyu_up(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.hh.s0
- __builtin_HEXAGON_M2_mpyud_acc_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.hh.s1
- __builtin_HEXAGON_M2_mpyud_acc_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.hl.s0
- __builtin_HEXAGON_M2_mpyud_acc_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.hl.s1
- __builtin_HEXAGON_M2_mpyud_acc_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.lh.s0
- __builtin_HEXAGON_M2_mpyud_acc_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.lh.s1
- __builtin_HEXAGON_M2_mpyud_acc_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.ll.s0
- __builtin_HEXAGON_M2_mpyud_acc_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.acc.ll.s1
- __builtin_HEXAGON_M2_mpyud_hh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpyud_acc_ll_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.hh.s0
- __builtin_HEXAGON_M2_mpyud_hh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpyud_hh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.hh.s1
- __builtin_HEXAGON_M2_mpyud_hl_s0(0, 0);
+ __builtin_HEXAGON_M2_mpyud_hh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.hl.s0
- __builtin_HEXAGON_M2_mpyud_hl_s1(0, 0);
+ __builtin_HEXAGON_M2_mpyud_hl_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.hl.s1
- __builtin_HEXAGON_M2_mpyud_lh_s0(0, 0);
+ __builtin_HEXAGON_M2_mpyud_hl_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.lh.s0
- __builtin_HEXAGON_M2_mpyud_lh_s1(0, 0);
+ __builtin_HEXAGON_M2_mpyud_lh_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.lh.s1
- __builtin_HEXAGON_M2_mpyud_ll_s0(0, 0);
+ __builtin_HEXAGON_M2_mpyud_lh_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.ll.s0
- __builtin_HEXAGON_M2_mpyud_ll_s1(0, 0);
+ __builtin_HEXAGON_M2_mpyud_ll_s0(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.ll.s1
- __builtin_HEXAGON_M2_mpyud_nac_hh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_ll_s1(0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.hh.s0
- __builtin_HEXAGON_M2_mpyud_nac_hh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_hh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.hh.s1
- __builtin_HEXAGON_M2_mpyud_nac_hl_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_hh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.hl.s0
- __builtin_HEXAGON_M2_mpyud_nac_hl_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_hl_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.hl.s1
- __builtin_HEXAGON_M2_mpyud_nac_lh_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_hl_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.lh.s0
- __builtin_HEXAGON_M2_mpyud_nac_lh_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_lh_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.lh.s1
- __builtin_HEXAGON_M2_mpyud_nac_ll_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_lh_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.ll.s0
- __builtin_HEXAGON_M2_mpyud_nac_ll_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_ll_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyud.nac.ll.s1
- __builtin_HEXAGON_M2_mpyu_hh_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.hh.s0
- __builtin_HEXAGON_M2_mpyu_hh_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.hh.s1
- __builtin_HEXAGON_M2_mpyu_hl_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.hl.s0
- __builtin_HEXAGON_M2_mpyu_hl_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.hl.s1
- __builtin_HEXAGON_M2_mpyui(0, 0);
+ __builtin_HEXAGON_M2_mpyud_nac_ll_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.mpyui
- __builtin_HEXAGON_M2_mpyu_lh_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.lh.s0
- __builtin_HEXAGON_M2_mpyu_lh_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.lh.s1
- __builtin_HEXAGON_M2_mpyu_ll_s0(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.ll.s0
- __builtin_HEXAGON_M2_mpyu_ll_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.ll.s1
- __builtin_HEXAGON_M2_mpyu_nac_hh_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.hh.s0
- __builtin_HEXAGON_M2_mpyu_nac_hh_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.hh.s1
- __builtin_HEXAGON_M2_mpyu_nac_hl_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.hl.s0
- __builtin_HEXAGON_M2_mpyu_nac_hl_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.hl.s1
- __builtin_HEXAGON_M2_mpyu_nac_lh_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.lh.s0
- __builtin_HEXAGON_M2_mpyu_nac_lh_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.lh.s1
- __builtin_HEXAGON_M2_mpyu_nac_ll_s0(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.ll.s0
- __builtin_HEXAGON_M2_mpyu_nac_ll_s1(0, 0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.nac.ll.s1
- __builtin_HEXAGON_M2_mpy_up(0, 0);
- // CHECK: @llvm.hexagon.M2.mpy.up
- __builtin_HEXAGON_M2_mpy_up_s1(0, 0);
- // CHECK: @llvm.hexagon.M2.mpy.up.s1
- __builtin_HEXAGON_M2_mpy_up_s1_sat(0, 0);
- // CHECK: @llvm.hexagon.M2.mpy.up.s1.sat
- __builtin_HEXAGON_M2_mpyu_up(0, 0);
- // CHECK: @llvm.hexagon.M2.mpyu.up
- __builtin_HEXAGON_M2_nacci(0, 0, 0);
+ __builtin_HEXAGON_M2_mpyui(0, 0);
// CHECK: @llvm.hexagon.M2.nacci
- __builtin_HEXAGON_M2_naccii(0, 0, 0);
+ __builtin_HEXAGON_M2_nacci(0, 0, 0);
// CHECK: @llvm.hexagon.M2.naccii
- __builtin_HEXAGON_M2_subacc(0, 0, 0);
+ __builtin_HEXAGON_M2_naccii(0, 0, 0);
// CHECK: @llvm.hexagon.M2.subacc
- __builtin_HEXAGON_M2_vabsdiffh(0, 0);
+ __builtin_HEXAGON_M2_subacc(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vabsdiffh
- __builtin_HEXAGON_M2_vabsdiffw(0, 0);
+ __builtin_HEXAGON_M2_vabsdiffh(0, 0);
// CHECK: @llvm.hexagon.M2.vabsdiffw
- __builtin_HEXAGON_M2_vcmac_s0_sat_i(0, 0, 0);
+ __builtin_HEXAGON_M2_vabsdiffw(0, 0);
// CHECK: @llvm.hexagon.M2.vcmac.s0.sat.i
- __builtin_HEXAGON_M2_vcmac_s0_sat_r(0, 0, 0);
+ __builtin_HEXAGON_M2_vcmac_s0_sat_i(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vcmac.s0.sat.r
- __builtin_HEXAGON_M2_vcmpy_s0_sat_i(0, 0);
+ __builtin_HEXAGON_M2_vcmac_s0_sat_r(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vcmpy.s0.sat.i
- __builtin_HEXAGON_M2_vcmpy_s0_sat_r(0, 0);
+ __builtin_HEXAGON_M2_vcmpy_s0_sat_i(0, 0);
// CHECK: @llvm.hexagon.M2.vcmpy.s0.sat.r
- __builtin_HEXAGON_M2_vcmpy_s1_sat_i(0, 0);
+ __builtin_HEXAGON_M2_vcmpy_s0_sat_r(0, 0);
// CHECK: @llvm.hexagon.M2.vcmpy.s1.sat.i
- __builtin_HEXAGON_M2_vcmpy_s1_sat_r(0, 0);
+ __builtin_HEXAGON_M2_vcmpy_s1_sat_i(0, 0);
// CHECK: @llvm.hexagon.M2.vcmpy.s1.sat.r
- __builtin_HEXAGON_M2_vdmacs_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vcmpy_s1_sat_r(0, 0);
// CHECK: @llvm.hexagon.M2.vdmacs.s0
- __builtin_HEXAGON_M2_vdmacs_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_vdmacs_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vdmacs.s1
- __builtin_HEXAGON_M2_vdmpyrs_s0(0, 0);
+ __builtin_HEXAGON_M2_vdmacs_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vdmpyrs.s0
- __builtin_HEXAGON_M2_vdmpyrs_s1(0, 0);
+ __builtin_HEXAGON_M2_vdmpyrs_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vdmpyrs.s1
- __builtin_HEXAGON_M2_vdmpys_s0(0, 0);
+ __builtin_HEXAGON_M2_vdmpyrs_s1(0, 0);
// CHECK: @llvm.hexagon.M2.vdmpys.s0
- __builtin_HEXAGON_M2_vdmpys_s1(0, 0);
+ __builtin_HEXAGON_M2_vdmpys_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vdmpys.s1
- __builtin_HEXAGON_M2_vmac2(0, 0, 0);
+ __builtin_HEXAGON_M2_vdmpys_s1(0, 0);
// CHECK: @llvm.hexagon.M2.vmac2
- __builtin_HEXAGON_M2_vmac2es(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2es
- __builtin_HEXAGON_M2_vmac2es_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2es(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2es.s0
- __builtin_HEXAGON_M2_vmac2es_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2es_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2es.s1
- __builtin_HEXAGON_M2_vmac2s_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2es_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2s.s0
- __builtin_HEXAGON_M2_vmac2s_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2s_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2s.s1
- __builtin_HEXAGON_M2_vmac2su_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2s_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2su.s0
- __builtin_HEXAGON_M2_vmac2su_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_vmac2su_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmac2su.s1
- __builtin_HEXAGON_M2_vmpy2es_s0(0, 0);
+ __builtin_HEXAGON_M2_vmac2su_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2es.s0
- __builtin_HEXAGON_M2_vmpy2es_s1(0, 0);
+ __builtin_HEXAGON_M2_vmpy2es_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2es.s1
- __builtin_HEXAGON_M2_vmpy2s_s0(0, 0);
+ __builtin_HEXAGON_M2_vmpy2es_s1(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2s.s0
- __builtin_HEXAGON_M2_vmpy2s_s0pack(0, 0);
+ __builtin_HEXAGON_M2_vmpy2s_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2s.s0pack
- __builtin_HEXAGON_M2_vmpy2s_s1(0, 0);
+ __builtin_HEXAGON_M2_vmpy2s_s0pack(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2s.s1
- __builtin_HEXAGON_M2_vmpy2s_s1pack(0, 0);
+ __builtin_HEXAGON_M2_vmpy2s_s1(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2s.s1pack
- __builtin_HEXAGON_M2_vmpy2su_s0(0, 0);
+ __builtin_HEXAGON_M2_vmpy2s_s1pack(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2su.s0
- __builtin_HEXAGON_M2_vmpy2su_s1(0, 0);
+ __builtin_HEXAGON_M2_vmpy2su_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vmpy2su.s1
- __builtin_HEXAGON_M2_vraddh(0, 0);
+ __builtin_HEXAGON_M2_vmpy2su_s1(0, 0);
// CHECK: @llvm.hexagon.M2.vraddh
- __builtin_HEXAGON_M2_vradduh(0, 0);
+ __builtin_HEXAGON_M2_vraddh(0, 0);
// CHECK: @llvm.hexagon.M2.vradduh
- __builtin_HEXAGON_M2_vrcmaci_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vradduh(0, 0);
// CHECK: @llvm.hexagon.M2.vrcmaci.s0
- __builtin_HEXAGON_M2_vrcmaci_s0c(0, 0, 0);
+ __builtin_HEXAGON_M2_vrcmaci_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vrcmaci.s0c
- __builtin_HEXAGON_M2_vrcmacr_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vrcmaci_s0c(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vrcmacr.s0
- __builtin_HEXAGON_M2_vrcmacr_s0c(0, 0, 0);
+ __builtin_HEXAGON_M2_vrcmacr_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vrcmacr.s0c
- __builtin_HEXAGON_M2_vrcmpyi_s0(0, 0);
+ __builtin_HEXAGON_M2_vrcmacr_s0c(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpyi.s0
- __builtin_HEXAGON_M2_vrcmpyi_s0c(0, 0);
+ __builtin_HEXAGON_M2_vrcmpyi_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpyi.s0c
- __builtin_HEXAGON_M2_vrcmpyr_s0(0, 0);
+ __builtin_HEXAGON_M2_vrcmpyi_s0c(0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpyr.s0
- __builtin_HEXAGON_M2_vrcmpyr_s0c(0, 0);
+ __builtin_HEXAGON_M2_vrcmpyr_s0(0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpyr.s0c
- __builtin_HEXAGON_M2_vrcmpys_acc_s1(0, 0, 0);
+ __builtin_HEXAGON_M2_vrcmpyr_s0c(0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpys.acc.s1
- __builtin_HEXAGON_M2_vrcmpys_s1(0, 0);
+ __builtin_HEXAGON_M2_vrcmpys_acc_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpys.s1
- __builtin_HEXAGON_M2_vrcmpys_s1rp(0, 0);
+ __builtin_HEXAGON_M2_vrcmpys_s1(0, 0);
// CHECK: @llvm.hexagon.M2.vrcmpys.s1rp
- __builtin_HEXAGON_M2_vrmac_s0(0, 0, 0);
+ __builtin_HEXAGON_M2_vrcmpys_s1rp(0, 0);
// CHECK: @llvm.hexagon.M2.vrmac.s0
- __builtin_HEXAGON_M2_vrmpy_s0(0, 0);
+ __builtin_HEXAGON_M2_vrmac_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M2.vrmpy.s0
- __builtin_HEXAGON_M2_xor_xacc(0, 0, 0);
+ __builtin_HEXAGON_M2_vrmpy_s0(0, 0);
// CHECK: @llvm.hexagon.M2.xor.xacc
- __builtin_HEXAGON_M4_and_and(0, 0, 0);
+ __builtin_HEXAGON_M2_xor_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.M4.and.and
- __builtin_HEXAGON_M4_and_andn(0, 0, 0);
+ __builtin_HEXAGON_M4_and_and(0, 0, 0);
// CHECK: @llvm.hexagon.M4.and.andn
- __builtin_HEXAGON_M4_and_or(0, 0, 0);
+ __builtin_HEXAGON_M4_and_andn(0, 0, 0);
// CHECK: @llvm.hexagon.M4.and.or
- __builtin_HEXAGON_M4_and_xor(0, 0, 0);
+ __builtin_HEXAGON_M4_and_or(0, 0, 0);
// CHECK: @llvm.hexagon.M4.and.xor
- __builtin_HEXAGON_M4_cmpyi_wh(0, 0);
+ __builtin_HEXAGON_M4_and_xor(0, 0, 0);
// CHECK: @llvm.hexagon.M4.cmpyi.wh
- __builtin_HEXAGON_M4_cmpyi_whc(0, 0);
+ __builtin_HEXAGON_M4_cmpyi_wh(0, 0);
// CHECK: @llvm.hexagon.M4.cmpyi.whc
- __builtin_HEXAGON_M4_cmpyr_wh(0, 0);
+ __builtin_HEXAGON_M4_cmpyi_whc(0, 0);
// CHECK: @llvm.hexagon.M4.cmpyr.wh
- __builtin_HEXAGON_M4_cmpyr_whc(0, 0);
+ __builtin_HEXAGON_M4_cmpyr_wh(0, 0);
// CHECK: @llvm.hexagon.M4.cmpyr.whc
- __builtin_HEXAGON_M4_mac_up_s1_sat(0, 0, 0);
+ __builtin_HEXAGON_M4_cmpyr_whc(0, 0);
// CHECK: @llvm.hexagon.M4.mac.up.s1.sat
- __builtin_HEXAGON_M4_mpyri_addi(0, 0, 0);
+ __builtin_HEXAGON_M4_mac_up_s1_sat(0, 0, 0);
// CHECK: @llvm.hexagon.M4.mpyri.addi
- __builtin_HEXAGON_M4_mpyri_addr(0, 0, 0);
+ __builtin_HEXAGON_M4_mpyri_addi(0, 0, 0);
// CHECK: @llvm.hexagon.M4.mpyri.addr
- __builtin_HEXAGON_M4_mpyri_addr_u2(0, 0, 0);
+ __builtin_HEXAGON_M4_mpyri_addr(0, 0, 0);
// CHECK: @llvm.hexagon.M4.mpyri.addr.u2
- __builtin_HEXAGON_M4_mpyrr_addi(0, 0, 0);
+ __builtin_HEXAGON_M4_mpyri_addr_u2(0, 0, 0);
// CHECK: @llvm.hexagon.M4.mpyrr.addi
- __builtin_HEXAGON_M4_mpyrr_addr(0, 0, 0);
+ __builtin_HEXAGON_M4_mpyrr_addi(0, 0, 0);
// CHECK: @llvm.hexagon.M4.mpyrr.addr
- __builtin_HEXAGON_M4_nac_up_s1_sat(0, 0, 0);
+ __builtin_HEXAGON_M4_mpyrr_addr(0, 0, 0);
// CHECK: @llvm.hexagon.M4.nac.up.s1.sat
- __builtin_HEXAGON_M4_or_and(0, 0, 0);
+ __builtin_HEXAGON_M4_nac_up_s1_sat(0, 0, 0);
// CHECK: @llvm.hexagon.M4.or.and
- __builtin_HEXAGON_M4_or_andn(0, 0, 0);
+ __builtin_HEXAGON_M4_or_and(0, 0, 0);
// CHECK: @llvm.hexagon.M4.or.andn
- __builtin_HEXAGON_M4_or_or(0, 0, 0);
+ __builtin_HEXAGON_M4_or_andn(0, 0, 0);
// CHECK: @llvm.hexagon.M4.or.or
- __builtin_HEXAGON_M4_or_xor(0, 0, 0);
+ __builtin_HEXAGON_M4_or_or(0, 0, 0);
// CHECK: @llvm.hexagon.M4.or.xor
- __builtin_HEXAGON_M4_pmpyw(0, 0);
+ __builtin_HEXAGON_M4_or_xor(0, 0, 0);
// CHECK: @llvm.hexagon.M4.pmpyw
- __builtin_HEXAGON_M4_pmpyw_acc(0, 0, 0);
+ __builtin_HEXAGON_M4_pmpyw(0, 0);
// CHECK: @llvm.hexagon.M4.pmpyw.acc
- __builtin_HEXAGON_M4_vpmpyh(0, 0);
+ __builtin_HEXAGON_M4_pmpyw_acc(0, 0, 0);
// CHECK: @llvm.hexagon.M4.vpmpyh
- __builtin_HEXAGON_M4_vpmpyh_acc(0, 0, 0);
+ __builtin_HEXAGON_M4_vpmpyh(0, 0);
// CHECK: @llvm.hexagon.M4.vpmpyh.acc
- __builtin_HEXAGON_M4_vrmpyeh_acc_s0(0, 0, 0);
+ __builtin_HEXAGON_M4_vpmpyh_acc(0, 0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyeh.acc.s0
- __builtin_HEXAGON_M4_vrmpyeh_acc_s1(0, 0, 0);
+ __builtin_HEXAGON_M4_vrmpyeh_acc_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyeh.acc.s1
- __builtin_HEXAGON_M4_vrmpyeh_s0(0, 0);
+ __builtin_HEXAGON_M4_vrmpyeh_acc_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyeh.s0
- __builtin_HEXAGON_M4_vrmpyeh_s1(0, 0);
+ __builtin_HEXAGON_M4_vrmpyeh_s0(0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyeh.s1
- __builtin_HEXAGON_M4_vrmpyoh_acc_s0(0, 0, 0);
+ __builtin_HEXAGON_M4_vrmpyeh_s1(0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyoh.acc.s0
- __builtin_HEXAGON_M4_vrmpyoh_acc_s1(0, 0, 0);
+ __builtin_HEXAGON_M4_vrmpyoh_acc_s0(0, 0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyoh.acc.s1
- __builtin_HEXAGON_M4_vrmpyoh_s0(0, 0);
+ __builtin_HEXAGON_M4_vrmpyoh_acc_s1(0, 0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyoh.s0
- __builtin_HEXAGON_M4_vrmpyoh_s1(0, 0);
+ __builtin_HEXAGON_M4_vrmpyoh_s0(0, 0);
// CHECK: @llvm.hexagon.M4.vrmpyoh.s1
- __builtin_HEXAGON_M4_xor_and(0, 0, 0);
+ __builtin_HEXAGON_M4_vrmpyoh_s1(0, 0);
// CHECK: @llvm.hexagon.M4.xor.and
- __builtin_HEXAGON_M4_xor_andn(0, 0, 0);
+ __builtin_HEXAGON_M4_xor_and(0, 0, 0);
// CHECK: @llvm.hexagon.M4.xor.andn
- __builtin_HEXAGON_M4_xor_or(0, 0, 0);
+ __builtin_HEXAGON_M4_xor_andn(0, 0, 0);
// CHECK: @llvm.hexagon.M4.xor.or
- __builtin_HEXAGON_M4_xor_xacc(0, 0, 0);
+ __builtin_HEXAGON_M4_xor_or(0, 0, 0);
// CHECK: @llvm.hexagon.M4.xor.xacc
- __builtin_HEXAGON_M5_vdmacbsu(0, 0, 0);
+ __builtin_HEXAGON_M4_xor_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.M5.vdmacbsu
- __builtin_HEXAGON_M5_vdmpybsu(0, 0);
+ __builtin_HEXAGON_M5_vdmacbsu(0, 0, 0);
// CHECK: @llvm.hexagon.M5.vdmpybsu
- __builtin_HEXAGON_M5_vmacbsu(0, 0, 0);
+ __builtin_HEXAGON_M5_vdmpybsu(0, 0);
// CHECK: @llvm.hexagon.M5.vmacbsu
- __builtin_HEXAGON_M5_vmacbuu(0, 0, 0);
+ __builtin_HEXAGON_M5_vmacbsu(0, 0, 0);
// CHECK: @llvm.hexagon.M5.vmacbuu
- __builtin_HEXAGON_M5_vmpybsu(0, 0);
+ __builtin_HEXAGON_M5_vmacbuu(0, 0, 0);
// CHECK: @llvm.hexagon.M5.vmpybsu
- __builtin_HEXAGON_M5_vmpybuu(0, 0);
+ __builtin_HEXAGON_M5_vmpybsu(0, 0);
// CHECK: @llvm.hexagon.M5.vmpybuu
- __builtin_HEXAGON_M5_vrmacbsu(0, 0, 0);
+ __builtin_HEXAGON_M5_vmpybuu(0, 0);
// CHECK: @llvm.hexagon.M5.vrmacbsu
- __builtin_HEXAGON_M5_vrmacbuu(0, 0, 0);
+ __builtin_HEXAGON_M5_vrmacbsu(0, 0, 0);
// CHECK: @llvm.hexagon.M5.vrmacbuu
- __builtin_HEXAGON_M5_vrmpybsu(0, 0);
+ __builtin_HEXAGON_M5_vrmacbuu(0, 0, 0);
// CHECK: @llvm.hexagon.M5.vrmpybsu
- __builtin_HEXAGON_M5_vrmpybuu(0, 0);
+ __builtin_HEXAGON_M5_vrmpybsu(0, 0);
// CHECK: @llvm.hexagon.M5.vrmpybuu
- __builtin_HEXAGON_S2_addasl_rrri(0, 0, 0);
+ __builtin_HEXAGON_M5_vrmpybuu(0, 0);
+ // CHECK: @llvm.hexagon.M6.vabsdiffb
+ __builtin_HEXAGON_M6_vabsdiffb(0, 0);
+ // CHECK: @llvm.hexagon.M6.vabsdiffub
+ __builtin_HEXAGON_M6_vabsdiffub(0, 0);
// CHECK: @llvm.hexagon.S2.addasl.rrri
- __builtin_HEXAGON_S2_asl_i_p(0, 0);
+ __builtin_HEXAGON_S2_addasl_rrri(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.p
- __builtin_HEXAGON_S2_asl_i_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_p(0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.p.acc
- __builtin_HEXAGON_S2_asl_i_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.p.and
- __builtin_HEXAGON_S2_asl_i_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.p.nac
- __builtin_HEXAGON_S2_asl_i_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.p.or
- __builtin_HEXAGON_S2_asl_i_p_xacc(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.p.xacc
- __builtin_HEXAGON_S2_asl_i_r(0, 0);
+ __builtin_HEXAGON_S2_asl_i_p_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r
- __builtin_HEXAGON_S2_asl_i_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_r(0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r.acc
- __builtin_HEXAGON_S2_asl_i_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r.and
- __builtin_HEXAGON_S2_asl_i_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r.nac
- __builtin_HEXAGON_S2_asl_i_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r.or
- __builtin_HEXAGON_S2_asl_i_r_sat(0, 0);
+ __builtin_HEXAGON_S2_asl_i_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r.sat
- __builtin_HEXAGON_S2_asl_i_r_xacc(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_i_r_sat(0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.r.xacc
- __builtin_HEXAGON_S2_asl_i_vh(0, 0);
+ __builtin_HEXAGON_S2_asl_i_r_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.vh
- __builtin_HEXAGON_S2_asl_i_vw(0, 0);
+ __builtin_HEXAGON_S2_asl_i_vh(0, 0);
// CHECK: @llvm.hexagon.S2.asl.i.vw
- __builtin_HEXAGON_S2_asl_r_p(0, 0);
+ __builtin_HEXAGON_S2_asl_i_vw(0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.p
- __builtin_HEXAGON_S2_asl_r_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_p(0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.p.acc
- __builtin_HEXAGON_S2_asl_r_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.p.and
- __builtin_HEXAGON_S2_asl_r_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.p.nac
- __builtin_HEXAGON_S2_asl_r_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.p.or
- __builtin_HEXAGON_S2_asl_r_p_xor(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.p.xor
- __builtin_HEXAGON_S2_asl_r_r(0, 0);
+ __builtin_HEXAGON_S2_asl_r_p_xor(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.r
- __builtin_HEXAGON_S2_asl_r_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_r(0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.r.acc
- __builtin_HEXAGON_S2_asl_r_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.r.and
- __builtin_HEXAGON_S2_asl_r_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.r.nac
- __builtin_HEXAGON_S2_asl_r_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asl_r_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.r.or
- __builtin_HEXAGON_S2_asl_r_r_sat(0, 0);
+ __builtin_HEXAGON_S2_asl_r_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.r.sat
- __builtin_HEXAGON_S2_asl_r_vh(0, 0);
+ __builtin_HEXAGON_S2_asl_r_r_sat(0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.vh
- __builtin_HEXAGON_S2_asl_r_vw(0, 0);
+ __builtin_HEXAGON_S2_asl_r_vh(0, 0);
// CHECK: @llvm.hexagon.S2.asl.r.vw
- __builtin_HEXAGON_S2_asr_i_p(0, 0);
+ __builtin_HEXAGON_S2_asl_r_vw(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p
- __builtin_HEXAGON_S2_asr_i_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_p(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p.acc
- __builtin_HEXAGON_S2_asr_i_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p.and
- __builtin_HEXAGON_S2_asr_i_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p.nac
- __builtin_HEXAGON_S2_asr_i_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p.or
- __builtin_HEXAGON_S2_asr_i_p_rnd(0, 0);
+ __builtin_HEXAGON_S2_asr_i_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p.rnd
- __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax(0, 0);
+ __builtin_HEXAGON_S2_asr_i_p_rnd(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
- __builtin_HEXAGON_S2_asr_i_r(0, 0);
+ __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r
- __builtin_HEXAGON_S2_asr_i_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_r(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r.acc
- __builtin_HEXAGON_S2_asr_i_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r.and
- __builtin_HEXAGON_S2_asr_i_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r.nac
- __builtin_HEXAGON_S2_asr_i_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_i_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r.or
- __builtin_HEXAGON_S2_asr_i_r_rnd(0, 0);
+ __builtin_HEXAGON_S2_asr_i_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r.rnd
- __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax(0, 0);
+ __builtin_HEXAGON_S2_asr_i_r_rnd(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
- __builtin_HEXAGON_S2_asr_i_svw_trun(0, 0);
+ __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.svw.trun
- __builtin_HEXAGON_S2_asr_i_vh(0, 0);
+ __builtin_HEXAGON_S2_asr_i_svw_trun(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.vh
- __builtin_HEXAGON_S2_asr_i_vw(0, 0);
+ __builtin_HEXAGON_S2_asr_i_vh(0, 0);
// CHECK: @llvm.hexagon.S2.asr.i.vw
- __builtin_HEXAGON_S2_asr_r_p(0, 0);
+ __builtin_HEXAGON_S2_asr_i_vw(0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.p
- __builtin_HEXAGON_S2_asr_r_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_p(0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.p.acc
- __builtin_HEXAGON_S2_asr_r_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.p.and
- __builtin_HEXAGON_S2_asr_r_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.p.nac
- __builtin_HEXAGON_S2_asr_r_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.p.or
- __builtin_HEXAGON_S2_asr_r_p_xor(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.p.xor
- __builtin_HEXAGON_S2_asr_r_r(0, 0);
+ __builtin_HEXAGON_S2_asr_r_p_xor(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.r
- __builtin_HEXAGON_S2_asr_r_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_r(0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.r.acc
- __builtin_HEXAGON_S2_asr_r_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.r.and
- __builtin_HEXAGON_S2_asr_r_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.r.nac
- __builtin_HEXAGON_S2_asr_r_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_asr_r_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.r.or
- __builtin_HEXAGON_S2_asr_r_r_sat(0, 0);
+ __builtin_HEXAGON_S2_asr_r_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.r.sat
- __builtin_HEXAGON_S2_asr_r_svw_trun(0, 0);
+ __builtin_HEXAGON_S2_asr_r_r_sat(0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.svw.trun
- __builtin_HEXAGON_S2_asr_r_vh(0, 0);
+ __builtin_HEXAGON_S2_asr_r_svw_trun(0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.vh
- __builtin_HEXAGON_S2_asr_r_vw(0, 0);
+ __builtin_HEXAGON_S2_asr_r_vh(0, 0);
// CHECK: @llvm.hexagon.S2.asr.r.vw
- __builtin_HEXAGON_S2_brev(0);
+ __builtin_HEXAGON_S2_asr_r_vw(0, 0);
// CHECK: @llvm.hexagon.S2.brev
- __builtin_HEXAGON_S2_brevp(0);
+ __builtin_HEXAGON_S2_brev(0);
// CHECK: @llvm.hexagon.S2.brevp
- __builtin_HEXAGON_S2_cabacencbin(0, 0, 0);
+ __builtin_HEXAGON_S2_brevp(0);
// CHECK: @llvm.hexagon.S2.cabacencbin
- __builtin_HEXAGON_S2_cl0(0);
+ __builtin_HEXAGON_S2_cabacencbin(0, 0, 0);
// CHECK: @llvm.hexagon.S2.cl0
- __builtin_HEXAGON_S2_cl0p(0);
+ __builtin_HEXAGON_S2_cl0(0);
// CHECK: @llvm.hexagon.S2.cl0p
- __builtin_HEXAGON_S2_cl1(0);
+ __builtin_HEXAGON_S2_cl0p(0);
// CHECK: @llvm.hexagon.S2.cl1
- __builtin_HEXAGON_S2_cl1p(0);
+ __builtin_HEXAGON_S2_cl1(0);
// CHECK: @llvm.hexagon.S2.cl1p
- __builtin_HEXAGON_S2_clb(0);
+ __builtin_HEXAGON_S2_cl1p(0);
// CHECK: @llvm.hexagon.S2.clb
- __builtin_HEXAGON_S2_clbnorm(0);
+ __builtin_HEXAGON_S2_clb(0);
// CHECK: @llvm.hexagon.S2.clbnorm
- __builtin_HEXAGON_S2_clbp(0);
+ __builtin_HEXAGON_S2_clbnorm(0);
// CHECK: @llvm.hexagon.S2.clbp
- __builtin_HEXAGON_S2_clrbit_i(0, 0);
+ __builtin_HEXAGON_S2_clbp(0);
// CHECK: @llvm.hexagon.S2.clrbit.i
- __builtin_HEXAGON_S2_clrbit_r(0, 0);
+ __builtin_HEXAGON_S2_clrbit_i(0, 0);
// CHECK: @llvm.hexagon.S2.clrbit.r
- __builtin_HEXAGON_S2_ct0(0);
+ __builtin_HEXAGON_S2_clrbit_r(0, 0);
// CHECK: @llvm.hexagon.S2.ct0
- __builtin_HEXAGON_S2_ct0p(0);
+ __builtin_HEXAGON_S2_ct0(0);
// CHECK: @llvm.hexagon.S2.ct0p
- __builtin_HEXAGON_S2_ct1(0);
+ __builtin_HEXAGON_S2_ct0p(0);
// CHECK: @llvm.hexagon.S2.ct1
- __builtin_HEXAGON_S2_ct1p(0);
+ __builtin_HEXAGON_S2_ct1(0);
// CHECK: @llvm.hexagon.S2.ct1p
- __builtin_HEXAGON_S2_deinterleave(0);
+ __builtin_HEXAGON_S2_ct1p(0);
// CHECK: @llvm.hexagon.S2.deinterleave
- __builtin_HEXAGON_S2_extractu(0, 0, 0);
+ __builtin_HEXAGON_S2_deinterleave(0);
// CHECK: @llvm.hexagon.S2.extractu
- __builtin_HEXAGON_S2_extractup(0, 0, 0);
+ __builtin_HEXAGON_S2_extractu(0, 0, 0);
+ // CHECK: @llvm.hexagon.S2.extractu.rp
+ __builtin_HEXAGON_S2_extractu_rp(0, 0);
// CHECK: @llvm.hexagon.S2.extractup
- __builtin_HEXAGON_S2_extractup_rp(0, 0);
+ __builtin_HEXAGON_S2_extractup(0, 0, 0);
// CHECK: @llvm.hexagon.S2.extractup.rp
- __builtin_HEXAGON_S2_extractu_rp(0, 0);
- // CHECK: @llvm.hexagon.S2.extractu.rp
- __builtin_HEXAGON_S2_insert(0, 0, 0, 0);
+ __builtin_HEXAGON_S2_extractup_rp(0, 0);
// CHECK: @llvm.hexagon.S2.insert
- __builtin_HEXAGON_S2_insertp(0, 0, 0, 0);
+ __builtin_HEXAGON_S2_insert(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.S2.insert.rp
+ __builtin_HEXAGON_S2_insert_rp(0, 0, 0);
// CHECK: @llvm.hexagon.S2.insertp
- __builtin_HEXAGON_S2_insertp_rp(0, 0, 0);
+ __builtin_HEXAGON_S2_insertp(0, 0, 0, 0);
// CHECK: @llvm.hexagon.S2.insertp.rp
- __builtin_HEXAGON_S2_insert_rp(0, 0, 0);
- // CHECK: @llvm.hexagon.S2.insert.rp
- __builtin_HEXAGON_S2_interleave(0);
+ __builtin_HEXAGON_S2_insertp_rp(0, 0, 0);
// CHECK: @llvm.hexagon.S2.interleave
- __builtin_HEXAGON_S2_lfsp(0, 0);
+ __builtin_HEXAGON_S2_interleave(0);
// CHECK: @llvm.hexagon.S2.lfsp
- __builtin_HEXAGON_S2_lsl_r_p(0, 0);
+ __builtin_HEXAGON_S2_lfsp(0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.p
- __builtin_HEXAGON_S2_lsl_r_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_p(0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.p.acc
- __builtin_HEXAGON_S2_lsl_r_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.p.and
- __builtin_HEXAGON_S2_lsl_r_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.p.nac
- __builtin_HEXAGON_S2_lsl_r_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.p.or
- __builtin_HEXAGON_S2_lsl_r_p_xor(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.p.xor
- __builtin_HEXAGON_S2_lsl_r_r(0, 0);
+ __builtin_HEXAGON_S2_lsl_r_p_xor(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.r
- __builtin_HEXAGON_S2_lsl_r_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_r(0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.r.acc
- __builtin_HEXAGON_S2_lsl_r_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.r.and
- __builtin_HEXAGON_S2_lsl_r_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.r.nac
- __builtin_HEXAGON_S2_lsl_r_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_lsl_r_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.r.or
- __builtin_HEXAGON_S2_lsl_r_vh(0, 0);
+ __builtin_HEXAGON_S2_lsl_r_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.vh
- __builtin_HEXAGON_S2_lsl_r_vw(0, 0);
+ __builtin_HEXAGON_S2_lsl_r_vh(0, 0);
// CHECK: @llvm.hexagon.S2.lsl.r.vw
- __builtin_HEXAGON_S2_lsr_i_p(0, 0);
+ __builtin_HEXAGON_S2_lsl_r_vw(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.p
- __builtin_HEXAGON_S2_lsr_i_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_p(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.p.acc
- __builtin_HEXAGON_S2_lsr_i_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.p.and
- __builtin_HEXAGON_S2_lsr_i_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.p.nac
- __builtin_HEXAGON_S2_lsr_i_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.p.or
- __builtin_HEXAGON_S2_lsr_i_p_xacc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.p.xacc
- __builtin_HEXAGON_S2_lsr_i_r(0, 0);
+ __builtin_HEXAGON_S2_lsr_i_p_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.r
- __builtin_HEXAGON_S2_lsr_i_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_r(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.r.acc
- __builtin_HEXAGON_S2_lsr_i_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.r.and
- __builtin_HEXAGON_S2_lsr_i_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.r.nac
- __builtin_HEXAGON_S2_lsr_i_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.r.or
- __builtin_HEXAGON_S2_lsr_i_r_xacc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_i_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.r.xacc
- __builtin_HEXAGON_S2_lsr_i_vh(0, 0);
+ __builtin_HEXAGON_S2_lsr_i_r_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.vh
- __builtin_HEXAGON_S2_lsr_i_vw(0, 0);
+ __builtin_HEXAGON_S2_lsr_i_vh(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.i.vw
- __builtin_HEXAGON_S2_lsr_r_p(0, 0);
+ __builtin_HEXAGON_S2_lsr_i_vw(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.p
- __builtin_HEXAGON_S2_lsr_r_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_p(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.p.acc
- __builtin_HEXAGON_S2_lsr_r_p_and(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.p.and
- __builtin_HEXAGON_S2_lsr_r_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.p.nac
- __builtin_HEXAGON_S2_lsr_r_p_or(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.p.or
- __builtin_HEXAGON_S2_lsr_r_p_xor(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.p.xor
- __builtin_HEXAGON_S2_lsr_r_r(0, 0);
+ __builtin_HEXAGON_S2_lsr_r_p_xor(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.r
- __builtin_HEXAGON_S2_lsr_r_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_r(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.r.acc
- __builtin_HEXAGON_S2_lsr_r_r_and(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.r.and
- __builtin_HEXAGON_S2_lsr_r_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.r.nac
- __builtin_HEXAGON_S2_lsr_r_r_or(0, 0, 0);
+ __builtin_HEXAGON_S2_lsr_r_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.r.or
- __builtin_HEXAGON_S2_lsr_r_vh(0, 0);
+ __builtin_HEXAGON_S2_lsr_r_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.vh
- __builtin_HEXAGON_S2_lsr_r_vw(0, 0);
+ __builtin_HEXAGON_S2_lsr_r_vh(0, 0);
// CHECK: @llvm.hexagon.S2.lsr.r.vw
- __builtin_HEXAGON_S2_packhl(0, 0);
+ __builtin_HEXAGON_S2_lsr_r_vw(0, 0);
// CHECK: @llvm.hexagon.S2.packhl
- __builtin_HEXAGON_S2_parityp(0, 0);
+ __builtin_HEXAGON_S2_packhl(0, 0);
// CHECK: @llvm.hexagon.S2.parityp
- __builtin_HEXAGON_S2_setbit_i(0, 0);
+ __builtin_HEXAGON_S2_parityp(0, 0);
// CHECK: @llvm.hexagon.S2.setbit.i
- __builtin_HEXAGON_S2_setbit_r(0, 0);
+ __builtin_HEXAGON_S2_setbit_i(0, 0);
// CHECK: @llvm.hexagon.S2.setbit.r
- __builtin_HEXAGON_S2_shuffeb(0, 0);
+ __builtin_HEXAGON_S2_setbit_r(0, 0);
// CHECK: @llvm.hexagon.S2.shuffeb
- __builtin_HEXAGON_S2_shuffeh(0, 0);
+ __builtin_HEXAGON_S2_shuffeb(0, 0);
// CHECK: @llvm.hexagon.S2.shuffeh
- __builtin_HEXAGON_S2_shuffob(0, 0);
+ __builtin_HEXAGON_S2_shuffeh(0, 0);
// CHECK: @llvm.hexagon.S2.shuffob
- __builtin_HEXAGON_S2_shuffoh(0, 0);
+ __builtin_HEXAGON_S2_shuffob(0, 0);
// CHECK: @llvm.hexagon.S2.shuffoh
- __builtin_HEXAGON_S2_svsathb(0);
+ __builtin_HEXAGON_S2_shuffoh(0, 0);
// CHECK: @llvm.hexagon.S2.svsathb
- __builtin_HEXAGON_S2_svsathub(0);
+ __builtin_HEXAGON_S2_svsathb(0);
// CHECK: @llvm.hexagon.S2.svsathub
- __builtin_HEXAGON_S2_tableidxb_goodsyntax(0, 0, 0, 0);
+ __builtin_HEXAGON_S2_svsathub(0);
// CHECK: @llvm.hexagon.S2.tableidxb.goodsyntax
- __builtin_HEXAGON_S2_tableidxd_goodsyntax(0, 0, 0, 0);
+ __builtin_HEXAGON_S2_tableidxb_goodsyntax(0, 0, 0, 0);
// CHECK: @llvm.hexagon.S2.tableidxd.goodsyntax
- __builtin_HEXAGON_S2_tableidxh_goodsyntax(0, 0, 0, 0);
+ __builtin_HEXAGON_S2_tableidxd_goodsyntax(0, 0, 0, 0);
// CHECK: @llvm.hexagon.S2.tableidxh.goodsyntax
- __builtin_HEXAGON_S2_tableidxw_goodsyntax(0, 0, 0, 0);
+ __builtin_HEXAGON_S2_tableidxh_goodsyntax(0, 0, 0, 0);
// CHECK: @llvm.hexagon.S2.tableidxw.goodsyntax
- __builtin_HEXAGON_S2_togglebit_i(0, 0);
+ __builtin_HEXAGON_S2_tableidxw_goodsyntax(0, 0, 0, 0);
// CHECK: @llvm.hexagon.S2.togglebit.i
- __builtin_HEXAGON_S2_togglebit_r(0, 0);
+ __builtin_HEXAGON_S2_togglebit_i(0, 0);
// CHECK: @llvm.hexagon.S2.togglebit.r
- __builtin_HEXAGON_S2_tstbit_i(0, 0);
+ __builtin_HEXAGON_S2_togglebit_r(0, 0);
// CHECK: @llvm.hexagon.S2.tstbit.i
- __builtin_HEXAGON_S2_tstbit_r(0, 0);
+ __builtin_HEXAGON_S2_tstbit_i(0, 0);
// CHECK: @llvm.hexagon.S2.tstbit.r
- __builtin_HEXAGON_S2_valignib(0, 0, 0);
+ __builtin_HEXAGON_S2_tstbit_r(0, 0);
// CHECK: @llvm.hexagon.S2.valignib
- __builtin_HEXAGON_S2_valignrb(0, 0, 0);
+ __builtin_HEXAGON_S2_valignib(0, 0, 0);
// CHECK: @llvm.hexagon.S2.valignrb
- __builtin_HEXAGON_S2_vcnegh(0, 0);
+ __builtin_HEXAGON_S2_valignrb(0, 0, 0);
// CHECK: @llvm.hexagon.S2.vcnegh
- __builtin_HEXAGON_S2_vcrotate(0, 0);
+ __builtin_HEXAGON_S2_vcnegh(0, 0);
// CHECK: @llvm.hexagon.S2.vcrotate
- __builtin_HEXAGON_S2_vrcnegh(0, 0, 0);
+ __builtin_HEXAGON_S2_vcrotate(0, 0);
// CHECK: @llvm.hexagon.S2.vrcnegh
- __builtin_HEXAGON_S2_vrndpackwh(0);
+ __builtin_HEXAGON_S2_vrcnegh(0, 0, 0);
// CHECK: @llvm.hexagon.S2.vrndpackwh
- __builtin_HEXAGON_S2_vrndpackwhs(0);
+ __builtin_HEXAGON_S2_vrndpackwh(0);
// CHECK: @llvm.hexagon.S2.vrndpackwhs
- __builtin_HEXAGON_S2_vsathb(0);
+ __builtin_HEXAGON_S2_vrndpackwhs(0);
// CHECK: @llvm.hexagon.S2.vsathb
- __builtin_HEXAGON_S2_vsathb_nopack(0);
+ __builtin_HEXAGON_S2_vsathb(0);
// CHECK: @llvm.hexagon.S2.vsathb.nopack
- __builtin_HEXAGON_S2_vsathub(0);
+ __builtin_HEXAGON_S2_vsathb_nopack(0);
// CHECK: @llvm.hexagon.S2.vsathub
- __builtin_HEXAGON_S2_vsathub_nopack(0);
+ __builtin_HEXAGON_S2_vsathub(0);
// CHECK: @llvm.hexagon.S2.vsathub.nopack
- __builtin_HEXAGON_S2_vsatwh(0);
+ __builtin_HEXAGON_S2_vsathub_nopack(0);
// CHECK: @llvm.hexagon.S2.vsatwh
- __builtin_HEXAGON_S2_vsatwh_nopack(0);
+ __builtin_HEXAGON_S2_vsatwh(0);
// CHECK: @llvm.hexagon.S2.vsatwh.nopack
- __builtin_HEXAGON_S2_vsatwuh(0);
+ __builtin_HEXAGON_S2_vsatwh_nopack(0);
// CHECK: @llvm.hexagon.S2.vsatwuh
- __builtin_HEXAGON_S2_vsatwuh_nopack(0);
+ __builtin_HEXAGON_S2_vsatwuh(0);
// CHECK: @llvm.hexagon.S2.vsatwuh.nopack
- __builtin_HEXAGON_S2_vsplatrb(0);
+ __builtin_HEXAGON_S2_vsatwuh_nopack(0);
// CHECK: @llvm.hexagon.S2.vsplatrb
- __builtin_HEXAGON_S2_vsplatrh(0);
+ __builtin_HEXAGON_S2_vsplatrb(0);
// CHECK: @llvm.hexagon.S2.vsplatrh
- __builtin_HEXAGON_S2_vspliceib(0, 0, 0);
+ __builtin_HEXAGON_S2_vsplatrh(0);
// CHECK: @llvm.hexagon.S2.vspliceib
- __builtin_HEXAGON_S2_vsplicerb(0, 0, 0);
+ __builtin_HEXAGON_S2_vspliceib(0, 0, 0);
// CHECK: @llvm.hexagon.S2.vsplicerb
- __builtin_HEXAGON_S2_vsxtbh(0);
+ __builtin_HEXAGON_S2_vsplicerb(0, 0, 0);
// CHECK: @llvm.hexagon.S2.vsxtbh
- __builtin_HEXAGON_S2_vsxthw(0);
+ __builtin_HEXAGON_S2_vsxtbh(0);
// CHECK: @llvm.hexagon.S2.vsxthw
- __builtin_HEXAGON_S2_vtrunehb(0);
+ __builtin_HEXAGON_S2_vsxthw(0);
// CHECK: @llvm.hexagon.S2.vtrunehb
- __builtin_HEXAGON_S2_vtrunewh(0, 0);
+ __builtin_HEXAGON_S2_vtrunehb(0);
// CHECK: @llvm.hexagon.S2.vtrunewh
- __builtin_HEXAGON_S2_vtrunohb(0);
+ __builtin_HEXAGON_S2_vtrunewh(0, 0);
// CHECK: @llvm.hexagon.S2.vtrunohb
- __builtin_HEXAGON_S2_vtrunowh(0, 0);
+ __builtin_HEXAGON_S2_vtrunohb(0);
// CHECK: @llvm.hexagon.S2.vtrunowh
- __builtin_HEXAGON_S2_vzxtbh(0);
+ __builtin_HEXAGON_S2_vtrunowh(0, 0);
// CHECK: @llvm.hexagon.S2.vzxtbh
- __builtin_HEXAGON_S2_vzxthw(0);
+ __builtin_HEXAGON_S2_vzxtbh(0);
// CHECK: @llvm.hexagon.S2.vzxthw
- __builtin_HEXAGON_S4_addaddi(0, 0, 0);
+ __builtin_HEXAGON_S2_vzxthw(0);
// CHECK: @llvm.hexagon.S4.addaddi
- __builtin_HEXAGON_S4_addi_asl_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_addaddi(0, 0, 0);
// CHECK: @llvm.hexagon.S4.addi.asl.ri
- __builtin_HEXAGON_S4_addi_lsr_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_addi_asl_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.addi.lsr.ri
- __builtin_HEXAGON_S4_andi_asl_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_addi_lsr_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.andi.asl.ri
- __builtin_HEXAGON_S4_andi_lsr_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_andi_asl_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.andi.lsr.ri
- __builtin_HEXAGON_S4_clbaddi(0, 0);
+ __builtin_HEXAGON_S4_andi_lsr_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.clbaddi
- __builtin_HEXAGON_S4_clbpaddi(0, 0);
+ __builtin_HEXAGON_S4_clbaddi(0, 0);
// CHECK: @llvm.hexagon.S4.clbpaddi
- __builtin_HEXAGON_S4_clbpnorm(0);
+ __builtin_HEXAGON_S4_clbpaddi(0, 0);
// CHECK: @llvm.hexagon.S4.clbpnorm
- __builtin_HEXAGON_S4_extract(0, 0, 0);
+ __builtin_HEXAGON_S4_clbpnorm(0);
// CHECK: @llvm.hexagon.S4.extract
- __builtin_HEXAGON_S4_extractp(0, 0, 0);
+ __builtin_HEXAGON_S4_extract(0, 0, 0);
+ // CHECK: @llvm.hexagon.S4.extract.rp
+ __builtin_HEXAGON_S4_extract_rp(0, 0);
// CHECK: @llvm.hexagon.S4.extractp
- __builtin_HEXAGON_S4_extractp_rp(0, 0);
+ __builtin_HEXAGON_S4_extractp(0, 0, 0);
// CHECK: @llvm.hexagon.S4.extractp.rp
- __builtin_HEXAGON_S4_extract_rp(0, 0);
- // CHECK: @llvm.hexagon.S4.extract.rp
- __builtin_HEXAGON_S4_lsli(0, 0);
+ __builtin_HEXAGON_S4_extractp_rp(0, 0);
// CHECK: @llvm.hexagon.S4.lsli
- __builtin_HEXAGON_S4_ntstbit_i(0, 0);
+ __builtin_HEXAGON_S4_lsli(0, 0);
// CHECK: @llvm.hexagon.S4.ntstbit.i
- __builtin_HEXAGON_S4_ntstbit_r(0, 0);
+ __builtin_HEXAGON_S4_ntstbit_i(0, 0);
// CHECK: @llvm.hexagon.S4.ntstbit.r
- __builtin_HEXAGON_S4_or_andi(0, 0, 0);
+ __builtin_HEXAGON_S4_ntstbit_r(0, 0);
// CHECK: @llvm.hexagon.S4.or.andi
- __builtin_HEXAGON_S4_or_andix(0, 0, 0);
+ __builtin_HEXAGON_S4_or_andi(0, 0, 0);
// CHECK: @llvm.hexagon.S4.or.andix
- __builtin_HEXAGON_S4_ori_asl_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_or_andix(0, 0, 0);
+ // CHECK: @llvm.hexagon.S4.or.ori
+ __builtin_HEXAGON_S4_or_ori(0, 0, 0);
// CHECK: @llvm.hexagon.S4.ori.asl.ri
- __builtin_HEXAGON_S4_ori_lsr_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_ori_asl_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.ori.lsr.ri
- __builtin_HEXAGON_S4_or_ori(0, 0, 0);
- // CHECK: @llvm.hexagon.S4.or.ori
- __builtin_HEXAGON_S4_parity(0, 0);
+ __builtin_HEXAGON_S4_ori_lsr_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.parity
- __builtin_HEXAGON_S4_subaddi(0, 0, 0);
+ __builtin_HEXAGON_S4_parity(0, 0);
// CHECK: @llvm.hexagon.S4.subaddi
- __builtin_HEXAGON_S4_subi_asl_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_subaddi(0, 0, 0);
// CHECK: @llvm.hexagon.S4.subi.asl.ri
- __builtin_HEXAGON_S4_subi_lsr_ri(0, 0, 0);
+ __builtin_HEXAGON_S4_subi_asl_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.subi.lsr.ri
- __builtin_HEXAGON_S4_vrcrotate(0, 0, 0);
+ __builtin_HEXAGON_S4_subi_lsr_ri(0, 0, 0);
// CHECK: @llvm.hexagon.S4.vrcrotate
- __builtin_HEXAGON_S4_vrcrotate_acc(0, 0, 0, 0);
+ __builtin_HEXAGON_S4_vrcrotate(0, 0, 0);
// CHECK: @llvm.hexagon.S4.vrcrotate.acc
- __builtin_HEXAGON_S4_vxaddsubh(0, 0);
+ __builtin_HEXAGON_S4_vrcrotate_acc(0, 0, 0, 0);
// CHECK: @llvm.hexagon.S4.vxaddsubh
- __builtin_HEXAGON_S4_vxaddsubhr(0, 0);
+ __builtin_HEXAGON_S4_vxaddsubh(0, 0);
// CHECK: @llvm.hexagon.S4.vxaddsubhr
- __builtin_HEXAGON_S4_vxaddsubw(0, 0);
+ __builtin_HEXAGON_S4_vxaddsubhr(0, 0);
// CHECK: @llvm.hexagon.S4.vxaddsubw
- __builtin_HEXAGON_S4_vxsubaddh(0, 0);
+ __builtin_HEXAGON_S4_vxaddsubw(0, 0);
// CHECK: @llvm.hexagon.S4.vxsubaddh
- __builtin_HEXAGON_S4_vxsubaddhr(0, 0);
+ __builtin_HEXAGON_S4_vxsubaddh(0, 0);
// CHECK: @llvm.hexagon.S4.vxsubaddhr
- __builtin_HEXAGON_S4_vxsubaddw(0, 0);
+ __builtin_HEXAGON_S4_vxsubaddhr(0, 0);
// CHECK: @llvm.hexagon.S4.vxsubaddw
- __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax(0, 0);
+ __builtin_HEXAGON_S4_vxsubaddw(0, 0);
// CHECK: @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
- __builtin_HEXAGON_S5_asrhub_sat(0, 0);
+ __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax(0, 0);
// CHECK: @llvm.hexagon.S5.asrhub.sat
- __builtin_HEXAGON_S5_popcountp(0);
+ __builtin_HEXAGON_S5_asrhub_sat(0, 0);
// CHECK: @llvm.hexagon.S5.popcountp
- __builtin_HEXAGON_S5_vasrhrnd_goodsyntax(0, 0);
+ __builtin_HEXAGON_S5_popcountp(0);
// CHECK: @llvm.hexagon.S5.vasrhrnd.goodsyntax
- __builtin_HEXAGON_S6_rol_i_p(0, 0);
+ __builtin_HEXAGON_S5_vasrhrnd_goodsyntax(0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.p
- __builtin_HEXAGON_S6_rol_i_p_acc(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_p(0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.p.acc
- __builtin_HEXAGON_S6_rol_i_p_and(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_p_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.p.and
- __builtin_HEXAGON_S6_rol_i_p_nac(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_p_and(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.p.nac
- __builtin_HEXAGON_S6_rol_i_p_or(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_p_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.p.or
- __builtin_HEXAGON_S6_rol_i_p_xacc(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_p_or(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.p.xacc
- __builtin_HEXAGON_S6_rol_i_r(0, 0);
+ __builtin_HEXAGON_S6_rol_i_p_xacc(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.r
- __builtin_HEXAGON_S6_rol_i_r_acc(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_r(0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.r.acc
- __builtin_HEXAGON_S6_rol_i_r_and(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_r_acc(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.r.and
- __builtin_HEXAGON_S6_rol_i_r_nac(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_r_and(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.r.nac
- __builtin_HEXAGON_S6_rol_i_r_or(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_r_nac(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.r.or
- __builtin_HEXAGON_S6_rol_i_r_xacc(0, 0, 0);
+ __builtin_HEXAGON_S6_rol_i_r_or(0, 0, 0);
// CHECK: @llvm.hexagon.S6.rol.i.r.xacc
- __builtin_HEXAGON_V6_extractw_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.extractw.128B
- __builtin_HEXAGON_V6_extractw(v16, 0);
+ __builtin_HEXAGON_S6_rol_i_r_xacc(0, 0, 0);
+ // CHECK: @llvm.hexagon.S6.vsplatrbp
+ __builtin_HEXAGON_S6_vsplatrbp(0);
+ // CHECK: @llvm.hexagon.S6.vtrunehb.ppp
+ __builtin_HEXAGON_S6_vtrunehb_ppp(0, 0);
+ // CHECK: @llvm.hexagon.S6.vtrunohb.ppp
+ __builtin_HEXAGON_S6_vtrunohb_ppp(0, 0);
// CHECK: @llvm.hexagon.V6.extractw
- __builtin_HEXAGON_V6_hi_128B(v64);
- // CHECK: @llvm.hexagon.V6.hi.128B
- __builtin_HEXAGON_V6_hi(v32);
+ __builtin_HEXAGON_V6_extractw(v64, 0);
+ // CHECK: @llvm.hexagon.V6.extractw.128B
+ __builtin_HEXAGON_V6_extractw_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.hi
- __builtin_HEXAGON_V6_lo_128B(v64);
- // CHECK: @llvm.hexagon.V6.lo.128B
- __builtin_HEXAGON_V6_lo(v32);
+ __builtin_HEXAGON_V6_hi(v128);
+ // CHECK: @llvm.hexagon.V6.hi.128B
+ __builtin_HEXAGON_V6_hi_128B(v256);
// CHECK: @llvm.hexagon.V6.lo
- __builtin_HEXAGON_V6_lvsplatw(0);
+ __builtin_HEXAGON_V6_lo(v128);
+ // CHECK: @llvm.hexagon.V6.lo.128B
+ __builtin_HEXAGON_V6_lo_128B(v256);
+ // CHECK: @llvm.hexagon.V6.lvsplatb
+ __builtin_HEXAGON_V6_lvsplatb(0);
+ // CHECK: @llvm.hexagon.V6.lvsplatb.128B
+ __builtin_HEXAGON_V6_lvsplatb_128B(0);
+ // CHECK: @llvm.hexagon.V6.lvsplath
+ __builtin_HEXAGON_V6_lvsplath(0);
+ // CHECK: @llvm.hexagon.V6.lvsplath.128B
+ __builtin_HEXAGON_V6_lvsplath_128B(0);
// CHECK: @llvm.hexagon.V6.lvsplatw
- __builtin_HEXAGON_V6_lvsplatw_128B(0);
+ __builtin_HEXAGON_V6_lvsplatw(0);
// CHECK: @llvm.hexagon.V6.lvsplatw.128B
- __builtin_HEXAGON_V6_pred_and_128B(v32, v32);
+ __builtin_HEXAGON_V6_lvsplatw_128B(0);
+ // CHECK: @llvm.hexagon.V6.pred.and
+ __builtin_HEXAGON_V6_pred_and(v64, v64);
// CHECK: @llvm.hexagon.V6.pred.and.128B
- __builtin_HEXAGON_V6_pred_and_n_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.pred.and.n.128B
- __builtin_HEXAGON_V6_pred_and_n(v16, v16);
+ __builtin_HEXAGON_V6_pred_and_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.pred.and.n
- __builtin_HEXAGON_V6_pred_and(v16, v16);
- // CHECK: @llvm.hexagon.V6.pred.and
- __builtin_HEXAGON_V6_pred_not_128B(v32);
- // CHECK: @llvm.hexagon.V6.pred.not.128B
- __builtin_HEXAGON_V6_pred_not(v16);
+ __builtin_HEXAGON_V6_pred_and_n(v64, v64);
+ // CHECK: @llvm.hexagon.V6.pred.and.n.128B
+ __builtin_HEXAGON_V6_pred_and_n_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.pred.not
- __builtin_HEXAGON_V6_pred_or_128B(v32, v32);
+ __builtin_HEXAGON_V6_pred_not(v64);
+ // CHECK: @llvm.hexagon.V6.pred.not.128B
+ __builtin_HEXAGON_V6_pred_not_128B(v128);
+ // CHECK: @llvm.hexagon.V6.pred.or
+ __builtin_HEXAGON_V6_pred_or(v64, v64);
// CHECK: @llvm.hexagon.V6.pred.or.128B
- __builtin_HEXAGON_V6_pred_or_n_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.pred.or.n.128B
- __builtin_HEXAGON_V6_pred_or_n(v16, v16);
+ __builtin_HEXAGON_V6_pred_or_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.pred.or.n
- __builtin_HEXAGON_V6_pred_or(v16, v16);
- // CHECK: @llvm.hexagon.V6.pred.or
- __builtin_HEXAGON_V6_pred_scalar2(0);
+ __builtin_HEXAGON_V6_pred_or_n(v64, v64);
+ // CHECK: @llvm.hexagon.V6.pred.or.n.128B
+ __builtin_HEXAGON_V6_pred_or_n_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.pred.scalar2
- __builtin_HEXAGON_V6_pred_scalar2_128B(0);
+ __builtin_HEXAGON_V6_pred_scalar2(0);
// CHECK: @llvm.hexagon.V6.pred.scalar2.128B
- __builtin_HEXAGON_V6_pred_xor_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.pred.xor.128B
- __builtin_HEXAGON_V6_pred_xor(v16, v16);
+ __builtin_HEXAGON_V6_pred_scalar2_128B(0);
+ // CHECK: @llvm.hexagon.V6.pred.scalar2v2
+ __builtin_HEXAGON_V6_pred_scalar2v2(0);
+ // CHECK: @llvm.hexagon.V6.pred.scalar2v2.128B
+ __builtin_HEXAGON_V6_pred_scalar2v2_128B(0);
// CHECK: @llvm.hexagon.V6.pred.xor
- __builtin_HEXAGON_V6_vabsdiffh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vabsdiffh.128B
- __builtin_HEXAGON_V6_vabsdiffh(v16, v16);
+ __builtin_HEXAGON_V6_pred_xor(v64, v64);
+ // CHECK: @llvm.hexagon.V6.pred.xor.128B
+ __builtin_HEXAGON_V6_pred_xor_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.shuffeqh
+ __builtin_HEXAGON_V6_shuffeqh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.shuffeqh.128B
+ __builtin_HEXAGON_V6_shuffeqh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.shuffeqw
+ __builtin_HEXAGON_V6_shuffeqw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.shuffeqw.128B
+ __builtin_HEXAGON_V6_shuffeqw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vS32b.nqpred.ai
+ __builtin_HEXAGON_V6_vS32b_nqpred_ai(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vS32b.nqpred.ai.128B
+ __builtin_HEXAGON_V6_vS32b_nqpred_ai_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vS32b.nt.nqpred.ai
+ __builtin_HEXAGON_V6_vS32b_nt_nqpred_ai(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B
+ __builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vS32b.nt.qpred.ai
+ __builtin_HEXAGON_V6_vS32b_nt_qpred_ai(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B
+ __builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vS32b.qpred.ai
+ __builtin_HEXAGON_V6_vS32b_qpred_ai(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vS32b.qpred.ai.128B
+ __builtin_HEXAGON_V6_vS32b_qpred_ai_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vabsb
+ __builtin_HEXAGON_V6_vabsb(v64);
+ // CHECK: @llvm.hexagon.V6.vabsb.128B
+ __builtin_HEXAGON_V6_vabsb_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vabsb.sat
+ __builtin_HEXAGON_V6_vabsb_sat(v64);
+ // CHECK: @llvm.hexagon.V6.vabsb.sat.128B
+ __builtin_HEXAGON_V6_vabsb_sat_128B(v128);
// CHECK: @llvm.hexagon.V6.vabsdiffh
- __builtin_HEXAGON_V6_vabsdiffub_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vabsdiffub.128B
- __builtin_HEXAGON_V6_vabsdiffub(v16, v16);
+ __builtin_HEXAGON_V6_vabsdiffh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vabsdiffh.128B
+ __builtin_HEXAGON_V6_vabsdiffh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vabsdiffub
- __builtin_HEXAGON_V6_vabsdiffuh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vabsdiffuh.128B
- __builtin_HEXAGON_V6_vabsdiffuh(v16, v16);
+ __builtin_HEXAGON_V6_vabsdiffub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vabsdiffub.128B
+ __builtin_HEXAGON_V6_vabsdiffub_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vabsdiffuh
- __builtin_HEXAGON_V6_vabsdiffw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vabsdiffw.128B
- __builtin_HEXAGON_V6_vabsdiffw(v16, v16);
+ __builtin_HEXAGON_V6_vabsdiffuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vabsdiffuh.128B
+ __builtin_HEXAGON_V6_vabsdiffuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vabsdiffw
- __builtin_HEXAGON_V6_vabsh_128B(v32);
+ __builtin_HEXAGON_V6_vabsdiffw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vabsdiffw.128B
+ __builtin_HEXAGON_V6_vabsdiffw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vabsh
+ __builtin_HEXAGON_V6_vabsh(v64);
// CHECK: @llvm.hexagon.V6.vabsh.128B
- __builtin_HEXAGON_V6_vabsh_sat_128B(v32);
- // CHECK: @llvm.hexagon.V6.vabsh.sat.128B
- __builtin_HEXAGON_V6_vabsh_sat(v16);
+ __builtin_HEXAGON_V6_vabsh_128B(v128);
// CHECK: @llvm.hexagon.V6.vabsh.sat
- __builtin_HEXAGON_V6_vabsh(v16);
- // CHECK: @llvm.hexagon.V6.vabsh
- __builtin_HEXAGON_V6_vabsw_128B(v32);
+ __builtin_HEXAGON_V6_vabsh_sat(v64);
+ // CHECK: @llvm.hexagon.V6.vabsh.sat.128B
+ __builtin_HEXAGON_V6_vabsh_sat_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vabsw
+ __builtin_HEXAGON_V6_vabsw(v64);
// CHECK: @llvm.hexagon.V6.vabsw.128B
- __builtin_HEXAGON_V6_vabsw_sat_128B(v32);
- // CHECK: @llvm.hexagon.V6.vabsw.sat.128B
- __builtin_HEXAGON_V6_vabsw_sat(v16);
+ __builtin_HEXAGON_V6_vabsw_128B(v128);
// CHECK: @llvm.hexagon.V6.vabsw.sat
- __builtin_HEXAGON_V6_vabsw(v16);
- // CHECK: @llvm.hexagon.V6.vabsw
- __builtin_HEXAGON_V6_vaddb_128B(v32, v32);
+ __builtin_HEXAGON_V6_vabsw_sat(v64);
+ // CHECK: @llvm.hexagon.V6.vabsw.sat.128B
+ __builtin_HEXAGON_V6_vabsw_sat_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vaddb
+ __builtin_HEXAGON_V6_vaddb(v64, v64);
// CHECK: @llvm.hexagon.V6.vaddb.128B
- __builtin_HEXAGON_V6_vaddb_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vaddb.dv.128B
- __builtin_HEXAGON_V6_vaddb_dv(v32, v32);
+ __builtin_HEXAGON_V6_vaddb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vaddb.dv
- __builtin_HEXAGON_V6_vaddbnq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddbnq.128B
- __builtin_HEXAGON_V6_vaddbnq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vaddb_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddb.dv.128B
+ __builtin_HEXAGON_V6_vaddb_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vaddbnq
- __builtin_HEXAGON_V6_vaddbq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddbq.128B
- __builtin_HEXAGON_V6_vaddbq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vaddbnq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddbnq.128B
+ __builtin_HEXAGON_V6_vaddbnq_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vaddbq
- __builtin_HEXAGON_V6_vaddb(v16, v16);
- // CHECK: @llvm.hexagon.V6.vaddb
- __builtin_HEXAGON_V6_vaddh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vaddbq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddbq.128B
+ __builtin_HEXAGON_V6_vaddbq_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddbsat
+ __builtin_HEXAGON_V6_vaddbsat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddbsat.128B
+ __builtin_HEXAGON_V6_vaddbsat_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddbsat.dv
+ __builtin_HEXAGON_V6_vaddbsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddbsat.dv.128B
+ __builtin_HEXAGON_V6_vaddbsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vaddcarry
+ __builtin_HEXAGON_V6_vaddcarry(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vaddcarry.128B
+ __builtin_HEXAGON_V6_vaddcarry_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vaddclbh
+ __builtin_HEXAGON_V6_vaddclbh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddclbh.128B
+ __builtin_HEXAGON_V6_vaddclbh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddclbw
+ __builtin_HEXAGON_V6_vaddclbw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddclbw.128B
+ __builtin_HEXAGON_V6_vaddclbw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddh
+ __builtin_HEXAGON_V6_vaddh(v64, v64);
// CHECK: @llvm.hexagon.V6.vaddh.128B
- __builtin_HEXAGON_V6_vaddh_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vaddh.dv.128B
- __builtin_HEXAGON_V6_vaddh_dv(v32, v32);
+ __builtin_HEXAGON_V6_vaddh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vaddh.dv
- __builtin_HEXAGON_V6_vaddhnq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddhnq.128B
- __builtin_HEXAGON_V6_vaddhnq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vaddh_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddh.dv.128B
+ __builtin_HEXAGON_V6_vaddh_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vaddhnq
- __builtin_HEXAGON_V6_vaddhq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddhq.128B
- __builtin_HEXAGON_V6_vaddhq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vaddhnq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddhnq.128B
+ __builtin_HEXAGON_V6_vaddhnq_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vaddhq
- __builtin_HEXAGON_V6_vaddhsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vaddhq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddhq.128B
+ __builtin_HEXAGON_V6_vaddhq_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddhsat
+ __builtin_HEXAGON_V6_vaddhsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vaddhsat.128B
- __builtin_HEXAGON_V6_vaddhsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vaddhsat.dv.128B
- __builtin_HEXAGON_V6_vaddhsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vaddhsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vaddhsat.dv
- __builtin_HEXAGON_V6_vaddhsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vaddhsat
- __builtin_HEXAGON_V6_vaddh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vaddh
- __builtin_HEXAGON_V6_vaddhw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddhw.128B
- __builtin_HEXAGON_V6_vaddhw(v16, v16);
+ __builtin_HEXAGON_V6_vaddhsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddhsat.dv.128B
+ __builtin_HEXAGON_V6_vaddhsat_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vaddhw
- __builtin_HEXAGON_V6_vaddubh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddubh.128B
- __builtin_HEXAGON_V6_vaddubh(v16, v16);
+ __builtin_HEXAGON_V6_vaddhw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddhw.128B
+ __builtin_HEXAGON_V6_vaddhw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddhw.acc
+ __builtin_HEXAGON_V6_vaddhw_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddhw.acc.128B
+ __builtin_HEXAGON_V6_vaddhw_acc_128B(v256, v128, v128);
// CHECK: @llvm.hexagon.V6.vaddubh
- __builtin_HEXAGON_V6_vaddubsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vaddubh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddubh.128B
+ __builtin_HEXAGON_V6_vaddubh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddubh.acc
+ __builtin_HEXAGON_V6_vaddubh_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddubh.acc.128B
+ __builtin_HEXAGON_V6_vaddubh_acc_128B(v256, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddubsat
+ __builtin_HEXAGON_V6_vaddubsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vaddubsat.128B
- __builtin_HEXAGON_V6_vaddubsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vaddubsat.dv.128B
- __builtin_HEXAGON_V6_vaddubsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vaddubsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vaddubsat.dv
- __builtin_HEXAGON_V6_vaddubsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vaddubsat
- __builtin_HEXAGON_V6_vadduhsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vaddubsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddubsat.dv.128B
+ __builtin_HEXAGON_V6_vaddubsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vaddububb.sat
+ __builtin_HEXAGON_V6_vaddububb_sat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddububb.sat.128B
+ __builtin_HEXAGON_V6_vaddububb_sat_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vadduhsat
+ __builtin_HEXAGON_V6_vadduhsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vadduhsat.128B
- __builtin_HEXAGON_V6_vadduhsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vadduhsat.dv.128B
- __builtin_HEXAGON_V6_vadduhsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vadduhsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vadduhsat.dv
- __builtin_HEXAGON_V6_vadduhsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vadduhsat
- __builtin_HEXAGON_V6_vadduhw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vadduhw.128B
- __builtin_HEXAGON_V6_vadduhw(v16, v16);
+ __builtin_HEXAGON_V6_vadduhsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vadduhsat.dv.128B
+ __builtin_HEXAGON_V6_vadduhsat_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vadduhw
- __builtin_HEXAGON_V6_vaddw_128B(v32, v32);
+ __builtin_HEXAGON_V6_vadduhw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vadduhw.128B
+ __builtin_HEXAGON_V6_vadduhw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vadduhw.acc
+ __builtin_HEXAGON_V6_vadduhw_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vadduhw.acc.128B
+ __builtin_HEXAGON_V6_vadduhw_acc_128B(v256, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vadduwsat
+ __builtin_HEXAGON_V6_vadduwsat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vadduwsat.128B
+ __builtin_HEXAGON_V6_vadduwsat_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vadduwsat.dv
+ __builtin_HEXAGON_V6_vadduwsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vadduwsat.dv.128B
+ __builtin_HEXAGON_V6_vadduwsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vaddw
+ __builtin_HEXAGON_V6_vaddw(v64, v64);
// CHECK: @llvm.hexagon.V6.vaddw.128B
- __builtin_HEXAGON_V6_vaddw_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vaddw.dv.128B
- __builtin_HEXAGON_V6_vaddw_dv(v32, v32);
+ __builtin_HEXAGON_V6_vaddw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vaddw.dv
- __builtin_HEXAGON_V6_vaddwnq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddwnq.128B
- __builtin_HEXAGON_V6_vaddwnq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vaddw_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddw.dv.128B
+ __builtin_HEXAGON_V6_vaddw_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vaddwnq
- __builtin_HEXAGON_V6_vaddwq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vaddwq.128B
- __builtin_HEXAGON_V6_vaddwq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vaddwnq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddwnq.128B
+ __builtin_HEXAGON_V6_vaddwnq_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vaddwq
- __builtin_HEXAGON_V6_vaddwsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vaddwq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaddwq.128B
+ __builtin_HEXAGON_V6_vaddwq_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddwsat
+ __builtin_HEXAGON_V6_vaddwsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vaddwsat.128B
- __builtin_HEXAGON_V6_vaddwsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vaddwsat.dv.128B
- __builtin_HEXAGON_V6_vaddwsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vaddwsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vaddwsat.dv
- __builtin_HEXAGON_V6_vaddwsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vaddwsat
- __builtin_HEXAGON_V6_vaddw(v16, v16);
- // CHECK: @llvm.hexagon.V6.vaddw
- __builtin_HEXAGON_V6_valignb_128B(v32, v32, 0);
+ __builtin_HEXAGON_V6_vaddwsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaddwsat.dv.128B
+ __builtin_HEXAGON_V6_vaddwsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.valignb
+ __builtin_HEXAGON_V6_valignb(v64, v64, 0);
// CHECK: @llvm.hexagon.V6.valignb.128B
- __builtin_HEXAGON_V6_valignbi_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.valignbi.128B
- __builtin_HEXAGON_V6_valignbi(v16, v16, 0);
+ __builtin_HEXAGON_V6_valignb_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.valignbi
- __builtin_HEXAGON_V6_valignb(v16, v16, 0);
- // CHECK: @llvm.hexagon.V6.valignb
- __builtin_HEXAGON_V6_vand_128B(v32, v32);
+ __builtin_HEXAGON_V6_valignbi(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.valignbi.128B
+ __builtin_HEXAGON_V6_valignbi_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vand
+ __builtin_HEXAGON_V6_vand(v64, v64);
// CHECK: @llvm.hexagon.V6.vand.128B
- __builtin_HEXAGON_V6_vandqrt_128B(v32, 0);
+ __builtin_HEXAGON_V6_vand_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vandnqrt
+ __builtin_HEXAGON_V6_vandnqrt(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vandnqrt.128B
+ __builtin_HEXAGON_V6_vandnqrt_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vandnqrt.acc
+ __builtin_HEXAGON_V6_vandnqrt_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vandnqrt.acc.128B
+ __builtin_HEXAGON_V6_vandnqrt_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vandqrt
+ __builtin_HEXAGON_V6_vandqrt(v64, 0);
// CHECK: @llvm.hexagon.V6.vandqrt.128B
- __builtin_HEXAGON_V6_vandqrt_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vandqrt.acc.128B
- __builtin_HEXAGON_V6_vandqrt_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vandqrt_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vandqrt.acc
- __builtin_HEXAGON_V6_vandqrt(v16, 0);
- // CHECK: @llvm.hexagon.V6.vandqrt
- __builtin_HEXAGON_V6_vand(v16, v16);
- // CHECK: @llvm.hexagon.V6.vand
- __builtin_HEXAGON_V6_vandvrt_128B(v32, 0);
+ __builtin_HEXAGON_V6_vandqrt_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vandqrt.acc.128B
+ __builtin_HEXAGON_V6_vandqrt_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vandvnqv
+ __builtin_HEXAGON_V6_vandvnqv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vandvnqv.128B
+ __builtin_HEXAGON_V6_vandvnqv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vandvqv
+ __builtin_HEXAGON_V6_vandvqv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vandvqv.128B
+ __builtin_HEXAGON_V6_vandvqv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vandvrt
+ __builtin_HEXAGON_V6_vandvrt(v64, 0);
// CHECK: @llvm.hexagon.V6.vandvrt.128B
- __builtin_HEXAGON_V6_vandvrt_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vandvrt.acc.128B
- __builtin_HEXAGON_V6_vandvrt_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vandvrt_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vandvrt.acc
- __builtin_HEXAGON_V6_vandvrt(v16, 0);
- // CHECK: @llvm.hexagon.V6.vandvrt
- __builtin_HEXAGON_V6_vaslh_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vaslh.128B
- __builtin_HEXAGON_V6_vaslhv_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vaslhv.128B
- __builtin_HEXAGON_V6_vaslh(v16, 0);
+ __builtin_HEXAGON_V6_vandvrt_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vandvrt.acc.128B
+ __builtin_HEXAGON_V6_vandvrt_acc_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vaslh
- __builtin_HEXAGON_V6_vaslhv(v16, v16);
+ __builtin_HEXAGON_V6_vaslh(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vaslh.128B
+ __builtin_HEXAGON_V6_vaslh_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vaslh.acc
+ __builtin_HEXAGON_V6_vaslh_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vaslh.acc.128B
+ __builtin_HEXAGON_V6_vaslh_acc_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vaslhv
- __builtin_HEXAGON_V6_vaslw_128B(v32, 0);
+ __builtin_HEXAGON_V6_vaslhv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaslhv.128B
+ __builtin_HEXAGON_V6_vaslhv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vaslw
+ __builtin_HEXAGON_V6_vaslw(v64, 0);
// CHECK: @llvm.hexagon.V6.vaslw.128B
- __builtin_HEXAGON_V6_vaslw_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vaslw.acc.128B
- __builtin_HEXAGON_V6_vaslw_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vaslw_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vaslw.acc
- __builtin_HEXAGON_V6_vaslwv_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vaslwv.128B
- __builtin_HEXAGON_V6_vaslw(v16, 0);
- // CHECK: @llvm.hexagon.V6.vaslw
- __builtin_HEXAGON_V6_vaslwv(v16, v16);
+ __builtin_HEXAGON_V6_vaslw_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vaslw.acc.128B
+ __builtin_HEXAGON_V6_vaslw_acc_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vaslwv
- __builtin_HEXAGON_V6_vasrh_128B(v32, 0);
+ __builtin_HEXAGON_V6_vaslwv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vaslwv.128B
+ __builtin_HEXAGON_V6_vaslwv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vasrh
+ __builtin_HEXAGON_V6_vasrh(v64, 0);
// CHECK: @llvm.hexagon.V6.vasrh.128B
- __builtin_HEXAGON_V6_vasrhbrndsat_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrhbrndsat.128B
- __builtin_HEXAGON_V6_vasrhbrndsat(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrh_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasrh.acc
+ __builtin_HEXAGON_V6_vasrh_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrh.acc.128B
+ __builtin_HEXAGON_V6_vasrh_acc_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrhbrndsat
- __builtin_HEXAGON_V6_vasrhubrndsat_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrhubrndsat.128B
- __builtin_HEXAGON_V6_vasrhubrndsat(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrhbrndsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrhbrndsat.128B
+ __builtin_HEXAGON_V6_vasrhbrndsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasrhbsat
+ __builtin_HEXAGON_V6_vasrhbsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrhbsat.128B
+ __builtin_HEXAGON_V6_vasrhbsat_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrhubrndsat
- __builtin_HEXAGON_V6_vasrhubsat_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrhubsat.128B
- __builtin_HEXAGON_V6_vasrhubsat(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrhubrndsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrhubrndsat.128B
+ __builtin_HEXAGON_V6_vasrhubrndsat_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrhubsat
- __builtin_HEXAGON_V6_vasrhv_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vasrhv.128B
- __builtin_HEXAGON_V6_vasrh(v16, 0);
- // CHECK: @llvm.hexagon.V6.vasrh
- __builtin_HEXAGON_V6_vasrhv(v16, v16);
+ __builtin_HEXAGON_V6_vasrhubsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrhubsat.128B
+ __builtin_HEXAGON_V6_vasrhubsat_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrhv
- __builtin_HEXAGON_V6_vasrw_128B(v32, 0);
+ __builtin_HEXAGON_V6_vasrhv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vasrhv.128B
+ __builtin_HEXAGON_V6_vasrhv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vasruhubrndsat
+ __builtin_HEXAGON_V6_vasruhubrndsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasruhubrndsat.128B
+ __builtin_HEXAGON_V6_vasruhubrndsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasruhubsat
+ __builtin_HEXAGON_V6_vasruhubsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasruhubsat.128B
+ __builtin_HEXAGON_V6_vasruhubsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasruwuhrndsat
+ __builtin_HEXAGON_V6_vasruwuhrndsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasruwuhrndsat.128B
+ __builtin_HEXAGON_V6_vasruwuhrndsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasruwuhsat
+ __builtin_HEXAGON_V6_vasruwuhsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasruwuhsat.128B
+ __builtin_HEXAGON_V6_vasruwuhsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasrw
+ __builtin_HEXAGON_V6_vasrw(v64, 0);
// CHECK: @llvm.hexagon.V6.vasrw.128B
- __builtin_HEXAGON_V6_vasrw_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrw.acc.128B
- __builtin_HEXAGON_V6_vasrw_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrw_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vasrw.acc
- __builtin_HEXAGON_V6_vasrwh_128B(v32, v32, 0);
+ __builtin_HEXAGON_V6_vasrw_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrw.acc.128B
+ __builtin_HEXAGON_V6_vasrw_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasrwh
+ __builtin_HEXAGON_V6_vasrwh(v64, v64, 0);
// CHECK: @llvm.hexagon.V6.vasrwh.128B
- __builtin_HEXAGON_V6_vasrwhrndsat_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrwhrndsat.128B
- __builtin_HEXAGON_V6_vasrwhrndsat(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrwh_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrwhrndsat
- __builtin_HEXAGON_V6_vasrwhsat_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrwhsat.128B
- __builtin_HEXAGON_V6_vasrwhsat(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrwhrndsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrwhrndsat.128B
+ __builtin_HEXAGON_V6_vasrwhrndsat_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrwhsat
- __builtin_HEXAGON_V6_vasrwh(v16, v16, 0);
- // CHECK: @llvm.hexagon.V6.vasrwh
- __builtin_HEXAGON_V6_vasrwuhsat_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vasrwuhsat.128B
- __builtin_HEXAGON_V6_vasrwuhsat(v16, v16, 0);
+ __builtin_HEXAGON_V6_vasrwhsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrwhsat.128B
+ __builtin_HEXAGON_V6_vasrwhsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vasrwuhrndsat
+ __builtin_HEXAGON_V6_vasrwuhrndsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrwuhrndsat.128B
+ __builtin_HEXAGON_V6_vasrwuhrndsat_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrwuhsat
- __builtin_HEXAGON_V6_vasrwv_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vasrwv.128B
- __builtin_HEXAGON_V6_vasrw(v16, 0);
- // CHECK: @llvm.hexagon.V6.vasrw
- __builtin_HEXAGON_V6_vasrwv(v16, v16);
+ __builtin_HEXAGON_V6_vasrwuhsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vasrwuhsat.128B
+ __builtin_HEXAGON_V6_vasrwuhsat_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vasrwv
- __builtin_HEXAGON_V6_vassign_128B(v32);
+ __builtin_HEXAGON_V6_vasrwv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vasrwv.128B
+ __builtin_HEXAGON_V6_vasrwv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vassign
+ __builtin_HEXAGON_V6_vassign(v64);
// CHECK: @llvm.hexagon.V6.vassign.128B
- __builtin_HEXAGON_V6_vassignp_128B(v64);
- // CHECK: @llvm.hexagon.V6.vassignp.128B
- __builtin_HEXAGON_V6_vassignp(v32);
+ __builtin_HEXAGON_V6_vassign_128B(v128);
// CHECK: @llvm.hexagon.V6.vassignp
- __builtin_HEXAGON_V6_vassign(v16);
- // CHECK: @llvm.hexagon.V6.vassign
- __builtin_HEXAGON_V6_vavgh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vassignp(v128);
+ // CHECK: @llvm.hexagon.V6.vassignp.128B
+ __builtin_HEXAGON_V6_vassignp_128B(v256);
+ // CHECK: @llvm.hexagon.V6.vavgb
+ __builtin_HEXAGON_V6_vavgb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavgb.128B
+ __builtin_HEXAGON_V6_vavgb_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavgbrnd
+ __builtin_HEXAGON_V6_vavgbrnd(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavgbrnd.128B
+ __builtin_HEXAGON_V6_vavgbrnd_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavgh
+ __builtin_HEXAGON_V6_vavgh(v64, v64);
// CHECK: @llvm.hexagon.V6.vavgh.128B
- __builtin_HEXAGON_V6_vavghrnd_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vavghrnd.128B
- __builtin_HEXAGON_V6_vavghrnd(v16, v16);
+ __builtin_HEXAGON_V6_vavgh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vavghrnd
- __builtin_HEXAGON_V6_vavgh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vavgh
- __builtin_HEXAGON_V6_vavgub_128B(v32, v32);
+ __builtin_HEXAGON_V6_vavghrnd(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavghrnd.128B
+ __builtin_HEXAGON_V6_vavghrnd_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavgub
+ __builtin_HEXAGON_V6_vavgub(v64, v64);
// CHECK: @llvm.hexagon.V6.vavgub.128B
- __builtin_HEXAGON_V6_vavgubrnd_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vavgubrnd.128B
- __builtin_HEXAGON_V6_vavgubrnd(v16, v16);
+ __builtin_HEXAGON_V6_vavgub_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vavgubrnd
- __builtin_HEXAGON_V6_vavgub(v16, v16);
- // CHECK: @llvm.hexagon.V6.vavgub
- __builtin_HEXAGON_V6_vavguh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vavgubrnd(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavgubrnd.128B
+ __builtin_HEXAGON_V6_vavgubrnd_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavguh
+ __builtin_HEXAGON_V6_vavguh(v64, v64);
// CHECK: @llvm.hexagon.V6.vavguh.128B
- __builtin_HEXAGON_V6_vavguhrnd_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vavguhrnd.128B
- __builtin_HEXAGON_V6_vavguhrnd(v16, v16);
+ __builtin_HEXAGON_V6_vavguh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vavguhrnd
- __builtin_HEXAGON_V6_vavguh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vavguh
- __builtin_HEXAGON_V6_vavgw_128B(v32, v32);
+ __builtin_HEXAGON_V6_vavguhrnd(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavguhrnd.128B
+ __builtin_HEXAGON_V6_vavguhrnd_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavguw
+ __builtin_HEXAGON_V6_vavguw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavguw.128B
+ __builtin_HEXAGON_V6_vavguw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavguwrnd
+ __builtin_HEXAGON_V6_vavguwrnd(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavguwrnd.128B
+ __builtin_HEXAGON_V6_vavguwrnd_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vavgw
+ __builtin_HEXAGON_V6_vavgw(v64, v64);
// CHECK: @llvm.hexagon.V6.vavgw.128B
- __builtin_HEXAGON_V6_vavgwrnd_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vavgwrnd.128B
- __builtin_HEXAGON_V6_vavgwrnd(v16, v16);
+ __builtin_HEXAGON_V6_vavgw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vavgwrnd
- __builtin_HEXAGON_V6_vavgw(v16, v16);
- // CHECK: @llvm.hexagon.V6.vavgw
- __builtin_HEXAGON_V6_vcl0h_128B(v32);
- // CHECK: @llvm.hexagon.V6.vcl0h.128B
- __builtin_HEXAGON_V6_vcl0h(v16);
+ __builtin_HEXAGON_V6_vavgwrnd(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vavgwrnd.128B
+ __builtin_HEXAGON_V6_vavgwrnd_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vcl0h
- __builtin_HEXAGON_V6_vcl0w_128B(v32);
- // CHECK: @llvm.hexagon.V6.vcl0w.128B
- __builtin_HEXAGON_V6_vcl0w(v16);
+ __builtin_HEXAGON_V6_vcl0h(v64);
+ // CHECK: @llvm.hexagon.V6.vcl0h.128B
+ __builtin_HEXAGON_V6_vcl0h_128B(v128);
// CHECK: @llvm.hexagon.V6.vcl0w
- __builtin_HEXAGON_V6_vcombine_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vcombine.128B
- __builtin_HEXAGON_V6_vcombine(v16, v16);
+ __builtin_HEXAGON_V6_vcl0w(v64);
+ // CHECK: @llvm.hexagon.V6.vcl0w.128B
+ __builtin_HEXAGON_V6_vcl0w_128B(v128);
// CHECK: @llvm.hexagon.V6.vcombine
- __builtin_HEXAGON_V6_vd0_128B();
- // CHECK: @llvm.hexagon.V6.vd0.128B
- __builtin_HEXAGON_V6_vd0();
+ __builtin_HEXAGON_V6_vcombine(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vcombine.128B
+ __builtin_HEXAGON_V6_vcombine_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vd0
- __builtin_HEXAGON_V6_vdealb_128B(v32);
+ __builtin_HEXAGON_V6_vd0();
+ // CHECK: @llvm.hexagon.V6.vd0.128B
+ __builtin_HEXAGON_V6_vd0_128B();
+ // CHECK: @llvm.hexagon.V6.vdd0
+ __builtin_HEXAGON_V6_vdd0();
+ // CHECK: @llvm.hexagon.V6.vdd0.128B
+ __builtin_HEXAGON_V6_vdd0_128B();
+ // CHECK: @llvm.hexagon.V6.vdealb
+ __builtin_HEXAGON_V6_vdealb(v64);
// CHECK: @llvm.hexagon.V6.vdealb.128B
- __builtin_HEXAGON_V6_vdealb4w_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vdealb4w.128B
- __builtin_HEXAGON_V6_vdealb4w(v16, v16);
+ __builtin_HEXAGON_V6_vdealb_128B(v128);
// CHECK: @llvm.hexagon.V6.vdealb4w
- __builtin_HEXAGON_V6_vdealb(v16);
- // CHECK: @llvm.hexagon.V6.vdealb
- __builtin_HEXAGON_V6_vdealh_128B(v32);
- // CHECK: @llvm.hexagon.V6.vdealh.128B
- __builtin_HEXAGON_V6_vdealh(v16);
+ __builtin_HEXAGON_V6_vdealb4w(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vdealb4w.128B
+ __builtin_HEXAGON_V6_vdealb4w_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vdealh
- __builtin_HEXAGON_V6_vdealvdd_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vdealvdd.128B
- __builtin_HEXAGON_V6_vdealvdd(v16, v16, 0);
+ __builtin_HEXAGON_V6_vdealh(v64);
+ // CHECK: @llvm.hexagon.V6.vdealh.128B
+ __builtin_HEXAGON_V6_vdealh_128B(v128);
// CHECK: @llvm.hexagon.V6.vdealvdd
- __builtin_HEXAGON_V6_vdelta_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vdelta.128B
- __builtin_HEXAGON_V6_vdelta(v16, v16);
+ __builtin_HEXAGON_V6_vdealvdd(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vdealvdd.128B
+ __builtin_HEXAGON_V6_vdealvdd_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vdelta
- __builtin_HEXAGON_V6_vdmpybus_128B(v32, 0);
+ __builtin_HEXAGON_V6_vdelta(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vdelta.128B
+ __builtin_HEXAGON_V6_vdelta_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vdmpybus
+ __builtin_HEXAGON_V6_vdmpybus(v64, 0);
// CHECK: @llvm.hexagon.V6.vdmpybus.128B
- __builtin_HEXAGON_V6_vdmpybus_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpybus.acc.128B
- __builtin_HEXAGON_V6_vdmpybus_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vdmpybus_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpybus.acc
- __builtin_HEXAGON_V6_vdmpybus_dv_128B(v64, 0);
+ __builtin_HEXAGON_V6_vdmpybus_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpybus.acc.128B
+ __builtin_HEXAGON_V6_vdmpybus_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpybus.dv
+ __builtin_HEXAGON_V6_vdmpybus_dv(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpybus.dv.128B
- __builtin_HEXAGON_V6_vdmpybus_dv_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vdmpybus.dv.acc.128B
- __builtin_HEXAGON_V6_vdmpybus_dv_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vdmpybus_dv_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vdmpybus.dv.acc
- __builtin_HEXAGON_V6_vdmpybus_dv(v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpybus.dv
- __builtin_HEXAGON_V6_vdmpybus(v16, 0);
- // CHECK: @llvm.hexagon.V6.vdmpybus
- __builtin_HEXAGON_V6_vdmpyhb_128B(v32, 0);
+ __builtin_HEXAGON_V6_vdmpybus_dv_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpybus.dv.acc.128B
+ __builtin_HEXAGON_V6_vdmpybus_dv_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhb
+ __builtin_HEXAGON_V6_vdmpyhb(v64, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhb.128B
- __builtin_HEXAGON_V6_vdmpyhb_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhb.acc.128B
- __builtin_HEXAGON_V6_vdmpyhb_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vdmpyhb_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhb.acc
- __builtin_HEXAGON_V6_vdmpyhb_dv_128B(v64, 0);
+ __builtin_HEXAGON_V6_vdmpyhb_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhb.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhb_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhb.dv
+ __builtin_HEXAGON_V6_vdmpyhb_dv(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhb.dv.128B
- __builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhb.dv.acc.128B
- __builtin_HEXAGON_V6_vdmpyhb_dv_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vdmpyhb_dv_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhb.dv.acc
- __builtin_HEXAGON_V6_vdmpyhb_dv(v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhb.dv
- __builtin_HEXAGON_V6_vdmpyhb(v16, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhb
- __builtin_HEXAGON_V6_vdmpyhisat_128B(v64, 0);
+ __builtin_HEXAGON_V6_vdmpyhb_dv_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhb.dv.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhisat
+ __builtin_HEXAGON_V6_vdmpyhisat(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhisat.128B
- __builtin_HEXAGON_V6_vdmpyhisat_acc_128B(v32, v64, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhisat.acc.128B
- __builtin_HEXAGON_V6_vdmpyhisat_acc(v16, v32, 0);
+ __builtin_HEXAGON_V6_vdmpyhisat_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhisat.acc
- __builtin_HEXAGON_V6_vdmpyhisat(v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhisat
- __builtin_HEXAGON_V6_vdmpyhsat_128B(v32, 0);
+ __builtin_HEXAGON_V6_vdmpyhisat_acc(v64, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhisat.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhisat_acc_128B(v128, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhsat
+ __builtin_HEXAGON_V6_vdmpyhsat(v64, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhsat.128B
- __builtin_HEXAGON_V6_vdmpyhsat_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhsat.acc.128B
- __builtin_HEXAGON_V6_vdmpyhsat_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vdmpyhsat_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhsat.acc
- __builtin_HEXAGON_V6_vdmpyhsat(v16, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhsat
- __builtin_HEXAGON_V6_vdmpyhsuisat_128B(v64, 0);
+ __builtin_HEXAGON_V6_vdmpyhsat_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhsat.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhsat_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhsuisat
+ __builtin_HEXAGON_V6_vdmpyhsuisat(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhsuisat.128B
- __builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B(v32, v64, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhsuisat.acc.128B
- __builtin_HEXAGON_V6_vdmpyhsuisat_acc(v16, v32, 0);
+ __builtin_HEXAGON_V6_vdmpyhsuisat_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhsuisat.acc
- __builtin_HEXAGON_V6_vdmpyhsuisat(v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhsuisat
- __builtin_HEXAGON_V6_vdmpyhsusat_128B(v32, 0);
+ __builtin_HEXAGON_V6_vdmpyhsuisat_acc(v64, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhsuisat.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B(v128, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhsusat
+ __builtin_HEXAGON_V6_vdmpyhsusat(v64, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhsusat.128B
- __builtin_HEXAGON_V6_vdmpyhsusat_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhsusat.acc.128B
- __builtin_HEXAGON_V6_vdmpyhsusat_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vdmpyhsusat_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vdmpyhsusat.acc
- __builtin_HEXAGON_V6_vdmpyhsusat(v16, 0);
- // CHECK: @llvm.hexagon.V6.vdmpyhsusat
- __builtin_HEXAGON_V6_vdmpyhvsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vdmpyhsusat_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhsusat.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhsusat_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdmpyhvsat
+ __builtin_HEXAGON_V6_vdmpyhvsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vdmpyhvsat.128B
- __builtin_HEXAGON_V6_vdmpyhvsat_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vdmpyhvsat.acc.128B
- __builtin_HEXAGON_V6_vdmpyhvsat_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vdmpyhvsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vdmpyhvsat.acc
- __builtin_HEXAGON_V6_vdmpyhvsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vdmpyhvsat
- __builtin_HEXAGON_V6_vdsaduh_128B(v64, 0);
+ __builtin_HEXAGON_V6_vdmpyhvsat_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vdmpyhvsat.acc.128B
+ __builtin_HEXAGON_V6_vdmpyhvsat_acc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vdsaduh
+ __builtin_HEXAGON_V6_vdsaduh(v128, 0);
// CHECK: @llvm.hexagon.V6.vdsaduh.128B
- __builtin_HEXAGON_V6_vdsaduh_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vdsaduh.acc.128B
- __builtin_HEXAGON_V6_vdsaduh_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vdsaduh_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vdsaduh.acc
- __builtin_HEXAGON_V6_vdsaduh(v32, 0);
- // CHECK: @llvm.hexagon.V6.vdsaduh
- __builtin_HEXAGON_V6_veqb_128B(v32, v32);
+ __builtin_HEXAGON_V6_vdsaduh_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vdsaduh.acc.128B
+ __builtin_HEXAGON_V6_vdsaduh_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.veqb
+ __builtin_HEXAGON_V6_veqb(v64, v64);
// CHECK: @llvm.hexagon.V6.veqb.128B
- __builtin_HEXAGON_V6_veqb_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqb.and.128B
- __builtin_HEXAGON_V6_veqb_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.veqb.and
- __builtin_HEXAGON_V6_veqb_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqb.or.128B
- __builtin_HEXAGON_V6_veqb_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqb_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqb.and.128B
+ __builtin_HEXAGON_V6_veqb_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.veqb.or
- __builtin_HEXAGON_V6_veqb(v16, v16);
- // CHECK: @llvm.hexagon.V6.veqb
- __builtin_HEXAGON_V6_veqb_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqb.xor.128B
- __builtin_HEXAGON_V6_veqb_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqb_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqb.or.128B
+ __builtin_HEXAGON_V6_veqb_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.veqb.xor
- __builtin_HEXAGON_V6_veqh_128B(v32, v32);
+ __builtin_HEXAGON_V6_veqb_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqb.xor.128B
+ __builtin_HEXAGON_V6_veqb_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.veqh
+ __builtin_HEXAGON_V6_veqh(v64, v64);
// CHECK: @llvm.hexagon.V6.veqh.128B
- __builtin_HEXAGON_V6_veqh_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqh.and.128B
- __builtin_HEXAGON_V6_veqh_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.veqh.and
- __builtin_HEXAGON_V6_veqh_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqh.or.128B
- __builtin_HEXAGON_V6_veqh_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqh_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqh.and.128B
+ __builtin_HEXAGON_V6_veqh_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.veqh.or
- __builtin_HEXAGON_V6_veqh(v16, v16);
- // CHECK: @llvm.hexagon.V6.veqh
- __builtin_HEXAGON_V6_veqh_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqh.xor.128B
- __builtin_HEXAGON_V6_veqh_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqh_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqh.or.128B
+ __builtin_HEXAGON_V6_veqh_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.veqh.xor
- __builtin_HEXAGON_V6_veqw_128B(v32, v32);
+ __builtin_HEXAGON_V6_veqh_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqh.xor.128B
+ __builtin_HEXAGON_V6_veqh_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.veqw
+ __builtin_HEXAGON_V6_veqw(v64, v64);
// CHECK: @llvm.hexagon.V6.veqw.128B
- __builtin_HEXAGON_V6_veqw_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqw.and.128B
- __builtin_HEXAGON_V6_veqw_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.veqw.and
- __builtin_HEXAGON_V6_veqw_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqw.or.128B
- __builtin_HEXAGON_V6_veqw_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqw_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqw.and.128B
+ __builtin_HEXAGON_V6_veqw_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.veqw.or
- __builtin_HEXAGON_V6_veqw(v16, v16);
- // CHECK: @llvm.hexagon.V6.veqw
- __builtin_HEXAGON_V6_veqw_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.veqw.xor.128B
- __builtin_HEXAGON_V6_veqw_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_veqw_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqw.or.128B
+ __builtin_HEXAGON_V6_veqw_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.veqw.xor
- __builtin_HEXAGON_V6_vgtb_128B(v32, v32);
+ __builtin_HEXAGON_V6_veqw_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.veqw.xor.128B
+ __builtin_HEXAGON_V6_veqw_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vgathermh
+ __builtin_HEXAGON_V6_vgathermh(0, 0, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vgathermh.128B
+ __builtin_HEXAGON_V6_vgathermh_128B(0, 0, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vgathermhq
+ __builtin_HEXAGON_V6_vgathermhq(0, v64, 0, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vgathermhq.128B
+ __builtin_HEXAGON_V6_vgathermhq_128B(0, v128, 0, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vgathermhw
+ __builtin_HEXAGON_V6_vgathermhw(0, 0, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vgathermhw.128B
+ __builtin_HEXAGON_V6_vgathermhw_128B(0, 0, 0, v256);
+ // CHECK: @llvm.hexagon.V6.vgathermhwq
+ __builtin_HEXAGON_V6_vgathermhwq(0, v64, 0, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vgathermhwq.128B
+ __builtin_HEXAGON_V6_vgathermhwq_128B(0, v128, 0, 0, v256);
+ // CHECK: @llvm.hexagon.V6.vgathermw
+ __builtin_HEXAGON_V6_vgathermw(0, 0, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vgathermw.128B
+ __builtin_HEXAGON_V6_vgathermw_128B(0, 0, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vgathermwq
+ __builtin_HEXAGON_V6_vgathermwq(0, v64, 0, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vgathermwq.128B
+ __builtin_HEXAGON_V6_vgathermwq_128B(0, v128, 0, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vgtb
+ __builtin_HEXAGON_V6_vgtb(v64, v64);
// CHECK: @llvm.hexagon.V6.vgtb.128B
- __builtin_HEXAGON_V6_vgtb_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtb.and.128B
- __builtin_HEXAGON_V6_vgtb_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vgtb.and
- __builtin_HEXAGON_V6_vgtb_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtb.or.128B
- __builtin_HEXAGON_V6_vgtb_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtb_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtb.and.128B
+ __builtin_HEXAGON_V6_vgtb_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtb.or
- __builtin_HEXAGON_V6_vgtb(v16, v16);
- // CHECK: @llvm.hexagon.V6.vgtb
- __builtin_HEXAGON_V6_vgtb_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtb.xor.128B
- __builtin_HEXAGON_V6_vgtb_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtb_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtb.or.128B
+ __builtin_HEXAGON_V6_vgtb_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtb.xor
- __builtin_HEXAGON_V6_vgth_128B(v32, v32);
+ __builtin_HEXAGON_V6_vgtb_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtb.xor.128B
+ __builtin_HEXAGON_V6_vgtb_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vgth
+ __builtin_HEXAGON_V6_vgth(v64, v64);
// CHECK: @llvm.hexagon.V6.vgth.128B
- __builtin_HEXAGON_V6_vgth_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgth.and.128B
- __builtin_HEXAGON_V6_vgth_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgth_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vgth.and
- __builtin_HEXAGON_V6_vgth_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgth.or.128B
- __builtin_HEXAGON_V6_vgth_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgth_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgth.and.128B
+ __builtin_HEXAGON_V6_vgth_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgth.or
- __builtin_HEXAGON_V6_vgth(v16, v16);
- // CHECK: @llvm.hexagon.V6.vgth
- __builtin_HEXAGON_V6_vgth_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgth.xor.128B
- __builtin_HEXAGON_V6_vgth_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgth_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgth.or.128B
+ __builtin_HEXAGON_V6_vgth_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgth.xor
- __builtin_HEXAGON_V6_vgtub_128B(v32, v32);
+ __builtin_HEXAGON_V6_vgth_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgth.xor.128B
+ __builtin_HEXAGON_V6_vgth_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vgtub
+ __builtin_HEXAGON_V6_vgtub(v64, v64);
// CHECK: @llvm.hexagon.V6.vgtub.128B
- __builtin_HEXAGON_V6_vgtub_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtub.and.128B
- __builtin_HEXAGON_V6_vgtub_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtub_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vgtub.and
- __builtin_HEXAGON_V6_vgtub_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtub.or.128B
- __builtin_HEXAGON_V6_vgtub_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtub_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtub.and.128B
+ __builtin_HEXAGON_V6_vgtub_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtub.or
- __builtin_HEXAGON_V6_vgtub(v16, v16);
- // CHECK: @llvm.hexagon.V6.vgtub
- __builtin_HEXAGON_V6_vgtub_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtub.xor.128B
- __builtin_HEXAGON_V6_vgtub_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtub_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtub.or.128B
+ __builtin_HEXAGON_V6_vgtub_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtub.xor
- __builtin_HEXAGON_V6_vgtuh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vgtub_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtub.xor.128B
+ __builtin_HEXAGON_V6_vgtub_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vgtuh
+ __builtin_HEXAGON_V6_vgtuh(v64, v64);
// CHECK: @llvm.hexagon.V6.vgtuh.128B
- __builtin_HEXAGON_V6_vgtuh_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtuh.and.128B
- __builtin_HEXAGON_V6_vgtuh_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vgtuh.and
- __builtin_HEXAGON_V6_vgtuh_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtuh.or.128B
- __builtin_HEXAGON_V6_vgtuh_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtuh_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtuh.and.128B
+ __builtin_HEXAGON_V6_vgtuh_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtuh.or
- __builtin_HEXAGON_V6_vgtuh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vgtuh
- __builtin_HEXAGON_V6_vgtuh_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtuh.xor.128B
- __builtin_HEXAGON_V6_vgtuh_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtuh_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtuh.or.128B
+ __builtin_HEXAGON_V6_vgtuh_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtuh.xor
- __builtin_HEXAGON_V6_vgtuw_128B(v32, v32);
+ __builtin_HEXAGON_V6_vgtuh_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtuh.xor.128B
+ __builtin_HEXAGON_V6_vgtuh_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vgtuw
+ __builtin_HEXAGON_V6_vgtuw(v64, v64);
// CHECK: @llvm.hexagon.V6.vgtuw.128B
- __builtin_HEXAGON_V6_vgtuw_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtuw.and.128B
- __builtin_HEXAGON_V6_vgtuw_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtuw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vgtuw.and
- __builtin_HEXAGON_V6_vgtuw_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtuw.or.128B
- __builtin_HEXAGON_V6_vgtuw_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtuw_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtuw.and.128B
+ __builtin_HEXAGON_V6_vgtuw_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtuw.or
- __builtin_HEXAGON_V6_vgtuw(v16, v16);
- // CHECK: @llvm.hexagon.V6.vgtuw
- __builtin_HEXAGON_V6_vgtuw_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtuw.xor.128B
- __builtin_HEXAGON_V6_vgtuw_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtuw_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtuw.or.128B
+ __builtin_HEXAGON_V6_vgtuw_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtuw.xor
- __builtin_HEXAGON_V6_vgtw_128B(v32, v32);
+ __builtin_HEXAGON_V6_vgtuw_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtuw.xor.128B
+ __builtin_HEXAGON_V6_vgtuw_xor_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vgtw
+ __builtin_HEXAGON_V6_vgtw(v64, v64);
// CHECK: @llvm.hexagon.V6.vgtw.128B
- __builtin_HEXAGON_V6_vgtw_and_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtw.and.128B
- __builtin_HEXAGON_V6_vgtw_and(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vgtw.and
- __builtin_HEXAGON_V6_vgtw_or_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtw.or.128B
- __builtin_HEXAGON_V6_vgtw_or(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtw_and(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtw.and.128B
+ __builtin_HEXAGON_V6_vgtw_and_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtw.or
- __builtin_HEXAGON_V6_vgtw(v16, v16);
- // CHECK: @llvm.hexagon.V6.vgtw
- __builtin_HEXAGON_V6_vgtw_xor_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vgtw.xor.128B
- __builtin_HEXAGON_V6_vgtw_xor(v16, v16, v16);
+ __builtin_HEXAGON_V6_vgtw_or(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtw.or.128B
+ __builtin_HEXAGON_V6_vgtw_or_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vgtw.xor
- __builtin_HEXAGON_V6_vinsertwr_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vinsertwr.128B
- __builtin_HEXAGON_V6_vinsertwr(v16, 0);
+ __builtin_HEXAGON_V6_vgtw_xor(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vgtw.xor.128B
+ __builtin_HEXAGON_V6_vgtw_xor_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vinsertwr
- __builtin_HEXAGON_V6_vlalignb_128B(v32, v32, 0);
+ __builtin_HEXAGON_V6_vinsertwr(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vinsertwr.128B
+ __builtin_HEXAGON_V6_vinsertwr_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlalignb
+ __builtin_HEXAGON_V6_vlalignb(v64, v64, 0);
// CHECK: @llvm.hexagon.V6.vlalignb.128B
- __builtin_HEXAGON_V6_vlalignbi_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vlalignbi.128B
- __builtin_HEXAGON_V6_vlalignbi(v16, v16, 0);
+ __builtin_HEXAGON_V6_vlalignb_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vlalignbi
- __builtin_HEXAGON_V6_vlalignb(v16, v16, 0);
- // CHECK: @llvm.hexagon.V6.vlalignb
- __builtin_HEXAGON_V6_vlsrh_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vlsrh.128B
- __builtin_HEXAGON_V6_vlsrhv_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vlsrhv.128B
- __builtin_HEXAGON_V6_vlsrh(v16, 0);
+ __builtin_HEXAGON_V6_vlalignbi(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlalignbi.128B
+ __builtin_HEXAGON_V6_vlalignbi_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlsrb
+ __builtin_HEXAGON_V6_vlsrb(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlsrb.128B
+ __builtin_HEXAGON_V6_vlsrb_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vlsrh
- __builtin_HEXAGON_V6_vlsrhv(v16, v16);
+ __builtin_HEXAGON_V6_vlsrh(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlsrh.128B
+ __builtin_HEXAGON_V6_vlsrh_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vlsrhv
- __builtin_HEXAGON_V6_vlsrw_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vlsrw.128B
- __builtin_HEXAGON_V6_vlsrwv_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vlsrwv.128B
- __builtin_HEXAGON_V6_vlsrw(v16, 0);
+ __builtin_HEXAGON_V6_vlsrhv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vlsrhv.128B
+ __builtin_HEXAGON_V6_vlsrhv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vlsrw
- __builtin_HEXAGON_V6_vlsrwv(v16, v16);
+ __builtin_HEXAGON_V6_vlsrw(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlsrw.128B
+ __builtin_HEXAGON_V6_vlsrw_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vlsrwv
- __builtin_HEXAGON_V6_vlutb_128B(v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.128B
- __builtin_HEXAGON_V6_vlutb_acc_128B(v32, v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.acc.128B
- __builtin_HEXAGON_V6_vlutb_acc(v16, v16, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.acc
- __builtin_HEXAGON_V6_vlutb_dv_128B(v64, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.dv.128B
- __builtin_HEXAGON_V6_vlutb_dv_acc_128B(v64, v64, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.dv.acc.128B
- __builtin_HEXAGON_V6_vlutb_dv_acc(v32, v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.dv.acc
- __builtin_HEXAGON_V6_vlutb_dv(v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb.dv
- __builtin_HEXAGON_V6_vlutb(v16, 0, 0);
- // CHECK: @llvm.hexagon.V6.vlutb
- __builtin_HEXAGON_V6_vlutvvb_128B(v32, v32, 0);
+ __builtin_HEXAGON_V6_vlsrwv(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vlsrwv.128B
+ __builtin_HEXAGON_V6_vlsrwv_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vlut4
+ __builtin_HEXAGON_V6_vlut4(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlut4.128B
+ __builtin_HEXAGON_V6_vlut4_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvb
+ __builtin_HEXAGON_V6_vlutvvb(v64, v64, 0);
// CHECK: @llvm.hexagon.V6.vlutvvb.128B
- __builtin_HEXAGON_V6_vlutvvb_oracc_128B(v32, v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vlutvvb.oracc.128B
- __builtin_HEXAGON_V6_vlutvvb_oracc(v16, v16, v16, 0);
+ __builtin_HEXAGON_V6_vlutvvb_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvb.nm
+ __builtin_HEXAGON_V6_vlutvvb_nm(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvb.nm.128B
+ __builtin_HEXAGON_V6_vlutvvb_nm_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vlutvvb.oracc
- __builtin_HEXAGON_V6_vlutvvb(v16, v16, 0);
- // CHECK: @llvm.hexagon.V6.vlutvvb
- __builtin_HEXAGON_V6_vlutvwh_128B(v32, v32, 0);
+ __builtin_HEXAGON_V6_vlutvvb_oracc(v64, v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvb.oracc.128B
+ __builtin_HEXAGON_V6_vlutvvb_oracc_128B(v128, v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvb.oracci
+ __builtin_HEXAGON_V6_vlutvvb_oracci(v64, v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvb.oracci.128B
+ __builtin_HEXAGON_V6_vlutvvb_oracci_128B(v128, v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvbi
+ __builtin_HEXAGON_V6_vlutvvbi(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvvbi.128B
+ __builtin_HEXAGON_V6_vlutvvbi_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwh
+ __builtin_HEXAGON_V6_vlutvwh(v64, v64, 0);
// CHECK: @llvm.hexagon.V6.vlutvwh.128B
- __builtin_HEXAGON_V6_vlutvwh_oracc_128B(v64, v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vlutvwh.oracc.128B
- __builtin_HEXAGON_V6_vlutvwh_oracc(v32, v16, v16, 0);
+ __builtin_HEXAGON_V6_vlutvwh_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwh.nm
+ __builtin_HEXAGON_V6_vlutvwh_nm(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwh.nm.128B
+ __builtin_HEXAGON_V6_vlutvwh_nm_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vlutvwh.oracc
- __builtin_HEXAGON_V6_vlutvwh(v16, v16, 0);
- // CHECK: @llvm.hexagon.V6.vlutvwh
- __builtin_HEXAGON_V6_vmaxh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmaxh.128B
- __builtin_HEXAGON_V6_vmaxh(v16, v16);
+ __builtin_HEXAGON_V6_vlutvwh_oracc(v128, v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwh.oracc.128B
+ __builtin_HEXAGON_V6_vlutvwh_oracc_128B(v256, v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwh.oracci
+ __builtin_HEXAGON_V6_vlutvwh_oracci(v128, v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwh.oracci.128B
+ __builtin_HEXAGON_V6_vlutvwh_oracci_128B(v256, v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwhi
+ __builtin_HEXAGON_V6_vlutvwhi(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vlutvwhi.128B
+ __builtin_HEXAGON_V6_vlutvwhi_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmaskedstorenq
+ __builtin_HEXAGON_V6_vmaskedstorenq(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vmaskedstorenq.128B
+ __builtin_HEXAGON_V6_vmaskedstorenq_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vmaskedstorentnq
+ __builtin_HEXAGON_V6_vmaskedstorentnq(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vmaskedstorentnq.128B
+ __builtin_HEXAGON_V6_vmaskedstorentnq_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vmaskedstorentq
+ __builtin_HEXAGON_V6_vmaskedstorentq(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vmaskedstorentq.128B
+ __builtin_HEXAGON_V6_vmaskedstorentq_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vmaskedstoreq
+ __builtin_HEXAGON_V6_vmaskedstoreq(v64, 0, v64);
+ // CHECK: @llvm.hexagon.V6.vmaskedstoreq.128B
+ __builtin_HEXAGON_V6_vmaskedstoreq_128B(v128, 0, v128);
+ // CHECK: @llvm.hexagon.V6.vmaxb
+ __builtin_HEXAGON_V6_vmaxb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmaxb.128B
+ __builtin_HEXAGON_V6_vmaxb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmaxh
- __builtin_HEXAGON_V6_vmaxub_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmaxub.128B
- __builtin_HEXAGON_V6_vmaxub(v16, v16);
+ __builtin_HEXAGON_V6_vmaxh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmaxh.128B
+ __builtin_HEXAGON_V6_vmaxh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmaxub
- __builtin_HEXAGON_V6_vmaxuh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmaxuh.128B
- __builtin_HEXAGON_V6_vmaxuh(v16, v16);
+ __builtin_HEXAGON_V6_vmaxub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmaxub.128B
+ __builtin_HEXAGON_V6_vmaxub_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmaxuh
- __builtin_HEXAGON_V6_vmaxw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmaxw.128B
- __builtin_HEXAGON_V6_vmaxw(v16, v16);
+ __builtin_HEXAGON_V6_vmaxuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmaxuh.128B
+ __builtin_HEXAGON_V6_vmaxuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmaxw
- __builtin_HEXAGON_V6_vminh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vminh.128B
- __builtin_HEXAGON_V6_vminh(v16, v16);
+ __builtin_HEXAGON_V6_vmaxw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmaxw.128B
+ __builtin_HEXAGON_V6_vmaxw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vminb
+ __builtin_HEXAGON_V6_vminb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vminb.128B
+ __builtin_HEXAGON_V6_vminb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vminh
- __builtin_HEXAGON_V6_vminub_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vminub.128B
- __builtin_HEXAGON_V6_vminub(v16, v16);
+ __builtin_HEXAGON_V6_vminh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vminh.128B
+ __builtin_HEXAGON_V6_vminh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vminub
- __builtin_HEXAGON_V6_vminuh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vminuh.128B
- __builtin_HEXAGON_V6_vminuh(v16, v16);
+ __builtin_HEXAGON_V6_vminub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vminub.128B
+ __builtin_HEXAGON_V6_vminub_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vminuh
- __builtin_HEXAGON_V6_vminw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vminw.128B
- __builtin_HEXAGON_V6_vminw(v16, v16);
+ __builtin_HEXAGON_V6_vminuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vminuh.128B
+ __builtin_HEXAGON_V6_vminuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vminw
- __builtin_HEXAGON_V6_vmpabus_128B(v64, 0);
+ __builtin_HEXAGON_V6_vminw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vminw.128B
+ __builtin_HEXAGON_V6_vminw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpabus
+ __builtin_HEXAGON_V6_vmpabus(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpabus.128B
- __builtin_HEXAGON_V6_vmpabus_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vmpabus.acc.128B
- __builtin_HEXAGON_V6_vmpabus_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vmpabus_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vmpabus.acc
- __builtin_HEXAGON_V6_vmpabusv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vmpabusv.128B
- __builtin_HEXAGON_V6_vmpabus(v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpabus
- __builtin_HEXAGON_V6_vmpabusv(v32, v32);
+ __builtin_HEXAGON_V6_vmpabus_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpabus.acc.128B
+ __builtin_HEXAGON_V6_vmpabus_acc_128B(v256, v256, 0);
// CHECK: @llvm.hexagon.V6.vmpabusv
- __builtin_HEXAGON_V6_vmpabuuv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vmpabuuv.128B
- __builtin_HEXAGON_V6_vmpabuuv(v32, v32);
+ __builtin_HEXAGON_V6_vmpabusv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpabusv.128B
+ __builtin_HEXAGON_V6_vmpabusv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vmpabuu
+ __builtin_HEXAGON_V6_vmpabuu(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpabuu.128B
+ __builtin_HEXAGON_V6_vmpabuu_128B(v256, 0);
+ // CHECK: @llvm.hexagon.V6.vmpabuu.acc
+ __builtin_HEXAGON_V6_vmpabuu_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpabuu.acc.128B
+ __builtin_HEXAGON_V6_vmpabuu_acc_128B(v256, v256, 0);
// CHECK: @llvm.hexagon.V6.vmpabuuv
- __builtin_HEXAGON_V6_vmpahb_128B(v64, 0);
+ __builtin_HEXAGON_V6_vmpabuuv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpabuuv.128B
+ __builtin_HEXAGON_V6_vmpabuuv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vmpahb
+ __builtin_HEXAGON_V6_vmpahb(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpahb.128B
- __builtin_HEXAGON_V6_vmpahb_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vmpahb.acc.128B
- __builtin_HEXAGON_V6_vmpahb_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vmpahb_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vmpahb.acc
- __builtin_HEXAGON_V6_vmpahb(v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpahb
- __builtin_HEXAGON_V6_vmpybus_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpahb_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpahb.acc.128B
+ __builtin_HEXAGON_V6_vmpahb_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vmpahhsat
+ __builtin_HEXAGON_V6_vmpahhsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpahhsat.128B
+ __builtin_HEXAGON_V6_vmpahhsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpauhb
+ __builtin_HEXAGON_V6_vmpauhb(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpauhb.128B
+ __builtin_HEXAGON_V6_vmpauhb_128B(v256, 0);
+ // CHECK: @llvm.hexagon.V6.vmpauhb.acc
+ __builtin_HEXAGON_V6_vmpauhb_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpauhb.acc.128B
+ __builtin_HEXAGON_V6_vmpauhb_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vmpauhuhsat
+ __builtin_HEXAGON_V6_vmpauhuhsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpauhuhsat.128B
+ __builtin_HEXAGON_V6_vmpauhuhsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpsuhuhsat
+ __builtin_HEXAGON_V6_vmpsuhuhsat(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpsuhuhsat.128B
+ __builtin_HEXAGON_V6_vmpsuhuhsat_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpybus
+ __builtin_HEXAGON_V6_vmpybus(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpybus.128B
- __builtin_HEXAGON_V6_vmpybus_acc_128B(v64, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpybus.acc.128B
- __builtin_HEXAGON_V6_vmpybus_acc(v32, v16, 0);
+ __builtin_HEXAGON_V6_vmpybus_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpybus.acc
- __builtin_HEXAGON_V6_vmpybusv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpybus_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpybus.acc.128B
+ __builtin_HEXAGON_V6_vmpybus_acc_128B(v256, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpybusv
+ __builtin_HEXAGON_V6_vmpybusv(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpybusv.128B
- __builtin_HEXAGON_V6_vmpybus(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpybus
- __builtin_HEXAGON_V6_vmpybusv_acc_128B(v64, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpybusv.acc.128B
- __builtin_HEXAGON_V6_vmpybusv_acc(v32, v16, v16);
+ __builtin_HEXAGON_V6_vmpybusv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpybusv.acc
- __builtin_HEXAGON_V6_vmpybusv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpybusv
- __builtin_HEXAGON_V6_vmpybv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpybusv_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpybusv.acc.128B
+ __builtin_HEXAGON_V6_vmpybusv_acc_128B(v256, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpybv
+ __builtin_HEXAGON_V6_vmpybv(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpybv.128B
- __builtin_HEXAGON_V6_vmpybv_acc_128B(v64, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpybv.acc.128B
- __builtin_HEXAGON_V6_vmpybv_acc(v32, v16, v16);
+ __builtin_HEXAGON_V6_vmpybv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpybv.acc
- __builtin_HEXAGON_V6_vmpybv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpybv
- __builtin_HEXAGON_V6_vmpyewuh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyewuh.128B
- __builtin_HEXAGON_V6_vmpyewuh(v16, v16);
+ __builtin_HEXAGON_V6_vmpybv_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpybv.acc.128B
+ __builtin_HEXAGON_V6_vmpybv_acc_128B(v256, v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyewuh
- __builtin_HEXAGON_V6_vmpyh_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpyewuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyewuh.128B
+ __builtin_HEXAGON_V6_vmpyewuh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyewuh.64
+ __builtin_HEXAGON_V6_vmpyewuh_64(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyewuh.64.128B
+ __builtin_HEXAGON_V6_vmpyewuh_64_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyh
+ __builtin_HEXAGON_V6_vmpyh(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpyh.128B
- __builtin_HEXAGON_V6_vmpyhsat_acc_128B(v64, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyhsat.acc.128B
- __builtin_HEXAGON_V6_vmpyhsat_acc(v32, v16, 0);
+ __builtin_HEXAGON_V6_vmpyh_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyh.acc
+ __builtin_HEXAGON_V6_vmpyh_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyh.acc.128B
+ __builtin_HEXAGON_V6_vmpyh_acc_128B(v256, v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyhsat.acc
- __builtin_HEXAGON_V6_vmpyhsrs_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyhsrs.128B
- __builtin_HEXAGON_V6_vmpyhsrs(v16, 0);
+ __builtin_HEXAGON_V6_vmpyhsat_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyhsat.acc.128B
+ __builtin_HEXAGON_V6_vmpyhsat_acc_128B(v256, v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyhsrs
- __builtin_HEXAGON_V6_vmpyhss_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyhss.128B
- __builtin_HEXAGON_V6_vmpyhss(v16, 0);
+ __builtin_HEXAGON_V6_vmpyhsrs(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyhsrs.128B
+ __builtin_HEXAGON_V6_vmpyhsrs_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyhss
- __builtin_HEXAGON_V6_vmpyhus_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyhss(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyhss.128B
+ __builtin_HEXAGON_V6_vmpyhss_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyhus
+ __builtin_HEXAGON_V6_vmpyhus(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyhus.128B
- __builtin_HEXAGON_V6_vmpyhus_acc_128B(v64, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyhus.acc.128B
- __builtin_HEXAGON_V6_vmpyhus_acc(v32, v16, v16);
+ __builtin_HEXAGON_V6_vmpyhus_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyhus.acc
- __builtin_HEXAGON_V6_vmpyhus(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyhus
- __builtin_HEXAGON_V6_vmpyhv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyhus_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyhus.acc.128B
+ __builtin_HEXAGON_V6_vmpyhus_acc_128B(v256, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyhv
+ __builtin_HEXAGON_V6_vmpyhv(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyhv.128B
- __builtin_HEXAGON_V6_vmpyh(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpyh
- __builtin_HEXAGON_V6_vmpyhv_acc_128B(v64, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyhv.acc.128B
- __builtin_HEXAGON_V6_vmpyhv_acc(v32, v16, v16);
+ __builtin_HEXAGON_V6_vmpyhv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyhv.acc
- __builtin_HEXAGON_V6_vmpyhvsrs_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyhvsrs.128B
- __builtin_HEXAGON_V6_vmpyhvsrs(v16, v16);
+ __builtin_HEXAGON_V6_vmpyhv_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyhv.acc.128B
+ __builtin_HEXAGON_V6_vmpyhv_acc_128B(v256, v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyhvsrs
- __builtin_HEXAGON_V6_vmpyhv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyhv
- __builtin_HEXAGON_V6_vmpyieoh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyieoh.128B
- __builtin_HEXAGON_V6_vmpyieoh(v16, v16);
+ __builtin_HEXAGON_V6_vmpyhvsrs(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyhvsrs.128B
+ __builtin_HEXAGON_V6_vmpyhvsrs_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyieoh
- __builtin_HEXAGON_V6_vmpyiewh_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyiewh.acc.128B
- __builtin_HEXAGON_V6_vmpyiewh_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vmpyieoh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyieoh.128B
+ __builtin_HEXAGON_V6_vmpyieoh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyiewh.acc
- __builtin_HEXAGON_V6_vmpyiewuh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyiewh_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyiewh.acc.128B
+ __builtin_HEXAGON_V6_vmpyiewh_acc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyiewuh
+ __builtin_HEXAGON_V6_vmpyiewuh(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyiewuh.128B
- __builtin_HEXAGON_V6_vmpyiewuh_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyiewuh.acc.128B
- __builtin_HEXAGON_V6_vmpyiewuh_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vmpyiewuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyiewuh.acc
- __builtin_HEXAGON_V6_vmpyiewuh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyiewuh
- __builtin_HEXAGON_V6_vmpyih_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyiewuh_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyiewuh.acc.128B
+ __builtin_HEXAGON_V6_vmpyiewuh_acc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyih
+ __builtin_HEXAGON_V6_vmpyih(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyih.128B
- __builtin_HEXAGON_V6_vmpyih_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyih.acc.128B
- __builtin_HEXAGON_V6_vmpyih_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vmpyih_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyih.acc
- __builtin_HEXAGON_V6_vmpyihb_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpyih_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyih.acc.128B
+ __builtin_HEXAGON_V6_vmpyih_acc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyihb
+ __builtin_HEXAGON_V6_vmpyihb(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpyihb.128B
- __builtin_HEXAGON_V6_vmpyihb_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyihb.acc.128B
- __builtin_HEXAGON_V6_vmpyihb_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vmpyihb_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyihb.acc
- __builtin_HEXAGON_V6_vmpyihb(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpyihb
- __builtin_HEXAGON_V6_vmpyih(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyih
- __builtin_HEXAGON_V6_vmpyiowh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyiowh.128B
- __builtin_HEXAGON_V6_vmpyiowh(v16, v16);
+ __builtin_HEXAGON_V6_vmpyihb_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyihb.acc.128B
+ __builtin_HEXAGON_V6_vmpyihb_acc_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyiowh
- __builtin_HEXAGON_V6_vmpyiwb_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpyiowh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyiowh.128B
+ __builtin_HEXAGON_V6_vmpyiowh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyiwb
+ __builtin_HEXAGON_V6_vmpyiwb(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpyiwb.128B
- __builtin_HEXAGON_V6_vmpyiwb_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyiwb.acc.128B
- __builtin_HEXAGON_V6_vmpyiwb_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vmpyiwb_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyiwb.acc
- __builtin_HEXAGON_V6_vmpyiwb(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpyiwb
- __builtin_HEXAGON_V6_vmpyiwh_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpyiwb_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwb.acc.128B
+ __builtin_HEXAGON_V6_vmpyiwb_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwh
+ __builtin_HEXAGON_V6_vmpyiwh(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpyiwh.128B
- __builtin_HEXAGON_V6_vmpyiwh_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyiwh.acc.128B
- __builtin_HEXAGON_V6_vmpyiwh_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vmpyiwh_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyiwh.acc
- __builtin_HEXAGON_V6_vmpyiwh(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpyiwh
- __builtin_HEXAGON_V6_vmpyowh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyiwh_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwh.acc.128B
+ __builtin_HEXAGON_V6_vmpyiwh_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwub
+ __builtin_HEXAGON_V6_vmpyiwub(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwub.128B
+ __builtin_HEXAGON_V6_vmpyiwub_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwub.acc
+ __builtin_HEXAGON_V6_vmpyiwub_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyiwub.acc.128B
+ __builtin_HEXAGON_V6_vmpyiwub_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyowh
+ __builtin_HEXAGON_V6_vmpyowh(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyowh.128B
- __builtin_HEXAGON_V6_vmpyowh_rnd_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyowh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyowh.64.acc
+ __builtin_HEXAGON_V6_vmpyowh_64_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyowh.64.acc.128B
+ __builtin_HEXAGON_V6_vmpyowh_64_acc_128B(v256, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyowh.rnd
+ __builtin_HEXAGON_V6_vmpyowh_rnd(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyowh.rnd.128B
- __builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyowh.rnd.sacc.128B
- __builtin_HEXAGON_V6_vmpyowh_rnd_sacc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vmpyowh_rnd_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyowh.rnd.sacc
- __builtin_HEXAGON_V6_vmpyowh_rnd(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyowh.rnd
- __builtin_HEXAGON_V6_vmpyowh_sacc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyowh.sacc.128B
- __builtin_HEXAGON_V6_vmpyowh_sacc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vmpyowh_rnd_sacc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyowh.rnd.sacc.128B
+ __builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyowh.sacc
- __builtin_HEXAGON_V6_vmpyowh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyowh
- __builtin_HEXAGON_V6_vmpyub_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpyowh_sacc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyowh.sacc.128B
+ __builtin_HEXAGON_V6_vmpyowh_sacc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyub
+ __builtin_HEXAGON_V6_vmpyub(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpyub.128B
- __builtin_HEXAGON_V6_vmpyub_acc_128B(v64, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyub.acc.128B
- __builtin_HEXAGON_V6_vmpyub_acc(v32, v16, 0);
+ __builtin_HEXAGON_V6_vmpyub_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyub.acc
- __builtin_HEXAGON_V6_vmpyubv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyub_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyub.acc.128B
+ __builtin_HEXAGON_V6_vmpyub_acc_128B(v256, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyubv
+ __builtin_HEXAGON_V6_vmpyubv(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyubv.128B
- __builtin_HEXAGON_V6_vmpyub(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpyub
- __builtin_HEXAGON_V6_vmpyubv_acc_128B(v64, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyubv.acc.128B
- __builtin_HEXAGON_V6_vmpyubv_acc(v32, v16, v16);
+ __builtin_HEXAGON_V6_vmpyubv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyubv.acc
- __builtin_HEXAGON_V6_vmpyubv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyubv
- __builtin_HEXAGON_V6_vmpyuh_128B(v32, 0);
+ __builtin_HEXAGON_V6_vmpyubv_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyubv.acc.128B
+ __builtin_HEXAGON_V6_vmpyubv_acc_128B(v256, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vmpyuh
+ __builtin_HEXAGON_V6_vmpyuh(v64, 0);
// CHECK: @llvm.hexagon.V6.vmpyuh.128B
- __builtin_HEXAGON_V6_vmpyuh_acc_128B(v64, v32, 0);
- // CHECK: @llvm.hexagon.V6.vmpyuh.acc.128B
- __builtin_HEXAGON_V6_vmpyuh_acc(v32, v16, 0);
+ __builtin_HEXAGON_V6_vmpyuh_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vmpyuh.acc
- __builtin_HEXAGON_V6_vmpyuhv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vmpyuh_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyuh.acc.128B
+ __builtin_HEXAGON_V6_vmpyuh_acc_128B(v256, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyuhe
+ __builtin_HEXAGON_V6_vmpyuhe(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyuhe.128B
+ __builtin_HEXAGON_V6_vmpyuhe_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyuhe.acc
+ __builtin_HEXAGON_V6_vmpyuhe_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyuhe.acc.128B
+ __builtin_HEXAGON_V6_vmpyuhe_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vmpyuhv
+ __builtin_HEXAGON_V6_vmpyuhv(v64, v64);
// CHECK: @llvm.hexagon.V6.vmpyuhv.128B
- __builtin_HEXAGON_V6_vmpyuh(v16, 0);
- // CHECK: @llvm.hexagon.V6.vmpyuh
- __builtin_HEXAGON_V6_vmpyuhv_acc_128B(v64, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmpyuhv.acc.128B
- __builtin_HEXAGON_V6_vmpyuhv_acc(v32, v16, v16);
+ __builtin_HEXAGON_V6_vmpyuhv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vmpyuhv.acc
- __builtin_HEXAGON_V6_vmpyuhv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vmpyuhv
- __builtin_HEXAGON_V6_vmux_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vmux.128B
- __builtin_HEXAGON_V6_vmux(v16, v16, v16);
+ __builtin_HEXAGON_V6_vmpyuhv_acc(v128, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmpyuhv.acc.128B
+ __builtin_HEXAGON_V6_vmpyuhv_acc_128B(v256, v128, v128);
// CHECK: @llvm.hexagon.V6.vmux
- __builtin_HEXAGON_V6_vnavgh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vnavgh.128B
- __builtin_HEXAGON_V6_vnavgh(v16, v16);
+ __builtin_HEXAGON_V6_vmux(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vmux.128B
+ __builtin_HEXAGON_V6_vmux_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vnavgb
+ __builtin_HEXAGON_V6_vnavgb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vnavgb.128B
+ __builtin_HEXAGON_V6_vnavgb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vnavgh
- __builtin_HEXAGON_V6_vnavgub_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vnavgub.128B
- __builtin_HEXAGON_V6_vnavgub(v16, v16);
+ __builtin_HEXAGON_V6_vnavgh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vnavgh.128B
+ __builtin_HEXAGON_V6_vnavgh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vnavgub
- __builtin_HEXAGON_V6_vnavgw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vnavgw.128B
- __builtin_HEXAGON_V6_vnavgw(v16, v16);
+ __builtin_HEXAGON_V6_vnavgub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vnavgub.128B
+ __builtin_HEXAGON_V6_vnavgub_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vnavgw
- __builtin_HEXAGON_V6_vnormamth_128B(v32);
- // CHECK: @llvm.hexagon.V6.vnormamth.128B
- __builtin_HEXAGON_V6_vnormamth(v16);
+ __builtin_HEXAGON_V6_vnavgw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vnavgw.128B
+ __builtin_HEXAGON_V6_vnavgw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vnormamth
- __builtin_HEXAGON_V6_vnormamtw_128B(v32);
- // CHECK: @llvm.hexagon.V6.vnormamtw.128B
- __builtin_HEXAGON_V6_vnormamtw(v16);
+ __builtin_HEXAGON_V6_vnormamth(v64);
+ // CHECK: @llvm.hexagon.V6.vnormamth.128B
+ __builtin_HEXAGON_V6_vnormamth_128B(v128);
// CHECK: @llvm.hexagon.V6.vnormamtw
- __builtin_HEXAGON_V6_vnot_128B(v32);
- // CHECK: @llvm.hexagon.V6.vnot.128B
- __builtin_HEXAGON_V6_vnot(v16);
+ __builtin_HEXAGON_V6_vnormamtw(v64);
+ // CHECK: @llvm.hexagon.V6.vnormamtw.128B
+ __builtin_HEXAGON_V6_vnormamtw_128B(v128);
// CHECK: @llvm.hexagon.V6.vnot
- __builtin_HEXAGON_V6_vor_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vor.128B
- __builtin_HEXAGON_V6_vor(v16, v16);
+ __builtin_HEXAGON_V6_vnot(v64);
+ // CHECK: @llvm.hexagon.V6.vnot.128B
+ __builtin_HEXAGON_V6_vnot_128B(v128);
// CHECK: @llvm.hexagon.V6.vor
- __builtin_HEXAGON_V6_vpackeb_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackeb.128B
- __builtin_HEXAGON_V6_vpackeb(v16, v16);
+ __builtin_HEXAGON_V6_vor(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vor.128B
+ __builtin_HEXAGON_V6_vor_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackeb
- __builtin_HEXAGON_V6_vpackeh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackeh.128B
- __builtin_HEXAGON_V6_vpackeh(v16, v16);
+ __builtin_HEXAGON_V6_vpackeb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackeb.128B
+ __builtin_HEXAGON_V6_vpackeb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackeh
- __builtin_HEXAGON_V6_vpackhb_sat_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackhb.sat.128B
- __builtin_HEXAGON_V6_vpackhb_sat(v16, v16);
+ __builtin_HEXAGON_V6_vpackeh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackeh.128B
+ __builtin_HEXAGON_V6_vpackeh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackhb.sat
- __builtin_HEXAGON_V6_vpackhub_sat_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackhub.sat.128B
- __builtin_HEXAGON_V6_vpackhub_sat(v16, v16);
+ __builtin_HEXAGON_V6_vpackhb_sat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackhb.sat.128B
+ __builtin_HEXAGON_V6_vpackhb_sat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackhub.sat
- __builtin_HEXAGON_V6_vpackob_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackob.128B
- __builtin_HEXAGON_V6_vpackob(v16, v16);
+ __builtin_HEXAGON_V6_vpackhub_sat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackhub.sat.128B
+ __builtin_HEXAGON_V6_vpackhub_sat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackob
- __builtin_HEXAGON_V6_vpackoh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackoh.128B
- __builtin_HEXAGON_V6_vpackoh(v16, v16);
+ __builtin_HEXAGON_V6_vpackob(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackob.128B
+ __builtin_HEXAGON_V6_vpackob_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackoh
- __builtin_HEXAGON_V6_vpackwh_sat_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackwh.sat.128B
- __builtin_HEXAGON_V6_vpackwh_sat(v16, v16);
+ __builtin_HEXAGON_V6_vpackoh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackoh.128B
+ __builtin_HEXAGON_V6_vpackoh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackwh.sat
- __builtin_HEXAGON_V6_vpackwuh_sat_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vpackwuh.sat.128B
- __builtin_HEXAGON_V6_vpackwuh_sat(v16, v16);
+ __builtin_HEXAGON_V6_vpackwh_sat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackwh.sat.128B
+ __builtin_HEXAGON_V6_vpackwh_sat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpackwuh.sat
- __builtin_HEXAGON_V6_vpopcounth_128B(v32);
- // CHECK: @llvm.hexagon.V6.vpopcounth.128B
- __builtin_HEXAGON_V6_vpopcounth(v16);
+ __builtin_HEXAGON_V6_vpackwuh_sat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vpackwuh.sat.128B
+ __builtin_HEXAGON_V6_vpackwuh_sat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vpopcounth
- __builtin_HEXAGON_V6_vrdelta_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vrdelta.128B
- __builtin_HEXAGON_V6_vrdelta(v16, v16);
+ __builtin_HEXAGON_V6_vpopcounth(v64);
+ // CHECK: @llvm.hexagon.V6.vpopcounth.128B
+ __builtin_HEXAGON_V6_vpopcounth_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vprefixqb
+ __builtin_HEXAGON_V6_vprefixqb(v64);
+ // CHECK: @llvm.hexagon.V6.vprefixqb.128B
+ __builtin_HEXAGON_V6_vprefixqb_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vprefixqh
+ __builtin_HEXAGON_V6_vprefixqh(v64);
+ // CHECK: @llvm.hexagon.V6.vprefixqh.128B
+ __builtin_HEXAGON_V6_vprefixqh_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vprefixqw
+ __builtin_HEXAGON_V6_vprefixqw(v64);
+ // CHECK: @llvm.hexagon.V6.vprefixqw.128B
+ __builtin_HEXAGON_V6_vprefixqw_128B(v128);
// CHECK: @llvm.hexagon.V6.vrdelta
- __builtin_HEXAGON_V6_vrmpybus_128B(v32, 0);
+ __builtin_HEXAGON_V6_vrdelta(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vrdelta.128B
+ __builtin_HEXAGON_V6_vrdelta_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vrmpybub.rtt
+ __builtin_HEXAGON_V6_vrmpybub_rtt(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybub.rtt.128B
+ __builtin_HEXAGON_V6_vrmpybub_rtt_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybub.rtt.acc
+ __builtin_HEXAGON_V6_vrmpybub_rtt_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybub.rtt.acc.128B
+ __builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B(v256, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybus
+ __builtin_HEXAGON_V6_vrmpybus(v64, 0);
// CHECK: @llvm.hexagon.V6.vrmpybus.128B
- __builtin_HEXAGON_V6_vrmpybus_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vrmpybus.acc.128B
- __builtin_HEXAGON_V6_vrmpybus_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vrmpybus_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vrmpybus.acc
- __builtin_HEXAGON_V6_vrmpybusi_128B(v64, 0, 0);
+ __builtin_HEXAGON_V6_vrmpybus_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybus.acc.128B
+ __builtin_HEXAGON_V6_vrmpybus_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybusi
+ __builtin_HEXAGON_V6_vrmpybusi(v128, 0, 0);
// CHECK: @llvm.hexagon.V6.vrmpybusi.128B
- __builtin_HEXAGON_V6_vrmpybusi_acc_128B(v64, v64, 0, 0);
- // CHECK: @llvm.hexagon.V6.vrmpybusi.acc.128B
- __builtin_HEXAGON_V6_vrmpybusi_acc(v32, v32, 0, 0);
+ __builtin_HEXAGON_V6_vrmpybusi_128B(v256, 0, 0);
// CHECK: @llvm.hexagon.V6.vrmpybusi.acc
- __builtin_HEXAGON_V6_vrmpybusi(v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vrmpybusi
- __builtin_HEXAGON_V6_vrmpybusv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vrmpybusi_acc(v128, v128, 0, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybusi.acc.128B
+ __builtin_HEXAGON_V6_vrmpybusi_acc_128B(v256, v256, 0, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpybusv
+ __builtin_HEXAGON_V6_vrmpybusv(v64, v64);
// CHECK: @llvm.hexagon.V6.vrmpybusv.128B
- __builtin_HEXAGON_V6_vrmpybus(v16, 0);
- // CHECK: @llvm.hexagon.V6.vrmpybus
- __builtin_HEXAGON_V6_vrmpybusv_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vrmpybusv.acc.128B
- __builtin_HEXAGON_V6_vrmpybusv_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vrmpybusv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vrmpybusv.acc
- __builtin_HEXAGON_V6_vrmpybusv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vrmpybusv
- __builtin_HEXAGON_V6_vrmpybv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vrmpybusv_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vrmpybusv.acc.128B
+ __builtin_HEXAGON_V6_vrmpybusv_acc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vrmpybv
+ __builtin_HEXAGON_V6_vrmpybv(v64, v64);
// CHECK: @llvm.hexagon.V6.vrmpybv.128B
- __builtin_HEXAGON_V6_vrmpybv_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vrmpybv.acc.128B
- __builtin_HEXAGON_V6_vrmpybv_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vrmpybv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vrmpybv.acc
- __builtin_HEXAGON_V6_vrmpybv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vrmpybv
- __builtin_HEXAGON_V6_vrmpyub_128B(v32, 0);
+ __builtin_HEXAGON_V6_vrmpybv_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vrmpybv.acc.128B
+ __builtin_HEXAGON_V6_vrmpybv_acc_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vrmpyub
+ __builtin_HEXAGON_V6_vrmpyub(v64, 0);
// CHECK: @llvm.hexagon.V6.vrmpyub.128B
- __builtin_HEXAGON_V6_vrmpyub_acc_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vrmpyub.acc.128B
- __builtin_HEXAGON_V6_vrmpyub_acc(v16, v16, 0);
+ __builtin_HEXAGON_V6_vrmpyub_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vrmpyub.acc
- __builtin_HEXAGON_V6_vrmpyubi_128B(v64, 0, 0);
+ __builtin_HEXAGON_V6_vrmpyub_acc(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyub.acc.128B
+ __builtin_HEXAGON_V6_vrmpyub_acc_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyub.rtt
+ __builtin_HEXAGON_V6_vrmpyub_rtt(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyub.rtt.128B
+ __builtin_HEXAGON_V6_vrmpyub_rtt_128B(v128, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyub.rtt.acc
+ __builtin_HEXAGON_V6_vrmpyub_rtt_acc(v128, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyub.rtt.acc.128B
+ __builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B(v256, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyubi
+ __builtin_HEXAGON_V6_vrmpyubi(v128, 0, 0);
// CHECK: @llvm.hexagon.V6.vrmpyubi.128B
- __builtin_HEXAGON_V6_vrmpyubi_acc_128B(v64, v64, 0, 0);
- // CHECK: @llvm.hexagon.V6.vrmpyubi.acc.128B
- __builtin_HEXAGON_V6_vrmpyubi_acc(v32, v32, 0, 0);
+ __builtin_HEXAGON_V6_vrmpyubi_128B(v256, 0, 0);
// CHECK: @llvm.hexagon.V6.vrmpyubi.acc
- __builtin_HEXAGON_V6_vrmpyubi(v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vrmpyubi
- __builtin_HEXAGON_V6_vrmpyubv_128B(v32, v32);
+ __builtin_HEXAGON_V6_vrmpyubi_acc(v128, v128, 0, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyubi.acc.128B
+ __builtin_HEXAGON_V6_vrmpyubi_acc_128B(v256, v256, 0, 0);
+ // CHECK: @llvm.hexagon.V6.vrmpyubv
+ __builtin_HEXAGON_V6_vrmpyubv(v64, v64);
// CHECK: @llvm.hexagon.V6.vrmpyubv.128B
- __builtin_HEXAGON_V6_vrmpyub(v16, 0);
- // CHECK: @llvm.hexagon.V6.vrmpyub
- __builtin_HEXAGON_V6_vrmpyubv_acc_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vrmpyubv.acc.128B
- __builtin_HEXAGON_V6_vrmpyubv_acc(v16, v16, v16);
+ __builtin_HEXAGON_V6_vrmpyubv_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vrmpyubv.acc
- __builtin_HEXAGON_V6_vrmpyubv(v16, v16);
- // CHECK: @llvm.hexagon.V6.vrmpyubv
- __builtin_HEXAGON_V6_vror_128B(v32, 0);
- // CHECK: @llvm.hexagon.V6.vror.128B
- __builtin_HEXAGON_V6_vror(v16, 0);
+ __builtin_HEXAGON_V6_vrmpyubv_acc(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vrmpyubv.acc.128B
+ __builtin_HEXAGON_V6_vrmpyubv_acc_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vror
- __builtin_HEXAGON_V6_vroundhb_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vroundhb.128B
- __builtin_HEXAGON_V6_vroundhb(v16, v16);
+ __builtin_HEXAGON_V6_vror(v64, 0);
+ // CHECK: @llvm.hexagon.V6.vror.128B
+ __builtin_HEXAGON_V6_vror_128B(v128, 0);
// CHECK: @llvm.hexagon.V6.vroundhb
- __builtin_HEXAGON_V6_vroundhub_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vroundhub.128B
- __builtin_HEXAGON_V6_vroundhub(v16, v16);
+ __builtin_HEXAGON_V6_vroundhb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vroundhb.128B
+ __builtin_HEXAGON_V6_vroundhb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vroundhub
- __builtin_HEXAGON_V6_vroundwh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vroundwh.128B
- __builtin_HEXAGON_V6_vroundwh(v16, v16);
+ __builtin_HEXAGON_V6_vroundhub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vroundhub.128B
+ __builtin_HEXAGON_V6_vroundhub_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vrounduhub
+ __builtin_HEXAGON_V6_vrounduhub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vrounduhub.128B
+ __builtin_HEXAGON_V6_vrounduhub_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vrounduwuh
+ __builtin_HEXAGON_V6_vrounduwuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vrounduwuh.128B
+ __builtin_HEXAGON_V6_vrounduwuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vroundwh
- __builtin_HEXAGON_V6_vroundwuh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vroundwuh.128B
- __builtin_HEXAGON_V6_vroundwuh(v16, v16);
+ __builtin_HEXAGON_V6_vroundwh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vroundwh.128B
+ __builtin_HEXAGON_V6_vroundwh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vroundwuh
- __builtin_HEXAGON_V6_vrsadubi_128B(v64, 0, 0);
+ __builtin_HEXAGON_V6_vroundwuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vroundwuh.128B
+ __builtin_HEXAGON_V6_vroundwuh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vrsadubi
+ __builtin_HEXAGON_V6_vrsadubi(v128, 0, 0);
// CHECK: @llvm.hexagon.V6.vrsadubi.128B
- __builtin_HEXAGON_V6_vrsadubi_acc_128B(v64, v64, 0, 0);
- // CHECK: @llvm.hexagon.V6.vrsadubi.acc.128B
- __builtin_HEXAGON_V6_vrsadubi_acc(v32, v32, 0, 0);
+ __builtin_HEXAGON_V6_vrsadubi_128B(v256, 0, 0);
// CHECK: @llvm.hexagon.V6.vrsadubi.acc
- __builtin_HEXAGON_V6_vrsadubi(v32, 0, 0);
- // CHECK: @llvm.hexagon.V6.vrsadubi
- __builtin_HEXAGON_V6_vsathub_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vsathub.128B
- __builtin_HEXAGON_V6_vsathub(v16, v16);
+ __builtin_HEXAGON_V6_vrsadubi_acc(v128, v128, 0, 0);
+ // CHECK: @llvm.hexagon.V6.vrsadubi.acc.128B
+ __builtin_HEXAGON_V6_vrsadubi_acc_128B(v256, v256, 0, 0);
// CHECK: @llvm.hexagon.V6.vsathub
- __builtin_HEXAGON_V6_vsatwh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vsatwh.128B
- __builtin_HEXAGON_V6_vsatwh(v16, v16);
+ __builtin_HEXAGON_V6_vsathub(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsathub.128B
+ __builtin_HEXAGON_V6_vsathub_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsatuwuh
+ __builtin_HEXAGON_V6_vsatuwuh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsatuwuh.128B
+ __builtin_HEXAGON_V6_vsatuwuh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsatwh
- __builtin_HEXAGON_V6_vsb_128B(v32);
- // CHECK: @llvm.hexagon.V6.vsb.128B
- __builtin_HEXAGON_V6_vsb(v16);
+ __builtin_HEXAGON_V6_vsatwh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsatwh.128B
+ __builtin_HEXAGON_V6_vsatwh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsb
- __builtin_HEXAGON_V6_vsh_128B(v32);
+ __builtin_HEXAGON_V6_vsb(v64);
+ // CHECK: @llvm.hexagon.V6.vsb.128B
+ __builtin_HEXAGON_V6_vsb_128B(v128);
+ // CHECK: @llvm.hexagon.V6.vscattermh
+ __builtin_HEXAGON_V6_vscattermh(0, 0, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermh.128B
+ __builtin_HEXAGON_V6_vscattermh_128B(0, 0, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermh.add
+ __builtin_HEXAGON_V6_vscattermh_add(0, 0, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermh.add.128B
+ __builtin_HEXAGON_V6_vscattermh_add_128B(0, 0, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermhq
+ __builtin_HEXAGON_V6_vscattermhq(v64, 0, 0, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermhq.128B
+ __builtin_HEXAGON_V6_vscattermhq_128B(v128, 0, 0, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermhw
+ __builtin_HEXAGON_V6_vscattermhw(0, 0, v128, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermhw.128B
+ __builtin_HEXAGON_V6_vscattermhw_128B(0, 0, v256, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermhw.add
+ __builtin_HEXAGON_V6_vscattermhw_add(0, 0, v128, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermhw.add.128B
+ __builtin_HEXAGON_V6_vscattermhw_add_128B(0, 0, v256, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermhwq
+ __builtin_HEXAGON_V6_vscattermhwq(v64, 0, 0, v128, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermhwq.128B
+ __builtin_HEXAGON_V6_vscattermhwq_128B(v128, 0, 0, v256, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermw
+ __builtin_HEXAGON_V6_vscattermw(0, 0, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermw.128B
+ __builtin_HEXAGON_V6_vscattermw_128B(0, 0, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermw.add
+ __builtin_HEXAGON_V6_vscattermw_add(0, 0, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermw.add.128B
+ __builtin_HEXAGON_V6_vscattermw_add_128B(0, 0, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vscattermwq
+ __builtin_HEXAGON_V6_vscattermwq(v64, 0, 0, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vscattermwq.128B
+ __builtin_HEXAGON_V6_vscattermwq_128B(v128, 0, 0, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsh
+ __builtin_HEXAGON_V6_vsh(v64);
// CHECK: @llvm.hexagon.V6.vsh.128B
- __builtin_HEXAGON_V6_vshufeh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vshufeh.128B
- __builtin_HEXAGON_V6_vshufeh(v16, v16);
+ __builtin_HEXAGON_V6_vsh_128B(v128);
// CHECK: @llvm.hexagon.V6.vshufeh
- __builtin_HEXAGON_V6_vshuffb_128B(v32);
- // CHECK: @llvm.hexagon.V6.vshuffb.128B
- __builtin_HEXAGON_V6_vshuffb(v16);
+ __builtin_HEXAGON_V6_vshufeh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vshufeh.128B
+ __builtin_HEXAGON_V6_vshufeh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vshuffb
- __builtin_HEXAGON_V6_vshuffeb_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vshuffeb.128B
- __builtin_HEXAGON_V6_vshuffeb(v16, v16);
+ __builtin_HEXAGON_V6_vshuffb(v64);
+ // CHECK: @llvm.hexagon.V6.vshuffb.128B
+ __builtin_HEXAGON_V6_vshuffb_128B(v128);
// CHECK: @llvm.hexagon.V6.vshuffeb
- __builtin_HEXAGON_V6_vshuffh_128B(v32);
- // CHECK: @llvm.hexagon.V6.vshuffh.128B
- __builtin_HEXAGON_V6_vshuffh(v16);
+ __builtin_HEXAGON_V6_vshuffeb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vshuffeb.128B
+ __builtin_HEXAGON_V6_vshuffeb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vshuffh
- __builtin_HEXAGON_V6_vshuffob_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vshuffob.128B
- __builtin_HEXAGON_V6_vshuffob(v16, v16);
+ __builtin_HEXAGON_V6_vshuffh(v64);
+ // CHECK: @llvm.hexagon.V6.vshuffh.128B
+ __builtin_HEXAGON_V6_vshuffh_128B(v128);
// CHECK: @llvm.hexagon.V6.vshuffob
- __builtin_HEXAGON_V6_vshuffvdd_128B(v32, v32, 0);
- // CHECK: @llvm.hexagon.V6.vshuffvdd.128B
- __builtin_HEXAGON_V6_vshuffvdd(v16, v16, 0);
+ __builtin_HEXAGON_V6_vshuffob(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vshuffob.128B
+ __builtin_HEXAGON_V6_vshuffob_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vshuffvdd
- __builtin_HEXAGON_V6_vshufoeb_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vshufoeb.128B
- __builtin_HEXAGON_V6_vshufoeb(v16, v16);
+ __builtin_HEXAGON_V6_vshuffvdd(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vshuffvdd.128B
+ __builtin_HEXAGON_V6_vshuffvdd_128B(v128, v128, 0);
// CHECK: @llvm.hexagon.V6.vshufoeb
- __builtin_HEXAGON_V6_vshufoeh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vshufoeh.128B
- __builtin_HEXAGON_V6_vshufoeh(v16, v16);
+ __builtin_HEXAGON_V6_vshufoeb(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vshufoeb.128B
+ __builtin_HEXAGON_V6_vshufoeb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vshufoeh
- __builtin_HEXAGON_V6_vshufoh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vshufoh.128B
- __builtin_HEXAGON_V6_vshufoh(v16, v16);
+ __builtin_HEXAGON_V6_vshufoeh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vshufoeh.128B
+ __builtin_HEXAGON_V6_vshufoeh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vshufoh
- __builtin_HEXAGON_V6_vsh(v16);
- // CHECK: @llvm.hexagon.V6.vsh
- __builtin_HEXAGON_V6_vsubb_128B(v32, v32);
+ __builtin_HEXAGON_V6_vshufoh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vshufoh.128B
+ __builtin_HEXAGON_V6_vshufoh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubb
+ __builtin_HEXAGON_V6_vsubb(v64, v64);
// CHECK: @llvm.hexagon.V6.vsubb.128B
- __builtin_HEXAGON_V6_vsubb_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsubb.dv.128B
- __builtin_HEXAGON_V6_vsubb_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsubb_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsubb.dv
- __builtin_HEXAGON_V6_vsubbnq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubbnq.128B
- __builtin_HEXAGON_V6_vsubbnq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubb_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubb.dv.128B
+ __builtin_HEXAGON_V6_vsubb_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vsubbnq
- __builtin_HEXAGON_V6_vsubbq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubbq.128B
- __builtin_HEXAGON_V6_vsubbq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubbnq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubbnq.128B
+ __builtin_HEXAGON_V6_vsubbnq_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vsubbq
- __builtin_HEXAGON_V6_vsubb(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsubb
- __builtin_HEXAGON_V6_vsubh_128B(v32, v32);
+ __builtin_HEXAGON_V6_vsubbq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubbq.128B
+ __builtin_HEXAGON_V6_vsubbq_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubbsat
+ __builtin_HEXAGON_V6_vsubbsat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubbsat.128B
+ __builtin_HEXAGON_V6_vsubbsat_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubbsat.dv
+ __builtin_HEXAGON_V6_vsubbsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubbsat.dv.128B
+ __builtin_HEXAGON_V6_vsubbsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vsubcarry
+ __builtin_HEXAGON_V6_vsubcarry(v64, v64, 0);
+ // CHECK: @llvm.hexagon.V6.vsubcarry.128B
+ __builtin_HEXAGON_V6_vsubcarry_128B(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vsubh
+ __builtin_HEXAGON_V6_vsubh(v64, v64);
// CHECK: @llvm.hexagon.V6.vsubh.128B
- __builtin_HEXAGON_V6_vsubh_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsubh.dv.128B
- __builtin_HEXAGON_V6_vsubh_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsubh_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsubh.dv
- __builtin_HEXAGON_V6_vsubhnq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubhnq.128B
- __builtin_HEXAGON_V6_vsubhnq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubh_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubh.dv.128B
+ __builtin_HEXAGON_V6_vsubh_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vsubhnq
- __builtin_HEXAGON_V6_vsubhq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubhq.128B
- __builtin_HEXAGON_V6_vsubhq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubhnq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubhnq.128B
+ __builtin_HEXAGON_V6_vsubhnq_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vsubhq
- __builtin_HEXAGON_V6_vsubhsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vsubhq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubhq.128B
+ __builtin_HEXAGON_V6_vsubhq_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubhsat
+ __builtin_HEXAGON_V6_vsubhsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vsubhsat.128B
- __builtin_HEXAGON_V6_vsubhsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsubhsat.dv.128B
- __builtin_HEXAGON_V6_vsubhsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsubhsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsubhsat.dv
- __builtin_HEXAGON_V6_vsubhsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsubhsat
- __builtin_HEXAGON_V6_vsubh(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsubh
- __builtin_HEXAGON_V6_vsubhw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubhw.128B
- __builtin_HEXAGON_V6_vsubhw(v16, v16);
+ __builtin_HEXAGON_V6_vsubhsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubhsat.dv.128B
+ __builtin_HEXAGON_V6_vsubhsat_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vsubhw
- __builtin_HEXAGON_V6_vsububh_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vsububh.128B
- __builtin_HEXAGON_V6_vsububh(v16, v16);
+ __builtin_HEXAGON_V6_vsubhw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubhw.128B
+ __builtin_HEXAGON_V6_vsubhw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsububh
- __builtin_HEXAGON_V6_vsububsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vsububh(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsububh.128B
+ __builtin_HEXAGON_V6_vsububh_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsububsat
+ __builtin_HEXAGON_V6_vsububsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vsububsat.128B
- __builtin_HEXAGON_V6_vsububsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsububsat.dv.128B
- __builtin_HEXAGON_V6_vsububsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsububsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsububsat.dv
- __builtin_HEXAGON_V6_vsububsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsububsat
- __builtin_HEXAGON_V6_vsubuhsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vsububsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsububsat.dv.128B
+ __builtin_HEXAGON_V6_vsububsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vsubububb.sat
+ __builtin_HEXAGON_V6_vsubububb_sat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubububb.sat.128B
+ __builtin_HEXAGON_V6_vsubububb_sat_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubuhsat
+ __builtin_HEXAGON_V6_vsubuhsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vsubuhsat.128B
- __builtin_HEXAGON_V6_vsubuhsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsubuhsat.dv.128B
- __builtin_HEXAGON_V6_vsubuhsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsubuhsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsubuhsat.dv
- __builtin_HEXAGON_V6_vsubuhsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsubuhsat
- __builtin_HEXAGON_V6_vsubuhw_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubuhw.128B
- __builtin_HEXAGON_V6_vsubuhw(v16, v16);
+ __builtin_HEXAGON_V6_vsubuhsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubuhsat.dv.128B
+ __builtin_HEXAGON_V6_vsubuhsat_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vsubuhw
- __builtin_HEXAGON_V6_vsubw_128B(v32, v32);
+ __builtin_HEXAGON_V6_vsubuhw(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubuhw.128B
+ __builtin_HEXAGON_V6_vsubuhw_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubuwsat
+ __builtin_HEXAGON_V6_vsubuwsat(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubuwsat.128B
+ __builtin_HEXAGON_V6_vsubuwsat_128B(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubuwsat.dv
+ __builtin_HEXAGON_V6_vsubuwsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubuwsat.dv.128B
+ __builtin_HEXAGON_V6_vsubuwsat_dv_128B(v256, v256);
+ // CHECK: @llvm.hexagon.V6.vsubw
+ __builtin_HEXAGON_V6_vsubw(v64, v64);
// CHECK: @llvm.hexagon.V6.vsubw.128B
- __builtin_HEXAGON_V6_vsubw_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsubw.dv.128B
- __builtin_HEXAGON_V6_vsubw_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsubw_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsubw.dv
- __builtin_HEXAGON_V6_vsubwnq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubwnq.128B
- __builtin_HEXAGON_V6_vsubwnq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubw_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubw.dv.128B
+ __builtin_HEXAGON_V6_vsubw_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vsubwnq
- __builtin_HEXAGON_V6_vsubwq_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vsubwq.128B
- __builtin_HEXAGON_V6_vsubwq(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubwnq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubwnq.128B
+ __builtin_HEXAGON_V6_vsubwnq_128B(v128, v128, v128);
// CHECK: @llvm.hexagon.V6.vsubwq
- __builtin_HEXAGON_V6_vsubwsat_128B(v32, v32);
+ __builtin_HEXAGON_V6_vsubwq(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vsubwq.128B
+ __builtin_HEXAGON_V6_vsubwq_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubwsat
+ __builtin_HEXAGON_V6_vsubwsat(v64, v64);
// CHECK: @llvm.hexagon.V6.vsubwsat.128B
- __builtin_HEXAGON_V6_vsubwsat_dv_128B(v64, v64);
- // CHECK: @llvm.hexagon.V6.vsubwsat.dv.128B
- __builtin_HEXAGON_V6_vsubwsat_dv(v32, v32);
+ __builtin_HEXAGON_V6_vsubwsat_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vsubwsat.dv
- __builtin_HEXAGON_V6_vsubwsat(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsubwsat
- __builtin_HEXAGON_V6_vsubw(v16, v16);
- // CHECK: @llvm.hexagon.V6.vsubw
- __builtin_HEXAGON_V6_vswap_128B(v32, v32, v32);
- // CHECK: @llvm.hexagon.V6.vswap.128B
- __builtin_HEXAGON_V6_vswap(v16, v16, v16);
+ __builtin_HEXAGON_V6_vsubwsat_dv(v128, v128);
+ // CHECK: @llvm.hexagon.V6.vsubwsat.dv.128B
+ __builtin_HEXAGON_V6_vsubwsat_dv_128B(v256, v256);
// CHECK: @llvm.hexagon.V6.vswap
- __builtin_HEXAGON_V6_vtmpyb_128B(v64, 0);
+ __builtin_HEXAGON_V6_vswap(v64, v64, v64);
+ // CHECK: @llvm.hexagon.V6.vswap.128B
+ __builtin_HEXAGON_V6_vswap_128B(v128, v128, v128);
+ // CHECK: @llvm.hexagon.V6.vtmpyb
+ __builtin_HEXAGON_V6_vtmpyb(v128, 0);
// CHECK: @llvm.hexagon.V6.vtmpyb.128B
- __builtin_HEXAGON_V6_vtmpyb_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vtmpyb.acc.128B
- __builtin_HEXAGON_V6_vtmpyb_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vtmpyb_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vtmpyb.acc
- __builtin_HEXAGON_V6_vtmpybus_128B(v64, 0);
+ __builtin_HEXAGON_V6_vtmpyb_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vtmpyb.acc.128B
+ __builtin_HEXAGON_V6_vtmpyb_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vtmpybus
+ __builtin_HEXAGON_V6_vtmpybus(v128, 0);
// CHECK: @llvm.hexagon.V6.vtmpybus.128B
- __builtin_HEXAGON_V6_vtmpybus_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vtmpybus.acc.128B
- __builtin_HEXAGON_V6_vtmpybus_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vtmpybus_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vtmpybus.acc
- __builtin_HEXAGON_V6_vtmpybus(v32, 0);
- // CHECK: @llvm.hexagon.V6.vtmpybus
- __builtin_HEXAGON_V6_vtmpyb(v32, 0);
- // CHECK: @llvm.hexagon.V6.vtmpyb
- __builtin_HEXAGON_V6_vtmpyhb_128B(v64, 0);
+ __builtin_HEXAGON_V6_vtmpybus_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vtmpybus.acc.128B
+ __builtin_HEXAGON_V6_vtmpybus_acc_128B(v256, v256, 0);
+ // CHECK: @llvm.hexagon.V6.vtmpyhb
+ __builtin_HEXAGON_V6_vtmpyhb(v128, 0);
// CHECK: @llvm.hexagon.V6.vtmpyhb.128B
- __builtin_HEXAGON_V6_vtmpyhb_acc_128B(v64, v64, 0);
- // CHECK: @llvm.hexagon.V6.vtmpyhb.acc.128B
- __builtin_HEXAGON_V6_vtmpyhb_acc(v32, v32, 0);
+ __builtin_HEXAGON_V6_vtmpyhb_128B(v256, 0);
// CHECK: @llvm.hexagon.V6.vtmpyhb.acc
- __builtin_HEXAGON_V6_vtmpyhb(v32, 0);
- // CHECK: @llvm.hexagon.V6.vtmpyhb
- __builtin_HEXAGON_V6_vunpackb_128B(v32);
- // CHECK: @llvm.hexagon.V6.vunpackb.128B
- __builtin_HEXAGON_V6_vunpackb(v16);
+ __builtin_HEXAGON_V6_vtmpyhb_acc(v128, v128, 0);
+ // CHECK: @llvm.hexagon.V6.vtmpyhb.acc.128B
+ __builtin_HEXAGON_V6_vtmpyhb_acc_128B(v256, v256, 0);
// CHECK: @llvm.hexagon.V6.vunpackb
- __builtin_HEXAGON_V6_vunpackh_128B(v32);
- // CHECK: @llvm.hexagon.V6.vunpackh.128B
- __builtin_HEXAGON_V6_vunpackh(v16);
+ __builtin_HEXAGON_V6_vunpackb(v64);
+ // CHECK: @llvm.hexagon.V6.vunpackb.128B
+ __builtin_HEXAGON_V6_vunpackb_128B(v128);
// CHECK: @llvm.hexagon.V6.vunpackh
- __builtin_HEXAGON_V6_vunpackob_128B(v64, v32);
- // CHECK: @llvm.hexagon.V6.vunpackob.128B
- __builtin_HEXAGON_V6_vunpackob(v32, v16);
+ __builtin_HEXAGON_V6_vunpackh(v64);
+ // CHECK: @llvm.hexagon.V6.vunpackh.128B
+ __builtin_HEXAGON_V6_vunpackh_128B(v128);
// CHECK: @llvm.hexagon.V6.vunpackob
- __builtin_HEXAGON_V6_vunpackoh_128B(v64, v32);
- // CHECK: @llvm.hexagon.V6.vunpackoh.128B
- __builtin_HEXAGON_V6_vunpackoh(v32, v16);
+ __builtin_HEXAGON_V6_vunpackob(v128, v64);
+ // CHECK: @llvm.hexagon.V6.vunpackob.128B
+ __builtin_HEXAGON_V6_vunpackob_128B(v256, v128);
// CHECK: @llvm.hexagon.V6.vunpackoh
- __builtin_HEXAGON_V6_vunpackub_128B(v32);
- // CHECK: @llvm.hexagon.V6.vunpackub.128B
- __builtin_HEXAGON_V6_vunpackub(v16);
+ __builtin_HEXAGON_V6_vunpackoh(v128, v64);
+ // CHECK: @llvm.hexagon.V6.vunpackoh.128B
+ __builtin_HEXAGON_V6_vunpackoh_128B(v256, v128);
// CHECK: @llvm.hexagon.V6.vunpackub
- __builtin_HEXAGON_V6_vunpackuh_128B(v32);
- // CHECK: @llvm.hexagon.V6.vunpackuh.128B
- __builtin_HEXAGON_V6_vunpackuh(v16);
+ __builtin_HEXAGON_V6_vunpackub(v64);
+ // CHECK: @llvm.hexagon.V6.vunpackub.128B
+ __builtin_HEXAGON_V6_vunpackub_128B(v128);
// CHECK: @llvm.hexagon.V6.vunpackuh
- __builtin_HEXAGON_V6_vxor_128B(v32, v32);
- // CHECK: @llvm.hexagon.V6.vxor.128B
- __builtin_HEXAGON_V6_vxor(v16, v16);
+ __builtin_HEXAGON_V6_vunpackuh(v64);
+ // CHECK: @llvm.hexagon.V6.vunpackuh.128B
+ __builtin_HEXAGON_V6_vunpackuh_128B(v128);
// CHECK: @llvm.hexagon.V6.vxor
- __builtin_HEXAGON_V6_vzb_128B(v32);
- // CHECK: @llvm.hexagon.V6.vzb.128B
- __builtin_HEXAGON_V6_vzb(v16);
+ __builtin_HEXAGON_V6_vxor(v64, v64);
+ // CHECK: @llvm.hexagon.V6.vxor.128B
+ __builtin_HEXAGON_V6_vxor_128B(v128, v128);
// CHECK: @llvm.hexagon.V6.vzb
- __builtin_HEXAGON_V6_vzh_128B(v32);
- // CHECK: @llvm.hexagon.V6.vzh.128B
- __builtin_HEXAGON_V6_vzh(v16);
+ __builtin_HEXAGON_V6_vzb(v64);
+ // CHECK: @llvm.hexagon.V6.vzb.128B
+ __builtin_HEXAGON_V6_vzb_128B(v128);
// CHECK: @llvm.hexagon.V6.vzh
- __builtin_HEXAGON_Y2_dccleana(0);
+ __builtin_HEXAGON_V6_vzh(v64);
+ // CHECK: @llvm.hexagon.V6.vzh.128B
+ __builtin_HEXAGON_V6_vzh_128B(v128);
// CHECK: @llvm.hexagon.Y2.dccleana
- __builtin_HEXAGON_Y2_dccleaninva(0);
+ __builtin_HEXAGON_Y2_dccleana(0);
// CHECK: @llvm.hexagon.Y2.dccleaninva
- __builtin_HEXAGON_Y2_dcinva(0);
+ __builtin_HEXAGON_Y2_dccleaninva(0);
// CHECK: @llvm.hexagon.Y2.dcinva
- __builtin_HEXAGON_Y2_dczeroa(0);
+ __builtin_HEXAGON_Y2_dcinva(0);
// CHECK: @llvm.hexagon.Y2.dczeroa
- __builtin_HEXAGON_Y4_l2fetch(0, 0);
+ __builtin_HEXAGON_Y2_dczeroa(0);
// CHECK: @llvm.hexagon.Y4.l2fetch
- __builtin_HEXAGON_Y5_l2fetch(0, 0);
+ __builtin_HEXAGON_Y4_l2fetch(0, 0);
// CHECK: @llvm.hexagon.Y5.l2fetch
+ __builtin_HEXAGON_Y5_l2fetch(0, 0);
+ // CHECK: @llvm.hexagon.brev.ldb
+ __builtin_brev_ldb(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.ldd
+ __builtin_brev_ldd(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.ldh
+ __builtin_brev_ldh(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.ldub
+ __builtin_brev_ldub(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.lduh
+ __builtin_brev_lduh(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.ldw
+ __builtin_brev_ldw(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.stb
+ __builtin_brev_stb(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.std
+ __builtin_brev_std(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.sth
+ __builtin_brev_sth(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.sthhi
+ __builtin_brev_sthhi(0, 0, 0);
+ // CHECK: @llvm.hexagon.brev.stw
+ __builtin_brev_stw(0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.ldb
+ __builtin_circ_ldb(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.ldd
+ __builtin_circ_ldd(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.ldh
+ __builtin_circ_ldh(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.ldub
+ __builtin_circ_ldub(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.lduh
+ __builtin_circ_lduh(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.ldw
+ __builtin_circ_ldw(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.stb
+ __builtin_circ_stb(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.std
+ __builtin_circ_std(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.sth
+ __builtin_circ_sth(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.sthhi
+ __builtin_circ_sthhi(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.circ.stw
+ __builtin_circ_stw(0, 0, 0, 0);
+ // CHECK: @llvm.hexagon.prefetch
+ __builtin_HEXAGON_prefetch(0);
}
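
Every HVX intrinsic above comes in a base (64-byte vector) form and a `.128B` (128-byte vector) form, which is why the rewrite mechanically doubles each operand width: v64 arguments become v128, and register-pair v128 arguments become v256. A minimal sketch of the vector typedefs a test like this presumably declares — the names match the calls above, but the element type chosen here is an assumption:

    // Hypothetical typedefs: vN is an N-byte generic vector, standing in for
    // a single HVX register (v64, or v128 in 128B mode) or a register pair
    // (v128 in 64B mode, v256 in 128B mode).
    typedef long long v64  __attribute__((__vector_size__(64)));
    typedef long long v128 __attribute__((__vector_size__(128)));
    typedef long long v256 __attribute__((__vector_size__(256)));
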
diff --git a/test/CodeGen/builtins-nvptx-ptx50.cu b/test/CodeGen/builtins-nvptx-ptx50.cu
new file mode 100644
index 000000000000..e85be442eb47
--- /dev/null
+++ b/test/CodeGen/builtins-nvptx-ptx50.cu
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_60 \
+// RUN: -fcuda-is-device -S -emit-llvm -o - -x cuda %s \
+// RUN: | FileCheck -check-prefix=CHECK %s
+//
+// RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_50 \
+// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
+
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+// We have to keep all builtins that depend on a particular target feature in
+// the same function, because codegen stops after the very first function that
+// encounters an error, so -verify will not be able to find errors in
+// subsequent functions.
+
+// CHECK-LABEL: test_fn
+__device__ void test_fn(double d, double* double_ptr) {
+ // CHECK: call double @llvm.nvvm.atomic.load.add.f64.p0f64
+ // expected-error@+1 {{'__nvvm_atom_add_gen_d' needs target feature satom}}
+ __nvvm_atom_add_gen_d(double_ptr, d);
+}
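
The comment in this file describes a real ordering hazard in -verify tests; a short sketch of the failure mode it avoids, reusing the same builtin (illustrative only, not part of the patch):

    // If the erroring calls were split across two functions, codegen would
    // stop after the first function's error, the second function would never
    // be code-generated, and its expected-error directive would go unmatched:
    __device__ void first(double *p, double d) {
      // expected-error@+1 {{'__nvvm_atom_add_gen_d' needs target feature satom}}
      __nvvm_atom_add_gen_d(p, d);   // diagnostic emitted; codegen stops here
    }
    __device__ void second(double *p, double d) {
      // expected-error@+1 {{'__nvvm_atom_add_gen_d' needs target feature satom}}
      __nvvm_atom_add_gen_d(p, d);   // never reached, so -verify fails
    }
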
diff --git a/test/CodeGen/builtins-nvptx-ptx60.cu b/test/CodeGen/builtins-nvptx-ptx60.cu
new file mode 100644
index 000000000000..11db9ac46ea5
--- /dev/null
+++ b/test/CodeGen/builtins-nvptx-ptx60.cu
@@ -0,0 +1,97 @@
+// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_60 \
+// RUN: -fcuda-is-device -target-feature +ptx60 \
+// RUN: -S -emit-llvm -o - -x cuda %s \
+// RUN: | FileCheck -check-prefix=CHECK %s
+// RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_60 \
+// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
+
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+typedef unsigned long long uint64_t;
+
+// We have to keep all builtins that depend on a particular target feature in
+// the same function, because codegen stops after the very first function that
+// encounters an error, so -verify will not be able to find errors in
+// subsequent functions.
+
+// CHECK-LABEL: nvvm_sync
+__device__ void nvvm_sync(unsigned mask, int i, float f, int a, int b,
+ bool pred, uint64_t i64) {
+
+ // CHECK: call void @llvm.nvvm.bar.warp.sync(i32
+ // expected-error@+1 {{'__nvvm_bar_warp_sync' needs target feature ptx60}}
+ __nvvm_bar_warp_sync(mask);
+ // CHECK: call void @llvm.nvvm.barrier.sync(i32
+ // expected-error@+1 {{'__nvvm_barrier_sync' needs target feature ptx60}}
+ __nvvm_barrier_sync(mask);
+ // CHECK: call void @llvm.nvvm.barrier.sync.cnt(i32
+ // expected-error@+1 {{'__nvvm_barrier_sync_cnt' needs target feature ptx60}}
+ __nvvm_barrier_sync_cnt(mask, i);
+
+ //
+ // SHFL.SYNC
+ //
+ // CHECK: call i32 @llvm.nvvm.shfl.sync.down.i32(i32 {{%[0-9]+}}, i32
+ // expected-error@+1 {{'__nvvm_shfl_sync_down_i32' needs target feature ptx60}}
+ __nvvm_shfl_sync_down_i32(mask, i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.sync.down.f32(i32 {{%[0-9]+}}, float
+ // expected-error@+1 {{'__nvvm_shfl_sync_down_f32' needs target feature ptx60}}
+ __nvvm_shfl_sync_down_f32(mask, f, a, b);
+ // CHECK: call i32 @llvm.nvvm.shfl.sync.up.i32(i32 {{%[0-9]+}}, i32
+ // expected-error@+1 {{'__nvvm_shfl_sync_up_i32' needs target feature ptx60}}
+ __nvvm_shfl_sync_up_i32(mask, i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.sync.up.f32(i32 {{%[0-9]+}}, float
+ // expected-error@+1 {{'__nvvm_shfl_sync_up_f32' needs target feature ptx60}}
+ __nvvm_shfl_sync_up_f32(mask, f, a, b);
+ // CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 {{%[0-9]+}}, i32
+ // expected-error@+1 {{'__nvvm_shfl_sync_bfly_i32' needs target feature ptx60}}
+ __nvvm_shfl_sync_bfly_i32(mask, i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 {{%[0-9]+}}, float
+ // expected-error@+1 {{'__nvvm_shfl_sync_bfly_f32' needs target feature ptx60}}
+ __nvvm_shfl_sync_bfly_f32(mask, f, a, b);
+ // CHECK: call i32 @llvm.nvvm.shfl.sync.idx.i32(i32 {{%[0-9]+}}, i32
+ // expected-error@+1 {{'__nvvm_shfl_sync_idx_i32' needs target feature ptx60}}
+ __nvvm_shfl_sync_idx_i32(mask, i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.sync.idx.f32(i32 {{%[0-9]+}}, float
+ // expected-error@+1 {{'__nvvm_shfl_sync_idx_f32' needs target feature ptx60}}
+ __nvvm_shfl_sync_idx_f32(mask, f, a, b);
+
+ //
+ // VOTE.SYNC
+ //
+
+ // CHECK: call i1 @llvm.nvvm.vote.all.sync(i32
+ // expected-error@+1 {{'__nvvm_vote_all_sync' needs target feature ptx60}}
+ __nvvm_vote_all_sync(mask, pred);
+ // CHECK: call i1 @llvm.nvvm.vote.any.sync(i32
+ // expected-error@+1 {{'__nvvm_vote_any_sync' needs target feature ptx60}}
+ __nvvm_vote_any_sync(mask, pred);
+ // CHECK: call i1 @llvm.nvvm.vote.uni.sync(i32
+ // expected-error@+1 {{'__nvvm_vote_uni_sync' needs target feature ptx60}}
+ __nvvm_vote_uni_sync(mask, pred);
+ // CHECK: call i32 @llvm.nvvm.vote.ballot.sync(i32
+ // expected-error@+1 {{'__nvvm_vote_ballot_sync' needs target feature ptx60}}
+ __nvvm_vote_ballot_sync(mask, pred);
+
+ //
+ // MATCH.{ALL,ANY}.SYNC
+ //
+
+ // CHECK: call i32 @llvm.nvvm.match.any.sync.i32(i32
+ // expected-error@+1 {{'__nvvm_match_any_sync_i32' needs target feature ptx60}}
+ __nvvm_match_any_sync_i32(mask, i);
+ // CHECK: call i64 @llvm.nvvm.match.any.sync.i64(i32
+ // expected-error@+1 {{'__nvvm_match_any_sync_i64' needs target feature ptx60}}
+ __nvvm_match_any_sync_i64(mask, i64);
+ // CHECK: call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32
+ // expected-error@+1 {{'__nvvm_match_all_sync_i32p' needs target feature ptx60}}
+ __nvvm_match_all_sync_i32p(mask, i, &i);
+ // CHECK: call { i64, i1 } @llvm.nvvm.match.all.sync.i64p(i32
+ // expected-error@+1 {{'__nvvm_match_all_sync_i64p' needs target feature ptx60}}
+ __nvvm_match_all_sync_i64p(mask, i64, &i);
+
+ // CHECK: ret void
+}
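
For context, the shfl.sync builtins exercised above are the warp-shuffle primitives that composable warp reductions are built from. A minimal sketch, assuming a full 32-lane mask and warp-uniform control flow (warp_reduce_sum is illustrative, not part of the test):

    #define FULL_MASK 0xffffffffu

    // Tree reduction across one warp: each step adds in the value held by
    // the lane `offset` positions down; after five steps lane 0 holds the
    // warp-wide sum.
    __device__ int warp_reduce_sum(int val) {
      for (int offset = 16; offset > 0; offset /= 2)
        val += __nvvm_shfl_sync_down_i32(FULL_MASK, val, offset, 0x1f);
      return val;
    }
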
diff --git a/test/CodeGen/builtins-nvptx-sm_70.cu b/test/CodeGen/builtins-nvptx-sm_70.cu
new file mode 100644
index 000000000000..09e5b6ba7a7d
--- /dev/null
+++ b/test/CodeGen/builtins-nvptx-sm_70.cu
@@ -0,0 +1,166 @@
+// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_70 \
+// RUN: -fcuda-is-device -target-feature +ptx60 \
+// RUN: -S -emit-llvm -o - -x cuda %s \
+// RUN: | FileCheck -check-prefix=CHECK %s
+// RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_60 \
+// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
+
+#if !defined(CUDA_VERSION)
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+typedef unsigned long long uint64_t;
+#endif
+// We have to keep all builtins that depend on a particular target feature in
+// the same function, because codegen stops after the very first function that
+// encounters an error, so -verify will not be able to find errors in
+// subsequent functions.
+
+// CHECK-LABEL: nvvm_wmma
+__device__ void nvvm_wmma(int *src, int *dst,
+ float *fsrc, float *fdst,
+ int ldm) {
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.a.sync.row.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_a' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_a(dst, src, ldm, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.a.sync.col.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_a' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_a(dst, src+1, ldm, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.b.sync.row.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_b' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_b(dst, src, ldm, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.b.sync.col.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_b' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_b(dst, src+2, ldm, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.row.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.col.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.row.m16n16k16.stride.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.col.m16n16k16.stride.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature ptx60}}
+ __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.row.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature ptx60}}
+ __hmma_m16n16k16_st_c_f16(dst, src, ldm, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.col.m16n16k16.stride.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature ptx60}}
+ __hmma_m16n16k16_st_c_f16(dst, src, ldm, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.row.m16n16k16.stride.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature ptx60}}
+ __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.col.m16n16k16.stride.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature ptx60}}
+ __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f16
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f16.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1);
+
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f32
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0);
+ // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f32.satfinite
+ // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}}
+ __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1);
+}
diff --git a/test/CodeGen/builtins-nvptx.c b/test/CodeGen/builtins-nvptx.c
index b0d646a51fec..89a982377ad4 100644
--- a/test/CodeGen/builtins-nvptx.c
+++ b/test/CodeGen/builtins-nvptx.c
@@ -636,3 +636,36 @@ __device__ void nvvm_ldg(const void *p) {
typedef double double2 __attribute__((ext_vector_type(2)));
__nvvm_ldg_d2((const double2 *)p);
}
+
+// CHECK-LABEL: nvvm_shfl
+__device__ void nvvm_shfl(int i, float f, int a, int b) {
+ // CHECK: call i32 @llvm.nvvm.shfl.down.i32(i32
+ __nvvm_shfl_down_i32(i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.down.f32(float
+ __nvvm_shfl_down_f32(f, a, b);
+ // CHECK: call i32 @llvm.nvvm.shfl.up.i32(i32
+ __nvvm_shfl_up_i32(i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.up.f32(float
+ __nvvm_shfl_up_f32(f, a, b);
+ // CHECK: call i32 @llvm.nvvm.shfl.bfly.i32(i32
+ __nvvm_shfl_bfly_i32(i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.bfly.f32(float
+ __nvvm_shfl_bfly_f32(f, a, b);
+ // CHECK: call i32 @llvm.nvvm.shfl.idx.i32(i32
+ __nvvm_shfl_idx_i32(i, a, b);
+ // CHECK: call float @llvm.nvvm.shfl.idx.f32(float
+ __nvvm_shfl_idx_f32(f, a, b);
+ // CHECK: ret void
+}
+
+__device__ void nvvm_vote(int pred) {
+ // CHECK: call i1 @llvm.nvvm.vote.all(i1
+ __nvvm_vote_all(pred);
+ // CHECK: call i1 @llvm.nvvm.vote.any(i1
+ __nvvm_vote_any(pred);
+ // CHECK: call i1 @llvm.nvvm.vote.uni(i1
+ __nvvm_vote_uni(pred);
+ // CHECK: call i32 @llvm.nvvm.vote.ballot(i1
+ __nvvm_vote_ballot(pred);
+ // CHECK: ret void
+}
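
The vote builtins added here map one-to-one onto the PTX vote instruction. A small sketch of how the ballot form is typically consumed (illustrative, not from the test):

    // Count how many lanes in the warp have a true predicate: ballot packs
    // one bit per lane into a 32-bit mask, and popcount tallies the set bits.
    __device__ int count_true(int pred) {
      unsigned mask = __nvvm_vote_ballot(pred);
      return __builtin_popcount(mask);
    }
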
diff --git a/test/CodeGen/builtins-overflow.c b/test/CodeGen/builtins-overflow.c
index c8d828dd33e7..7a30cfbd46ee 100644
--- a/test/CodeGen/builtins-overflow.c
+++ b/test/CodeGen/builtins-overflow.c
@@ -338,3 +338,122 @@ long long test_smulll_overflow(long long x, long long y) {
return LongLongErrorCode;
return result;
}
+
+int test_mixed_sign_mull_overflow(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mull_overflow
+// CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
+// CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
+// CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
+// CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
+// CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
+// CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
+// CHECK-NEXT: [[IsNegZext:%.*]] = zext i1 [[IsNeg]] to i32
+// CHECK-NEXT: [[MaxResult:%.*]] = add i32 2147483647, [[IsNegZext]]
+// CHECK-NEXT: [[SignedOFlow:%.*]] = icmp ugt i32 [[UnsignedResult]], [[MaxResult]]
+// CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[SignedOFlow]]
+// CHECK-NEXT: [[NegativeResult:%.*]] = sub i32 0, [[UnsignedResult]]
+// CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegativeResult]], i32 [[UnsignedResult]]
+// CHECK-NEXT: store i32 [[Result]], i32* %{{.*}}, align 4
+// CHECK: br i1 [[OFlow]]
+
+ int result;
+ if (__builtin_mul_overflow(x, y, &result))
+ return LongErrorCode;
+ return result;
+}
+
+int test_mixed_sign_mull_overflow_unsigned(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mull_overflow_unsigned
+// CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
+// CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
+// CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
+// CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
+// CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
+// CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
+// CHECK-NEXT: [[NotNull:%.*]] = icmp ne i32 [[UnsignedResult]], 0
+// CHECK-NEXT: [[Underflow:%.*]] = and i1 [[IsNeg]], [[NotNull]]
+// CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[Underflow]]
+// CHECK-NEXT: store i32 [[UnsignedResult]], i32* %{{.*}}, align 4
+// CHECK: br i1 [[OFlow]]
+
+ unsigned result;
+ if (__builtin_mul_overflow(x, y, &result))
+ return LongErrorCode;
+ return result;
+}
+
+int test_mixed_sign_mull_overflow_swapped(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mull_overflow_swapped
+// CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
+// CHECK: add i32 2147483647
+ int result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: add i64 92233720368547
+ long long result;
+ if (__builtin_mul_overflow(x, y, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow_swapped(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow_swapped
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: add i64 92233720368547
+ long long result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow_trunc_signed(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow_trunc_signed
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: add i64 2147483647
+// CHECK: trunc
+// CHECK: store
+ int result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow_trunc_unsigned(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow_trunc_unsigned
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: [[NON_ZERO:%.*]] = icmp ne i64 [[UNSIGNED_RESULT:%.*]], 0
+// CHECK-NEXT: [[UNDERFLOW:%.*]] = and i1 {{.*}}, [[NON_ZERO]]
+// CHECK-NEXT: [[OVERFLOW_PRE_TRUNC:%.*]] = or i1 {{.*}}, [[UNDERFLOW]]
+// CHECK-NEXT: [[TRUNC_OVERFLOW:%.*]] = icmp ugt i64 [[UNSIGNED_RESULT]], 4294967295
+// CHECK-NEXT: [[OVERFLOW:%.*]] = or i1 [[OVERFLOW_PRE_TRUNC]], [[TRUNC_OVERFLOW]]
+// CHECK-NEXT: trunc i64 [[UNSIGNED_RESULT]] to i32
+// CHECK-NEXT: store
+ unsigned result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mul_overflow_extend_signed(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mul_overflow_extend_signed
+// CHECK: call { i64, i1 } @llvm.smul.with.overflow.i64
+ long long result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mul_overflow_extend_unsigned(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mul_overflow_extend_unsigned
+// CHECK: call { i65, i1 } @llvm.smul.with.overflow.i65
+ unsigned long long result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
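
These mixed-sign cases pin down the semantics of __builtin_mul_overflow when the operand signs differ: the builtin computes the mathematically exact product and reports overflow whenever that product is not representable in the result type. A small usage sketch of those semantics (hypothetical values, not from the test):

    #include <stdio.h>

    int main(void) {
      int s = -3;
      unsigned u = 4;
      int sr;
      unsigned ur;
      // The exact product is -12, which fits in int: no overflow.
      if (!__builtin_mul_overflow(s, u, &sr))
        printf("%d\n", sr);          // prints -12
      // The same product cannot be represented as unsigned, so the
      // negative-into-unsigned case reports overflow instead of wrapping.
      if (__builtin_mul_overflow(s, u, &ur))
        puts("overflow");            // taken
      return 0;
    }
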
diff --git a/test/CodeGen/builtins-x86.c b/test/CodeGen/builtins-x86.c
index 0086f7079dd9..fc3cc448cf3c 100644
--- a/test/CodeGen/builtins-x86.c
+++ b/test/CodeGen/builtins-x86.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -DUSE_64 -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -target-feature +clzero -emit-llvm -o %t %s
-// RUN: %clang_cc1 -DUSE_ALL -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -target-feature +clzero -fsyntax-only -o %t %s
+// RUN: %clang_cc1 -DUSE_64 -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -target-feature +clzero -target-feature +ibt -target-feature +shstk -emit-llvm -o %t %s
+// RUN: %clang_cc1 -DUSE_ALL -triple x86_64-unknown-unknown -target-feature +fxsr -target-feature +avx -target-feature +xsaveopt -target-feature +xsaves -target-feature +xsavec -target-feature +mwaitx -target-feature +ibt -target-feature +shstk -target-feature +clzero -fsyntax-only -o %t %s
#ifdef USE_ALL
#define USE_3DNOW
@@ -160,8 +160,6 @@ void f0() {
tmp_V4s = __builtin_ia32_psubusw(tmp_V4s, tmp_V4s);
tmp_V4s = __builtin_ia32_pmulhw(tmp_V4s, tmp_V4s);
tmp_V4s = __builtin_ia32_pmulhuw(tmp_V4s, tmp_V4s);
- tmp_V8c = __builtin_ia32_pavgb(tmp_V8c, tmp_V8c);
- tmp_V4s = __builtin_ia32_pavgw(tmp_V4s, tmp_V4s);
tmp_V8c = __builtin_ia32_pcmpeqb(tmp_V8c, tmp_V8c);
tmp_V4s = __builtin_ia32_pcmpeqw(tmp_V4s, tmp_V4s);
tmp_V2i = __builtin_ia32_pcmpeqd(tmp_V2i, tmp_V2i);
@@ -201,8 +199,6 @@ void f0() {
tmp_V16c = __builtin_ia32_psubusb128(tmp_V16c, tmp_V16c);
tmp_V8s = __builtin_ia32_psubusw128(tmp_V8s, tmp_V8s);
tmp_V8s = __builtin_ia32_pmulhw128(tmp_V8s, tmp_V8s);
- tmp_V16c = __builtin_ia32_pavgb128(tmp_V16c, tmp_V16c);
- tmp_V8s = __builtin_ia32_pavgw128(tmp_V8s, tmp_V8s);
tmp_V16c = __builtin_ia32_pmaxub128(tmp_V16c, tmp_V16c);
tmp_V8s = __builtin_ia32_pmaxsw128(tmp_V8s, tmp_V8s);
tmp_V16c = __builtin_ia32_pminub128(tmp_V16c, tmp_V16c);
@@ -261,6 +257,19 @@ void f0() {
tmp_V8c = __builtin_ia32_packuswb(tmp_V4s, tmp_V4s);
tmp_i = __builtin_ia32_vec_ext_v2si(tmp_V2i, 0);
+ __builtin_ia32_incsspd(tmp_Ui);
+ __builtin_ia32_incsspq(tmp_ULLi);
+ tmp_Ui = __builtin_ia32_rdsspd(tmp_Ui);
+ tmp_ULLi = __builtin_ia32_rdsspq(tmp_ULLi);
+ __builtin_ia32_saveprevssp();
+ __builtin_ia32_rstorssp(tmp_vp);
+ __builtin_ia32_wrssd(tmp_Ui, tmp_vp);
+ __builtin_ia32_wrssq(tmp_ULLi, tmp_vp);
+ __builtin_ia32_wrussd(tmp_Ui, tmp_vp);
+ __builtin_ia32_wrussq(tmp_ULLi, tmp_vp);
+ __builtin_ia32_setssbsy();
+ __builtin_ia32_clrssbsy(tmp_vp);
+
(void) __builtin_ia32_ldmxcsr(tmp_Ui);
(void) _mm_setcsr(tmp_Ui);
tmp_Ui = __builtin_ia32_stmxcsr();
diff --git a/test/CodeGen/builtins.c b/test/CodeGen/builtins.c
index 390c2e35bf9d..4f84db00cbd7 100644
--- a/test/CodeGen/builtins.c
+++ b/test/CodeGen/builtins.c
@@ -176,6 +176,19 @@ void bar() {
}
// CHECK: }
+// CHECK-LABEL: define void @test_conditional_bzero
+void test_conditional_bzero() {
+ char dst[20];
+ int _sz = 20, len = 20;
+ return (_sz
+ ? ((_sz >= len)
+ ? __builtin_bzero(dst, len)
+ : foo())
+ : __builtin_bzero(dst, len));
+ // CHECK: call void @llvm.memset
+ // CHECK: call void @llvm.memset
+ // CHECK-NOT: phi
+}
// CHECK-LABEL: define void @test_float_builtins
void test_float_builtins(float F, double D, long double LD) {
@@ -317,6 +330,15 @@ void test_float_builtin_ops(float F, double D, long double LD) {
resld = __builtin_floorl(LD);
// CHECK: call x86_fp80 @llvm.floor.f80
+ resf = __builtin_sqrtf(F);
+ // CHECK: call float @llvm.sqrt.f32(
+
+ resd = __builtin_sqrt(D);
+ // CHECK: call double @llvm.sqrt.f64(
+
+ resld = __builtin_sqrtl(LD);
+ // CHECK: call x86_fp80 @llvm.sqrt.f80
+
resf = __builtin_truncf(F);
// CHECK: call float @llvm.trunc.f32
@@ -378,229 +400,385 @@ long long test_builtin_readcyclecounter() {
#ifdef __x86_64__
// CHECK-LABEL: define void @test_builtin_os_log
-// CHECK: (i8* [[BUF:%.*]], i32 [[I:%.*]], i8* [[DATA:%.*]])
+// CHECK: (i8* %[[BUF:.*]], i32 %[[I:.*]], i8* %[[DATA:.*]])
void test_builtin_os_log(void *buf, int i, const char *data) {
volatile int len;
- // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
- // CHECK: store i32 [[I]], i32* [[I_ADDR:%.*]], align 4
- // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
-
- // CHECK: store volatile i32 34
+ // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[I_ADDR:.*]] = alloca i32, align 4
+ // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[LEN:.*]] = alloca i32, align 4
+ // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
+ // CHECK: store i32 %[[I]], i32* %[[I_ADDR]], align 4
+ // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
+
+ // CHECK: store volatile i32 34, i32* %[[LEN]]
len = __builtin_os_log_format_buffer_size("%d %{public}s %{private}.16P", i, data, data);
- // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
- // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
- // CHECK: store i8 3, i8* [[SUMMARY]]
- // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
- // CHECK: store i8 4, i8* [[NUM_ARGS]]
- //
- // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
- // CHECK: store i8 0, i8* [[ARG1_DESC]]
- // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
- // CHECK: store i8 4, i8* [[ARG1_SIZE]]
- // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
- // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
- // CHECK: [[I2:%.*]] = load i32, i32* [[I_ADDR]]
- // CHECK: store i32 [[I2]], i32* [[ARG1_INT]]
-
- // CHECK: [[ARG2_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 8
- // CHECK: store i8 34, i8* [[ARG2_DESC]]
- // CHECK: [[ARG2_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 9
- // CHECK: store i8 8, i8* [[ARG2_SIZE]]
- // CHECK: [[ARG2:%.*]] = getelementptr i8, i8* [[BUF2]], i64 10
- // CHECK: [[ARG2_PTR:%.*]] = bitcast i8* [[ARG2]] to i8**
- // CHECK: [[DATA2:%.*]] = load i8*, i8** [[DATA_ADDR]]
- // CHECK: store i8* [[DATA2]], i8** [[ARG2_PTR]]
-
- // CHECK: [[ARG3_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 18
- // CHECK: store i8 17, i8* [[ARG3_DESC]]
- // CHECK: [[ARG3_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 19
- // CHECK: store i8 4, i8* [[ARG3_SIZE]]
- // CHECK: [[ARG3:%.*]] = getelementptr i8, i8* [[BUF2]], i64 20
- // CHECK: [[ARG3_INT:%.*]] = bitcast i8* [[ARG3]] to i32*
- // CHECK: store i32 16, i32* [[ARG3_INT]]
-
- // CHECK: [[ARG4_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 24
- // CHECK: store i8 49, i8* [[ARG4_DESC]]
- // CHECK: [[ARG4_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 25
- // CHECK: store i8 8, i8* [[ARG4_SIZE]]
- // CHECK: [[ARG4:%.*]] = getelementptr i8, i8* [[BUF2]], i64 26
- // CHECK: [[ARG4_PTR:%.*]] = bitcast i8* [[ARG4]] to i8**
- // CHECK: [[DATA3:%.*]] = load i8*, i8** [[DATA_ADDR]]
- // CHECK: store i8* [[DATA3]], i8** [[ARG4_PTR]]
-
+ // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]]
+ // CHECK: %[[V2:.*]] = load i32, i32* %[[I_ADDR]]
+ // CHECK: %[[V3:.*]] = load i8*, i8** %[[DATA_ADDR]]
+ // CHECK: %[[V4:.*]] = ptrtoint i8* %[[V3]] to i64
+ // CHECK: %[[V5:.*]] = load i8*, i8** %[[DATA_ADDR]]
+ // CHECK: %[[V6:.*]] = ptrtoint i8* %[[V5]] to i64
+ // CHECK: call void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49(i8* %[[V1]], i32 %[[V2]], i64 %[[V4]], i32 16, i64 %[[V6]])
__builtin_os_log_format(buf, "%d %{public}s %{private}.16P", i, data, data);
}
-// CHECK-LABEL: define void @test_builtin_os_log_errno
-// CHECK: (i8* [[BUF:%.*]], i8* [[DATA:%.*]])
-void test_builtin_os_log_errno(void *buf, const char *data) {
- volatile int len;
- // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
- // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
-
- // CHECK: store volatile i32 2
- len = __builtin_os_log_format_buffer_size("%S");
-
- // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
- // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
- // CHECK: store i8 2, i8* [[SUMMARY]]
- // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
- // CHECK: store i8 1, i8* [[NUM_ARGS]]
-
- // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
- // CHECK: store i8 96, i8* [[ARG1_DESC]]
- // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
- // CHECK: store i8 0, i8* [[ARG1_SIZE]]
- // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
- // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
- // CHECK: store i32 0, i32* [[ARG1_INT]]
-
- __builtin_os_log_format(buf, "%m");
-}
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49
+// CHECK: (i8* %[[BUFFER:.*]], i32 %[[ARG0:.*]], i64 %[[ARG1:.*]], i32 %[[ARG2:.*]], i64 %[[ARG3:.*]])
+
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
+// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
+// CHECK: %[[ARG2_ADDR:.*]] = alloca i32, align 4
+// CHECK: %[[ARG3_ADDR:.*]] = alloca i64, align 8
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
+// CHECK: store i64 %[[ARG1]], i64* %[[ARG1_ADDR]], align 8
+// CHECK: store i32 %[[ARG2]], i32* %[[ARG2_ADDR]], align 4
+// CHECK: store i64 %[[ARG3]], i64* %[[ARG3_ADDR]], align 8
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 3, i8* %[[SUMMARY]], align 1
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 4, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
+// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
+// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
+// CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
+// CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
+// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 8
+// CHECK: store i8 34, i8* %[[ARGDESCRIPTOR1]], align 1
+// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 9
+// CHECK: store i8 8, i8* %[[ARGSIZE2]], align 1
+// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 10
+// CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i64*
+// CHECK: %[[V1:.*]] = load i64, i64* %[[ARG1_ADDR]], align 8
+// CHECK: store i64 %[[V1]], i64* %[[ARGDATACAST4]], align 1
+// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, i8* %[[BUF]], i64 18
+// CHECK: store i8 17, i8* %[[ARGDESCRIPTOR5]], align 1
+// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, i8* %[[BUF]], i64 19
+// CHECK: store i8 4, i8* %[[ARGSIZE6]], align 1
+// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, i8* %[[BUF]], i64 20
+// CHECK: %[[ARGDATACAST8:.*]] = bitcast i8* %[[ARGDATA7]] to i32*
+// CHECK: %[[V2:.*]] = load i32, i32* %[[ARG2_ADDR]], align 4
+// CHECK: store i32 %[[V2]], i32* %[[ARGDATACAST8]], align 1
+// CHECK: %[[ARGDESCRIPTOR9:.*]] = getelementptr i8, i8* %[[BUF]], i64 24
+// CHECK: store i8 49, i8* %[[ARGDESCRIPTOR9]], align 1
+// CHECK: %[[ARGSIZE10:.*]] = getelementptr i8, i8* %[[BUF]], i64 25
+// CHECK: store i8 8, i8* %[[ARGSIZE10]], align 1
+// CHECK: %[[ARGDATA11:.*]] = getelementptr i8, i8* %[[BUF]], i64 26
+// CHECK: %[[ARGDATACAST12:.*]] = bitcast i8* %[[ARGDATA11]] to i64*
+// CHECK: %[[V3:.*]] = load i64, i64* %[[ARG3_ADDR]], align 8
+// CHECK: store i64 %[[V3]], i64* %[[ARGDATACAST12]], align 1
// CHECK-LABEL: define void @test_builtin_os_log_wide
-// CHECK: (i8* [[BUF:%.*]], i8* [[DATA:%.*]], i32* [[STR:%.*]])
+// CHECK: (i8* %[[BUF:.*]], i8* %[[DATA:.*]], i32* %[[STR:.*]])
typedef int wchar_t;
void test_builtin_os_log_wide(void *buf, const char *data, wchar_t *str) {
volatile int len;
- // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
- // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
- // CHECK: store i32* [[STR]], i32** [[STR_ADDR:%.*]],
- // CHECK: store volatile i32 12
+ // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[STR_ADDR:.*]] = alloca i32*, align 8
+ // CHECK: %[[LEN:.*]] = alloca i32, align 4
+ // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
+ // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
+ // CHECK: store i32* %[[STR]], i32** %[[STR_ADDR]], align 8
+
+ // CHECK: store volatile i32 12, i32* %[[LEN]], align 4
len = __builtin_os_log_format_buffer_size("%S", str);
- // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
- // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
- // CHECK: store i8 2, i8* [[SUMMARY]]
- // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
- // CHECK: store i8 1, i8* [[NUM_ARGS]]
-
- // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
- // CHECK: store i8 80, i8* [[ARG1_DESC]]
- // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
- // CHECK: store i8 8, i8* [[ARG1_SIZE]]
- // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
- // CHECK: [[ARG1_PTR:%.*]] = bitcast i8* [[ARG1]] to i32**
- // CHECK: [[STR2:%.*]] = load i32*, i32** [[STR_ADDR]]
- // CHECK: store i32* [[STR2]], i32** [[ARG1_PTR]]
+ // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
+ // CHECK: %[[V2:.*]] = load i32*, i32** %[[STR_ADDR]], align 8
+ // CHECK: %[[V3:.*]] = ptrtoint i32* %[[V2]] to i64
+ // CHECK: call void @__os_log_helper_1_2_1_8_80(i8* %[[V1]], i64 %[[V3]])
__builtin_os_log_format(buf, "%S", str);
}
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_8_80
+// CHECK: (i8* %[[BUFFER:.*]], i64 %[[ARG0:.*]])
+
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: store i64 %[[ARG0]], i64* %[[ARG0_ADDR]], align 8
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 80, i8* %[[ARGDESCRIPTOR]], align 1
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 8, i8* %[[ARGSIZE]], align 1
+// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
+// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i64*
+// CHECK: %[[V0:.*]] = load i64, i64* %[[ARG0_ADDR]], align 8
+// CHECK: store i64 %[[V0]], i64* %[[ARGDATACAST]], align 1
+
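The helper above takes the wide-string argument as a plain i64: the caller ptrtoints the wchar_t* and the callee stores it as an 8-byte payload at offset 4 behind descriptor 80 (0x50). A hedged sketch of helper reuse (the function below is hypothetical, not part of the test):

void log_wide_again(void *buf, wchar_t *str) {
  /* Same layout (one 8-byte slot, descriptor 0x50), so this call site would
     reuse __os_log_helper_1_2_1_8_80 rather than emitting a new helper. */
  __builtin_os_log_format(buf, "%S", str);
}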
// CHECK-LABEL: define void @test_builtin_os_log_precision_width
-// CHECK: (i8* [[BUF:%.*]], i8* [[DATA:%.*]], i32 [[PRECISION:%.*]], i32 [[WIDTH:%.*]])
+// CHECK: (i8* %[[BUF:.*]], i8* %[[DATA:.*]], i32 %[[PRECISION:.*]], i32 %[[WIDTH:.*]])
void test_builtin_os_log_precision_width(void *buf, const char *data,
int precision, int width) {
volatile int len;
- // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
- // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
- // CHECK: store i32 [[PRECISION]], i32* [[PRECISION_ADDR:%.*]], align 4
- // CHECK: store i32 [[WIDTH]], i32* [[WIDTH_ADDR:%.*]], align 4
-
- // CHECK: store volatile i32 24,
+ // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[PRECISION_ADDR:.*]] = alloca i32, align 4
+ // CHECK: %[[WIDTH_ADDR:.*]] = alloca i32, align 4
+ // CHECK: %[[LEN:.*]] = alloca i32, align 4
+ // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
+ // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
+ // CHECK: store i32 %[[PRECISION]], i32* %[[PRECISION_ADDR]], align 4
+ // CHECK: store i32 %[[WIDTH]], i32* %[[WIDTH_ADDR]], align 4
+
+ // CHECK: store volatile i32 24, i32* %[[LEN]], align 4
len = __builtin_os_log_format_buffer_size("Hello %*.*s World", precision, width, data);
- // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
- // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
- // CHECK: store i8 2, i8* [[SUMMARY]]
- // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
- // CHECK: store i8 3, i8* [[NUM_ARGS]]
-
- // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
- // CHECK: store i8 0, i8* [[ARG1_DESC]]
- // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
- // CHECK: store i8 4, i8* [[ARG1_SIZE]]
- // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
- // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
- // CHECK: [[ARG1_VAL:%.*]] = load i32, i32* [[PRECISION_ADDR]]
- // CHECK: store i32 [[ARG1_VAL]], i32* [[ARG1_INT]]
-
- // CHECK: [[ARG2_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 8
- // CHECK: store i8 16, i8* [[ARG2_DESC]]
- // CHECK: [[ARG2_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 9
- // CHECK: store i8 4, i8* [[ARG2_SIZE]]
- // CHECK: [[ARG2:%.*]] = getelementptr i8, i8* [[BUF2]], i64 10
- // CHECK: [[ARG2_INT:%.*]] = bitcast i8* [[ARG2]] to i32*
- // CHECK: [[ARG2_VAL:%.*]] = load i32, i32* [[WIDTH_ADDR]]
- // CHECK: store i32 [[ARG2_VAL]], i32* [[ARG2_INT]]
-
- // CHECK: [[ARG3_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 14
- // CHECK: store i8 32, i8* [[ARG3_DESC]]
- // CHECK: [[ARG3_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 15
- // CHECK: store i8 8, i8* [[ARG3_SIZE]]
- // CHECK: [[ARG3:%.*]] = getelementptr i8, i8* [[BUF2]], i64 16
- // CHECK: [[ARG3_PTR:%.*]] = bitcast i8* [[ARG3]] to i8**
- // CHECK: [[DATA2:%.*]] = load i8*, i8** [[DATA_ADDR]]
- // CHECK: store i8* [[DATA2]], i8** [[ARG3_PTR]]
-
+ // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
+ // CHECK: %[[V2:.*]] = load i32, i32* %[[PRECISION_ADDR]], align 4
+ // CHECK: %[[V3:.*]] = load i32, i32* %[[WIDTH_ADDR]], align 4
+ // CHECK: %[[V4:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8
+ // CHECK: %[[V5:.*]] = ptrtoint i8* %[[V4]] to i64
+ // CHECK: call void @__os_log_helper_1_2_3_4_0_4_16_8_32(i8* %[[V1]], i32 %[[V2]], i32 %[[V3]], i64 %[[V5]])
__builtin_os_log_format(buf, "Hello %*.*s World", precision, width, data);
}
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_3_4_0_4_16_8_32
+// CHECK: (i8* %[[BUFFER:.*]], i32 %[[ARG0:.*]], i32 %[[ARG1:.*]], i64 %[[ARG2:.*]])
+
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
+// CHECK: %[[ARG1_ADDR:.*]] = alloca i32, align 4
+// CHECK: %[[ARG2_ADDR:.*]] = alloca i64, align 8
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
+// CHECK: store i32 %[[ARG1]], i32* %[[ARG1_ADDR]], align 4
+// CHECK: store i64 %[[ARG2]], i64* %[[ARG2_ADDR]], align 8
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 3, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
+// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
+// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
+// CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
+// CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
+// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 8
+// CHECK: store i8 16, i8* %[[ARGDESCRIPTOR1]], align 1
+// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 9
+// CHECK: store i8 4, i8* %[[ARGSIZE2]], align 1
+// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 10
+// CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i32*
+// CHECK: %[[V1:.*]] = load i32, i32* %[[ARG1_ADDR]], align 4
+// CHECK: store i32 %[[V1]], i32* %[[ARGDATACAST4]], align 1
+// CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, i8* %[[BUF]], i64 14
+// CHECK: store i8 32, i8* %[[ARGDESCRIPTOR5]], align 1
+// CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, i8* %[[BUF]], i64 15
+// CHECK: store i8 8, i8* %[[ARGSIZE6]], align 1
+// CHECK: %[[ARGDATA7:.*]] = getelementptr i8, i8* %[[BUF]], i64 16
+// CHECK: %[[ARGDATACAST8:.*]] = bitcast i8* %[[ARGDATA7]] to i64*
+// CHECK: %[[V2:.*]] = load i64, i64* %[[ARG2_ADDR]], align 8
+// CHECK: store i64 %[[V2]], i64* %[[ARGDATACAST8]], align 1
+
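A hedged view of the 24-byte buffer those offsets describe (the struct and its field names are illustrative only, not part of the test):

struct os_log_precision_width_buf {
  unsigned char summary;   /* offset 0: 0x02                         */
  unsigned char num_args;  /* offset 1: 3                            */
  unsigned char d0, s0;    /* offsets 2-3: descriptor 0x00, size 4   */
  int precision;           /* offsets 4-7                            */
  unsigned char d1, s1;    /* offsets 8-9: descriptor 0x10, size 4   */
  int width;               /* offsets 10-13                          */
  unsigned char d2, s2;    /* offsets 14-15: descriptor 0x20, size 8 */
  const char *data;        /* offsets 16-23                          */
} __attribute__((packed)); /* sizeof == 24, matching the volatile store */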
// CHECK-LABEL: define void @test_builtin_os_log_invalid
-// CHECK: (i8* [[BUF:%.*]], i32 [[DATA:%.*]])
+// CHECK: (i8* %[[BUF:.*]], i32 %[[DATA:.*]])
void test_builtin_os_log_invalid(void *buf, int data) {
volatile int len;
- // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
- // CHECK: store i32 [[DATA]], i32* [[DATA_ADDR:%.*]]
+ // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[DATA_ADDR:.*]] = alloca i32, align 4
+ // CHECK: %[[LEN:.*]] = alloca i32, align 4
+ // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
+ // CHECK: store i32 %[[DATA]], i32* %[[DATA_ADDR]], align 4
- // CHECK: store volatile i32 8,
+ // CHECK: store volatile i32 8, i32* %[[LEN]], align 4
len = __builtin_os_log_format_buffer_size("invalid specifier %: %d even a trailing one%", data);
- // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
- // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
- // CHECK: store i8 0, i8* [[SUMMARY]]
- // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
- // CHECK: store i8 1, i8* [[NUM_ARGS]]
-
- // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
- // CHECK: store i8 0, i8* [[ARG1_DESC]]
- // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
- // CHECK: store i8 4, i8* [[ARG1_SIZE]]
- // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
- // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
- // CHECK: [[ARG1_VAL:%.*]] = load i32, i32* [[DATA_ADDR]]
- // CHECK: store i32 [[ARG1_VAL]], i32* [[ARG1_INT]]
+ // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
+ // CHECK: %[[V2:.*]] = load i32, i32* %[[DATA_ADDR]], align 4
+ // CHECK: call void @__os_log_helper_1_0_1_4_0(i8* %[[V1]], i32 %[[V2]])
__builtin_os_log_format(buf, "invalid specifier %: %d even a trailing one%", data);
}
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_4_0
+// CHECK: (i8* %[[BUFFER:.*]], i32 %[[ARG0:.*]])
+
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 0, i8* %[[SUMMARY]], align 1
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
+// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
+// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
+// CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
+// CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
+
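A hedged reading of the size check above: the malformed "%:" and the trailing "%" contribute no argument slots, so only the valid %d is encoded, giving 2 (header) + 2 (descriptor and size bytes) + 4 (int payload) = 8 bytes. A hypothetical companion call site that should see the same layout and helper:

void check_invalid_size(void *buf, int data) {
  int n = __builtin_os_log_format_buffer_size("%d", data);  /* also 8 */
  __builtin_os_log_format(buf, "%d", data); /* same one-int layout, so
                                               __os_log_helper_1_0_1_4_0 again */
  (void)n;
}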
// CHECK-LABEL: define void @test_builtin_os_log_percent
-// CHECK: (i8* [[BUF:%.*]], i8* [[DATA1:%.*]], i8* [[DATA2:%.*]])
+// CHECK: (i8* %[[BUF:.*]], i8* %[[DATA1:.*]], i8* %[[DATA2:.*]])
// Check that %%, which does not consume any argument, is handled correctly.
void test_builtin_os_log_percent(void *buf, const char *data1, const char *data2) {
volatile int len;
- // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
- // CHECK: store i8* [[DATA1]], i8** [[DATA1_ADDR:%.*]], align 8
- // CHECK: store i8* [[DATA2]], i8** [[DATA2_ADDR:%.*]], align 8
- // CHECK: store volatile i32 22
+ // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[DATA1_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[DATA2_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[LEN:.*]] = alloca i32, align 4
+ // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
+ // CHECK: store i8* %[[DATA1]], i8** %[[DATA1_ADDR]], align 8
+ // CHECK: store i8* %[[DATA2]], i8** %[[DATA2_ADDR]], align 8
+ // CHECK: store volatile i32 22, i32* %[[LEN]], align 4
+
len = __builtin_os_log_format_buffer_size("%s %% %s", data1, data2);
- // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
- // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
- // CHECK: store i8 2, i8* [[SUMMARY]]
- // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
- // CHECK: store i8 2, i8* [[NUM_ARGS]]
- //
- // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
- // CHECK: store i8 32, i8* [[ARG1_DESC]]
- // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
- // CHECK: store i8 8, i8* [[ARG1_SIZE]]
- // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
- // CHECK: [[ARG1_PTR:%.*]] = bitcast i8* [[ARG1]] to i8**
- // CHECK: [[DATA1:%.*]] = load i8*, i8** [[DATA1_ADDR]]
- // CHECK: store i8* [[DATA1]], i8** [[ARG1_PTR]]
- //
- // CHECK: [[ARG2_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 12
- // CHECK: store i8 32, i8* [[ARG2_DESC]]
- // CHECK: [[ARG2_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 13
- // CHECK: store i8 8, i8* [[ARG2_SIZE]]
- // CHECK: [[ARG2:%.*]] = getelementptr i8, i8* [[BUF2]], i64 14
- // CHECK: [[ARG2_PTR:%.*]] = bitcast i8* [[ARG2]] to i8**
- // CHECK: [[DATA2:%.*]] = load i8*, i8** [[DATA2_ADDR]]
- // CHECK: store i8* [[DATA2]], i8** [[ARG2_PTR]]
+ // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
+ // CHECK: %[[V2:.*]] = load i8*, i8** %[[DATA1_ADDR]], align 8
+ // CHECK: %[[V3:.*]] = ptrtoint i8* %[[V2]] to i64
+ // CHECK: %[[V4:.*]] = load i8*, i8** %[[DATA2_ADDR]], align 8
+ // CHECK: %[[V5:.*]] = ptrtoint i8* %[[V4]] to i64
+ // CHECK: call void @__os_log_helper_1_2_2_8_32_8_32(i8* %[[V1]], i64 %[[V3]], i64 %[[V5]])
+
__builtin_os_log_format(buf, "%s %% %s", data1, data2);
}
-#endif
\ No newline at end of file
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_2_8_32_8_32
+// CHECK: (i8* %[[BUFFER:.*]], i64 %[[ARG0:.*]], i64 %[[ARG1:.*]])
+
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
+// CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: store i64 %[[ARG0]], i64* %[[ARG0_ADDR]], align 8
+// CHECK: store i64 %[[ARG1]], i64* %[[ARG1_ADDR]], align 8
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 2, i8* %[[SUMMARY]], align 1
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 2, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 32, i8* %[[ARGDESCRIPTOR]], align 1
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 8, i8* %[[ARGSIZE]], align 1
+// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
+// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i64*
+// CHECK: %[[V0:.*]] = load i64, i64* %[[ARG0_ADDR]], align 8
+// CHECK: store i64 %[[V0]], i64* %[[ARGDATACAST]], align 1
+// CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 12
+// CHECK: store i8 32, i8* %[[ARGDESCRIPTOR1]], align 1
+// CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 13
+// CHECK: store i8 8, i8* %[[ARGSIZE2]], align 1
+// CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 14
+// CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i64*
+// CHECK: %[[V1:.*]] = load i64, i64* %[[ARG1_ADDR]], align 8
+// CHECK: store i64 %[[V1]], i64* %[[ARGDATACAST4]], align 1
+
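Hedged arithmetic behind the earlier "store volatile i32 22" check: %% is literal text and consumes nothing, so the buffer is a 2-byte header plus two string slots of 2 + 8 bytes each, with the pointer payloads at offsets 4 and 14 as checked above. As a sketch:

void check_percent_size(const char *a, const char *b) {
  int n = __builtin_os_log_format_buffer_size("%s %% %s", a, b);
  /* n == 22 == 2 + (2 + 8) + (2 + 8) */
  (void)n;
}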
+// Check that the following two functions call the same helper function.
+
+// CHECK-LABEL: define void @test_builtin_os_log_merge_helper0
+// CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
+void test_builtin_os_log_merge_helper0(void *buf, int i, double d) {
+ __builtin_os_log_format(buf, "%d %f", i, d);
+}
+
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_2_4_0_8_0(
+
+// CHECK-LABEL: define void @test_builtin_os_log_merge_helper1
+// CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
+void test_builtin_os_log_merge_helper1(void *buf, unsigned u, long long ll) {
+ __builtin_os_log_format(buf, "%u %lld", u, ll);
+}
+
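The shared name is the layout, encoded. A plausible decoding of __os_log_helper_1_0_2_4_0_8_0, consistent with every helper name in this file, is: buffer alignment 1, summary byte 0, 2 arguments, then one size/descriptor pair per argument, here (4,0) and (8,0); the errno helper below (_16_2_1_0_96) fits the same scheme, with alignment 16 from its VLA. Any format lowering to this layout should land on the same helper, for example (hypothetical, not in the test):

void test_builtin_os_log_merge_helper2(void *buf, unsigned x, long n) {
  /* assumes the x86-64 target of these checks, where long is 8 bytes */
  __builtin_os_log_format(buf, "%x %ld", x, n); /* (4,0) then (8,0) again */
}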
+// Check that this function doesn't write past the end of array 'buf'.
+
+// CHECK-LABEL: define void @test_builtin_os_log_errno
+void test_builtin_os_log_errno() {
+ // CHECK: %[[VLA:.*]] = alloca i8, i64 4, align 16
+ // CHECK: call void @__os_log_helper_16_2_1_0_96(i8* %[[VLA]])
+
+ char buf[__builtin_os_log_format_buffer_size("%m")];
+ __builtin_os_log_format(buf, "%m");
+}
+
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_16_2_1_0_96
+// CHECK: (i8* %[[BUFFER:.*]])
+
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 2, i8* %[[SUMMARY]], align 16
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 96, i8* %[[ARGDESCRIPTOR]], align 2
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 0, i8* %[[ARGSIZE]], align 1
+// CHECK-NEXT: ret void
+
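A hedged account of why four bytes suffice: %m tells the logger to capture errno itself, so the slot carries descriptor 96 (0x60) with payload size 0, and the helper writes exactly header (2) + descriptor/size (2) bytes before returning, which the CHECK-NEXT on ret void pins down.

void check_errno_size(void) {
  int n = __builtin_os_log_format_buffer_size("%m"); /* 4, matching the VLA */
  (void)n;
}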
+// CHECK-LABEL: define void @test_builtin_os_log_long_double
+// CHECK: (i8* %[[BUF:.*]], x86_fp80 %[[LD:.*]])
+void test_builtin_os_log_long_double(void *buf, long double ld) {
+ // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
+ // CHECK: %[[LD_ADDR:.*]] = alloca x86_fp80, align 16
+ // CHECK: %[[COERCE:.*]] = alloca i128, align 16
+ // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
+ // CHECK: store x86_fp80 %[[LD]], x86_fp80* %[[LD_ADDR]], align 16
+ // CHECK: %[[V0:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
+ // CHECK: %[[V1:.*]] = load x86_fp80, x86_fp80* %[[LD_ADDR]], align 16
+ // CHECK: %[[V2:.*]] = bitcast x86_fp80 %[[V1]] to i80
+ // CHECK: %[[V3:.*]] = zext i80 %[[V2]] to i128
+ // CHECK: store i128 %[[V3]], i128* %[[COERCE]], align 16
+ // CHECK: %[[V4:.*]] = bitcast i128* %[[COERCE]] to { i64, i64 }*
+ // CHECK: %[[V5:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V4]], i32 0, i32 0
+ // CHECK: %[[V6:.*]] = load i64, i64* %[[V5]], align 16
+ // CHECK: %[[V7:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V4]], i32 0, i32 1
+ // CHECK: %[[V8:.*]] = load i64, i64* %[[V7]], align 8
+ // CHECK: call void @__os_log_helper_1_0_1_16_0(i8* %[[V0]], i64 %[[V6]], i64 %[[V8]])
+
+ __builtin_os_log_format(buf, "%Lf", ld);
+}
+
+// CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_16_0
+// CHECK: (i8* %[[BUFFER:.*]], i64 %[[ARG0_COERCE0:.*]], i64 %[[ARG0_COERCE1:.*]])
+
+// CHECK: %[[ARG0:.*]] = alloca i128, align 16
+// CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
+// CHECK: %[[ARG0_ADDR:.*]] = alloca i128, align 16
+// CHECK: %[[V0:.*]] = bitcast i128* %[[ARG0]] to { i64, i64 }*
+// CHECK: %[[V1:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V0]], i32 0, i32 0
+// CHECK: store i64 %[[ARG0_COERCE0]], i64* %[[V1]], align 16
+// CHECK: %[[V2:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V0]], i32 0, i32 1
+// CHECK: store i64 %[[ARG0_COERCE1]], i64* %[[V2]], align 8
+// CHECK: %[[ARG01:.*]] = load i128, i128* %[[ARG0]], align 16
+// CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
+// CHECK: store i128 %[[ARG01]], i128* %[[ARG0_ADDR]], align 16
+// CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
+// CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
+// CHECK: store i8 0, i8* %[[SUMMARY]], align 1
+// CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
+// CHECK: store i8 1, i8* %[[NUMARGS]], align 1
+// CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
+// CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
+// CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
+// CHECK: store i8 16, i8* %[[ARGSIZE]], align 1
+// CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
+// CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i128*
+// CHECK: %[[V3:.*]] = load i128, i128* %[[ARG0_ADDR]], align 16
+// CHECK: store i128 %[[V3]], i128* %[[ARGDATACAST]], align 1
+
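The long double path is pure argument coercion: the x86_fp80 is widened to an i128 and split into two i64 halves for the call, and the helper reassembles them before one unaligned 16-byte store into the buffer. A hedged C model of that reassembly (names are illustrative, not the helper's):

typedef unsigned long long u64;
static void store_i128(unsigned char *argdata, u64 lo, u64 hi) {
  __builtin_memcpy(argdata, &lo, 8);     /* { i64, i64 } element 0        */
  __builtin_memcpy(argdata + 8, &hi, 8); /* element 1, 8 bytes further on */
}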
+#endif
diff --git a/test/CodeGen/catch-undef-behavior.c b/test/CodeGen/catch-undef-behavior.c
index e67f0a1f8eec..7915ed9db1c1 100644
--- a/test/CodeGen/catch-undef-behavior.c
+++ b/test/CodeGen/catch-undef-behavior.c
@@ -59,8 +59,7 @@ int bar(int *a) {
// CHECK-COMMON-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3
// CHECK-COMMON-NEXT: icmp eq i64 %[[MISALIGN]], 0
- // CHECK-UBSAN: %[[ARG:.*]] = ptrtoint
- // CHECK-UBSAN-NEXT: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_200]] to i8*), i64 %[[ARG]])
+ // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_200]] to i8*), i64 %[[PTRINT]])
// CHECK-TRAP: call void @llvm.trap() [[NR_NUW]]
// CHECK-TRAP-NEXT: unreachable
diff --git a/test/CodeGen/cetintrin.c b/test/CodeGen/cetintrin.c
new file mode 100644
index 000000000000..085462a6626d
--- /dev/null
+++ b/test/CodeGen/cetintrin.c
@@ -0,0 +1,84 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=i386-apple-darwin -target-feature +shstk -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +shstk -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=X86_64
+
+#include <immintrin.h>
+
+void test_incsspd(int a) {
+ // CHECK-LABEL: @test_incsspd
+ // CHECK: call void @llvm.x86.incsspd(i32 %{{[0-9]+}})
+ _incsspd(a);
+}
+
+#ifdef __x86_64__
+void test_incsspq(int a) {
+ // X86_64-LABEL: @test_incsspq
+ // X86_64: call void @llvm.x86.incsspq(i64 %{{[a-z0-9.]+}})
+ _incsspq(a);
+}
+#endif
+
+unsigned int test_rdsspd(unsigned int a) {
+ // CHECK-LABEL: @test_rdsspd
+ // CHECK: call i32 @llvm.x86.rdsspd(i32 %{{[a-z0-9.]+}})
+ return _rdsspd(a);
+}
+
+#ifdef __x86_64__
+unsigned long long test_rdsspq(unsigned long long a) {
+ // X86_64-LABEL: @test_rdsspq
+ // X86_64: call i64 @llvm.x86.rdsspq(i64 %{{[a-z0-9.]+}})
+ return _rdsspq(a);
+}
+#endif
+
+void test_saveprevssp() {
+ // CHECK-LABEL: @test_saveprevssp
+ // CHECK: call void @llvm.x86.saveprevssp()
+ _saveprevssp();
+}
+
+void test_rstorssp(void * __p) {
+ // CHECK-LABEL: @test_rstorssp
+ // CHECK: call void @llvm.x86.rstorssp(i8* %{{[a-z0-9.]+}})
+ _rstorssp(__p);
+}
+
+void test_wrssd(unsigned int __a, void * __p) {
+ // CHECK-LABEL: @test_wrssd
+ // CHECK: call void @llvm.x86.wrssd(i32 %{{[a-z0-9.]+}}, i8* %{{[a-z0-9.]+}})
+ _wrssd(__a, __p);
+}
+
+#ifdef __x86_64__
+void test_wrssq(unsigned long long __a, void * __p) {
+ // X86_64-LABEL: @test_wrssq
+ // X86_64: call void @llvm.x86.wrssq(i64 %{{[a-z0-9.]+}}, i8* %{{[a-z0-9.]+}})
+ _wrssq(__a, __p);
+}
+#endif
+
+void test_wrussd(unsigned int __a, void * __p) {
+ // CHECK-LABEL: @test_wrussd
+ // CHECK: call void @llvm.x86.wrussd(i32 %{{[a-z0-9.]+}}, i8* %{{[a-z0-9.]+}})
+ _wrussd(__a, __p);
+}
+
+#ifdef __x86_64__
+void test_wrussq(unsigned long long __a, void * __p) {
+ // X86_64-LABEL: @test_wrussq
+ // X86_64: call void @llvm.x86.wrussq(i64 %{{[a-z0-9.]+}}, i8* %{{[a-z0-9.]+}})
+ _wrussq(__a, __p);
+}
+#endif
+
+void test_setssbsy() {
+ // CHECK-LABEL: @test_setssbsy
+ // CHECK: call void @llvm.x86.setssbsy()
+ _setssbsy();
+}
+
+void test_clrssbsy(void * __p) {
+ // CHECK-LABEL: @test_clrssbsy
+ // CHECK: call void @llvm.x86.clrssbsy(i8* %{{[a-z0-9.]+}})
+ _clrssbsy(__p);
+}
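Each _xxx() wrapper above lowers 1:1 onto an @llvm.x86.* intrinsic, with the q-suffixed 64-bit forms only available under __x86_64__. A hedged usage sketch following the same #ifdef pattern (assumes a build with -target-feature +shstk):

static void bump_shadow_stack(int delta) {
#ifdef __x86_64__
  _incsspq(delta); /* llvm.x86.incsspq */
#else
  _incsspd(delta); /* llvm.x86.incsspd */
#endif
}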
diff --git a/test/CodeGen/cfi-icall-cross-dso.c b/test/CodeGen/cfi-icall-cross-dso.c
index 636a9e4aedb4..43ab0e73b14a 100644
--- a/test/CodeGen/cfi-icall-cross-dso.c
+++ b/test/CodeGen/cfi-icall-cross-dso.c
@@ -46,7 +46,7 @@ void caller(void (*f)()) {
// Check that we emit both string- and hash-based type entries for static void g(),
// and don't emit them for the declaration of h().
-// CHECK: define internal void @g({{.*}} !type [[TVOID:![0-9]+]] !type [[TVOID_ID:![0-9]+]]
+// CHECK: define internal void @g({{.*}} !type [[TVOID:![0-9]+]] !type [[TVOID_GENERALIZED:![0-9]+]] !type [[TVOID_ID:![0-9]+]]
static void g(void) {}
// CHECK: declare void @h({{[^!]*$}}
@@ -60,9 +60,9 @@ Fn h1() {
return &h;
}
-// CHECK: define void @bar({{.*}} !type [[TNOPROTO:![0-9]+]] !type [[TNOPROTO_ID:![0-9]+]]
+// CHECK: define void @bar({{.*}} !type [[TNOPROTO:![0-9]+]] !type [[TNOPROTO_GENERALIZED:![0-9]+]] !type [[TNOPROTO_ID:![0-9]+]]
// ITANIUM: define available_externally void @foo({{[^!]*$}}
-// MS: define linkonce_odr void @foo({{.*}} !type [[TNOPROTO]] !type [[TNOPROTO_ID]]
+// MS: define linkonce_odr void @foo({{.*}} !type [[TNOPROTO]] !type [[TNOPROTO_GENERALIZED:![0-9]+]] !type [[TNOPROTO_ID]]
inline void foo() {}
void bar() { foo(); }
@@ -71,11 +71,15 @@ void bar() { foo(); }
// Check that the type entries are correct.
// ITANIUM: [[TVOID]] = !{i64 0, !"_ZTSFvvE"}
+// ITANIUM: [[TVOID_GENERALIZED]] = !{i64 0, !"_ZTSFvvE.generalized"}
// ITANIUM: [[TVOID_ID]] = !{i64 0, i64 9080559750644022485}
// ITANIUM: [[TNOPROTO]] = !{i64 0, !"_ZTSFvE"}
+// ITANIUM: [[TNOPROTO_GENERALIZED]] = !{i64 0, !"_ZTSFvE.generalized"}
// ITANIUM: [[TNOPROTO_ID]] = !{i64 0, i64 6588678392271548388}
// MS: [[TVOID]] = !{i64 0, !"?6AXXZ"}
+// MS: [[TVOID_GENERALIZED]] = !{i64 0, !"?6AXXZ.generalized"}
// MS: [[TVOID_ID]] = !{i64 0, i64 5113650790573562461}
// MS: [[TNOPROTO]] = !{i64 0, !"?6AX@Z"}
+// MS: [[TNOPROTO_GENERALIZED]] = !{i64 0, !"?6AX@Z.generalized"}
// MS: [[TNOPROTO_ID]] = !{i64 0, i64 4195979634929632483}
diff --git a/test/CodeGen/cfi-icall-generalize.c b/test/CodeGen/cfi-icall-generalize.c
new file mode 100644
index 000000000000..c7c7b30a7a21
--- /dev/null
+++ b/test/CodeGen/cfi-icall-generalize.c
@@ -0,0 +1,19 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -fsanitize=cfi-icall -fsanitize-trap=cfi-icall -emit-llvm -o - %s | FileCheck --check-prefix=CHECK --check-prefix=UNGENERALIZED %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -fsanitize=cfi-icall -fsanitize-trap=cfi-icall -fsanitize-cfi-icall-generalize-pointers -emit-llvm -o - %s | FileCheck --check-prefix=CHECK --check-prefix=GENERALIZED %s
+
+// Test that const char* is generalized to const void* and that const char** is
+// generalized to void*.
+
+// CHECK: define i32** @f({{.*}} !type [[TYPE:![0-9]+]] !type [[TYPE_GENERALIZED:![0-9]+]]
+int** f(const char *a, const char **b) {
+ return (int**)0;
+}
+
+void g(int** (*fp)(const char *, const char **)) {
+ // UNGENERALIZED: call i1 @llvm.type.test(i8* {{.*}}, metadata !"_ZTSFPPiPKcPS2_E")
+ // GENERALIZED: call i1 @llvm.type.test(i8* {{.*}}, metadata !"_ZTSFPvPKvS_E.generalized")
+ fp(0, 0);
+}
+
+// CHECK: [[TYPE]] = !{i64 0, !"_ZTSFPPiPKcPS2_E"}
+// CHECK: [[TYPE_GENERALIZED]] = !{i64 0, !"_ZTSFPvPKvS_E.generalized"}
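Generalization erases pointee detail, which is what makes the single id above work: the int** return and the const char** parameter both become void*, and const char* becomes const void*. A hedged consequence (this declaration is not in the test): any prototype with the same pointer shape shares the generalized id.

// Should also carry !type !{i64 0, !"_ZTSFPvPKvS_E.generalized"}:
long **f2(const int *a, const int **b);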
diff --git a/test/CodeGen/cfi-icall.c b/test/CodeGen/cfi-icall.c
index ed34f4f44beb..5f346b66e81e 100644
--- a/test/CodeGen/cfi-icall.c
+++ b/test/CodeGen/cfi-icall.c
@@ -3,22 +3,26 @@
// Tests that we assign appropriate identifiers to unprototyped functions.
-// CHECK: define void @f({{.*}} !type [[TVOID:![0-9]+]]
+// CHECK: define void @f({{.*}} !type [[TVOID:![0-9]+]] !type [[TVOID_GENERALIZED:![0-9]+]]
void f() {
}
void xf();
-// CHECK: define void @g({{.*}} !type [[TINT:![0-9]+]]
+// CHECK: define void @g({{.*}} !type [[TINT:![0-9]+]] !type [[TINT_GENERALIZED:![0-9]+]]
void g(int b) {
void (*fp)() = b ? f : xf;
// ITANIUM: call i1 @llvm.type.test(i8* {{.*}}, metadata !"_ZTSFvE")
fp();
}
-// CHECK: declare !type [[TVOID:![0-9]+]] void @xf({{.*}}
+// CHECK: declare !type [[TVOID]] !type [[TVOID_GENERALIZED]] void @xf({{.*}}
// ITANIUM-DAG: [[TVOID]] = !{i64 0, !"_ZTSFvE"}
+// ITANIUM-DAG: [[TVOID_GENERALIZED]] = !{i64 0, !"_ZTSFvE.generalized"}
// ITANIUM-DAG: [[TINT]] = !{i64 0, !"_ZTSFviE"}
+// ITANIUM-DAG: [[TINT_GENERALIZED]] = !{i64 0, !"_ZTSFviE.generalized"}
// MS-DAG: [[TVOID]] = !{i64 0, !"?6AX@Z"}
+// MS-DAG: [[TVOID_GENERALIZED]] = !{i64 0, !"?6AX@Z.generalized"}
// MS-DAG: [[TINT]] = !{i64 0, !"?6AXH@Z"}
+// MS-DAG: [[TINT_GENERALIZED]] = !{i64 0, !"?6AXH@Z.generalized"}
diff --git a/test/CodeGen/cfi-unrelated-cast.cpp b/test/CodeGen/cfi-unrelated-cast.cpp
new file mode 100644
index 000000000000..d01cdf00b754
--- /dev/null
+++ b/test/CodeGen/cfi-unrelated-cast.cpp
@@ -0,0 +1,37 @@
+// STL allocators should not have unrelated-cast checks applied.
+// RUN: %clang_cc1 -flto -triple x86_64-unknown-linux -fvisibility hidden -fsanitize=cfi-unrelated-cast -emit-llvm -o - %s | FileCheck %s
+
+#include <stddef.h>
+
+template<class T>
+class myalloc {
+ public:
+ // CHECK: define{{.*}}allocateE{{.}}
+ // CHECK-NOT: llvm.type.test
+ T *allocate(size_t sz) {
+ return (T*)::operator new(sz);
+ }
+
+ // CHECK: define{{.*}}allocateE{{.}}PKv
+ // CHECK-NOT: llvm.type.test
+ T *allocate(size_t sz, const void *ptr) {
+ return (T*)::operator new(sz);
+ }
+
+ // CHECK: define{{.*}}differentName
+ // CHECK: llvm.type.test
+ T *differentName(size_t sz, const void *ptr) {
+ return (T*)::operator new(sz);
+ }
+};
+
+class C1 {
+ virtual void f() {}
+};
+
+C1 *f1() {
+ myalloc<C1> allocator;
+ (void)allocator.allocate(16);
+ (void)allocator.allocate(16, 0);
+ (void)allocator.differentName(16, 0);
+}
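The exemption is keyed to allocator-style members, so the identical cast outside one stays instrumented, as differentName shows. A hedged free-function example (not in the test) that would likewise get a type check:

C1 *make_one() {
  return (C1 *)::operator new(sizeof(C1)); // expected to emit llvm.type.test
}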
diff --git a/test/CodeGen/complex-builtins.c b/test/CodeGen/complex-builtins.c
new file mode 100644
index 000000000000..dbf3b5901866
--- /dev/null
+++ b/test/CodeGen/complex-builtins.c
@@ -0,0 +1,206 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm %s | FileCheck %s -check-prefix=NO__ERRNO
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s -check-prefix=HAS_ERRNO
+
+// Test attributes and codegen of complex builtins.
+
+void foo(float f) {
+ __builtin_cabs(f); __builtin_cabsf(f); __builtin_cabsl(f);
+
+// NO__ERRNO: declare double @cabs(double, double) [[READNONE:#[0-9]+]]
+// NO__ERRNO: declare float @cabsf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE:#[0-9]+]]
+// HAS_ERRNO: declare double @cabs(double, double) [[NOT_READNONE:#[0-9]+]]
+// HAS_ERRNO: declare float @cabsf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_cacos(f); __builtin_cacosf(f); __builtin_cacosl(f);
+
+// NO__ERRNO: declare { double, double } @cacos(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cacosf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cacos(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_cacosh(f); __builtin_cacoshf(f); __builtin_cacoshl(f);
+
+// NO__ERRNO: declare { double, double } @cacosh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cacosh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_carg(f); __builtin_cargf(f); __builtin_cargl(f);
+
+// NO__ERRNO: declare double @carg(double, double) [[READNONE]]
+// NO__ERRNO: declare float @cargf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @carg(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @cargf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_casin(f); __builtin_casinf(f); __builtin_casinl(f);
+
+// NO__ERRNO: declare { double, double } @casin(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @casinf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @casin(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @casinf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_casinh(f); __builtin_casinhf(f); __builtin_casinhl(f);
+
+// NO__ERRNO: declare { double, double } @casinh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @casinhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @casinh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_catan(f); __builtin_catanf(f); __builtin_catanl(f);
+
+// NO__ERRNO: declare { double, double } @catan(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @catanf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @catan(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @catanf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_catanh(f); __builtin_catanhf(f); __builtin_catanhl(f);
+
+// NO__ERRNO: declare { double, double } @catanh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @catanhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @catanh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_ccos(f); __builtin_ccosf(f); __builtin_ccosl(f);
+
+// NO__ERRNO: declare { double, double } @ccos(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ccosf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ccos(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_ccosh(f); __builtin_ccoshf(f); __builtin_ccoshl(f);
+
+// NO__ERRNO: declare { double, double } @ccosh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ccosh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_cexp(f); __builtin_cexpf(f); __builtin_cexpl(f);
+
+// NO__ERRNO: declare { double, double } @cexp(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cexpf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cexp(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cexpf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_cimag(f); __builtin_cimagf(f); __builtin_cimagl(f);
+
+// NO__ERRNO-NOT: .cimag
+// NO__ERRNO-NOT: @cimag
+// HAS_ERRNO-NOT: .cimag
+// HAS_ERRNO-NOT: @cimag
+
+ __builtin_conj(f); __builtin_conjf(f); __builtin_conjl(f);
+
+// NO__ERRNO-NOT: .conj
+// NO__ERRNO-NOT: @conj
+// HAS_ERRNO-NOT: .conj
+// HAS_ERRNO-NOT: @conj
+
+ __builtin_clog(f); __builtin_clogf(f); __builtin_clogl(f);
+
+// NO__ERRNO: declare { double, double } @clog(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @clogf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @clog(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @clogf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_cproj(f); __builtin_cprojf(f); __builtin_cprojl(f);
+
+// NO__ERRNO: declare { double, double } @cproj(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cproj(double, double) [[READNONE:#[0-9]+]]
+// HAS_ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_cpow(f,f); __builtin_cpowf(f,f); __builtin_cpowl(f,f);
+
+// NO__ERRNO: declare { double, double } @cpow(double, double, double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval align 16, { x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cpow(double, double, double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval align 16, { x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_creal(f); __builtin_crealf(f); __builtin_creall(f);
+
+// NO__ERRNO-NOT: .creal
+// NO__ERRNO-NOT: @creal
+// HAS_ERRNO-NOT: .creal
+// HAS_ERRNO-NOT: @creal
+
+ __builtin_csin(f); __builtin_csinf(f); __builtin_csinl(f);
+
+// NO__ERRNO: declare { double, double } @csin(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @csinf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @csin(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @csinf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_csinh(f); __builtin_csinhf(f); __builtin_csinhl(f);
+
+// NO__ERRNO: declare { double, double } @csinh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @csinhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @csinh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_csqrt(f); __builtin_csqrtf(f); __builtin_csqrtl(f);
+
+// NO__ERRNO: declare { double, double } @csqrt(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @csqrt(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_ctan(f); __builtin_ctanf(f); __builtin_ctanl(f);
+
+// NO__ERRNO: declare { double, double } @ctan(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ctanf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ctan(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ __builtin_ctanh(f); __builtin_ctanhf(f); __builtin_ctanhl(f);
+
+// NO__ERRNO: declare { double, double } @ctanh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ctanh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+};
+
+
+// NO__ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+// NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+
+// HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+// HAS_ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+
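The declarations above also pin down the x86-64 complex ABI: float _Complex travels as one <2 x float>, double _Complex as two double scalars, and long double _Complex indirectly via a byval pointer, which is why every *l variant is [[NOT_READNONE]] even without errno: it must read through its argument pointer. A hedged caller-side sketch:

float use_cabsf(float _Complex z) {
  return __builtin_cabsf(z); /* lowers to: call float @cabsf(<2 x float>) */
}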
diff --git a/test/CodeGen/complex-libcalls.c b/test/CodeGen/complex-libcalls.c
new file mode 100644
index 000000000000..db56628835fd
--- /dev/null
+++ b/test/CodeGen/complex-libcalls.c
@@ -0,0 +1,208 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm %s | FileCheck %s -check-prefix=NO__ERRNO
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s -check-prefix=HAS_ERRNO
+
+// Test attributes and builtin codegen of complex library calls.
+
+void foo(float f) {
+ cabs(f); cabsf(f); cabsl(f);
+
+// NO__ERRNO: declare double @cabs(double, double) [[READNONE:#[0-9]+]]
+// NO__ERRNO: declare float @cabsf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE:#[0-9]+]]
+// HAS_ERRNO: declare double @cabs(double, double) [[NOT_READNONE:#[0-9]+]]
+// HAS_ERRNO: declare float @cabsf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ cacos(f); cacosf(f); cacosl(f);
+
+// NO__ERRNO: declare { double, double } @cacos(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cacosf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cacos(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ cacosh(f); cacoshf(f); cacoshl(f);
+
+// NO__ERRNO: declare { double, double } @cacosh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cacosh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ carg(f); cargf(f); cargl(f);
+
+// NO__ERRNO: declare double @carg(double, double) [[READNONE]]
+// NO__ERRNO: declare float @cargf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @carg(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @cargf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ casin(f); casinf(f); casinl(f);
+
+// NO__ERRNO: declare { double, double } @casin(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @casinf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @casin(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @casinf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ casinh(f); casinhf(f); casinhl(f);
+
+// NO__ERRNO: declare { double, double } @casinh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @casinhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @casinh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ catan(f); catanf(f); catanl(f);
+
+// NO__ERRNO: declare { double, double } @catan(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @catanf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @catan(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @catanf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ catanh(f); catanhf(f); catanhl(f);
+
+// NO__ERRNO: declare { double, double } @catanh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @catanhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @catanh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ ccos(f); ccosf(f); ccosl(f);
+
+// NO__ERRNO: declare { double, double } @ccos(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ccosf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ccos(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ ccosh(f); ccoshf(f); ccoshl(f);
+
+// NO__ERRNO: declare { double, double } @ccosh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ccosh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ cexp(f); cexpf(f); cexpl(f);
+
+// NO__ERRNO: declare { double, double } @cexp(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cexpf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cexp(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cexpf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ cimag(f); cimagf(f); cimagl(f);
+
+// NO__ERRNO-NOT: .cimag
+// NO__ERRNO-NOT: @cimag
+// HAS_ERRNO-NOT: .cimag
+// HAS_ERRNO-NOT: @cimag
+
+ conj(f); conjf(f); conjl(f);
+
+// NO__ERRNO: declare { double, double } @conj(double, double) [[READNONE:#[0-9]+]]
+// NO__ERRNO: declare <2 x float> @conjf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @conjl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @conj(double, double) [[READNONE:#[0-9]+]]
+// HAS_ERRNO: declare <2 x float> @conjf(<2 x float>) [[READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @conjl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ clog(f); clogf(f); clogl(f);
+
+// NO__ERRNO: declare { double, double } @clog(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @clogf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @clog(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @clogf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ cproj(f); cprojf(f); cprojl(f);
+
+// NO__ERRNO: declare { double, double } @cproj(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cproj(double, double) [[READNONE]]
+// HAS_ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ cpow(f,f); cpowf(f,f); cpowl(f,f);
+
+// NO__ERRNO: declare { double, double } @cpow(double, double, double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval align 16, { x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @cpow(double, double, double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval align 16, { x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ creal(f); crealf(f); creall(f);
+
+// NO__ERRNO-NOT: .creal
+// NO__ERRNO-NOT: @creal
+// HAS_ERRNO-NOT: .creal
+// HAS_ERRNO-NOT: @creal
+
+ csin(f); csinf(f); csinl(f);
+
+// NO__ERRNO: declare { double, double } @csin(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @csinf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @csin(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @csinf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ csinh(f); csinhf(f); csinhl(f);
+
+// NO__ERRNO: declare { double, double } @csinh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @csinhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @csinh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ csqrt(f); csqrtf(f); csqrtl(f);
+
+// NO__ERRNO: declare { double, double } @csqrt(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @csqrt(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ ctan(f); ctanf(f); ctanl(f);
+
+// NO__ERRNO: declare { double, double } @ctan(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ctanf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ctan(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+
+ ctanh(f); ctanhf(f); ctanhl(f);
+
+// NO__ERRNO: declare { double, double } @ctanh(double, double) [[READNONE]]
+// NO__ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[READNONE]]
+// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+// HAS_ERRNO: declare { double, double } @ctanh(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NOT_READNONE]]
+// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval align 16) [[NOT_READNONE]]
+};
+
+
+// NO__ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+// NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+
+// HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+// HAS_ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+
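One asymmetry worth noting in the HAS_ERRNO lines above: conj and cproj keep [[READNONE]] even under -fmath-errno, since neither can set errno, while every other call demotes to [[NOT_READNONE]]. A hedged caller sketch:

double _Complex keep_pure(double _Complex z) {
  /* assumes <complex.h>; still readnone with -fmath-errno */
  return conj(z);
}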
diff --git a/test/CodeGen/darwin-ppc-varargs.c b/test/CodeGen/darwin-ppc-varargs.c
new file mode 100644
index 000000000000..c2a0d192233c
--- /dev/null
+++ b/test/CodeGen/darwin-ppc-varargs.c
@@ -0,0 +1,28 @@
+// RUN: %clang_cc1 -triple powerpc-apple-macosx10.5.0 -target-feature +altivec -Os -emit-llvm -o - %s | FileCheck %s
+
+int f(__builtin_va_list args) { return __builtin_va_arg(args, int); }
+
+// CHECK: @f(i8* {{.*}}[[PARAM:%[a-zA-Z0-9]+]])
+// CHECK: [[BITCAST:%[0-9]+]] = bitcast i8* [[PARAM]] to i32*
+// CHECK: [[VALUE:%[0-9]+]] = load i32, i32* [[BITCAST]], align 4
+// CHECK: ret i32 [[VALUE]]
+
+void h(vector int);
+int g(__builtin_va_list args) {
+ int i = __builtin_va_arg(args, int);
+ h(__builtin_va_arg(args, vector int));
+ int j = __builtin_va_arg(args, int);
+ return i + j;
+}
+
+// CHECK: @g(i8* {{.*}}[[PARAM:%[a-zA-Z0-9]+]])
+// CHECK: [[NEXT:%[-_.a-zA-Z0-9]+]] = getelementptr inbounds i8, i8* [[PARAM]], i32 4
+// CHECK: [[BITCAST:%[0-9]+]] = bitcast i8* [[PARAM]] to i32*
+// CHECK: [[LOAD:%[0-9]+]] = load i32, i32* [[BITCAST]], align 4
+// CHECK: [[PTRTOINT:%[0-9]+]] = ptrtoint i8* [[NEXT]] to i32
+// CHECK: [[ADD:%[0-9]+]] = add i32 [[PTRTOINT]], 15
+// CHECK: [[AND:%[0-9]+]] = and i32 [[ADD]], -16
+// CHECK: [[INTTOPTR:%[0-9]+]] = inttoptr i32 [[AND]] to <4 x i32>*
+// CHECK: [[ARG:%[0-9]+]] = load <4 x i32>, <4 x i32>* [[INTTOPTR]], align 16
+// CHECK: call void @h(<4 x i32> [[ARG]]
+
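The interesting part of g() is the overalignment dance: scalar va_args advance by 4, but the vector slot is first rounded up to a 16-byte boundary, exactly the add-15-and-mask pair checked above. The same idiom in plain C (hedged sketch):

static unsigned align_up_16(unsigned p) {
  return (p + 15) & ~15u; /* matches: add i32 %x, 15 ; and i32 %add, -16 */
}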
diff --git a/test/CodeGen/debug-info-attributed-stmt.c b/test/CodeGen/debug-info-attributed-stmt.c
new file mode 100644
index 000000000000..b60aaf66ff88
--- /dev/null
+++ b/test/CodeGen/debug-info-attributed-stmt.c
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -triple x86_64-unk-unk -debug-info-kind=limited -emit-llvm %s -o - | FileCheck %s
+
+void f(_Bool b)
+{
+#pragma nounroll
+ while (b);
+}
+
+// CHECK: br label {{.*}}, !dbg ![[NUM:[0-9]+]]
+// CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !dbg ![[NUM]]
+// CHECK: br label {{.*}}, !dbg ![[NUM]], !llvm.loop
+// CHECK: ![[NUM]] = !DILocation(line: 6,
diff --git a/test/CodeGen/debug-info-block-vars.c b/test/CodeGen/debug-info-block-vars.c
new file mode 100644
index 000000000000..e0bb61e5e85f
--- /dev/null
+++ b/test/CodeGen/debug-info-block-vars.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -x c -fblocks -debug-info-kind=standalone -emit-llvm -O0 \
+// RUN: -triple x86_64-apple-darwin -o - %s | FileCheck %s
+// RUN: %clang_cc1 -x c -fblocks -debug-info-kind=standalone -emit-llvm -O1 \
+// RUN: -triple x86_64-apple-darwin -o - %s \
+// RUN: | FileCheck --check-prefix=CHECK-OPT %s
+
+// CHECK: define internal void @__f_block_invoke(i8* %.block_descriptor)
+// CHECK: %.block_descriptor.addr = alloca i8*, align 8
+// CHECK: %block.addr = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
+// CHECK: store i8* %.block_descriptor, i8** %.block_descriptor.addr, align 8
+// CHECK: call void @llvm.dbg.declare(metadata i8** %.block_descriptor.addr,
+// CHECK-SAME: metadata !DIExpression())
+// CHECK-OPT-NOT: alloca
+// CHECK-OPT: call void @llvm.dbg.value(metadata i8* %.block_descriptor,
+// CHECK-OPT-SAME: metadata !DIExpression())
+void f() {
+ a(^{
+ b();
+ });
+}
diff --git a/test/CodeGen/debug-info-global-constant.c b/test/CodeGen/debug-info-global-constant.c
index 4175f24675e5..8cb7f44ff3f0 100644
--- a/test/CodeGen/debug-info-global-constant.c
+++ b/test/CodeGen/debug-info-global-constant.c
@@ -5,11 +5,10 @@
// exactly once.
// CHECK: @i = internal constant i32 1, align 4, !dbg ![[I:[0-9]+]]
-// CHECK: ![[I]] = !DIGlobalVariableExpression(var: ![[VAR:.*]], expr: ![[EXPR:[0-9]+]])
+// CHECK: ![[I]] = !DIGlobalVariableExpression(var: ![[VAR:.*]], expr: !DIExpression(DW_OP_constu, 1, DW_OP_stack_value))
// CHECK: ![[VAR]] = distinct !DIGlobalVariable(name: "i",
// CHECK: !DICompileUnit({{.*}}globals: ![[GLOBALS:[0-9]+]])
// CHECK: ![[GLOBALS]] = !{![[I]]}
-// CHECK: ![[EXPR]] = !DIExpression(DW_OP_constu, 1, DW_OP_stack_value)
static const int i = 1;
void g(const int *, int);
diff --git a/test/CodeGen/debug-info-lto.c b/test/CodeGen/debug-info-lto.c
new file mode 100644
index 000000000000..5dab0a12641f
--- /dev/null
+++ b/test/CodeGen/debug-info-lto.c
@@ -0,0 +1,4 @@
+// RUN: %clang_cc1 -flto -emit-llvm -debug-info-kind=standalone %s -o - | FileCheck %s
+// RUN: %clang_cc1 -flto=thin -emit-llvm -debug-info-kind=standalone %s -o - | FileCheck %s
+// The "o" in LTO stands for optimization!
+// CHECK: !DICompileUnit({{.*}} isOptimized: true
diff --git a/test/CodeGen/debug-info-preprocessed-file.i b/test/CodeGen/debug-info-preprocessed-file.i
new file mode 100644
index 000000000000..d231b45d67c2
--- /dev/null
+++ b/test/CodeGen/debug-info-preprocessed-file.i
@@ -0,0 +1,11 @@
+# 1 "/foo/bar/preprocessed-input.c"
+# 1 "<built-in>" 1
+# 1 "<built-in>" 3
+# 318 "<built-in>" 3
+# 1 "<command line>" 1
+# 1 "<built-in>" 2
+# 1 "preprocessed-input.c" 2
+
+// RUN: %clang -g -c -S -emit-llvm -o - %s | FileCheck %s
+// CHECK: !DICompileUnit(language: DW_LANG_C99, file: ![[FILE:[0-9]+]]
+// CHECK: ![[FILE]] = !DIFile(filename: "/foo/bar/preprocessed-input.c"
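The linemarkers at the top are what the expected DIFile comes from: a GNU-style `# <line> "<file>" <flags>` directive resets the presumed file and line, so the compile unit points at /foo/bar/preprocessed-input.c rather than at the .i file itself. A tiny illustration, with a hypothetical file name:

  # 42 "virtual.c"
  int marker_demo; /* debug info attributes this declaration to virtual.c:42 */
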
diff --git a/test/CodeGen/debug-info-static-const-fp.c b/test/CodeGen/debug-info-static-const-fp.c
index 4dfe057f2b6e..1b1da09f9e59 100644
--- a/test/CodeGen/debug-info-static-const-fp.c
+++ b/test/CodeGen/debug-info-static-const-fp.c
@@ -33,22 +33,19 @@ int main() {
return hVal + fVal + dVal + ldVal;
}
-// CHECK: !DIGlobalVariableExpression(var: [[HVAL:.*]], expr: [[HEXPR:.*]])
+// CHECK: !DIGlobalVariableExpression(var: [[HVAL:.*]], expr: !DIExpression(DW_OP_constu, 16502, DW_OP_stack_value))
// CHECK: [[HVAL]] = distinct !DIGlobalVariable(name: "hVal",
// CHECK-SAME: isLocal: true, isDefinition: true
-// CHECK: [[HEXPR]] = !DIExpression(DW_OP_constu, 16502, DW_OP_stack_value)
-// CHECK: !DIGlobalVariableExpression(var: [[FVAL:.*]], expr: [[FEXPR:.*]])
+// CHECK: !DIGlobalVariableExpression(var: [[FVAL:.*]], expr: !DIExpression(DW_OP_constu, 3238681178, DW_OP_stack_value))
// CHECK: [[FVAL]] = distinct !DIGlobalVariable(name: "fVal",
// CHECK-SAME: isLocal: true, isDefinition: true
-// CHECK: [[FEXPR]] = !DIExpression(DW_OP_constu, 3238681178, DW_OP_stack_value)
-// CHECK: !DIGlobalVariableExpression(var: [[DVAL:.*]], expr: [[DEXPR:.*]])
+// CHECK: !DIGlobalVariableExpression(var: [[DVAL:.*]], expr: !DIExpression(DW_OP_constu, 4658387303597904457, DW_OP_stack_value))
// CHECK: [[DVAL]] = distinct !DIGlobalVariable(name: "dVal",
// CHECK-SAME: isLocal: true, isDefinition: true
-// CHECK: [[DEXPR]] = !DIExpression(DW_OP_constu, 4658387303597904457, DW_OP_stack_value)
// CHECK-LDlg-DAG: [[LDVAL:.*]] = distinct !DIGlobalVariable(name: "ldVal", {{.*}}, isLocal: true, isDefinition: true)
-// CHECK-LDlg-DAG: !DIGlobalVariableExpression(var: [[LDVAL]])
+// CHECK-LDlg-DAG: !DIGlobalVariableExpression(var: [[LDVAL]], expr: !DIExpression())
// CHECK-LDsm-DAG: [[LDVAL:.*]] = distinct !DIGlobalVariable(name: "ldVal", {{.*}}, isLocal: true, isDefinition: true)
// CHECK-LDsm-DAG: !DIGlobalVariableExpression(var: [[LDVAL]], expr:
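The DW_OP_constu operands are the raw IEEE-754 bit patterns of the initializers (16502 is 0x4076, the half-precision encoding of hVal's value), pushed with DW_OP_stack_value so the debugger can materialize the constant. A sketch of recovering such a pattern, with a hypothetical helper name:

  #include <stdint.h>
  #include <string.h>

  /* 1.0f is stored as its IEEE-754 bits: 0x3F800000 == 1065353216. */
  uint32_t float_bits(float f) {
    uint32_t u;
    memcpy(&u, &f, sizeof u);
    return u;
  }
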
diff --git a/test/CodeGen/debug-info-static.c b/test/CodeGen/debug-info-static.c
index 016f1e6e6cc5..d6ade2aee56c 100644
--- a/test/CodeGen/debug-info-static.c
+++ b/test/CodeGen/debug-info-static.c
@@ -2,7 +2,7 @@
// CHECK: @f.xyzzy = internal global i32 0, align 4, !dbg [[XYZZY:![0-9]+]]
-// CHECK: [[XYZZY]] = !DIGlobalVariableExpression(var: [[VAR:.*]])
+// CHECK: [[XYZZY]] = !DIGlobalVariableExpression(var: [[VAR:.*]], expr: !DIExpression())
// CHECK: [[VAR]] = distinct !DIGlobalVariable
void f(void)
{
diff --git a/test/CodeGen/debug-info-vla.c b/test/CodeGen/debug-info-vla.c
index 3b69773207b2..7928ca761397 100644
--- a/test/CodeGen/debug-info-vla.c
+++ b/test/CodeGen/debug-info-vla.c
@@ -3,8 +3,7 @@
void testVLAwithSize(int s)
{
// CHECK: dbg.declare
-// CHECK: dbg.declare({{.*}}, metadata ![[VAR:.*]], metadata ![[EXPR:.*]])
-// CHECK: ![[EXPR]] = !DIExpression()
+// CHECK: dbg.declare({{.*}}, metadata ![[VAR:.*]], metadata !DIExpression())
// CHECK: ![[VAR]] = !DILocalVariable(name: "vla",{{.*}} line: [[@LINE+1]]
int vla[s];
int i;
diff --git a/test/CodeGen/finite-math.c b/test/CodeGen/finite-math.c
index 90a83fa30958..d1a2956b69fe 100644
--- a/test/CodeGen/finite-math.c
+++ b/test/CodeGen/finite-math.c
@@ -1,6 +1,7 @@
// RUN: %clang_cc1 -ffinite-math-only -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=FINITE
// RUN: %clang_cc1 -fno-signed-zeros -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=NSZ
// RUN: %clang_cc1 -freciprocal-math -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=RECIP
+// RUN: %clang_cc1 -mreassociate -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=REASSOC
float f0, f1, f2;
@@ -10,6 +11,7 @@ void foo(void) {
// FINITE: fadd nnan ninf
// NSZ: fadd nsz
// RECIP: fadd arcp
+ // REASSOC: fadd reassoc
f0 = f1 + f2;
// CHECK: ret
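The new reassoc flag licenses floating-point reassociation: the optimizer may rewrite (a + b) + c as a + (b + c) even though the two orderings can round differently. A minimal sketch:

  float sum3(float a, float b, float c) {
    return (a + b) + c; /* with 'reassoc' on the fadds, may become a + (b + c) */
  }
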
diff --git a/test/CodeGen/fma-builtins.c b/test/CodeGen/fma-builtins.c
index 91a2efe7637e..6f792a78d84c 100644
--- a/test/CodeGen/fma-builtins.c
+++ b/test/CodeGen/fma-builtins.c
@@ -4,161 +4,221 @@
#include <immintrin.h>
__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
+ // CHECK-LABEL: test_mm_fmadd_ps
// CHECK: @llvm.x86.fma.vfmadd.ps
return _mm_fmadd_ps(a, b, c);
}
__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
+ // CHECK-LABEL: test_mm_fmadd_pd
// CHECK: @llvm.x86.fma.vfmadd.pd
return _mm_fmadd_pd(a, b, c);
}
__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
+ // CHECK-LABEL: test_mm_fmadd_ss
// CHECK: @llvm.x86.fma.vfmadd.ss
return _mm_fmadd_ss(a, b, c);
}
__m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) {
+ // CHECK-LABEL: test_mm_fmadd_sd
// CHECK: @llvm.x86.fma.vfmadd.sd
return _mm_fmadd_sd(a, b, c);
}
__m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfmsub.ps
+ // CHECK-LABEL: test_mm_fmsub_ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps(<4 x float> %{{.+}}, <4 x float> %{{.+}}, <4 x float> [[NEG]])
return _mm_fmsub_ps(a, b, c);
}
__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfmsub.pd
+ // CHECK-LABEL: test_mm_fmsub_pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd(<2 x double> %{{.+}}, <2 x double> %{{.+}}, <2 x double> [[NEG]])
return _mm_fmsub_pd(a, b, c);
}
__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfmsub.ss
+ // CHECK-LABEL: test_mm_fmsub_ss
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ss(<4 x float> %{{.+}}, <4 x float> %{{.+}}, <4 x float> [[NEG]])
return _mm_fmsub_ss(a, b, c);
}
__m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfmsub.sd
+ // CHECK-LABEL: test_mm_fmsub_sd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.sd(<2 x double> %{{.+}}, <2 x double> %{{.+}}, <2 x double> [[NEG]])
return _mm_fmsub_sd(a, b, c);
}
__m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfnmadd.ps
+ // CHECK-LABEL: test_mm_fnmadd_ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps(<4 x float> [[NEG]], <4 x float> %{{.+}}, <4 x float> %{{.+}})
return _mm_fnmadd_ps(a, b, c);
}
__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfnmadd.pd
+ // CHECK-LABEL: test_mm_fnmadd_pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd(<2 x double> [[NEG]], <2 x double> %{{.+}}, <2 x double> %{{.+}})
return _mm_fnmadd_pd(a, b, c);
}
__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfnmadd.ss
+ // CHECK-LABEL: test_mm_fnmadd_ss
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ss(<4 x float> %{{.+}}, <4 x float> [[NEG]], <4 x float> %{{.+}})
return _mm_fnmadd_ss(a, b, c);
}
__m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfnmadd.sd
+ // CHECK-LABEL: test_mm_fnmadd_sd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.sd(<2 x double> %{{.+}}, <2 x double> [[NEG]], <2 x double> %{{.+}})
return _mm_fnmadd_sd(a, b, c);
}
__m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfnmsub.ps
+ // CHECK-LABEL: test_mm_fnmsub_ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps(<4 x float> [[NEG]], <4 x float> %{{.+}}, <4 x float> [[NEG2]])
return _mm_fnmsub_ps(a, b, c);
}
__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfnmsub.pd
+ // CHECK-LABEL: test_mm_fnmsub_pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd(<2 x double> [[NEG]], <2 x double> %{{.+}}, <2 x double> [[NEG2]])
return _mm_fnmsub_pd(a, b, c);
}
__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfnmsub.ss
+ // CHECK-LABEL: test_mm_fnmsub_ss
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ss(<4 x float> %{{.+}}, <4 x float> [[NEG]], <4 x float> [[NEG2]])
return _mm_fnmsub_ss(a, b, c);
}
__m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfnmsub.sd
+ // CHECK-LABEL: test_mm_fnmsub_sd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.sd(<2 x double> %{{.+}}, <2 x double> [[NEG]], <2 x double> [[NEG2]])
return _mm_fnmsub_sd(a, b, c);
}
__m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) {
+ // CHECK-LABEL: test_mm_fmaddsub_ps
// CHECK: @llvm.x86.fma.vfmaddsub.ps
return _mm_fmaddsub_ps(a, b, c);
}
__m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) {
+ // CHECK-LABEL: test_mm_fmaddsub_pd
// CHECK: @llvm.x86.fma.vfmaddsub.pd
return _mm_fmaddsub_pd(a, b, c);
}
__m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) {
- // CHECK: @llvm.x86.fma.vfmsubadd.ps
+ // CHECK-LABEL: test_mm_fmsubadd_ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.+}}, <4 x float> %{{.+}}, <4 x float> [[NEG]])
return _mm_fmsubadd_ps(a, b, c);
}
__m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) {
- // CHECK: @llvm.x86.fma.vfmsubadd.pd
+ // CHECK-LABEL: test_mm_fmsubadd_pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.+}}, <2 x double> %{{.+}}, <2 x double> [[NEG]])
return _mm_fmsubadd_pd(a, b, c);
}
__m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) {
+ // CHECK-LABEL: test_mm256_fmadd_ps
// CHECK: @llvm.x86.fma.vfmadd.ps.256
return _mm256_fmadd_ps(a, b, c);
}
__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) {
+ // CHECK-LABEL: test_mm256_fmadd_pd
// CHECK: @llvm.x86.fma.vfmadd.pd.256
return _mm256_fmadd_pd(a, b, c);
}
__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
- // CHECK: @llvm.x86.fma.vfmsub.ps.256
+ // CHECK-LABEL: test_mm256_fmsub_ps
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps.256(<8 x float> %{{.+}}, <8 x float> %{{.+}}, <8 x float> [[NEG]])
return _mm256_fmsub_ps(a, b, c);
}
__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
- // CHECK: @llvm.x86.fma.vfmsub.pd.256
+ // CHECK-LABEL: test_mm256_fmsub_pd
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd.256(<4 x double> %{{.+}}, <4 x double> %{{.+}}, <4 x double> [[NEG]])
return _mm256_fmsub_pd(a, b, c);
}
__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
- // CHECK: @llvm.x86.fma.vfnmadd.ps.256
+ // CHECK-LABEL: test_mm256_fnmadd_ps
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps.256(<8 x float> [[NEG]], <8 x float> %{{.+}}, <8 x float> %{{.+}})
return _mm256_fnmadd_ps(a, b, c);
}
__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
- // CHECK: @llvm.x86.fma.vfnmadd.pd.256
+ // CHECK-LABEL: test_mm256_fnmadd_pd
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd.256(<4 x double> [[NEG]], <4 x double> %{{.+}}, <4 x double> %{{.+}})
return _mm256_fnmadd_pd(a, b, c);
}
__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
- // CHECK: @llvm.x86.fma.vfnmsub.ps.256
+ // CHECK-LABEL: test_mm256_fnmsub_ps
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: [[NEG2:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps.256(<8 x float> [[NEG]], <8 x float> %{{.+}}, <8 x float> [[NEG2]])
return _mm256_fnmsub_ps(a, b, c);
}
__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
- // CHECK: @llvm.x86.fma.vfnmsub.pd.256
+ // CHECK-LABEL: test_mm256_fnmsub_pd
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd.256(<4 x double> [[NEG]], <4 x double> %{{.+}}, <4 x double> [[NEG2]])
return _mm256_fnmsub_pd(a, b, c);
}
__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) {
+ // CHECK-LABEL: test_mm256_fmaddsub_ps
// CHECK: @llvm.x86.fma.vfmaddsub.ps.256
return _mm256_fmaddsub_ps(a, b, c);
}
__m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) {
+ // CHECK-LABEL: test_mm256_fmaddsub_pd
// CHECK: @llvm.x86.fma.vfmaddsub.pd.256
return _mm256_fmaddsub_pd(a, b, c);
}
__m256 test_mm256_fmsubadd_ps(__m256 a, __m256 b, __m256 c) {
- // CHECK: @llvm.x86.fma.vfmsubadd.ps.256
+ // CHECK-LABEL: test_mm256_fmsubadd_ps
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.+}}, <8 x float> [[NEG]])
return _mm256_fmsubadd_ps(a, b, c);
}
__m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) {
- // CHECK: @llvm.x86.fma.vfmsubadd.pd.256
+ // CHECK-LABEL: test_mm256_fmsubadd_pd
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.+}}, <4 x double> %{{.+}}, <4 x double> [[NEG]])
return _mm256_fmsubadd_pd(a, b, c);
}
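Every *sub/*nm* form above is now checked as a plain vfmadd whose negated operands appear as explicit fsub-from-negative-zero instructions, LLVM's canonical negation. The identities being encoded are fmsub(a,b,c) = fma(a,b,-c), fnmadd(a,b,c) = fma(-a,b,c), and fnmsub(a,b,c) = fma(-a,b,-c); a scalar sketch with hypothetical reference names:

  #include <math.h>

  double fmsub_ref (double a, double b, double c) { return fma(a, b, -c); }
  double fnmadd_ref(double a, double b, double c) { return fma(-a, b, c); }
  double fnmsub_ref(double a, double b, double c) { return fma(-a, b, -c); }
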
diff --git a/test/CodeGen/fma4-builtins.c b/test/CodeGen/fma4-builtins.c
index 3ca5fac68892..c848d4a751da 100644
--- a/test/CodeGen/fma4-builtins.c
+++ b/test/CodeGen/fma4-builtins.c
@@ -17,85 +17,101 @@ __m128d test_mm_macc_pd(__m128d a, __m128d b, __m128d c) {
__m128 test_mm_macc_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_macc_ss
- // CHECK: @llvm.x86.fma.vfmadd.ss
+ // CHECK: @llvm.x86.fma4.vfmadd.ss
return _mm_macc_ss(a, b, c);
}
__m128d test_mm_macc_sd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_macc_sd
- // CHECK: @llvm.x86.fma.vfmadd.sd
+ // CHECK: @llvm.x86.fma4.vfmadd.sd
return _mm_macc_sd(a, b, c);
}
__m128 test_mm_msub_ps(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_msub_ps
- // CHECK: @llvm.x86.fma.vfmsub.ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps(<4 x float> %{{.+}}, <4 x float> %{{.+}}, <4 x float> [[NEG]])
return _mm_msub_ps(a, b, c);
}
__m128d test_mm_msub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_msub_pd
- // CHECK: @llvm.x86.fma.vfmsub.pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd(<2 x double> %{{.+}}, <2 x double> %{{.+}}, <2 x double> [[NEG]])
return _mm_msub_pd(a, b, c);
}
__m128 test_mm_msub_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_msub_ss
- // CHECK: @llvm.x86.fma.vfmsub.ss
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma4.vfmadd.ss(<4 x float> %{{.+}}, <4 x float> %{{.+}}, <4 x float> [[NEG]])
return _mm_msub_ss(a, b, c);
}
__m128d test_mm_msub_sd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_msub_sd
- // CHECK: @llvm.x86.fma.vfmsub.sd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma4.vfmadd.sd(<2 x double> %{{.+}}, <2 x double> %{{.+}}, <2 x double> [[NEG]])
return _mm_msub_sd(a, b, c);
}
__m128 test_mm_nmacc_ps(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_nmacc_ps
- // CHECK: @llvm.x86.fma.vfnmadd.ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps(<4 x float> [[NEG]], <4 x float> %{{.+}}, <4 x float> %{{.+}})
return _mm_nmacc_ps(a, b, c);
}
__m128d test_mm_nmacc_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_nmacc_pd
- // CHECK: @llvm.x86.fma.vfnmadd.pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd(<2 x double> [[NEG]], <2 x double> %{{.+}}, <2 x double> %{{.+}})
return _mm_nmacc_pd(a, b, c);
}
__m128 test_mm_nmacc_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_nmacc_ss
- // CHECK: @llvm.x86.fma.vfnmadd.ss
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma4.vfmadd.ss(<4 x float> [[NEG]], <4 x float> %{{.+}}, <4 x float> %{{.+}})
return _mm_nmacc_ss(a, b, c);
}
__m128d test_mm_nmacc_sd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_nmacc_sd
- // CHECK: @llvm.x86.fma.vfnmadd.sd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma4.vfmadd.sd(<2 x double> [[NEG]], <2 x double> %{{.+}}, <2 x double> %{{.+}})
return _mm_nmacc_sd(a, b, c);
}
__m128 test_mm_nmsub_ps(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_nmsub_ps
- // CHECK: @llvm.x86.fma.vfnmsub.ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps(<4 x float> [[NEG]], <4 x float> %{{.+}}, <4 x float> [[NEG2]])
return _mm_nmsub_ps(a, b, c);
}
__m128d test_mm_nmsub_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_nmsub_pd
- // CHECK: @llvm.x86.fma.vfnmsub.pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd(<2 x double> [[NEG]], <2 x double> %{{.+}}, <2 x double> [[NEG2]])
return _mm_nmsub_pd(a, b, c);
}
__m128 test_mm_nmsub_ss(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_nmsub_ss
- // CHECK: @llvm.x86.fma.vfnmsub.ss
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma4.vfmadd.ss(<4 x float> [[NEG]], <4 x float> %{{.+}}, <4 x float> [[NEG2]])
return _mm_nmsub_ss(a, b, c);
}
__m128d test_mm_nmsub_sd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_nmsub_sd
- // CHECK: @llvm.x86.fma.vfnmsub.sd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma4.vfmadd.sd(<2 x double> [[NEG]], <2 x double> %{{.+}}, <2 x double> [[NEG2]])
return _mm_nmsub_sd(a, b, c);
}
@@ -113,13 +129,15 @@ __m128d test_mm_maddsub_pd(__m128d a, __m128d b, __m128d c) {
__m128 test_mm_msubadd_ps(__m128 a, __m128 b, __m128 c) {
// CHECK-LABEL: test_mm_msubadd_ps
- // CHECK: @llvm.x86.fma.vfmsubadd.ps
+ // CHECK: [[NEG:%.+]] = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.+}}, <4 x float> %{{.+}}, <4 x float> [[NEG]])
return _mm_msubadd_ps(a, b, c);
}
__m128d test_mm_msubadd_pd(__m128d a, __m128d b, __m128d c) {
// CHECK-LABEL: test_mm_msubadd_pd
- // CHECK: @llvm.x86.fma.vfmsubadd.pd
+ // CHECK: [[NEG:%.+]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.+}}, <2 x double> %{{.+}}, <2 x double> [[NEG]])
return _mm_msubadd_pd(a, b, c);
}
@@ -137,37 +155,45 @@ __m256d test_mm256_macc_pd(__m256d a, __m256d b, __m256d c) {
__m256 test_mm256_msub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_msub_ps
- // CHECK: @llvm.x86.fma.vfmsub.ps.256
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps.256(<8 x float> %{{.+}}, <8 x float> %{{.+}}, <8 x float> [[NEG]])
return _mm256_msub_ps(a, b, c);
}
__m256d test_mm256_msub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_msub_pd
- // CHECK: @llvm.x86.fma.vfmsub.pd.256
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd.256(<4 x double> %{{.+}}, <4 x double> %{{.+}}, <4 x double> [[NEG]])
return _mm256_msub_pd(a, b, c);
}
__m256 test_mm256_nmacc_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_nmacc_ps
- // CHECK: @llvm.x86.fma.vfnmadd.ps.256
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps.256(<8 x float> [[NEG]], <8 x float> %{{.+}}, <8 x float> %{{.+}})
return _mm256_nmacc_ps(a, b, c);
}
__m256d test_mm256_nmacc_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_nmacc_pd
- // CHECK: @llvm.x86.fma.vfnmadd.pd.256
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd.256(<4 x double> [[NEG]], <4 x double> %{{.+}}, <4 x double> %{{.+}})
return _mm256_nmacc_pd(a, b, c);
}
__m256 test_mm256_nmsub_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_nmsub_ps
- // CHECK: @llvm.x86.fma.vfnmsub.ps.256
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: [[NEG2:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmadd.ps.256(<8 x float> [[NEG]], <8 x float> %{{.+}}, <8 x float> [[NEG2]])
return _mm256_nmsub_ps(a, b, c);
}
__m256d test_mm256_nmsub_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_nmsub_pd
- // CHECK: @llvm.x86.fma.vfnmsub.pd.256
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: [[NEG2:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmadd.pd.256(<4 x double> [[NEG]], <4 x double> %{{.+}}, <4 x double> [[NEG2]])
return _mm256_nmsub_pd(a, b, c);
}
@@ -185,12 +211,14 @@ __m256d test_mm256_maddsub_pd(__m256d a, __m256d b, __m256d c) {
__m256 test_mm256_msubadd_ps(__m256 a, __m256 b, __m256 c) {
// CHECK-LABEL: test_mm256_msubadd_ps
- // CHECK: @llvm.x86.fma.vfmsubadd.ps.256
+ // CHECK: [[NEG:%.+]] = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{.*}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.+}}, <8 x float> [[NEG]])
return _mm256_msubadd_ps(a, b, c);
}
__m256d test_mm256_msubadd_pd(__m256d a, __m256d b, __m256d c) {
// CHECK-LABEL: test_mm256_msubadd_pd
- // CHECK: @llvm.x86.fma.vfmsubadd.pd.256
+ // CHECK: [[NEG:%.+]] = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %{{.+}}
+ // CHECK: @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.+}}, <4 x double> %{{.+}}, <4 x double> [[NEG]])
return _mm256_msubadd_pd(a, b, c);
}
diff --git a/test/CodeGen/fp16-ops.c b/test/CodeGen/fp16-ops.c
index c96727f3d3fb..f2ed667341bb 100644
--- a/test/CodeGen/fp16-ops.c
+++ b/test/CodeGen/fp16-ops.c
@@ -1,8 +1,9 @@
// REQUIRES: arm-registered-target
-// RUN: %clang_cc1 -emit-llvm -o - -triple arm-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOHALF --check-prefix=CHECK
-// RUN: %clang_cc1 -emit-llvm -o - -triple aarch64-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOHALF --check-prefix=CHECK
-// RUN: %clang_cc1 -emit-llvm -o - -triple arm-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=HALF --check-prefix=CHECK
-// RUN: %clang_cc1 -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=HALF --check-prefix=CHECK
+// RUN: %clang_cc1 -emit-llvm -o - -triple arm-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -emit-llvm -o - -triple aarch64-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -emit-llvm -o - -triple x86_64-linux-gnu %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -emit-llvm -o - -triple arm-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
+// RUN: %clang_cc1 -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
// RUN: %clang_cc1 -emit-llvm -o - -triple arm-none-linux-gnueabi -fnative-half-type %s \
// RUN: | FileCheck %s --check-prefix=NATIVE-HALF
// RUN: %clang_cc1 -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fnative-half-type %s \
@@ -16,20 +17,19 @@ volatile int i0;
volatile __fp16 h0 = 0.0, h1 = 1.0, h2;
volatile float f0, f1, f2;
volatile double d0;
+short s0;
void foo(void) {
// CHECK-LABEL: define void @foo()
// Check unary ops
- // NOHALF: [[F16TOF32:call float @llvm.convert.from.fp16.f32]]
- // HALF: [[F16TOF32:fpext half]]
+ // NOTNATIVE: [[F16TOF32:fpext half]]
// CHECK: fptoui float
// NATIVE-HALF: fptoui half
test = (h0);
// CHECK: uitofp i32
- // NOHALF: [[F32TOF16:call i16 @llvm.convert.to.fp16.f32]]
- // HALF: [[F32TOF16:fptrunc float]]
+ // NOTNATIVE: [[F32TOF16:fptrunc float]]
// NATIVE-HALF: uitofp i32 {{.*}} to half
h0 = (test);
// CHECK: [[F16TOF32]]
@@ -38,8 +38,7 @@ void foo(void) {
test = (!h1);
// CHECK: [[F16TOF32]]
// CHECK: fsub float
- // NOHALF: [[F32TOF16]]
- // HALF: [[F32TOF16]]
+ // NOTNATIVE: [[F32TOF16]]
// NATIVE-HALF: fsub half
h1 = -h1;
// CHECK: [[F16TOF32]]
@@ -76,8 +75,6 @@ void foo(void) {
// NATIVE-HALF: fmul half
h1 = h0 * h2;
// CHECK: [[F16TOF32]]
- // NOHALF: [[F32TOF16]]
- // NOHALF: [[F16TOF32]]
// CHECK: fmul float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fmul half
@@ -107,7 +104,6 @@ void foo(void) {
// NATIVE-HALF: fdiv half
h1 = (h0 / h2);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fdiv float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fdiv half
@@ -137,7 +133,6 @@ void foo(void) {
// NATIVE-HALF: fadd half
h1 = (h2 + h0);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fadd float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fadd half
@@ -167,7 +162,6 @@ void foo(void) {
// NATIVE-HALF: fsub half
h1 = (h2 - h0);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fsub float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fsub half
@@ -196,7 +190,6 @@ void foo(void) {
// NATIVE-HALF: fcmp olt half
test = (h2 < h0);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fcmp olt float
// NATIVE-HALF: fcmp olt half
test = (h2 < (__fp16)42.0);
@@ -225,7 +218,6 @@ void foo(void) {
// NATIVE-HALF: fcmp ogt half
test = (h0 > h2);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fcmp ogt float
// NATIVE-HALF: fcmp ogt half
test = ((__fp16)42.0 > h2);
@@ -254,7 +246,6 @@ void foo(void) {
// NATIVE-HALF: fcmp ole half
test = (h2 <= h0);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fcmp ole float
// NATIVE-HALF: fcmp ole half
test = (h2 <= (__fp16)42.0);
@@ -284,7 +275,6 @@ void foo(void) {
// NATIVE-HALF: fcmp oge half
test = (h0 >= h2);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fcmp oge float
// NATIVE-HALF: fcmp oge half
test = (h0 >= (__fp16)-2.0);
@@ -313,7 +303,6 @@ void foo(void) {
// NATIVE-HALF: fcmp oeq half
test = (h1 == h2);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fcmp oeq float
// NATIVE-HALF: fcmp oeq half
test = (h1 == (__fp16)1.0);
@@ -342,7 +331,6 @@ void foo(void) {
// NATIVE-HALF: fcmp une half
test = (h1 != h2);
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fcmp une float
// NATIVE-HALF: fcmp une half
test = (h1 != (__fp16)1.0);
@@ -374,8 +362,7 @@ void foo(void) {
h1 = (h1 ? h2 : h0);
// Check assignments (inc. compound)
h0 = h1;
- // NOHALF: [[F32TOF16]]
- // HALF: store {{.*}} half 0xHC000
+ // NOTNATIVE: store {{.*}} half 0xHC000
// NATIVE-HALF: store {{.*}} half 0xHC000
h0 = (__fp16)-2.0f;
// CHECK: [[F32TOF16]]
@@ -398,7 +385,6 @@ void foo(void) {
// NATIVE-HALF: fadd half
h0 += h1;
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fadd float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fadd half
@@ -433,7 +419,6 @@ void foo(void) {
// NATIVE-HALF: fsub half
h0 -= h1;
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fsub float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fsub half
@@ -468,7 +453,6 @@ void foo(void) {
// NATIVE-HALF: fmul half
h0 *= h1;
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fmul float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fmul half
@@ -503,7 +487,6 @@ void foo(void) {
// NATIVE-HALF: fdiv half
h0 /= h1;
// CHECK: [[F16TOF32]]
- // NOHALF: [[F16TOF32]]
// CHECK: fdiv float
// CHECK: [[F32TOF16]]
// NATIVE-HALF: fdiv half
@@ -532,27 +515,29 @@ void foo(void) {
h0 /= i0;
// Check conversions to/from double
- // NOHALF: call i16 @llvm.convert.to.fp16.f64(
- // HALF: fptrunc double {{.*}} to half
+ // NOTNATIVE: fptrunc double {{.*}} to half
// NATIVE-HALF: fptrunc double {{.*}} to half
h0 = d0;
// CHECK: [[MID:%.*]] = fptrunc double {{%.*}} to float
- // NOHALF: call i16 @llvm.convert.to.fp16.f32(float [[MID]])
- // HALF: fptrunc float [[MID]] to half
+ // NOTNATIVE: fptrunc float [[MID]] to half
// NATIVE-HALF: [[MID:%.*]] = fptrunc double {{%.*}} to float
// NATIVE-HALF: fptrunc float {{.*}} to half
h0 = (float)d0;
- // NOHALF: call double @llvm.convert.from.fp16.f64(
- // HALF: fpext half {{.*}} to double
+ // NOTNATIVE: fpext half {{.*}} to double
// NATIVE-HALF: fpext half {{.*}} to double
d0 = h0;
- // NOHALF: [[MID:%.*]] = call float @llvm.convert.from.fp16.f32(
- // HALF: [[MID:%.*]] = fpext half {{.*}} to float
+ // NOTNATIVE: [[MID:%.*]] = fpext half {{.*}} to float
// CHECK: fpext float [[MID]] to double
// NATIVE-HALF: [[MID:%.*]] = fpext half {{.*}} to float
// NATIVE-HALF: fpext float [[MID]] to double
d0 = (float)h0;
+
+ // NOTNATIVE: [[V1:%.*]] = load i16, i16* @s0
+ // NOTNATIVE: [[CONV:%.*]] = sitofp i16 [[V1]] to float
+ // NOTNATIVE: [[TRUNC:%.*]] = fptrunc float [[CONV]] to half
+ // NOTNATIVE: store volatile half [[TRUNC]], half* @h0
+ h0 = s0;
}
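In every run except the -fnative-half-type ones, __fp16 is a storage-only type: operands are widened to float (fpext), the arithmetic happens in float, and the result is narrowed back (fptrunc) for the store, which is exactly the NOTNATIVE pattern. A minimal sketch under that assumption, mirroring the test's use of globals:

  volatile __fp16 ha, hb, hr;

  void storage_only_add(void) {
    hr = ha + hb; /* fpext ha, fpext hb, fadd float, fptrunc to half */
  }
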
diff --git a/test/CodeGen/fp16vec-ops.c b/test/CodeGen/fp16vec-ops.c
new file mode 100644
index 000000000000..2eb75a48e30a
--- /dev/null
+++ b/test/CodeGen/fp16vec-ops.c
@@ -0,0 +1,163 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -triple arm64-apple-ios9 -emit-llvm -o - -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=CHECK
+// RUN: %clang_cc1 -triple armv7-apple-ios9 -emit-llvm -o - -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=CHECK
+// RUN: %clang_cc1 -triple x86_64-apple-macos10.13 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK
+
+typedef __fp16 half4 __attribute__ ((vector_size (8)));
+typedef short short4 __attribute__ ((vector_size (8)));
+
+half4 hv0, hv1;
+short4 sv0;
+
+// CHECK-LABEL: testFP16Vec0
+// CHECK: %[[V0:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV:.*]] = fpext <4 x half> %[[V0]] to <4 x float>
+// CHECK: %[[V1:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV1:.*]] = fpext <4 x half> %[[V1]] to <4 x float>
+// CHECK: %[[ADD:.*]] = fadd <4 x float> %[[CONV]], %[[CONV1]]
+// CHECK: %[[CONV2:.*]] = fptrunc <4 x float> %[[ADD]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV2]], <4 x half>* @hv0, align 8
+// CHECK: %[[V2:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV3:.*]] = fpext <4 x half> %[[V2]] to <4 x float>
+// CHECK: %[[V3:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV4:.*]] = fpext <4 x half> %[[V3]] to <4 x float>
+// CHECK: %[[SUB:.*]] = fsub <4 x float> %[[CONV3]], %[[CONV4]]
+// CHECK: %[[CONV5:.*]] = fptrunc <4 x float> %[[SUB]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV5]], <4 x half>* @hv0, align 8
+// CHECK: %[[V4:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV6:.*]] = fpext <4 x half> %[[V4]] to <4 x float>
+// CHECK: %[[V5:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV7:.*]] = fpext <4 x half> %[[V5]] to <4 x float>
+// CHECK: %[[MUL:.*]] = fmul <4 x float> %[[CONV6]], %[[CONV7]]
+// CHECK: %[[CONV8:.*]] = fptrunc <4 x float> %[[MUL]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV8]], <4 x half>* @hv0, align 8
+// CHECK: %[[V6:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV9:.*]] = fpext <4 x half> %[[V6]] to <4 x float>
+// CHECK: %[[V7:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV10:.*]] = fpext <4 x half> %[[V7]] to <4 x float>
+// CHECK: %[[DIV:.*]] = fdiv <4 x float> %[[CONV9]], %[[CONV10]]
+// CHECK: %[[CONV11:.*]] = fptrunc <4 x float> %[[DIV]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV11]], <4 x half>* @hv0, align 8
+
+void testFP16Vec0() {
+ hv0 = hv0 + hv1;
+ hv0 = hv0 - hv1;
+ hv0 = hv0 * hv1;
+ hv0 = hv0 / hv1;
+}
+
+// CHECK-LABEL: testFP16Vec1
+// CHECK: %[[V0:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV:.*]] = fpext <4 x half> %[[V0]] to <4 x float>
+// CHECK: %[[V1:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV1:.*]] = fpext <4 x half> %[[V1]] to <4 x float>
+// CHECK: %[[ADD:.*]] = fadd <4 x float> %[[CONV1]], %[[CONV]]
+// CHECK: %[[CONV2:.*]] = fptrunc <4 x float> %[[ADD]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV2]], <4 x half>* @hv0, align 8
+// CHECK: %[[V2:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV3:.*]] = fpext <4 x half> %[[V2]] to <4 x float>
+// CHECK: %[[V3:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV4:.*]] = fpext <4 x half> %[[V3]] to <4 x float>
+// CHECK: %[[SUB:.*]] = fsub <4 x float> %[[CONV4]], %[[CONV3]]
+// CHECK: %[[CONV5:.*]] = fptrunc <4 x float> %[[SUB]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV5]], <4 x half>* @hv0, align 8
+// CHECK: %[[V4:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV6:.*]] = fpext <4 x half> %[[V4]] to <4 x float>
+// CHECK: %[[V5:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV7:.*]] = fpext <4 x half> %[[V5]] to <4 x float>
+// CHECK: %[[MUL:.*]] = fmul <4 x float> %[[CONV7]], %[[CONV6]]
+// CHECK: %[[CONV8:.*]] = fptrunc <4 x float> %[[MUL]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV8]], <4 x half>* @hv0, align 8
+// CHECK: %[[V6:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV9:.*]] = fpext <4 x half> %[[V6]] to <4 x float>
+// CHECK: %[[V7:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV10:.*]] = fpext <4 x half> %[[V7]] to <4 x float>
+// CHECK: %[[DIV:.*]] = fdiv <4 x float> %[[CONV10]], %[[CONV9]]
+// CHECK: %[[CONV11:.*]] = fptrunc <4 x float> %[[DIV]] to <4 x half>
+// CHECK: store <4 x half> %[[CONV11]], <4 x half>* @hv0, align 8
+
+void testFP16Vec1() {
+ hv0 += hv1;
+ hv0 -= hv1;
+ hv0 *= hv1;
+ hv0 /= hv1;
+}
+
+// CHECK-LABEL: testFP16Vec2
+// CHECK: %[[CADDR:.*]] = alloca i32, align 4
+// CHECK: store i32 %[[C:.*]], i32* %[[CADDR]], align 4
+// CHECK: %[[V0:.*]] = load i32, i32* %[[CADDR]], align 4
+// CHECK: %[[TOBOOL:.*]] = icmp ne i32 %[[V0]], 0
+// CHECK: br i1 %[[TOBOOL]], label %{{.*}}, label %{{.*}}
+//
+// CHECK: %[[V1:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: br label %{{.*}}
+//
+// CHECK: %[[V2:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: br label %{{.*}}
+//
+// CHECK: %[[COND:.*]] = phi <4 x half> [ %[[V1]], %{{.*}} ], [ %[[V2]], %{{.*}} ]
+// CHECK: store <4 x half> %[[COND]], <4 x half>* @hv0, align 8
+
+void testFP16Vec2(int c) {
+ hv0 = c ? hv0 : hv1;
+}
+
+// CHECK-LABEL: testFP16Vec3
+// CHECK: %[[V0:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV:.*]] = fpext <4 x half> %[[V0]] to <4 x float>
+// CHECK: %[[V1:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV1:.*]] = fpext <4 x half> %[[V1]] to <4 x float>
+// CHECK: %[[CMP:.*]] = fcmp oeq <4 x float> %[[CONV]], %[[CONV1]]
+// CHECK: %[[SEXT:.*]] = sext <4 x i1> %[[CMP]] to <4 x i32>
+// CHECK: %[[CONV2:.*]] = trunc <4 x i32> %[[SEXT]] to <4 x i16>
+// CHECK: store <4 x i16> %[[CONV2]], <4 x i16>* @sv0, align 8
+// CHECK: %[[V2:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV3:.*]] = fpext <4 x half> %[[V2]] to <4 x float>
+// CHECK: %[[V3:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV4:.*]] = fpext <4 x half> %[[V3]] to <4 x float>
+// CHECK: %[[CMP5:.*]] = fcmp une <4 x float> %[[CONV3]], %[[CONV4]]
+// CHECK: %[[SEXT6:.*]] = sext <4 x i1> %[[CMP5]] to <4 x i32>
+// CHECK: %[[CONV7:.*]] = trunc <4 x i32> %[[SEXT6]] to <4 x i16>
+// CHECK: store <4 x i16> %[[CONV7]], <4 x i16>* @sv0, align 8
+// CHECK: %[[V4:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV8:.*]] = fpext <4 x half> %[[V4]] to <4 x float>
+// CHECK: %[[V5:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV9:.*]] = fpext <4 x half> %[[V5]] to <4 x float>
+// CHECK: %[[CMP10:.*]] = fcmp olt <4 x float> %[[CONV8]], %[[CONV9]]
+// CHECK: %[[SEXT11:.*]] = sext <4 x i1> %[[CMP10]] to <4 x i32>
+// CHECK: %[[CONV12:.*]] = trunc <4 x i32> %[[SEXT11]] to <4 x i16>
+// CHECK: store <4 x i16> %[[CONV12]], <4 x i16>* @sv0, align 8
+// CHECK: %[[V6:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV13:.*]] = fpext <4 x half> %[[V6]] to <4 x float>
+// CHECK: %[[V7:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV14:.*]] = fpext <4 x half> %[[V7]] to <4 x float>
+// CHECK: %[[CMP15:.*]] = fcmp ogt <4 x float> %[[CONV13]], %[[CONV14]]
+// CHECK: %[[SEXT16:.*]] = sext <4 x i1> %[[CMP15]] to <4 x i32>
+// CHECK: %[[CONV17:.*]] = trunc <4 x i32> %[[SEXT16]] to <4 x i16>
+// CHECK: store <4 x i16> %[[CONV17]], <4 x i16>* @sv0, align 8
+// CHECK: %[[V8:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV18:.*]] = fpext <4 x half> %[[V8]] to <4 x float>
+// CHECK: %[[V9:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV19:.*]] = fpext <4 x half> %[[V9]] to <4 x float>
+// CHECK: %[[CMP20:.*]] = fcmp ole <4 x float> %[[CONV18]], %[[CONV19]]
+// CHECK: %[[SEXT21:.*]] = sext <4 x i1> %[[CMP20]] to <4 x i32>
+// CHECK: %[[CONV22:.*]] = trunc <4 x i32> %[[SEXT21]] to <4 x i16>
+// CHECK: store <4 x i16> %[[CONV22]], <4 x i16>* @sv0, align 8
+// CHECK: %[[V10:.*]] = load <4 x half>, <4 x half>* @hv0, align 8
+// CHECK: %[[CONV23:.*]] = fpext <4 x half> %[[V10]] to <4 x float>
+// CHECK: %[[V11:.*]] = load <4 x half>, <4 x half>* @hv1, align 8
+// CHECK: %[[CONV24:.*]] = fpext <4 x half> %[[V11]] to <4 x float>
+// CHECK: %[[CMP25:.*]] = fcmp oge <4 x float> %[[CONV23]], %[[CONV24]]
+// CHECK: %[[SEXT26:.*]] = sext <4 x i1> %[[CMP25]] to <4 x i32>
+// CHECK: %[[CONV27:.*]] = trunc <4 x i32> %[[SEXT26]] to <4 x i16>
+// CHECK: store <4 x i16> %[[CONV27]], <4 x i16>* @sv0, align 8
+
+void testFP16Vec3() {
+ sv0 = hv0 == hv1;
+ sv0 = hv0 != hv1;
+ sv0 = hv0 < hv1;
+ sv0 = hv0 > hv1;
+ sv0 = hv0 <= hv1;
+ sv0 = hv0 >= hv1;
+}
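Half vectors follow the same widen-to-float pattern, and a vector comparison yields an integer mask vector rather than a scalar bool: each lane is sign-extended to -1 where the predicate holds and 0 where it does not, then truncated to the element width of short4. A sketch with hypothetical names:

  typedef __fp16 half4 __attribute__((vector_size(8)));
  typedef short short4 __attribute__((vector_size(8)));

  half4 a4, b4;
  short4 m4;

  void cmp_eq(void) {
    m4 = a4 == b4; /* each lane: -1 if equal, 0 otherwise */
  }
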
diff --git a/test/CodeGen/function-attributes.c b/test/CodeGen/function-attributes.c
index 2139f6fe6546..e14397440100 100644
--- a/test/CodeGen/function-attributes.c
+++ b/test/CodeGen/function-attributes.c
@@ -1,5 +1,6 @@
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -disable-llvm-passes -Os -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -disable-llvm-passes -Os -std=c99 -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -disable-llvm-passes -Os -std=c99 -o - %s | FileCheck %s
// CHECK: define signext i8 @f0(i32 %x) [[NUW:#[0-9]+]]
// CHECK: define zeroext i8 @f1(i32 %x) [[NUW]]
// CHECK: define void @f2(i8 signext %x) [[NUW]]
diff --git a/test/CodeGen/hexagon-inline-asm.c b/test/CodeGen/hexagon-inline-asm.c
index cda3d0dcb6bd..3de3f58baf3b 100644
--- a/test/CodeGen/hexagon-inline-asm.c
+++ b/test/CodeGen/hexagon-inline-asm.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple hexagon-unknown-elf -target-feature +hvx -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple hexagon-unknown-elf -target-feature +hvx -target-feature +hvx-length64b -emit-llvm -o - %s | FileCheck %s
typedef int v64 __attribute__((__vector_size__(64)))
__attribute__((aligned(64)));
@@ -15,3 +15,9 @@ void foo(v64 v0, v64 v1, v64 *p) {
asm ("%0 = memw(##%1)" : "=r"(r) : "s"(&g));
// CHECK: call i32 asm "$0 = memw(##$1)", "=r,s"(i32* @g)
}
+
+void fred(unsigned *p, unsigned m, unsigned v) {
+ asm ("memw(%0++%1) = %2" : : "r"(p),"a"(m),"r"(v) : "memory");
+// CHECK: call void asm sideeffect "memw($0++$1) = $2", "r,a,r,~{memory}"(i32* %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+}
+
diff --git a/test/CodeGen/instrument-functions.c b/test/CodeGen/instrument-functions.c
index 454dc4de5220..c075c3972dd7 100644
--- a/test/CodeGen/instrument-functions.c
+++ b/test/CodeGen/instrument-functions.c
@@ -1,18 +1,34 @@
-// RUN: %clang_cc1 -S -debug-info-kind=standalone -emit-llvm -o - %s -finstrument-functions | FileCheck %s
+// RUN: %clang_cc1 -S -debug-info-kind=standalone -emit-llvm -o - %s -finstrument-functions -disable-llvm-passes | FileCheck %s
+// RUN: %clang_cc1 -S -debug-info-kind=standalone -emit-llvm -o - %s -finstrument-function-entry-bare -disable-llvm-passes | FileCheck -check-prefix=BARE %s
-// CHECK: @test1
int test1(int x) {
-// CHECK: call void @__cyg_profile_func_enter({{.*}}, !dbg
-// CHECK: call void @__cyg_profile_func_exit({{.*}}, !dbg
+// CHECK: @test1(i32 {{.*}}%x) #[[ATTR1:[0-9]+]]
// CHECK: ret
+
+// BARE: @test1(i32 {{.*}}%x) #[[ATTR1:[0-9]+]]
+// BARE: ret
return x;
}
-// CHECK: @test2
int test2(int) __attribute__((no_instrument_function));
int test2(int x) {
-// CHECK-NOT: __cyg_profile_func_enter
-// CHECK-NOT: __cyg_profile_func_exit
+// CHECK: @test2(i32 {{.*}}%x) #[[ATTR2:[0-9]+]]
// CHECK: ret
+
+// BARE: @test2(i32 {{.*}}%x) #[[ATTR2:[0-9]+]]
+// BARE: ret
return x;
}
+
+// CHECK: attributes #[[ATTR1]] =
+// CHECK-SAME: "instrument-function-entry"="__cyg_profile_func_enter"
+// CHECK-SAME: "instrument-function-exit"="__cyg_profile_func_exit"
+
+// BARE: attributes #[[ATTR1]] =
+// BARE-SAME: "instrument-function-entry-inlined"="__cyg_profile_func_enter_bare"
+
+// CHECK: attributes #[[ATTR2]] =
+// CHECK-NOT: "instrument-function-entry"
+
+// BARE: attributes #[[ATTR2]] =
+// BARE-NOT: "instrument-function-entry"
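The frontend now records the instrumentation request as function attributes and leaves call emission to an LLVM pass, but the runtime contract is unchanged. The standard callback signatures, typically declared no_instrument_function so they are not instrumented themselves:

  void __cyg_profile_func_enter(void *this_fn, void *call_site)
      __attribute__((no_instrument_function));
  void __cyg_profile_func_exit(void *this_fn, void *call_site)
      __attribute__((no_instrument_function));
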
diff --git a/test/CodeGen/libcall-declarations.c b/test/CodeGen/libcall-declarations.c
index 5a0b2ba0e636..7492995be892 100644
--- a/test/CodeGen/libcall-declarations.c
+++ b/test/CodeGen/libcall-declarations.c
@@ -312,314 +312,310 @@ void *use[] = {
F(__cospif), F(__tanpi), F(__tanpif), F(__exp10), F(__exp10f)
};
-// CHECK-NOERRNO: declare double @atan2(double, double) [[NUW:#[0-9]+]]
-// CHECK-NOERRNO: declare float @atan2f(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare i32 @abs(i32) [[NUW]]
-// CHECK-NOERRNO: declare i64 @labs(i64) [[NUW]]
-// CHECK-NOERRNO: declare i64 @llabs(i64) [[NUW]]
-// CHECK-NOERRNO: declare double @copysign(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @copysignf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @fabs(double) [[NUW]]
-// CHECK-NOERRNO: declare float @fabsf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @fabsl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @fmod(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @fmodf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @ldexp(double, i32) [[NUW]]
-// CHECK-NOERRNO: declare float @ldexpf(float, i32) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NUW]]
-// CHECK-NOERRNO: declare double @nan(i8*) [[NUW]]
-// CHECK-NOERRNO: declare float @nanf(i8*) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @nanl(i8*) [[NUW]]
-// CHECK-NOERRNO: declare double @pow(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @powf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @acos(double) [[NUW]]
-// CHECK-NOERRNO: declare float @acosf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @acosl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @acosh(double) [[NUW]]
-// CHECK-NOERRNO: declare float @acoshf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @asin(double) [[NUW]]
-// CHECK-NOERRNO: declare float @asinf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @asinl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @asinh(double) [[NUW]]
-// CHECK-NOERRNO: declare float @asinhf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @atan(double) [[NUW]]
-// CHECK-NOERRNO: declare float @atanf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @atanl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @atanh(double) [[NUW]]
-// CHECK-NOERRNO: declare float @atanhf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @cbrt(double) [[NUW]]
-// CHECK-NOERRNO: declare float @cbrtf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @ceil(double) [[NUW]]
-// CHECK-NOERRNO: declare float @ceilf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @ceill(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @cos(double) [[NUW]]
-// CHECK-NOERRNO: declare float @cosf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @cosl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @cosh(double) [[NUW]]
-// CHECK-NOERRNO: declare float @coshf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @coshl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @erf(double) [[NUW]]
-// CHECK-NOERRNO: declare float @erff(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @erfl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @erfc(double) [[NUW]]
-// CHECK-NOERRNO: declare float @erfcf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @exp(double) [[NUW]]
-// CHECK-NOERRNO: declare float @expf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @expl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @exp2(double) [[NUW]]
-// CHECK-NOERRNO: declare float @exp2f(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @expm1(double) [[NUW]]
-// CHECK-NOERRNO: declare float @expm1f(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @fdim(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @fdimf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @floor(double) [[NUW]]
-// CHECK-NOERRNO: declare float @floorf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @floorl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @fma(double, double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @fmaf(float, float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @fmax(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @fmaxf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @fmin(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @fminf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @hypot(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @hypotf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare i32 @ilogb(double) [[NUW]]
-// CHECK-NOERRNO: declare i32 @ilogbf(float) [[NUW]]
-// CHECK-NOERRNO: declare i32 @ilogbl(x86_fp80) [[NUW]]
+// CHECK-NOERRNO: declare double @atan2(double, double) [[NUWRN:#[0-9]+]]
+// CHECK-NOERRNO: declare float @atan2f(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare i32 @abs(i32) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @labs(i64) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @llabs(i64) [[NUWRN]]
+// CHECK-NOERRNO: declare double @copysign(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @copysignf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @fabs(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @fabsf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @fabsl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @fmod(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @fmodf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @ldexp(double, i32) [[NUWRN]]
+// CHECK-NOERRNO: declare float @ldexpf(float, i32) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NUWRN]]
+// CHECK-NOERRNO: declare double @nan(i8*) [[NUWRO:#[0-9]+]]
+// CHECK-NOERRNO: declare float @nanf(i8*) [[NUWRO]]
+// CHECK-NOERRNO: declare x86_fp80 @nanl(i8*) [[NUWRO]]
+// CHECK-NOERRNO: declare double @pow(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @powf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @acos(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @acosf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @acosl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @acosh(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @acoshf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @asin(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @asinf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @asinl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @asinh(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @asinhf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @atan(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @atanf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @atanl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @atanh(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @atanhf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @cbrt(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @cbrtf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @ceil(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @ceilf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @ceill(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @cos(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @cosf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @cosl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @cosh(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @coshf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @coshl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @erf(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @erff(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @erfl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @erfc(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @erfcf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @exp(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @expf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @expl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @exp2(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @exp2f(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @expm1(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @expm1f(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @fdim(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @fdimf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @floor(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @floorf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @floorl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @fma(double, double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @fmaf(float, float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @fmax(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @fmaxf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @fmin(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @fminf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @hypot(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @hypotf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare i32 @ilogb(double) [[NUWRN]]
+// CHECK-NOERRNO: declare i32 @ilogbf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare i32 @ilogbl(x86_fp80) [[NUWRN]]
// CHECK-NOERRNO: declare double @lgamma(double) [[NONCONST:#[0-9]+]]
// CHECK-NOERRNO: declare float @lgammaf(float) [[NONCONST]]
// CHECK-NOERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NONCONST]]
-// CHECK-NOERRNO: declare i64 @llrint(double) [[NUW]]
-// CHECK-NOERRNO: declare i64 @llrintf(float) [[NUW]]
-// CHECK-NOERRNO: declare i64 @llrintl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare i64 @llround(double) [[NUW]]
-// CHECK-NOERRNO: declare i64 @llroundf(float) [[NUW]]
-// CHECK-NOERRNO: declare i64 @llroundl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @log(double) [[NUW]]
-// CHECK-NOERRNO: declare float @logf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @logl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @log10(double) [[NUW]]
-// CHECK-NOERRNO: declare float @log10f(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @log10l(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @log1p(double) [[NUW]]
-// CHECK-NOERRNO: declare float @log1pf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @log2(double) [[NUW]]
-// CHECK-NOERRNO: declare float @log2f(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @log2l(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @logb(double) [[NUW]]
-// CHECK-NOERRNO: declare float @logbf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @logbl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare i64 @lrint(double) [[NUW]]
-// CHECK-NOERRNO: declare i64 @lrintf(float) [[NUW]]
-// CHECK-NOERRNO: declare i64 @lrintl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare i64 @lround(double) [[NUW]]
-// CHECK-NOERRNO: declare i64 @lroundf(float) [[NUW]]
-// CHECK-NOERRNO: declare i64 @lroundl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @nearbyint(double) [[NUW]]
-// CHECK-NOERRNO: declare float @nearbyintf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @nextafter(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @nextafterf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @nexttoward(double, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare float @nexttowardf(float, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @remainder(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @remainderf(float, float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @rint(double) [[NUW]]
-// CHECK-NOERRNO: declare float @rintf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @rintl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @round(double) [[NUW]]
-// CHECK-NOERRNO: declare float @roundf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @roundl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @scalbln(double, i64) [[NUW]]
-// CHECK-NOERRNO: declare float @scalblnf(float, i64) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NUW]]
-// CHECK-NOERRNO: declare double @scalbn(double, i32) [[NUW]]
-// CHECK-NOERRNO: declare float @scalbnf(float, i32) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NUW]]
-// CHECK-NOERRNO: declare double @sin(double) [[NUW]]
-// CHECK-NOERRNO: declare float @sinf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @sinl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @sinh(double) [[NUW]]
-// CHECK-NOERRNO: declare float @sinhf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @sqrt(double) [[NUW]]
-// CHECK-NOERRNO: declare float @sqrtf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @tan(double) [[NUW]]
-// CHECK-NOERRNO: declare float @tanf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @tanl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @tanh(double) [[NUW]]
-// CHECK-NOERRNO: declare float @tanhf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @tgamma(double) [[NUW]]
-// CHECK-NOERRNO: declare float @tgammaf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @trunc(double) [[NUW]]
-// CHECK-NOERRNO: declare float @truncf(float) [[NUW]]
-// CHECK-NOERRNO: declare x86_fp80 @truncl(x86_fp80) [[NUW]]
-// CHECK-NOERRNO: declare double @cabs(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @cabsf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @cacos(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @cacosf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @cacosh(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare double @carg(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @cargf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @casin(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @casinf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @casinh(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @casinhf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @catan(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @catanf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @catanh(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @catanhf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @ccos(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @ccosf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @ccosh(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @cexp(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @cexpf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare double @cimag(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @cimagf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @conj(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @conjf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @clog(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @clogf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @cproj(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @cprojf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @cpow(double, double, double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare double @creal(double, double) [[NUW]]
-// CHECK-NOERRNO: declare float @crealf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @csin(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @csinf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @csinh(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @csinhf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @csqrt(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @ctan(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @ctanf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare { double, double } @ctanh(double, double) [[NUW]]
-// CHECK-NOERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NUW]]
-// CHECK-NOERRNO: declare double @__sinpi(double) [[NUW]]
-// CHECK-NOERRNO: declare float @__sinpif(float) [[NUW]]
-// CHECK-NOERRNO: declare double @__cospi(double) [[NUW]]
-// CHECK-NOERRNO: declare float @__cospif(float) [[NUW]]
-// CHECK-NOERRNO: declare double @__tanpi(double) [[NUW]]
-// CHECK-NOERRNO: declare float @__tanpif(float) [[NUW]]
-// CHECK-NOERRNO: declare double @__exp10(double) [[NUW]]
-// CHECK-NOERRNO: declare float @__exp10f(float) [[NUW]]
+// CHECK-NOERRNO: declare i64 @llrint(double) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @llrintf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @llrintl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @llround(double) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @llroundf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @llroundl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @log(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @logf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @logl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @log10(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @log10f(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @log10l(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @log1p(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @log1pf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @log2(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @log2f(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @log2l(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @logb(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @logbf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @logbl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @lrint(double) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @lrintf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @lrintl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @lround(double) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @lroundf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare i64 @lroundl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @nearbyint(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @nearbyintf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @nextafter(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @nextafterf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @nexttoward(double, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare float @nexttowardf(float, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @remainder(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @remainderf(float, float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @rint(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @rintf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @rintl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @round(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @roundf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @roundl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @scalbln(double, i64) [[NUWRN]]
+// CHECK-NOERRNO: declare float @scalblnf(float, i64) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NUWRN]]
+// CHECK-NOERRNO: declare double @scalbn(double, i32) [[NUWRN]]
+// CHECK-NOERRNO: declare float @scalbnf(float, i32) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NUWRN]]
+// CHECK-NOERRNO: declare double @sin(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @sinf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @sinl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @sinh(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @sinhf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @sqrt(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @sqrtf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @tan(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @tanf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @tanl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @tanh(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @tanhf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @tgamma(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @tgammaf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @trunc(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @truncf(float) [[NUWRN]]
+// CHECK-NOERRNO: declare x86_fp80 @truncl(x86_fp80) [[NUWRN]]
+// CHECK-NOERRNO: declare double @cabs(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @cabsf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @cacos(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @cacosf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @cacosh(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare double @carg(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @cargf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @casin(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @casinf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @casinh(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @casinhf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @catan(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @catanf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @catanh(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @catanhf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @ccos(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @ccosf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @ccosh(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @cexp(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @cexpf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare double @cimag(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @cimagf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @conj(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @conjf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @clog(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @clogf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @cproj(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @cprojf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @cpow(double, double, double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare double @creal(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @crealf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @csin(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @csinf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @csinh(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @csinhf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @csqrt(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @ctan(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @ctanf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare { double, double } @ctanh(double, double) [[NUWRN]]
+// CHECK-NOERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NUWRN]]
+// CHECK-NOERRNO: declare double @__sinpi(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @__sinpif(float) [[NUWRN]]
+// CHECK-NOERRNO: declare double @__cospi(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @__cospif(float) [[NUWRN]]
+// CHECK-NOERRNO: declare double @__tanpi(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @__tanpif(float) [[NUWRN]]
+// CHECK-NOERRNO: declare double @__exp10(double) [[NUWRN]]
+// CHECK-NOERRNO: declare float @__exp10f(float) [[NUWRN]]
-// CHECK-ERRNO: declare i32 @abs(i32) [[NUW:#[0-9]+]]
-// CHECK-ERRNO: declare i64 @labs(i64) [[NUW]]
-// CHECK-ERRNO: declare i64 @llabs(i64) [[NUW]]
-// CHECK-ERRNO: declare double @copysign(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @copysignf(float, float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @fabs(double) [[NUW]]
-// CHECK-ERRNO: declare float @fabsf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @fabsl(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @nan(i8*) [[NUW]]
-// CHECK-ERRNO: declare float @nanf(i8*) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @nanl(i8*) [[NUW]]
-// CHECK-ERRNO: declare double @ceil(double) [[NUW]]
-// CHECK-ERRNO: declare float @ceilf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @ceill(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @floor(double) [[NUW]]
-// CHECK-ERRNO: declare float @floorf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @floorl(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @fmax(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @fmaxf(float, float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @fmin(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @fminf(float, float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUW]]
+// CHECK-ERRNO: declare i32 @abs(i32) [[NUWRN:#[0-9]+]]
+// CHECK-ERRNO: declare i64 @labs(i64) [[NUWRN]]
+// CHECK-ERRNO: declare i64 @llabs(i64) [[NUWRN]]
+// CHECK-ERRNO: declare double @copysign(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare float @copysignf(float, float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @fabs(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @fabsf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @fabsl(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @nan(i8*) [[NUWRO:#[0-9]+]]
+// CHECK-ERRNO: declare float @nanf(i8*) [[NUWRO]]
+// CHECK-ERRNO: declare x86_fp80 @nanl(i8*) [[NUWRO]]
+// CHECK-ERRNO: declare double @ceil(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @ceilf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @ceill(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @floor(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @floorf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @floorl(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @fmax(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare float @fmaxf(float, float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @fmin(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare float @fminf(float, float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUWRN]]
// CHECK-ERRNO: declare double @lgamma(double) [[NONCONST:#[0-9]+]]
// CHECK-ERRNO: declare float @lgammaf(float) [[NONCONST]]
// CHECK-ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NONCONST]]
-// CHECK-ERRNO: declare double @nearbyint(double) [[NUW]]
-// CHECK-ERRNO: declare float @nearbyintf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @rint(double) [[NUW]]
-// CHECK-ERRNO: declare float @rintf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @rintl(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @round(double) [[NUW]]
-// CHECK-ERRNO: declare float @roundf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @roundl(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @trunc(double) [[NUW]]
-// CHECK-ERRNO: declare float @truncf(float) [[NUW]]
-// CHECK-ERRNO: declare x86_fp80 @truncl(x86_fp80) [[NUW]]
-// CHECK-ERRNO: declare double @cabs(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @cabsf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @cacos(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @cacosh(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare double @carg(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @cargf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @casin(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @casinf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @casinh(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @catan(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @catanf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @catanh(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @ccos(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @ccosh(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @cexp(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @cexpf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare double @cimag(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @cimagf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @conj(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @conjf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @clog(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @clogf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @cproj(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @cprojf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @cpow(double, double, double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NUW]]
-// CHECK-ERRNO: declare double @creal(double, double) [[NUW]]
-// CHECK-ERRNO: declare float @crealf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @csin(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @csinf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @csinh(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @csqrt(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @ctan(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NUW]]
-// CHECK-ERRNO: declare { double, double } @ctanh(double, double) [[NUW]]
-// CHECK-ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NUW]]
+// CHECK-ERRNO: declare double @nearbyint(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @nearbyintf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @rint(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @rintf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @rintl(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @round(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @roundf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @roundl(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @trunc(double) [[NUWRN]]
+// CHECK-ERRNO: declare float @truncf(float) [[NUWRN]]
+// CHECK-ERRNO: declare x86_fp80 @truncl(x86_fp80) [[NUWRN]]
+// CHECK-ERRNO: declare double @cabs(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare float @cabsf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @cacos(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @cacosh(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare double @carg(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare float @cargf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @casin(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @casinf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @casinh(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @catan(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @catanf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @catanh(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @ccos(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @ccosh(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @cexp(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @cexpf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare double @cimag(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare float @cimagf(<2 x float>) [[NUWRN]]
+// CHECK-ERRNO: declare { double, double } @conj(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare <2 x float> @conjf(<2 x float>) [[NUWRN]]
+// CHECK-ERRNO: declare { double, double } @clog(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @clogf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @cproj(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare <2 x float> @cprojf(<2 x float>) [[NUWRN]]
+// CHECK-ERRNO: declare { double, double } @cpow(double, double, double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare double @creal(double, double) [[NUWRN]]
+// CHECK-ERRNO: declare float @crealf(<2 x float>) [[NUWRN]]
+// CHECK-ERRNO: declare { double, double } @csin(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @csinf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @csinh(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @csqrt(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @ctan(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NONCONST]]
+// CHECK-ERRNO: declare { double, double } @ctanh(double, double) [[NONCONST]]
+// CHECK-ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NONCONST]]
-// CHECK-NOERRNO: attributes [[NUW]] = { nounwind readnone{{.*}} }
-// CHECK-NOERRNO: attributes [[NONCONST]] = {
-// CHECK-NOERRNO-NOT: readnone
-// CHECK-NOERRNO-SAME: nounwind{{.*}} }
+// CHECK-NOERRNO: attributes [[NUWRN]] = { nounwind readnone{{.*}} }
+// CHECK-NOERRNO: attributes [[NUWRO]] = { nounwind readonly{{.*}} }
-// CHECK-ERRNO: attributes [[NONCONST]] = {
-// CHECK-ERRNO-NOT: readnone
-// CHECK-ERRNO-SAME: nounwind{{.*}} }
-// CHECK-ERRNO: attributes [[NUW]] = { nounwind readnone{{.*}} }
+// CHECK-ERRNO: attributes [[NUWRN]] = { nounwind readnone{{.*}} }
+// CHECK-ERRNO: attributes [[NUWRO]] = { nounwind readonly{{.*}} }
diff --git a/test/CodeGen/libcalls.c b/test/CodeGen/libcalls.c
index 3a8207b2bebc..1b314f777b4c 100644
--- a/test/CodeGen/libcalls.c
+++ b/test/CodeGen/libcalls.c
@@ -6,29 +6,28 @@
// CHECK-NO-LABEL: define void @test_sqrt
// CHECK-FAST-LABEL: define void @test_sqrt
void test_sqrt(float a0, double a1, long double a2) {
- // Following llvm-gcc's lead, we never emit these as intrinsics;
- // no-math-errno isn't good enough. We could probably use intrinsics
- // with appropriate guards if it proves worthwhile.
-
// CHECK-YES: call float @sqrtf
- // CHECK-NO: call float @sqrtf
+ // CHECK-NO: call float @llvm.sqrt.f32(float
+ // CHECK-FAST: call float @llvm.sqrt.f32(float
float l0 = sqrtf(a0);
// CHECK-YES: call double @sqrt
- // CHECK-NO: call double @sqrt
+ // CHECK-NO: call double @llvm.sqrt.f64(double
+ // CHECK-FAST: call double @llvm.sqrt.f64(double
double l1 = sqrt(a1);
// CHECK-YES: call x86_fp80 @sqrtl
- // CHECK-NO: call x86_fp80 @sqrtl
+ // CHECK-NO: call x86_fp80 @llvm.sqrt.f80(x86_fp80
+ // CHECK-FAST: call x86_fp80 @llvm.sqrt.f80(x86_fp80
long double l2 = sqrtl(a2);
}
// CHECK-YES: declare float @sqrtf(float)
// CHECK-YES: declare double @sqrt(double)
// CHECK-YES: declare x86_fp80 @sqrtl(x86_fp80)
-// CHECK-NO: declare float @sqrtf(float) [[NUW_RN:#[0-9]+]]
-// CHECK-NO: declare double @sqrt(double) [[NUW_RN]]
-// CHECK-NO: declare x86_fp80 @sqrtl(x86_fp80) [[NUW_RN]]
+// CHECK-NO: declare float @llvm.sqrt.f32(float)
+// CHECK-NO: declare double @llvm.sqrt.f64(double)
+// CHECK-NO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80)
// CHECK-FAST: declare float @llvm.sqrt.f32(float)
// CHECK-FAST: declare double @llvm.sqrt.f64(double)
// CHECK-FAST: declare x86_fp80 @llvm.sqrt.f80(x86_fp80)
@@ -59,22 +58,22 @@ void test_pow(float a0, double a1, long double a2) {
// CHECK-YES-LABEL: define void @test_fma
// CHECK-NO-LABEL: define void @test_fma
void test_fma(float a0, double a1, long double a2) {
- // CHECK-YES: call float @llvm.fma.f32
+ // CHECK-YES: call float @fmaf
// CHECK-NO: call float @llvm.fma.f32
float l0 = fmaf(a0, a0, a0);
- // CHECK-YES: call double @llvm.fma.f64
+ // CHECK-YES: call double @fma
// CHECK-NO: call double @llvm.fma.f64
double l1 = fma(a1, a1, a1);
- // CHECK-YES: call x86_fp80 @llvm.fma.f80
+ // CHECK-YES: call x86_fp80 @fmal
// CHECK-NO: call x86_fp80 @llvm.fma.f80
long double l2 = fmal(a2, a2, a2);
}
-// CHECK-YES: declare float @llvm.fma.f32(float, float, float) [[NUW_RN:#[0-9]+]]
-// CHECK-YES: declare double @llvm.fma.f64(double, double, double) [[NUW_RN]]
-// CHECK-YES: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[NUW_RN]]
+// CHECK-YES: declare float @fmaf(float, float, float)
+// CHECK-YES: declare double @fma(double, double, double)
+// CHECK-YES: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80)
// CHECK-NO: declare float @llvm.fma.f32(float, float, float) [[NUW_RN2:#[0-9]+]]
// CHECK-NO: declare double @llvm.fma.f64(double, double, double) [[NUW_RN2]]
// CHECK-NO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[NUW_RN2]]
@@ -86,7 +85,7 @@ void test_builtins(double d, float f, long double ld) {
double atan_ = atan(d);
long double atanl_ = atanl(ld);
float atanf_ = atanf(f);
-// CHECK-NO: declare double @atan(double) [[NUW_RN]]
+// CHECK-NO: declare double @atan(double) [[NUW_RN:#[0-9]+]]
// CHECK-NO: declare x86_fp80 @atanl(x86_fp80) [[NUW_RN]]
// CHECK-NO: declare float @atanf(float) [[NUW_RN]]
// CHECK-YES-NOT: declare double @atan(double) [[NUW_RN]]
@@ -106,9 +105,9 @@ void test_builtins(double d, float f, long double ld) {
double exp_ = exp(d);
long double expl_ = expl(ld);
float expf_ = expf(f);
-// CHECK-NO: declare double @exp(double) [[NUW_RN]]
-// CHECK-NO: declare x86_fp80 @expl(x86_fp80) [[NUW_RN]]
-// CHECK-NO: declare float @expf(float) [[NUW_RN]]
+// CHECK-NO: declare double @llvm.exp.f64(double) [[NUW_RNI]]
+// CHECK-NO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[NUW_RNI]]
+// CHECK-NO: declare float @llvm.exp.f32(float) [[NUW_RNI]]
// CHECK-YES-NOT: declare double @exp(double) [[NUW_RN]]
// CHECK-YES-NOT: declare x86_fp80 @expl(x86_fp80) [[NUW_RN]]
// CHECK-YES-NOT: declare float @expf(float) [[NUW_RN]]
@@ -116,15 +115,13 @@ void test_builtins(double d, float f, long double ld) {
double log_ = log(d);
long double logl_ = logl(ld);
float logf_ = logf(f);
-// CHECK-NO: declare double @log(double) [[NUW_RN]]
-// CHECK-NO: declare x86_fp80 @logl(x86_fp80) [[NUW_RN]]
-// CHECK-NO: declare float @logf(float) [[NUW_RN]]
+// CHECK-NO: declare double @llvm.log.f64(double) [[NUW_RNI]]
+// CHECK-NO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[NUW_RNI]]
+// CHECK-NO: declare float @llvm.log.f32(float) [[NUW_RNI]]
// CHECK-YES-NOT: declare double @log(double) [[NUW_RN]]
// CHECK-YES-NOT: declare x86_fp80 @logl(x86_fp80) [[NUW_RN]]
// CHECK-YES-NOT: declare float @logf(float) [[NUW_RN]]
}
-// CHECK-YES: attributes [[NUW_RN]] = { nounwind readnone speculatable }
-
-// CHECK-NO: attributes [[NUW_RN]] = { nounwind readnone{{.*}} }
-// CHECK-NO: attributes [[NUW_RNI]] = { nounwind readnone speculatable }
+// CHECK-NO-DAG: attributes [[NUW_RN]] = { nounwind readnone{{.*}} }
+// CHECK-NO-DAG: attributes [[NUW_RNI]] = { nounwind readnone speculatable }
diff --git a/test/CodeGen/linux-arm-atomic.c b/test/CodeGen/linux-arm-atomic.c
index 116925a585b6..b8535f824827 100644
--- a/test/CodeGen/linux-arm-atomic.c
+++ b/test/CodeGen/linux-arm-atomic.c
@@ -2,7 +2,6 @@
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=armv6-unknown-linux | FileCheck %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=thumbv7-unknown-linux | FileCheck %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=armv6-unknown-freebsd | FileCheck %s
-// RUN: %clang_cc1 %s -emit-llvm -o - -triple=armv6-unknown-bitrig | FileCheck %s
typedef int _Atomic_word;
_Atomic_word exchange_and_add(volatile _Atomic_word *__mem, int __val) {
diff --git a/test/CodeGen/long-call-attr.c b/test/CodeGen/long-call-attr.c
new file mode 100644
index 000000000000..82433caf76fd
--- /dev/null
+++ b/test/CodeGen/long-call-attr.c
@@ -0,0 +1,21 @@
+// RUN: %clang_cc1 -triple mips-linux-gnu -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple mips64-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+void __attribute__((long_call)) foo1 (void);
+void __attribute__((short_call)) foo4 (void);
+
+void __attribute__((far)) foo2 (void) {}
+
+// CHECK: define void @foo2() [[FAR:#[0-9]+]]
+
+void __attribute__((near)) foo3 (void) { foo1(); foo4(); }
+
+// CHECK: define void @foo3() [[NEAR:#[0-9]+]]
+
+// CHECK: declare void @foo1() [[LONGDECL:#[0-9]+]]
+// CHECK: declare void @foo4() [[SHORTDECL:#[0-9]+]]
+
+// CHECK: attributes [[FAR]] = { {{.*}} "long-call" {{.*}} }
+// CHECK: attributes [[NEAR]] = { {{.*}} "short-call" {{.*}} }
+// CHECK: attributes [[LONGDECL]] = { {{.*}} "long-call" {{.*}} }
+// CHECK: attributes [[SHORTDECL]] = { {{.*}} "short-call" {{.*}} }
diff --git a/test/CodeGen/mangle-blocks.c b/test/CodeGen/mangle-blocks.c
index e8de92d8b400..4ea5a550c8bd 100644
--- a/test/CodeGen/mangle-blocks.c
+++ b/test/CodeGen/mangle-blocks.c
@@ -12,12 +12,12 @@ void (^mangle(void))(void) {
}
// CHECK: @__func__.__mangle_block_invoke_2 = private unnamed_addr constant [22 x i8] c"mangle_block_invoke_2\00", align 1
-// CHECK: @.str = private unnamed_addr constant {{.*}}, align 1
-// CHECK: @.str.1 = private unnamed_addr constant [7 x i8] c"mangle\00", align 1
+// CHECK: @.str{{.*}} = private unnamed_addr constant {{.*}}, align 1
+// CHECK: @.str[[STR1:.*]] = private unnamed_addr constant [7 x i8] c"mangle\00", align 1
// CHECK: define internal void @__mangle_block_invoke(i8* %.block_descriptor)
// CHECK: define internal void @__mangle_block_invoke_2(i8* %.block_descriptor){{.*}}{
-// CHECK: call void @__assert_rtn(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @__func__.__mangle_block_invoke_2, i32 0, i32 0), i8* getelementptr inbounds {{.*}}, i32 9, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.1, i32 0, i32 0))
+// CHECK: call void @__assert_rtn(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @__func__.__mangle_block_invoke_2, i32 0, i32 0), i8* getelementptr inbounds {{.*}}, i32 9, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str[[STR1]], i32 0, i32 0))
// CHECK: }
diff --git a/test/CodeGen/math-builtins.c b/test/CodeGen/math-builtins.c
new file mode 100644
index 000000000000..799d91b4ec00
--- /dev/null
+++ b/test/CodeGen/math-builtins.c
@@ -0,0 +1,578 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm %s | FileCheck %s -check-prefix=NO__ERRNO
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s -check-prefix=HAS_ERRNO
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown-gnu -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_GNU
+// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_WIN
+
+// Test attributes and codegen of math builtins.
+
+void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
+ f = __builtin_fmod(f,f); f = __builtin_fmodf(f,f); f = __builtin_fmodl(f,f);
+
+// NO__ERRNO: frem double
+// NO__ERRNO: frem float
+// NO__ERRNO: frem x86_fp80
+// HAS_ERRNO: declare double @fmod(double, double) [[NOT_READNONE:#[0-9]+]]
+// HAS_ERRNO: declare float @fmodf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);
+
+// NO__ERRNO: declare double @atan2(double, double) [[READNONE:#[0-9]+]]
+// NO__ERRNO: declare float @atan2f(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @atan2(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @atan2f(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_copysign(f,f); __builtin_copysignf(f,f); __builtin_copysignl(f,f);
+
+// NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// HAS_ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_fabs(f); __builtin_fabsf(f); __builtin_fabsl(f);
+
+// NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_frexp(f,i); __builtin_frexpf(f,i); __builtin_frexpl(f,i);
+
+// NO__ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE:#[0-9]+]]
+// NO__ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]]
+
+ __builtin_huge_val(); __builtin_huge_valf(); __builtin_huge_vall();
+
+// NO__ERRNO-NOT: .huge
+// NO__ERRNO-NOT: @huge
+// HAS_ERRNO-NOT: .huge
+// HAS_ERRNO-NOT: @huge
+
+ __builtin_inf(); __builtin_inff(); __builtin_infl();
+
+// NO__ERRNO-NOT: .inf
+// NO__ERRNO-NOT: @inf
+// HAS_ERRNO-NOT: .inf
+// HAS_ERRNO-NOT: @inf
+
+ __builtin_ldexp(f,f); __builtin_ldexpf(f,f); __builtin_ldexpl(f,f);
+
+// NO__ERRNO: declare double @ldexp(double, i32) [[READNONE]]
+// NO__ERRNO: declare float @ldexpf(float, i32) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[READNONE]]
+// HAS_ERRNO: declare double @ldexp(double, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @ldexpf(float, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NOT_READNONE]]
+
+ __builtin_modf(f,d); __builtin_modff(f,fp); __builtin_modfl(f,l);
+
+// NO__ERRNO: declare double @modf(double, double*) [[NOT_READNONE]]
+// NO__ERRNO: declare float @modff(float, float*) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @modf(double, double*) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @modff(float, float*) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]]
+
+ __builtin_nan(c); __builtin_nanf(c); __builtin_nanl(c);
+
+// NO__ERRNO: declare double @nan(i8*) [[READNONE]]
+// NO__ERRNO: declare float @nanf(i8*) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @nanl(i8*) [[READNONE]]
+// HAS_ERRNO: declare double @nan(i8*) [[READNONE:#[0-9]+]]
+// HAS_ERRNO: declare float @nanf(i8*) [[READNONE]]
+// HAS_ERRNO: declare x86_fp80 @nanl(i8*) [[READNONE]]
+
+ __builtin_nans(c); __builtin_nansf(c); __builtin_nansl(c);
+
+// NO__ERRNO: declare double @nans(i8*) [[READNONE]]
+// NO__ERRNO: declare float @nansf(i8*) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @nansl(i8*) [[READNONE]]
+// HAS_ERRNO: declare double @nans(i8*) [[READNONE]]
+// HAS_ERRNO: declare float @nansf(i8*) [[READNONE]]
+// HAS_ERRNO: declare x86_fp80 @nansl(i8*) [[READNONE]]
+
+ __builtin_pow(f,f); __builtin_powf(f,f); __builtin_powl(f,f);
+
+// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @pow(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @powf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_powi(f,f); __builtin_powif(f,f); __builtin_powil(f,f);
+
+// NO__ERRNO: declare double @llvm.powi.f64(double, i32) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.powi.f32(float, i32) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.powi.f80(x86_fp80, i32) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.powi.f64(double, i32) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.powi.f32(float, i32) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.powi.f80(x86_fp80, i32) [[READNONE_INTRINSIC]]
+
+ /* math */
+ __builtin_acos(f); __builtin_acosf(f); __builtin_acosl(f);
+
+// NO__ERRNO: declare double @acos(double) [[READNONE]]
+// NO__ERRNO: declare float @acosf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @acos(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @acosf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_acosh(f); __builtin_acoshf(f); __builtin_acoshl(f);
+
+// NO__ERRNO: declare double @acosh(double) [[READNONE]]
+// NO__ERRNO: declare float @acoshf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @acosh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @acoshf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_asin(f); __builtin_asinf(f); __builtin_asinl(f);
+
+// NO__ERRNO: declare double @asin(double) [[READNONE]]
+// NO__ERRNO: declare float @asinf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @asin(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @asinf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_asinh(f); __builtin_asinhf(f); __builtin_asinhl(f);
+
+// NO__ERRNO: declare double @asinh(double) [[READNONE]]
+// NO__ERRNO: declare float @asinhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @asinh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @asinhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_atan(f); __builtin_atanf(f); __builtin_atanl(f);
+
+// NO__ERRNO: declare double @atan(double) [[READNONE]]
+// NO__ERRNO: declare float @atanf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @atan(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @atanf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_atanh(f); __builtin_atanhf(f); __builtin_atanhl(f);
+
+// NO__ERRNO: declare double @atanh(double) [[READNONE]]
+// NO__ERRNO: declare float @atanhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @atanh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @atanhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_cbrt(f); __builtin_cbrtf(f); __builtin_cbrtl(f);
+
+// NO__ERRNO: declare double @cbrt(double) [[READNONE]]
+// NO__ERRNO: declare float @cbrtf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @cbrt(double) [[READNONE]]
+// HAS_ERRNO: declare float @cbrtf(float) [[READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]]
+
+ __builtin_ceil(f); __builtin_ceilf(f); __builtin_ceill(f);
+
+// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_cos(f); __builtin_cosf(f); __builtin_cosl(f);
+
+// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @cos(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @cosf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_cosh(f); __builtin_coshf(f); __builtin_coshl(f);
+
+// NO__ERRNO: declare double @cosh(double) [[READNONE]]
+// NO__ERRNO: declare float @coshf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @cosh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @coshf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @coshl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_erf(f); __builtin_erff(f); __builtin_erfl(f);
+
+// NO__ERRNO: declare double @erf(double) [[READNONE]]
+// NO__ERRNO: declare float @erff(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @erfl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @erf(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @erff(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_erfc(f); __builtin_erfcf(f); __builtin_erfcl(f);
+
+// NO__ERRNO: declare double @erfc(double) [[READNONE]]
+// NO__ERRNO: declare float @erfcf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @erfc(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @erfcf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_exp(f); __builtin_expf(f); __builtin_expl(f);
+
+// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @exp(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @expf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_exp2(f); __builtin_exp2f(f); __builtin_exp2l(f);
+
+// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @exp2(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @exp2f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_expm1(f); __builtin_expm1f(f); __builtin_expm1l(f);
+
+// NO__ERRNO: declare double @expm1(double) [[READNONE]]
+// NO__ERRNO: declare float @expm1f(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @expm1(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @expm1f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_fdim(f,f); __builtin_fdimf(f,f); __builtin_fdiml(f,f);
+
+// NO__ERRNO: declare double @fdim(double, double) [[READNONE]]
+// NO__ERRNO: declare float @fdimf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @fdim(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @fdimf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_floor(f); __builtin_floorf(f); __builtin_floorl(f);
+
+// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_fma(f,f,f); __builtin_fmaf(f,f,f); __builtin_fmal(f,f,f);
+
+// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @fma(double, double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @fmaf(float, float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+// On GNU or Win, fma never sets errno, so we can convert to the intrinsic.
+
+// HAS_ERRNO_GNU: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// HAS_ERRNO_GNU: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO_GNU: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+// HAS_ERRNO_WIN: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// HAS_ERRNO_WIN: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
+// Long double is just double on Win, so no f80 use/declaration.
+// HAS_ERRNO_WIN-NOT: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80)
+
+ __builtin_fmax(f,f); __builtin_fmaxf(f,f); __builtin_fmaxl(f,f);
+
+// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_fmin(f,f); __builtin_fminf(f,f); __builtin_fminl(f,f);
+
+// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_hypot(f,f); __builtin_hypotf(f,f); __builtin_hypotl(f,f);
+
+// NO__ERRNO: declare double @hypot(double, double) [[READNONE]]
+// NO__ERRNO: declare float @hypotf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @hypot(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @hypotf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_ilogb(f); __builtin_ilogbf(f); __builtin_ilogbl(f);
+
+// NO__ERRNO: declare i32 @ilogb(double) [[READNONE]]
+// NO__ERRNO: declare i32 @ilogbf(float) [[READNONE]]
+// NO__ERRNO: declare i32 @ilogbl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i32 @ilogb(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i32 @ilogbf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i32 @ilogbl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_lgamma(f); __builtin_lgammaf(f); __builtin_lgammal(f);
+
+// NO__ERRNO: declare double @lgamma(double) [[NOT_READNONE]]
+// NO__ERRNO: declare float @lgammaf(float) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @lgamma(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @lgammaf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f);
+
+// NO__ERRNO: declare i64 @llrint(double) [[READNONE]]
+// NO__ERRNO: declare i64 @llrintf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @llrintl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_llround(f); __builtin_llroundf(f); __builtin_llroundl(f);
+
+// NO__ERRNO: declare i64 @llround(double) [[READNONE]]
+// NO__ERRNO: declare i64 @llroundf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @llroundl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @llround(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llroundf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llroundl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_log(f); __builtin_logf(f); __builtin_logl(f);
+
+// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @log(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @logf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_log10(f); __builtin_log10f(f); __builtin_log10l(f);
+
+// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @log10(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @log10f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_log1p(f); __builtin_log1pf(f); __builtin_log1pl(f);
+
+// NO__ERRNO: declare double @log1p(double) [[READNONE]]
+// NO__ERRNO: declare float @log1pf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @log1p(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @log1pf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_log2(f); __builtin_log2f(f); __builtin_log2l(f);
+
+// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @log2(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @log2f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_logb(f); __builtin_logbf(f); __builtin_logbl(f);
+
+// NO__ERRNO: declare double @logb(double) [[READNONE]]
+// NO__ERRNO: declare float @logbf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @logb(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @logbf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @logbl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_lrint(f); __builtin_lrintf(f); __builtin_lrintl(f);
+
+// NO__ERRNO: declare i64 @lrint(double) [[READNONE]]
+// NO__ERRNO: declare i64 @lrintf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @lrintl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_lround(f); __builtin_lroundf(f); __builtin_lroundl(f);
+
+// NO__ERRNO: declare i64 @lround(double) [[READNONE]]
+// NO__ERRNO: declare i64 @lroundf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @lroundl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @lround(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lroundf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lroundl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_nearbyint(f); __builtin_nearbyintf(f); __builtin_nearbyintl(f);
+
+// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_nextafter(f,f); __builtin_nextafterf(f,f); __builtin_nextafterl(f,f);
+
+// NO__ERRNO: declare double @nextafter(double, double) [[READNONE]]
+// NO__ERRNO: declare float @nextafterf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @nextafter(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @nextafterf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_nexttoward(f,f); __builtin_nexttowardf(f,f); __builtin_nexttowardl(f,f);
+
+// NO__ERRNO: declare double @nexttoward(double, x86_fp80) [[READNONE]]
+// NO__ERRNO: declare float @nexttowardf(float, x86_fp80) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @nexttoward(double, x86_fp80) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @nexttowardf(float, x86_fp80) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_remainder(f,f); __builtin_remainderf(f,f); __builtin_remainderl(f,f);
+
+// NO__ERRNO: declare double @remainder(double, double) [[READNONE]]
+// NO__ERRNO: declare float @remainderf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @remainder(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @remainderf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ __builtin_remquo(f,f,i); __builtin_remquof(f,f,i); __builtin_remquol(f,f,i);
+
+// NO__ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]]
+// NO__ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]]
+
+ __builtin_rint(f); __builtin_rintf(f); __builtin_rintl(f);
+
+// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_round(f); __builtin_roundf(f); __builtin_roundl(f);
+
+// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ __builtin_scalbln(f,f); __builtin_scalblnf(f,f); __builtin_scalblnl(f,f);
+
+// NO__ERRNO: declare double @scalbln(double, i64) [[READNONE]]
+// NO__ERRNO: declare float @scalblnf(float, i64) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[READNONE]]
+// HAS_ERRNO: declare double @scalbln(double, i64) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @scalblnf(float, i64) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NOT_READNONE]]
+
+ __builtin_scalbn(f,f); __builtin_scalbnf(f,f); __builtin_scalbnl(f,f);
+
+// NO__ERRNO: declare double @scalbn(double, i32) [[READNONE]]
+// NO__ERRNO: declare float @scalbnf(float, i32) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[READNONE]]
+// HAS_ERRNO: declare double @scalbn(double, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @scalbnf(float, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NOT_READNONE]]
+
+ __builtin_sin(f); __builtin_sinf(f); __builtin_sinl(f);
+
+// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @sin(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @sinf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_sinh(f); __builtin_sinhf(f); __builtin_sinhl(f);
+
+// NO__ERRNO: declare double @sinh(double) [[READNONE]]
+// NO__ERRNO: declare float @sinhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @sinh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @sinhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_sqrt(f); __builtin_sqrtf(f); __builtin_sqrtl(f);
+
+// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @sqrt(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @sqrtf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_tan(f); __builtin_tanf(f); __builtin_tanl(f);
+
+// NO__ERRNO: declare double @tan(double) [[READNONE]]
+// NO__ERRNO: declare float @tanf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @tan(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @tanf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_tanh(f); __builtin_tanhf(f); __builtin_tanhl(f);
+
+// NO__ERRNO: declare double @tanh(double) [[READNONE]]
+// NO__ERRNO: declare float @tanhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @tanh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @tanhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_tgamma(f); __builtin_tgammaf(f); __builtin_tgammal(f);
+
+// NO__ERRNO: declare double @tgamma(double) [[READNONE]]
+// NO__ERRNO: declare float @tgammaf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @tgamma(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @tgammaf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NOT_READNONE]]
+
+ __builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f);
+
+// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]]
+}
+
+
+// NO__ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+// NO__ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+// NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+
+// HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+// HAS_ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+// HAS_ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+
+// HAS_ERRNO_GNU: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+// HAS_ERRNO_WIN: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+
diff --git a/test/CodeGen/math-libcalls.c b/test/CodeGen/math-libcalls.c
new file mode 100644
index 000000000000..39bcb4454d7c
--- /dev/null
+++ b/test/CodeGen/math-libcalls.c
@@ -0,0 +1,547 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm %s | FileCheck %s --check-prefix=NO__ERRNO
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown-gnu -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_GNU
+// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -w -S -o - -emit-llvm -fmath-errno %s | FileCheck %s --check-prefix=HAS_ERRNO_WIN
+
+// Test attributes and builtin codegen of math library calls.
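+//
+// Roughly (a reader's note, not part of the FileCheck run): without
+// -fmath-errno these calls may be marked readnone or lowered to LLVM
+// intrinsics; with -fmath-errno most of them must remain ordinary
+// libcalls, since they may write errno. For example:
+//   double s = sqrt(f);  // NO__ERRNO: becomes @llvm.sqrt.f64
+//                        // HAS_ERRNO: stays a call to @sqrt, [[NOT_READNONE]]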
+
+void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
+ f = fmod(f,f); f = fmodf(f,f); f = fmodl(f,f);
+
+// NO__ERRNO: frem double
+// NO__ERRNO: frem float
+// NO__ERRNO: frem x86_fp80
+// HAS_ERRNO: declare double @fmod(double, double) [[NOT_READNONE:#[0-9]+]]
+// HAS_ERRNO: declare float @fmodf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ atan2(f,f); atan2f(f,f); atan2l(f,f);
+
+// NO__ERRNO: declare double @atan2(double, double) [[READNONE:#[0-9]+]]
+// NO__ERRNO: declare float @atan2f(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @atan2(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @atan2f(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ copysign(f,f); copysignf(f,f); copysignl(f,f);
+
+// NO__ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// NO__ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.copysign.f64(double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// HAS_ERRNO: declare float @llvm.copysign.f32(float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.copysign.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+ fabs(f); fabsf(f); fabsl(f);
+
+// NO__ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.fabs.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.fabs.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.fabs.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ frexp(f,i); frexpf(f,i); frexpl(f,i);
+
+// NO__ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE:#[0-9]+]]
+// NO__ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]]
+
+ ldexp(f,f); ldexpf(f,f); ldexpl(f,f);
+
+// NO__ERRNO: declare double @ldexp(double, i32) [[READNONE]]
+// NO__ERRNO: declare float @ldexpf(float, i32) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[READNONE]]
+// HAS_ERRNO: declare double @ldexp(double, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @ldexpf(float, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NOT_READNONE]]
+
+ modf(f,d); modff(f,fp); modfl(f,l);
+
+// NO__ERRNO: declare double @modf(double, double*) [[NOT_READNONE]]
+// NO__ERRNO: declare float @modff(float, float*) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @modf(double, double*) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @modff(float, float*) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]]
+
+ nan(c); nanf(c); nanl(c);
+
+// NO__ERRNO: declare double @nan(i8*) [[READONLY:#[0-9]+]]
+// NO__ERRNO: declare float @nanf(i8*) [[READONLY]]
+// NO__ERRNO: declare x86_fp80 @nanl(i8*) [[READONLY]]
+// HAS_ERRNO: declare double @nan(i8*) [[READONLY:#[0-9]+]]
+// HAS_ERRNO: declare float @nanf(i8*) [[READONLY]]
+// HAS_ERRNO: declare x86_fp80 @nanl(i8*) [[READONLY]]
+
+ pow(f,f); powf(f,f); powl(f,f);
+
+// NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @pow(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @powf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ /* math */
+ acos(f); acosf(f); acosl(f);
+
+// NO__ERRNO: declare double @acos(double) [[READNONE]]
+// NO__ERRNO: declare float @acosf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @acos(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @acosf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80) [[NOT_READNONE]]
+
+ acosh(f); acoshf(f); acoshl(f);
+
+// NO__ERRNO: declare double @acosh(double) [[READNONE]]
+// NO__ERRNO: declare float @acoshf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @acosh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @acoshf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NOT_READNONE]]
+
+ asin(f); asinf(f); asinl(f);
+
+// NO__ERRNO: declare double @asin(double) [[READNONE]]
+// NO__ERRNO: declare float @asinf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @asin(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @asinf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80) [[NOT_READNONE]]
+
+ asinh(f); asinhf(f); asinhl(f);
+
+// NO__ERRNO: declare double @asinh(double) [[READNONE]]
+// NO__ERRNO: declare float @asinhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @asinh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @asinhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NOT_READNONE]]
+
+ atan(f); atanf(f); atanl(f);
+
+// NO__ERRNO: declare double @atan(double) [[READNONE]]
+// NO__ERRNO: declare float @atanf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @atan(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @atanf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80) [[NOT_READNONE]]
+
+ atanh(f); atanhf(f); atanhl(f);
+
+// NO__ERRNO: declare double @atanh(double) [[READNONE]]
+// NO__ERRNO: declare float @atanhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @atanh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @atanhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NOT_READNONE]]
+
+ cbrt(f); cbrtf(f); cbrtl(f);
+
+// NO__ERRNO: declare double @cbrt(double) [[READNONE]]
+// NO__ERRNO: declare float @cbrtf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @cbrt(double) [[READNONE:#[0-9]+]]
+// HAS_ERRNO: declare float @cbrtf(float) [[READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]]
+
+ ceil(f); ceilf(f); ceill(f);
+
+// NO__ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.ceil.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.ceil.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.ceil.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ cos(f); cosf(f); cosl(f);
+
+// NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @cos(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @cosf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80) [[NOT_READNONE]]
+
+ cosh(f); coshf(f); coshl(f);
+
+// NO__ERRNO: declare double @cosh(double) [[READNONE]]
+// NO__ERRNO: declare float @coshf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @cosh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @coshf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @coshl(x86_fp80) [[NOT_READNONE]]
+
+ erf(f); erff(f); erfl(f);
+
+// NO__ERRNO: declare double @erf(double) [[READNONE]]
+// NO__ERRNO: declare float @erff(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @erfl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @erf(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @erff(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80) [[NOT_READNONE]]
+
+ erfc(f); erfcf(f); erfcl(f);
+
+// NO__ERRNO: declare double @erfc(double) [[READNONE]]
+// NO__ERRNO: declare float @erfcf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @erfc(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @erfcf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NOT_READNONE]]
+
+ exp(f); expf(f); expl(f);
+
+// NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @exp(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @expf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80) [[NOT_READNONE]]
+
+ exp2(f); exp2f(f); exp2l(f);
+
+// NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @exp2(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @exp2f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NOT_READNONE]]
+
+ expm1(f); expm1f(f); expm1l(f);
+
+// NO__ERRNO: declare double @expm1(double) [[READNONE]]
+// NO__ERRNO: declare float @expm1f(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @expm1(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @expm1f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NOT_READNONE]]
+
+ fdim(f,f); fdimf(f,f); fdiml(f,f);
+
+// NO__ERRNO: declare double @fdim(double, double) [[READNONE]]
+// NO__ERRNO: declare float @fdimf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @fdim(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @fdimf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ floor(f); floorf(f); floorl(f);
+
+// NO__ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.floor.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.floor.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.floor.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ fma(f,f,f); fmaf(f,f,f); fmal(f,f,f);
+
+// NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @fma(double, double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @fmaf(float, float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+// On GNU or Windows targets, fma never sets errno, so it can be converted to the intrinsic.
+
+// HAS_ERRNO_GNU: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// HAS_ERRNO_GNU: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO_GNU: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+// HAS_ERRNO_WIN: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC:#[0-9]+]]
+// HAS_ERRNO_WIN: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]]
+// Long double is just double on Windows, so there is no f80 use/declaration.
+// HAS_ERRNO_WIN-NOT: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80)
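+//
+// Illustrative note (reader's summary, not a CHECK line): on those
+// targets a source-level call such as
+//   double r = fma(f, f, f);
+// may become @llvm.fma.f64 even under -fmath-errno, because libm's fma
+// is known never to touch errno there.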
+
+ fmax(f,f); fmaxf(f,f); fmaxl(f,f);
+
+// NO__ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.maxnum.f64(double, double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.maxnum.f32(float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.maxnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+ fmin(f,f); fminf(f,f); fminl(f,f);
+
+// NO__ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.minnum.f64(double, double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.minnum.f32(float, float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]]
+
+ hypot(f,f); hypotf(f,f); hypotl(f,f);
+
+// NO__ERRNO: declare double @hypot(double, double) [[READNONE]]
+// NO__ERRNO: declare float @hypotf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @hypot(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @hypotf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ ilogb(f); ilogbf(f); ilogbl(f);
+
+// NO__ERRNO: declare i32 @ilogb(double) [[READNONE]]
+// NO__ERRNO: declare i32 @ilogbf(float) [[READNONE]]
+// NO__ERRNO: declare i32 @ilogbl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i32 @ilogb(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i32 @ilogbf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i32 @ilogbl(x86_fp80) [[NOT_READNONE]]
+
+ lgamma(f); lgammaf(f); lgammal(f);
+
+// NO__ERRNO: declare double @lgamma(double) [[NOT_READNONE]]
+// NO__ERRNO: declare float @lgammaf(float) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @lgamma(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @lgammaf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]]
+
+ llrint(f); llrintf(f); llrintl(f);
+
+// NO__ERRNO: declare i64 @llrint(double) [[READNONE]]
+// NO__ERRNO: declare i64 @llrintf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @llrintl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]]
+
+ llround(f); llroundf(f); llroundl(f);
+
+// NO__ERRNO: declare i64 @llround(double) [[READNONE]]
+// NO__ERRNO: declare i64 @llroundf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @llroundl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @llround(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llroundf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @llroundl(x86_fp80) [[NOT_READNONE]]
+
+ log(f); logf(f); logl(f);
+
+// NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @log(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @logf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80) [[NOT_READNONE]]
+
+ log10(f); log10f(f); log10l(f);
+
+// NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @log10(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @log10f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80) [[NOT_READNONE]]
+
+ log1p(f); log1pf(f); log1pl(f);
+
+// NO__ERRNO: declare double @log1p(double) [[READNONE]]
+// NO__ERRNO: declare float @log1pf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @log1p(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @log1pf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NOT_READNONE]]
+
+ log2(f); log2f(f); log2l(f);
+
+// NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @log2(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @log2f(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80) [[NOT_READNONE]]
+
+ logb(f); logbf(f); logbl(f);
+
+// NO__ERRNO: declare double @logb(double) [[READNONE]]
+// NO__ERRNO: declare float @logbf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @logb(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @logbf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @logbl(x86_fp80) [[NOT_READNONE]]
+
+ lrint(f); lrintf(f); lrintl(f);
+
+// NO__ERRNO: declare i64 @lrint(double) [[READNONE]]
+// NO__ERRNO: declare i64 @lrintf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @lrintl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]]
+
+ lround(f); lroundf(f); lroundl(f);
+
+// NO__ERRNO: declare i64 @lround(double) [[READNONE]]
+// NO__ERRNO: declare i64 @lroundf(float) [[READNONE]]
+// NO__ERRNO: declare i64 @lroundl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare i64 @lround(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lroundf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare i64 @lroundl(x86_fp80) [[NOT_READNONE]]
+
+ nearbyint(f); nearbyintf(f); nearbyintl(f);
+
+// NO__ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.nearbyint.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.nearbyint.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.nearbyint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ nextafter(f,f); nextafterf(f,f); nextafterl(f,f);
+
+// NO__ERRNO: declare double @nextafter(double, double) [[READNONE]]
+// NO__ERRNO: declare float @nextafterf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @nextafter(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @nextafterf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ nexttoward(f,f); nexttowardf(f,f); nexttowardl(f,f);
+
+// NO__ERRNO: declare double @nexttoward(double, x86_fp80) [[READNONE]]
+// NO__ERRNO: declare float @nexttowardf(float, x86_fp80) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @nexttoward(double, x86_fp80) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @nexttowardf(float, x86_fp80) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ remainder(f,f); remainderf(f,f); remainderl(f,f);
+
+// NO__ERRNO: declare double @remainder(double, double) [[READNONE]]
+// NO__ERRNO: declare float @remainderf(float, float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @remainder(double, double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @remainderf(float, float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NOT_READNONE]]
+
+ remquo(f,f,i); remquof(f,f,i); remquol(f,f,i);
+
+// NO__ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]]
+// NO__ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]]
+// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]]
+
+ rint(f); rintf(f); rintl(f);
+
+// NO__ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.rint.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.rint.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.rint.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ round(f); roundf(f); roundl(f);
+
+// NO__ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.round.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.round.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.round.f80(x86_fp80) [[READNONE_INTRINSIC]]
+
+ scalbln(f,f); scalblnf(f,f); scalblnl(f,f);
+
+// NO__ERRNO: declare double @scalbln(double, i64) [[READNONE]]
+// NO__ERRNO: declare float @scalblnf(float, i64) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[READNONE]]
+// HAS_ERRNO: declare double @scalbln(double, i64) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @scalblnf(float, i64) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NOT_READNONE]]
+
+ scalbn(f,f); scalbnf(f,f); scalbnl(f,f);
+
+// NO__ERRNO: declare double @scalbn(double, i32) [[READNONE]]
+// NO__ERRNO: declare float @scalbnf(float, i32) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[READNONE]]
+// HAS_ERRNO: declare double @scalbn(double, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @scalbnf(float, i32) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NOT_READNONE]]
+
+ sin(f); sinf(f); sinl(f);
+
+// NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @sin(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @sinf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80) [[NOT_READNONE]]
+
+ sinh(f); sinhf(f); sinhl(f);
+
+// NO__ERRNO: declare double @sinh(double) [[READNONE]]
+// NO__ERRNO: declare float @sinhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @sinh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @sinhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NOT_READNONE]]
+
+ sqrt(f); sqrtf(f); sqrtl(f);
+
+// NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @sqrt(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @sqrtf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NOT_READNONE]]
+
+ tan(f); tanf(f); tanl(f);
+
+// NO__ERRNO: declare double @tan(double) [[READNONE]]
+// NO__ERRNO: declare float @tanf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @tan(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @tanf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80) [[NOT_READNONE]]
+
+ tanh(f); tanhf(f); tanhl(f);
+
+// NO__ERRNO: declare double @tanh(double) [[READNONE]]
+// NO__ERRNO: declare float @tanhf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @tanh(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @tanhf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NOT_READNONE]]
+
+ tgamma(f); tgammaf(f); tgammal(f);
+
+// NO__ERRNO: declare double @tgamma(double) [[READNONE]]
+// NO__ERRNO: declare float @tgammaf(float) [[READNONE]]
+// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[READNONE]]
+// HAS_ERRNO: declare double @tgamma(double) [[NOT_READNONE]]
+// HAS_ERRNO: declare float @tgammaf(float) [[NOT_READNONE]]
+// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NOT_READNONE]]
+
+ trunc(f); truncf(f); truncl(f);
+
+// NO__ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare double @llvm.trunc.f64(double) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare float @llvm.trunc.f32(float) [[READNONE_INTRINSIC]]
+// HAS_ERRNO: declare x86_fp80 @llvm.trunc.f80(x86_fp80) [[READNONE_INTRINSIC]]
+}
+
+
+// NO__ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+// NO__ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+// NO__ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+// NO__ERRNO: attributes [[READONLY]] = { {{.*}}readonly{{.*}} }
+
+// HAS_ERRNO: attributes [[NOT_READNONE]] = { nounwind "correctly{{.*}} }
+// HAS_ERRNO: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+// HAS_ERRNO: attributes [[READONLY]] = { {{.*}}readonly{{.*}} }
+// HAS_ERRNO: attributes [[READNONE]] = { {{.*}}readnone{{.*}} }
+
+// HAS_ERRNO_GNU: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
+// HAS_ERRNO_WIN: attributes [[READNONE_INTRINSIC]] = { {{.*}}readnone{{.*}} }
diff --git a/test/CodeGen/mcount.c b/test/CodeGen/mcount.c
index 2284acac0f8e..98a2a6b39092 100644
--- a/test/CodeGen/mcount.c
+++ b/test/CodeGen/mcount.c
@@ -35,9 +35,9 @@ int main(void) {
return no_instrument();
}
-// CHECK: attributes #0 = { {{.*}}"counting-function"="mcount"{{.*}} }
+// CHECK: attributes #0 = { {{.*}}"instrument-function-entry-inlined"="mcount"{{.*}} }
// CHECK: attributes #1 = { {{.*}} }
-// CHECK-PREFIXED: attributes #0 = { {{.*}}"counting-function"="_mcount"{{.*}} }
+// CHECK-PREFIXED: attributes #0 = { {{.*}}"instrument-function-entry-inlined"="_mcount"{{.*}} }
// CHECK-PREFIXED: attributes #1 = { {{.*}} }
-// NO-MCOUNT-NOT: attributes #{{[0-9]}} = { {{.*}}"counting-function"={{.*}} }
-// NO-MCOUNT1-NOT: attributes #1 = { {{.*}}"counting-function"={{.*}} }
+// NO-MCOUNT-NOT: attributes #{{[0-9]}} = { {{.*}}"instrument-function-entry-inlined"={{.*}} }
+// NO-MCOUNT1-NOT: attributes #1 = { {{.*}}"instrument-function-entry-inlined"={{.*}} }
diff --git a/test/CodeGen/mozilla-ms-inline-asm.c b/test/CodeGen/mozilla-ms-inline-asm.c
index 26e8ebce1e28..0774c8cb3045 100644
--- a/test/CodeGen/mozilla-ms-inline-asm.c
+++ b/test/CodeGen/mozilla-ms-inline-asm.c
@@ -33,7 +33,7 @@ void invoke(void* that, unsigned methodIndex,
// CHECK-SAME: push ecx
// CHECK-SAME: mov edx,[ecx]
// CHECK-SAME: mov eax,$4
-// CHECK-SAME: call dword ptr[edx+eax*$$4]
+// CHECK-SAME: call dword ptr[edx + eax * $$4]
// CHECK-SAME: mov esp,ebp
// CHECK-SAME: pop ebp
// CHECK-SAME: ret
diff --git a/test/CodeGen/ms-annotation.c b/test/CodeGen/ms-annotation.c
new file mode 100644
index 000000000000..6f4a20c7b154
--- /dev/null
+++ b/test/CodeGen/ms-annotation.c
@@ -0,0 +1,26 @@
+// RUN: %clang_cc1 -triple i686-windows %s -fms-extensions -emit-llvm -o - | FileCheck %s
+//
+// Test that LLVM optimizations leave these intrinsics alone, for the most part.
+// RUN: %clang_cc1 -O2 -triple i686-windows %s -fms-extensions -emit-llvm -o - | FileCheck %s
+
+void test1(void) {
+ __annotation(L"a1");
+ __annotation(L"a1", L"a2");
+ __annotation(L"a1", L"a2", L"a3");
+ __annotation(L"multi " L"part " L"string");
+ __annotation(L"unicode: \u0ca0_\u0ca0");
+}
+
+// CHECK-LABEL: define void @test1()
+// CHECK: call void @llvm.codeview.annotation(metadata ![[A1:[0-9]+]])
+// CHECK: call void @llvm.codeview.annotation(metadata ![[A2:[0-9]+]])
+// CHECK: call void @llvm.codeview.annotation(metadata ![[A3:[0-9]+]])
+// CHECK: call void @llvm.codeview.annotation(metadata ![[A4:[0-9]+]])
+// CHECK: call void @llvm.codeview.annotation(metadata ![[A5:[0-9]+]])
+// CHECK: ret void
+
+// CHECK: ![[A1]] = !{!"a1"}
+// CHECK: ![[A2]] = !{!"a1", !"a2"}
+// CHECK: ![[A3]] = !{!"a1", !"a2", !"a3"}
+// CHECK: ![[A4]] = !{!"multi part string"}
+// CHECK: ![[A5]] = !{!"unicode: \E0\B2\A0_\E0\B2\A0"}
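+//
+// Note (explanatory comment, not a CHECK line): \E0\B2\A0 is the UTF-8
+// encoding of U+0CA0 from the \u escape in the source, so the metadata
+// string carries the escaped UTF-8 bytes of the wide-string literal.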
diff --git a/test/CodeGen/ms-inline-asm-64.c b/test/CodeGen/ms-inline-asm-64.c
index 69828feb3623..5b144eb7bb68 100644
--- a/test/CodeGen/ms-inline-asm-64.c
+++ b/test/CodeGen/ms-inline-asm-64.c
@@ -34,8 +34,8 @@ int t3() {
// CHECK: t3
// CHECK: call void asm sideeffect inteldialect
// CHECK-SAME: lea ebx, $0
-// CHECK-SAME: mov eax, [ebx].0
-// CHECK-SAME: mov [ebx].4, ecx
+// CHECK-SAME: mov eax, [ebx]
+// CHECK-SAME: mov [ebx + $$4], ecx
// CHECK-SAME: "*m,~{eax},~{ebx},~{dirflag},~{fpsr},~{flags}"(%struct.t3_type* %{{.*}})
}
@@ -54,7 +54,7 @@ int t4() {
// CHECK: t4
// CHECK: call void asm sideeffect inteldialect
// CHECK-SAME: lea ebx, $0
-// CHECK-SAME: mov eax, [ebx].0
-// CHECK-SAME: mov [ebx].4, ecx
+// CHECK-SAME: mov eax, [ebx]
+// CHECK-SAME: mov [ebx + $$4], ecx
// CHECK-SAME: "*m,~{eax},~{ebx},~{dirflag},~{fpsr},~{flags}"(%struct.t3_type* %{{.*}})
}
diff --git a/test/CodeGen/ms-inline-asm-enums.cpp b/test/CodeGen/ms-inline-asm-enums.cpp
new file mode 100644
index 000000000000..4e9225ab4759
--- /dev/null
+++ b/test/CodeGen/ms-inline-asm-enums.cpp
@@ -0,0 +1,55 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 %s -fasm-blocks -triple i386-apple-darwin10 -emit-llvm -o - | FileCheck %s
+
+namespace x {
+ enum { A = 12 };
+ struct y_t {
+ enum { A = 17 };
+ int r;
+ } y;
+}
+
+// CHECK-LABEL: t1
+void t1() {
+ enum { A = 1 };
+ // CHECK: call void asm
+ // CHECK-SAME: mov eax, $$12
+ __asm mov eax, x::A
+ // CHECK-SAME: mov eax, $$17
+ __asm mov eax, x::y_t::A
+ // CHECK-NEXT: call void asm
+ // CHECK-SAME: mov eax, $$1
+ __asm {mov eax, A}
+}
+
+// CHECK-LABEL: t2
+void t2() {
+ enum { A = 1, B };
+ // CHECK: call void asm
+ // CHECK-SAME: mov eax, $$21
+ __asm mov eax, (A + 9) * 2 + A
+ // CHECK-SAME: mov eax, $$4
+ __asm mov eax, A << 2
+ // CHECK-SAME: mov eax, $$2
+ __asm mov eax, B & 3
+ // CHECK-SAME: mov eax, $$5
+ __asm mov eax, 3 + (B & 3)
+ // CHECK-SAME: mov eax, $$8
+ __asm mov eax, 2 << A * B
+}
+
+// CHECK-LABEL: t3
+void t3() {
+ int arr[4];
+ enum { A = 4, B };
+ // CHECK: call void asm
+ // CHECK-SAME: mov eax, [eax + $$47]
+ __asm { mov eax, [(x::A + 9) + A * B + 3 + 3 + eax] }
+ // CHECK-NEXT: call void asm
+ // CHECK-SAME: mov eax, dword ptr $0[$$4]
+ __asm { mov eax, dword ptr [arr + A] }
+ // CHECK-NEXT: call void asm
+ // CHECK-SAME: mov eax, dword ptr $0[$$8]
+ __asm { mov eax, dword ptr A[arr + A] }
+}
+
diff --git a/test/CodeGen/ms-inline-asm-variables.c b/test/CodeGen/ms-inline-asm-variables.c
new file mode 100644
index 000000000000..f8fd227610b6
--- /dev/null
+++ b/test/CodeGen/ms-inline-asm-variables.c
@@ -0,0 +1,35 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 %s -fasm-blocks -triple i386-apple-darwin10 -emit-llvm -o - | FileCheck %s
+
+int gVar;
+void t1() {
+ // CHECK: add eax, dword ptr gVar[eax]
+ __asm add eax, dword ptr gVar[eax]
+ // CHECK: add dword ptr gVar[eax], eax
+ __asm add dword ptr [eax+gVar], eax
+ // CHECK: add ebx, dword ptr gVar[ebx + $$270]
+ __asm add ebx, dword ptr gVar[271 - 82 + 81 + ebx]
+ // CHECK: add dword ptr gVar[ebx + $$828], ebx
+ __asm add dword ptr [ebx + gVar + 828], ebx
+ // CHECK: add ecx, dword ptr gVar[ecx + ecx * $$4 + $$4590]
+ __asm add ecx, dword ptr gVar[4590 + ecx + ecx*4]
+ // CHECK: add dword ptr gVar[ecx + ecx * $$8 + $$73], ecx
+ __asm add dword ptr [gVar + ecx + 45 + 23 - 53 + 60 - 2 + ecx*8], ecx
+ // CHECK: add gVar[ecx + ebx + $$7], eax
+ __asm add 1 + 1 + 2 + 3[gVar + ecx + ebx], eax
+}
+
+void t2() {
+ int lVar;
+ // CHECK: mov eax, dword ptr ${{[0-9]}}[eax]
+ __asm mov eax, dword ptr lVar[eax]
+ // CHECK: mov dword ptr ${{[0-9]}}[eax], eax
+ __asm mov dword ptr [eax+lVar], eax
+ // CHECK: mov ebx, dword ptr ${{[0-9]}}[ebx + $$270]
+ __asm mov ebx, dword ptr lVar[271 - 82 + 81 + ebx]
+ // CHECK: mov dword ptr ${{[0-9]}}[ebx + $$828], ebx
+ __asm mov dword ptr [ebx + lVar + 828], ebx
+ // CHECK: mov ${{[0-9]}}[ebx + $$47], eax
+ __asm mov 5 + 8 + 13 + 21[lVar + ebx], eax
+}
+
diff --git a/test/CodeGen/ms-inline-asm.c b/test/CodeGen/ms-inline-asm.c
index d26a660c9b0a..5c3e3ff2a843 100644
--- a/test/CodeGen/ms-inline-asm.c
+++ b/test/CodeGen/ms-inline-asm.c
@@ -42,7 +42,7 @@ void t5(void) {
void t6(void) {
__asm int 0x2c
// CHECK: t6
-// CHECK: call void asm sideeffect inteldialect "int $$0x2c", "~{dirflag},~{fpsr},~{flags}"()
+// CHECK: call void asm sideeffect inteldialect "int $$44", "~{dirflag},~{fpsr},~{flags}"()
}
void t7() {
@@ -61,7 +61,7 @@ void t7() {
mov eax, ebx
}
// CHECK: t7
-// CHECK: call void asm sideeffect inteldialect "int $$0x2cU", "~{dirflag},~{fpsr},~{flags}"()
+// CHECK: call void asm sideeffect inteldialect "int $$44", "~{dirflag},~{fpsr},~{flags}"()
// CHECK: call void asm sideeffect inteldialect "", "~{dirflag},~{fpsr},~{flags}"()
// CHECK: call void asm sideeffect inteldialect "mov eax, ebx", "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
@@ -94,7 +94,7 @@ void t9() {
// CHECK: t9
// CHECK: call void asm sideeffect inteldialect
// CHECK-SAME: push ebx
-// CHECK-SAME: mov ebx, $$0x07
+// CHECK-SAME: mov ebx, $$7
// CHECK-SAME: pop ebx
// CHECK-SAME: "~{ebx},~{esp},~{dirflag},~{fpsr},~{flags}"()
}
@@ -229,7 +229,7 @@ void t20() {
__asm mov eax, LENGTH _bar
// CHECK: mov eax, $$2
__asm mov eax, [eax + LENGTH foo * 4]
-// CHECK: mov eax, [eax + $$1 * $$4]
+// CHECK: mov eax, [eax + $$4]
__asm mov eax, TYPE foo
// CHECK: mov eax, $$4
@@ -240,7 +240,7 @@ void t20() {
__asm mov eax, TYPE _bar
// CHECK: mov eax, $$1
__asm mov eax, [eax + TYPE foo * 4]
-// CHECK: mov eax, [eax + $$4 * $$4]
+// CHECK: mov eax, [eax + $$16]
__asm mov eax, SIZE foo
// CHECK: mov eax, $$4
@@ -249,7 +249,7 @@ void t20() {
__asm mov eax, SIZE _foo
// CHECK: mov eax, $$16
__asm mov eax, [eax + SIZE _foo * 4]
-// CHECK: mov eax, [eax + $$16 * $$4]
+// CHECK: mov eax, [eax + $$64]
__asm mov eax, SIZE _bar
// CHECK: mov eax, $$2
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
@@ -265,7 +265,7 @@ void t21() {
// CHECK: t21
// CHECK: call void asm sideeffect inteldialect
// CHECK-SAME: push ebx
-// CHECK-SAME: mov ebx, $$07H
+// CHECK-SAME: mov ebx, $$7
// CHECK-SAME: pop ebx
// CHECK-SAME: "~{ebx},~{esp},~{dirflag},~{fpsr},~{flags}"()
}
@@ -312,13 +312,13 @@ void t24() {
void t25() {
// CHECK: t25
__asm mov eax, 0ffffffffh
-// CHECK: mov eax, $$0ffffffffh
+// CHECK: mov eax, $$4294967295
__asm mov eax, 0fhU
// CHECK: mov eax, $$15
__asm mov eax, 0a2h
-// CHECK: mov eax, $$0a2h
+// CHECK: mov eax, $$162
__asm mov eax, 10100010b
-// CHECK: mov eax, $$10100010b
+// CHECK: mov eax, $$162
__asm mov eax, 10100010BU
// CHECK: mov eax, $$162
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
@@ -344,7 +344,7 @@ void t26() {
void t27() {
__asm mov eax, fs:[0h]
// CHECK: t27
-// CHECK: call void asm sideeffect inteldialect "mov eax, fs:[$$0h]", "~{eax},~{dirflag},~{fpsr},~{flags}"()
+// CHECK: call void asm sideeffect inteldialect "mov eax, fs:[$$0]", "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
void t28() {
@@ -426,18 +426,18 @@ void t33() {
void t34() {
// CHECK: t34
__asm prefetchnta 64[eax]
-// CHECK: prefetchnta $$64[eax]
+// CHECK: prefetchnta [eax + $$64]
__asm mov eax, dword ptr 4[eax]
-// CHECK: mov eax, dword ptr $$4[eax]
+// CHECK: mov eax, dword ptr [eax + $$4]
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
void t35() {
// CHECK: t35
__asm prefetchnta [eax + (200*64)]
-// CHECK: prefetchnta [eax + ($$200*$$64)]
+// CHECK: prefetchnta [eax + $$12800]
__asm mov eax, dword ptr [eax + (200*64)]
-// CHECK: mov eax, dword ptr [eax + ($$200*$$64)]
+// CHECK: mov eax, dword ptr [eax + $$12800]
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
@@ -446,29 +446,29 @@ void t36() {
int arr[4];
// Work around PR20368: These should be single line blocks
__asm { mov eax, 4[arr] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4[arr + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$8$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$8]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 8[arr + 4 + 32*2 - 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$72$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$72]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 12[4 + arr] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$16$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$16]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4[4 + arr + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$12$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$12]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4[64 + arr + (2*32)] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$132$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$132]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4[64 + arr - 2*32] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, [arr + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, [arr + 4 + 32*2 - 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$64$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$64]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, [4 + arr] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, [4 + arr + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$8$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$8]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, [64 + arr + (2*32)] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$128$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$128]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, [64 + arr - 2*32] }
// CHECK: call void asm sideeffect inteldialect "mov eax, $0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
}
@@ -484,13 +484,13 @@ void t37() {
__asm mov eax, (4 + 4) * 16
// CHECK: mov eax, $$128
__asm mov eax, 4 + 8 * -16
-// CHECK: mov eax, $$4294967172
+// CHECK: mov eax, $$-124
__asm mov eax, 4 + 16 / -8
// CHECK: mov eax, $$2
__asm mov eax, (16 + 16) / -8
-// CHECK: mov eax, $$4294967292
+// CHECK: mov eax, $$-4
__asm mov eax, ~15
-// CHECK: mov eax, $$4294967280
+// CHECK: mov eax, $$-16
__asm mov eax, 6 ^ 3
// CHECK: mov eax, $$5
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
@@ -501,21 +501,21 @@ void t38() {
int arr[4];
// Work around PR20368: These should be single line blocks
__asm { mov eax, 4+4[arr] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$8$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$8]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, (4+4)[arr + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$12$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$12]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 8*2[arr + 4 + 32*2 - 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$80$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$80]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 12+20[4 + arr] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$36$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$36]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4*16+4[4 + arr + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$76$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$76]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4*4[64 + arr + (2*32)] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$144$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$144]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 4*(4-2)[64 + arr - 2*32] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$8$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$8]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
__asm { mov eax, 32*(4-2)[arr - 2*32] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$0$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"([4 x i32]* %{{.*}})
}
void cpuid() {
@@ -527,7 +527,7 @@ void cpuid() {
typedef struct {
int a;
int b;
-} A;
+} A, *pA;
typedef struct {
int b1;
@@ -539,24 +539,28 @@ typedef struct {
A c2;
int c3;
B c4;
-} C;
+} C, *pC;
void t39() {
// CHECK-LABEL: define void @t39
__asm mov eax, [eax].A.b
-// CHECK: mov eax, [eax].4
+// CHECK: mov eax, [eax + $$4]
__asm mov eax, [eax] A.b
-// CHECK: mov eax, [eax] .4
+// CHECK: mov eax, [eax + $$4]
+ __asm mov eax, [eax] pA.b
+// CHECK: mov eax, [eax + $$4]
__asm mov eax, fs:[0] A.b
-// CHECK: mov eax, fs:[$$0] .4
+// CHECK: mov eax, fs:[$$4]
__asm mov eax, [eax].B.b2.a
-// CHECK: mov eax, [eax].4
+// CHECK: mov eax, [eax + $$4]
__asm mov eax, [eax] B.b2.b
-// CHECK: mov eax, [eax] .8
+// CHECK: mov eax, [eax + $$8]
__asm mov eax, fs:[0] C.c2.b
-// CHECK: mov eax, fs:[$$0] .8
+// CHECK: mov eax, fs:[$$8]
__asm mov eax, [eax]C.c4.b2.b
-// CHECK: mov eax, [eax].24
+// CHECK: mov eax, [eax + $$24]
+ __asm mov eax, [eax]pC.c4.b2.b
+// CHECK: mov eax, [eax + $$24]
// CHECK: "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
@@ -573,17 +577,17 @@ void t40(float a) {
void t41(unsigned short a) {
// CHECK-LABEL: define void @t41(i16 zeroext %a)
__asm mov cs, a;
-// CHECK: mov cs, word ptr $0
+// CHECK: mov cs, $0
__asm mov ds, a;
-// CHECK: mov ds, word ptr $1
+// CHECK: mov ds, $1
__asm mov es, a;
-// CHECK: mov es, word ptr $2
+// CHECK: mov es, $2
__asm mov fs, a;
-// CHECK: mov fs, word ptr $3
+// CHECK: mov fs, $3
__asm mov gs, a;
-// CHECK: mov gs, word ptr $4
+// CHECK: mov gs, $4
__asm mov ss, a;
-// CHECK: mov ss, word ptr $5
+// CHECK: mov ss, $5
// CHECK: "*m,*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i16* {{.*}}, i16* {{.*}}, i16* {{.*}}, i16* {{.*}}, i16* {{.*}}, i16* {{.*}})
}
@@ -600,37 +604,78 @@ void t43() {
C strct;
// Work around PR20368: These should be single line blocks
__asm { mov eax, 4[strct.c1] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, 4[strct.c3 + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$8$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$8]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, 8[strct.c2.a + 4 + 32*2 - 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$72$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$72]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, 12[4 + strct.c2.b] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$16$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$16]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, 4[4 + strct.c4.b2.b + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$12$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$12]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, 4[64 + strct.c1 + (2*32)] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$132$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$132]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, 4[64 + strct.c2.a - 2*32] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, [strct.c4.b1 + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, [strct.c4.b2.a + 4 + 32*2 - 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$64$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$64]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, [4 + strct.c1] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$4$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$4]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, [4 + strct.c2.b + 4] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$8$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$8]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, [64 + strct.c3 + (2*32)] }
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$128$0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
+// CHECK: call void asm sideeffect inteldialect "mov eax, $0[$$128]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
__asm { mov eax, [64 + strct.c4.b2.b - 2*32] }
// CHECK: call void asm sideeffect inteldialect "mov eax, $0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}})
}
+void t44() {
+ // CHECK-LABEL: define void @t44
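+  // Control registers are not addressable as MS inline-asm operands, so the
+  // compiler is expected to record cr0-cr4 in the clobber list (see the CHECK
+  // line below).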
+ __asm {
+ mov cr0, eax
+ mov cr2, ebx
+ mov cr3, ecx
+ mov cr4, edx
+ }
+ // CHECK: call void asm sideeffect inteldialect "mov cr0, eax\0A\09mov cr2, ebx\0A\09mov cr3, ecx\0A\09mov cr4, edx", "~{cr0},~{cr2},~{cr3},~{cr4},~{dirflag},~{fpsr},~{flags}"()
+}
+
+void t45() {
+ // CHECK-LABEL: define void @t45
+ __asm {
+ mov dr0, eax
+ mov dr1, ebx
+ mov dr2, ebx
+ mov dr3, ecx
+ mov dr6, edx
+ mov dr7, ecx
+ }
+ // CHECK: call void asm sideeffect inteldialect "mov dr0, eax\0A\09mov dr1, ebx\0A\09mov dr2, ebx\0A\09mov dr3, ecx\0A\09mov dr6, edx\0A\09mov dr7, ecx", "~{dr0},~{dr1},~{dr2},~{dr3},~{dr6},~{dr7},~{dirflag},~{fpsr},~{flags}"()
+}
+
+void t46() {
+ // CHECK-LABEL: define void @t46
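+  // A negative displacement written before the brackets should fold into the
+  // memory operand as [eax + $$-128].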
+ __asm add eax, -128[eax]
+ // CHECK: call void asm sideeffect inteldialect "add eax, [eax + $$-128]", "~{eax},~{flags},~{dirflag},~{fpsr},~{flags}"()
+}
+
+void t47() {
+ // CHECK-LABEL: define void @t47
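+  // The MPX bound registers bnd0-bnd3 are likewise expected to be tracked via
+  // the clobber list rather than as operands.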
+ __asm {
+ bndmk bnd0, dword ptr [eax]
+ bndmk bnd1, dword ptr [ebx]
+ bndmk bnd2, dword ptr [ecx]
+ bndmk bnd3, dword ptr [edx]
+ }
+ // CHECK: call void asm sideeffect inteldialect "bndmk bnd0, dword ptr [eax]\0A\09bndmk bnd1, dword ptr [ebx]\0A\09bndmk bnd2, dword ptr [ecx]\0A\09bndmk bnd3, dword ptr [edx]", "~{bnd0},~{bnd1},~{bnd2},~{bnd3},~{dirflag},~{fpsr},~{flags}"()
+}
+
void dot_operator(){
-// CHECK-LABEL: define void @dot_operator
+ // CHECK-LABEL: define void @dot_operator
__asm { mov eax, 3[ebx]A.b}
-// CHECK: call void asm sideeffect inteldialect "mov eax, $$3[ebx].4", "~{eax},~{dirflag},~{fpsr},~{flags}"
+ // CHECK: call void asm sideeffect inteldialect "mov eax, [ebx + $$7]", "~{eax},~{dirflag},~{fpsr},~{flags}"
}
void call_clobber() {
@@ -693,10 +738,12 @@ void label5() {
void label6(){
__asm {
jmp short label
+ jc short label
+ jz short label
label:
}
// CHECK-LABEL: define void @label6
- // CHECK: call void asm sideeffect inteldialect "jmp {{.*}}__MSASMLABEL_.${:uid}__label\0A\09{{.*}}__MSASMLABEL_.${:uid}__label:", "~{dirflag},~{fpsr},~{flags}"()
+ // CHECK: jmp {{.*}}__MSASMLABEL_.${:uid}__label\0A\09jc {{.*}}__MSASMLABEL_.${:uid}__label\0A\09jz {{.*}}__MSASMLABEL_.${:uid}__label\0A\09{{.*}}__MSASMLABEL_.${:uid}__label:"
}
// Don't include mxcsr in the clobber list.
diff --git a/test/CodeGen/ms-inline-asm.cpp b/test/CodeGen/ms-inline-asm.cpp
index a435e4b826d8..039cde9e10ed 100644
--- a/test/CodeGen/ms-inline-asm.cpp
+++ b/test/CodeGen/ms-inline-asm.cpp
@@ -130,7 +130,7 @@ void t7_struct() {
__asm mov eax, [eax].A.b
// CHECK-LABEL: define void @_Z9t7_structv
// CHECK: call void asm sideeffect inteldialect
- // CHECK-SAME: mov eax, [eax].4
+ // CHECK-SAME: mov eax, [eax + $$4]
// CHECK-SAME: "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
@@ -142,7 +142,7 @@ void t7_typedef() {
__asm mov eax, [eax].A.b
// CHECK-LABEL: define void @_Z10t7_typedefv
// CHECK: call void asm sideeffect inteldialect
- // CHECK-SAME: mov eax, [eax].4
+ // CHECK-SAME: mov eax, [eax + $$4]
// CHECK-SAME: "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
@@ -154,7 +154,7 @@ void t7_using() {
__asm mov eax, [eax].A.b
// CHECK-LABEL: define void @_Z8t7_usingv
// CHECK: call void asm sideeffect inteldialect
- // CHECK-SAME: mov eax, [eax].4
+ // CHECK-SAME: mov eax, [eax + $$4]
// CHECK-SAME: "~{eax},~{dirflag},~{fpsr},~{flags}"()
}
@@ -180,3 +180,19 @@ void t8() {
A::g();
}
+void t9() {
+ // CHECK-LABEL: define void @_Z2t9v()
+ struct A {
+ int a;
+ int b;
+ void g() {
+ __asm mov eax, dword ptr [eax]this.b
+ // CHECK: call void asm sideeffect inteldialect
+ // CHECK-SAME: mov eax, dword ptr [eax + $$4]
+ // CHECK-SAME: "~{eax},~{dirflag},~{fpsr},~{flags}"()
+ }
+ };
+ A AA;
+ AA.g();
+}
+
diff --git a/test/CodeGen/ms-intrinsics.c b/test/CodeGen/ms-intrinsics.c
index 818be7fd7ffd..38cda9785029 100644
--- a/test/CodeGen/ms-intrinsics.c
+++ b/test/CodeGen/ms-intrinsics.c
@@ -5,7 +5,7 @@
// RUN: -triple thumbv7--windows -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-X64
// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
-// RUN: -triple x86_64--windows -Oz -emit-llvm %s -o - \
+// RUN: -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \
// RUN: | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL
// intrin.h needs size_t, but -ffreestanding prevents us from getting it from
@@ -329,6 +329,27 @@ __int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64
// CHECK: ret i64 [[RESULT]]
// CHECK: }
+#if defined(__x86_64__)
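+// _InterlockedCompareExchange128 is only available on x86-64; it lowers to a
+// 128-bit cmpxchg, which is why the RUN line above now passes
+// -target-feature +cx16 (cmpxchg16b).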
+unsigned char test_InterlockedCompareExchange128(__int64 volatile *Destination, __int64 ExchangeHigh, __int64 ExchangeLow, __int64* ComparandResult) {
+ return _InterlockedCompareExchange128(Destination, ExchangeHigh, ExchangeLow, ComparandResult);
+}
+// CHECK-X64: define{{.*}}i8 @test_InterlockedCompareExchange128(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, i64*{{[a-z_ ]*}}%ComparandResult){{.*}}{
+// CHECK-X64: [[DST:%[0-9]+]] = bitcast i64* %Destination to i128*
+// CHECK-X64: [[EH:%[0-9]+]] = zext i64 %ExchangeHigh to i128
+// CHECK-X64: [[EL:%[0-9]+]] = zext i64 %ExchangeLow to i128
+// CHECK-X64: [[CNR:%[0-9]+]] = bitcast i64* %ComparandResult to i128*
+// CHECK-X64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64
+// CHECK-X64: [[EXP:%[0-9]+]] = or i128 [[EHS]], [[EL]]
+// CHECK-X64: [[ORG:%[0-9]+]] = load i128, i128* [[CNR]], align 16
+// CHECK-X64: [[RES:%[0-9]+]] = cmpxchg volatile i128* [[DST]], i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst
+// CHECK-X64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0
+// CHECK-X64: store i128 [[OLD]], i128* [[CNR]], align 16
+// CHECK-X64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1
+// CHECK-X64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8
+// CHECK-X64: ret i8 [[SUC8]]
+// CHECK-X64: }
+#endif
+
short test_InterlockedIncrement16(short volatile *Addend) {
return _InterlockedIncrement16(Addend);
}
diff --git a/test/CodeGen/nobuiltin.c b/test/CodeGen/nobuiltin.c
index f80c3c332ab4..543918f67491 100644
--- a/test/CodeGen/nobuiltin.c
+++ b/test/CodeGen/nobuiltin.c
@@ -4,6 +4,10 @@
// RUN: %clang_cc1 -triple x86_64-linux-gnu -fno-builtin -O1 -S -o - %s | FileCheck -check-prefix=NOSTRCPY -check-prefix=NOMEMSET %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -fno-builtin-memset -O1 -S -o - %s | FileCheck -check-prefix=STRCPY -check-prefix=NOMEMSET %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -O1 -fexperimental-new-pass-manager -S -o - %s | FileCheck -check-prefix=STRCPY -check-prefix=MEMSET %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fno-builtin -O1 -fexperimental-new-pass-manager -S -o - %s | FileCheck -check-prefix=NOSTRCPY -check-prefix=NOMEMSET %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fno-builtin-memset -O1 -fexperimental-new-pass-manager -S -o - %s | FileCheck -check-prefix=STRCPY -check-prefix=NOMEMSET %s
+
void PR13497() {
char content[2];
// make sure we don't optimize this call to strcpy()
diff --git a/test/CodeGen/noplt.c b/test/CodeGen/noplt.c
new file mode 100644
index 000000000000..f467199efab2
--- /dev/null
+++ b/test/CodeGen/noplt.c
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -emit-llvm -fno-plt %s -o - | FileCheck %s -check-prefix=CHECK-NOPLT
+
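+// With -fno-plt, calls to external functions should go through the GOT rather
+// than the PLT, which is modeled by the nonlazybind attribute checked below.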
+// CHECK-NOPLT: Function Attrs: nonlazybind
+// CHECK-NOPLT-NEXT: declare {{.*}}i32 @foo
+int foo();
+
+int bar() {
+ return foo();
+}
diff --git a/test/CodeGen/nullptr-arithmetic.c b/test/CodeGen/nullptr-arithmetic.c
new file mode 100644
index 000000000000..ce9c9765b0f7
--- /dev/null
+++ b/test/CodeGen/nullptr-arithmetic.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -S %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -S %s -emit-llvm -triple i686-unknown-unknown -o - | FileCheck %s
+// RUN: %clang_cc1 -S %s -emit-llvm -triple x86_64-unknown-unknown -o - | FileCheck %s
+
+#include <stdint.h>
+
+// This test is meant to verify code that handles the 'p = nullptr + n' idiom
+// used by some versions of glibc and gcc. This is undefined behavior, but
+// in those libraries it is intended to act like a conversion from a
+// pointer-sized integer to a pointer, and we would like to tolerate that.
+
+#define NULLPTRI8 ((int8_t*)0)
+
+// This should get the inttoptr instruction.
+int8_t *test1(intptr_t n) {
+ return NULLPTRI8 + n;
+}
+// CHECK-LABEL: test1
+// CHECK: inttoptr
+// CHECK-NOT: getelementptr
+
+// This doesn't meet the idiom because the element type is larger than a byte.
+int16_t *test2(intptr_t n) {
+ return (int16_t*)0 + n;
+}
+// CHECK-LABEL: test2
+// CHECK: getelementptr
+// CHECK-NOT: inttoptr
+
+// This doesn't meet the idiom because the offset is subtracted.
+int8_t* test3(intptr_t n) {
+ return NULLPTRI8 - n;
+}
+// CHECK-LABEL: test3
+// CHECK: getelementptr
+// CHECK-NOT: inttoptr
+
+// This checks the case where the offset isn't pointer-sized.
+// The front end will implicitly cast the offset to an integer, so we need to
+// make sure that doesn't cause problems on targets where integers and pointers
+// are not the same size.
+int8_t *test4(int8_t b) {
+ return NULLPTRI8 + b;
+}
+// CHECK-LABEL: test4
+// CHECK: inttoptr
+// CHECK-NOT: getelementptr
diff --git a/test/CodeGen/pascal-wchar-string.c b/test/CodeGen/pascal-wchar-string.c
index 626fc99f15fb..ac36e4028f4c 100644
--- a/test/CodeGen/pascal-wchar-string.c
+++ b/test/CodeGen/pascal-wchar-string.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -emit-llvm -o - %s -fpascal-strings -fshort-wchar | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm -o - %s -fpascal-strings -fwchar-type=short -fno-signed-wchar | FileCheck %s
// rdar://8020384
#include <stddef.h>
diff --git a/test/CodeGen/ppc-vector-compare.cc b/test/CodeGen/ppc-vector-compare.cc
new file mode 100644
index 000000000000..e1c92bb6bef3
--- /dev/null
+++ b/test/CodeGen/ppc-vector-compare.cc
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s \
+// RUN: -o - | FileCheck %s
+
+#include <altivec.h>
+
+// CHECK-LABEL: @_Z5test1Dv8_tS_
+// CHECK: @llvm.ppc.altivec.vcmpequh.p
+bool test1(vector unsigned short v1, vector unsigned short v2) {
+ return v1 == v2;
+}
+
+// CHECK-LABEL: @_Z5test2Dv2_mS_Dv2_lS0_Dv2_yS1_Dv2_xS2_Dv2_dS3_
+bool test2(vector unsigned long v1, vector unsigned long v2,
+ vector long v3, vector long v4,
+ vector unsigned long long v5, vector unsigned long long v6,
+ vector long long v7, vector long long v8,
+ vector double v9, vector double v10) {
+ // CHECK: @llvm.ppc.altivec.vcmpequd.p
+ bool res = v1 == v2;
+
+ // CHECK: @llvm.ppc.altivec.vcmpequd.p
+ res |= v3 == v4;
+
+ // CHECK: @llvm.ppc.altivec.vcmpequd.p
+ res |= v5 == v6;
+
+ // CHECK: @llvm.ppc.altivec.vcmpequd.p
+ res |= v7 == v8;
+
+ // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
+ res |= v9 == v10;
+ return res;
+}
+
diff --git a/test/CodeGen/pr34021.c b/test/CodeGen/pr34021.c
new file mode 100644
index 000000000000..3c7a75a95aa9
--- /dev/null
+++ b/test/CodeGen/pr34021.c
@@ -0,0 +1,25 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -fms-extensions %s -triple=i686-unknown-unknown -emit-llvm -o - | FileCheck %s --check-prefix=X86
+// RUN: %clang_cc1 -fms-extensions %s -triple=x86_64-unknown-unknown -emit-llvm -o - | FileCheck %s --check-prefix=X64
+
+typedef int v4si __attribute__ ((vector_size (16)));
+v4si rep() {
+// X86-LABEL: define <4 x i32> @rep
+// X86: %[[ALLOCA0:.*]] = alloca <4 x i32>, align 16
+// X86: %[[ALLOCA1:.*]] = alloca <4 x i32>, align 16
+// X86: %[[BITCAST:.*]] = bitcast <4 x i32>* %[[ALLOCA0]] to i128*
+// X86: %[[ASM:.*]] = call i64 asm sideeffect inteldialect "", "=A,~{dirflag},~{fpsr},~{flags}"()
+// X86: %[[ZEXT:.*]] = zext i64 %[[ASM]] to i128
+// X86: store i128 %[[ZEXT]], i128* %[[BITCAST]], align 16
+// X86: %[[LOAD:.*]] = load <4 x i32>, <4 x i32>* %[[ALLOCA1]], align 16
+// X86: ret <4 x i32> %[[LOAD]]
+//
+// X64-LABEL: define <4 x i32> @rep
+// X64: %[[ALLOCA:.*]] = alloca <4 x i32>, align 16
+// X64: call void asm sideeffect inteldialect "", "~{dirflag},~{fpsr},~{flags}"()
+// X64: %[[LOAD:.*]] = load <4 x i32>, <4 x i32>* %[[ALLOCA]], align 16
+// X64: ret <4 x i32> %[[LOAD]]
+ v4si res;
+ __asm {}
+ return res;
+}
diff --git a/test/CodeGen/pragma-comment.c b/test/CodeGen/pragma-comment.c
index e20efacdcb22..fae9b8fb9ed8 100644
--- a/test/CodeGen/pragma-comment.c
+++ b/test/CodeGen/pragma-comment.c
@@ -4,6 +4,7 @@
// RUN: %clang_cc1 %s -triple thumbv7-linux-gnueabihf -fms-extensions -emit-llvm -o - | FileCheck -check-prefix LINUX %s
// RUN: %clang_cc1 %s -triple i686-pc-linux -fms-extensions -emit-llvm -o - | FileCheck -check-prefix LINUX %s
// RUN: %clang_cc1 %s -triple x86_64-scei-ps4 -fms-extensions -emit-llvm -o - | FileCheck -check-prefix PS4 %s
+// RUN: %clang_cc1 %s -triple aarch64-windows-msvc -fms-extensions -emit-llvm -o - | FileCheck %s
#pragma comment(lib, "msvcrt.lib")
#pragma comment(lib, "kernel32")
diff --git a/test/CodeGen/preserve-call-conv.c b/test/CodeGen/preserve-call-conv.c
index 6e91a8489b40..b67e29f392a4 100644
--- a/test/CodeGen/preserve-call-conv.c
+++ b/test/CodeGen/preserve-call-conv.c
@@ -1,6 +1,9 @@
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm < %s | FileCheck %s
// RUN: %clang_cc1 -triple arm64-unknown-unknown -emit-llvm < %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-unknown-windows-msvc -emit-llvm %s -o - | FileCheck %s
+
// Check that the preserve_most calling convention attribute at the source level
// is lowered to the corresponding calling convention attribute at the LLVM IR
// level.
diff --git a/test/CodeGen/profile-sample-accurate.c b/test/CodeGen/profile-sample-accurate.c
new file mode 100644
index 000000000000..556cad753c3c
--- /dev/null
+++ b/test/CodeGen/profile-sample-accurate.c
@@ -0,0 +1,7 @@
+// Test to ensure the "profile-sample-accurate" attribute is emitted when
+// clang is invoked with -fprofile-sample-accurate.
+// RUN: %clang -S -emit-llvm %s -fprofile-sample-accurate -o - | FileCheck %s
+
+// CHECK: define{{.*}} void @foo()
+// CHECK: attributes{{.*}} "profile-sample-accurate"
+void foo() {
+}
diff --git a/test/CodeGen/push-hidden-visibility-subclass.cpp b/test/CodeGen/push-hidden-visibility-subclass.cpp
new file mode 100644
index 000000000000..82bf65309abc
--- /dev/null
+++ b/test/CodeGen/push-hidden-visibility-subclass.cpp
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -triple x86_64-linux-unknown -emit-llvm %s -o - | FileCheck %s
+
+#pragma GCC visibility push(hidden)
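+
+// The pushed hidden visibility should also apply to Child's implicit vtable,
+// which the CHECK line at the bottom of this file verifies.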
+
+struct Base {
+ virtual ~Base() = default;
+ virtual void* Alloc() = 0;
+};
+
+class Child : public Base {
+public:
+ Child() = default;
+ void* Alloc();
+};
+
+void test() {
+ Child x;
+}
+
+// CHECK: @_ZTV5Child = external hidden unnamed_addr constant
diff --git a/test/CodeGen/sanitizer-special-case-list.c b/test/CodeGen/sanitizer-special-case-list.c
new file mode 100644
index 000000000000..45cfecd7efb0
--- /dev/null
+++ b/test/CodeGen/sanitizer-special-case-list.c
@@ -0,0 +1,26 @@
+// Verify that blacklist sections correctly select sanitizers to apply blacklist entries to.
+//
+// RUN: %clang_cc1 -fsanitize=unsigned-integer-overflow,cfi-icall -fsanitize-blacklist=%S/Inputs/sanitizer-special-case-list.unsanitized1.txt -emit-llvm %s -o - | FileCheck %s --check-prefix=UNSANITIZED
+// RUN: %clang_cc1 -fsanitize=unsigned-integer-overflow,cfi-icall -fsanitize-blacklist=%S/Inputs/sanitizer-special-case-list.unsanitized2.txt -emit-llvm %s -o - | FileCheck %s --check-prefix=UNSANITIZED
+// RUN: %clang_cc1 -fsanitize=unsigned-integer-overflow,cfi-icall -fsanitize-blacklist=%S/Inputs/sanitizer-special-case-list.unsanitized3.txt -emit-llvm %s -o - | FileCheck %s --check-prefix=UNSANITIZED
+// RUN: %clang_cc1 -fsanitize=unsigned-integer-overflow,cfi-icall -fsanitize-blacklist=%S/Inputs/sanitizer-special-case-list.unsanitized4.txt -emit-llvm %s -o - | FileCheck %s --check-prefix=UNSANITIZED
+//
+// RUN: %clang_cc1 -fsanitize=unsigned-integer-overflow,cfi-icall -fsanitize-blacklist=%S/Inputs/sanitizer-special-case-list.sanitized.txt -emit-llvm %s -o - | FileCheck %s --check-prefix=SANITIZED
+
+unsigned i;
+
+// SANITIZED: @overflow
+// UNSANITIZED: @overflow
+unsigned overflow() {
+ // SANITIZED: call {{.*}}void @__ubsan
+ // UNSANITIZED-NOT: call {{.*}}void @__ubsan
+ return i * 37;
+}
+
+// SANITIZED: @cfi
+// UNSANITIZED: @cfi
+void cfi(void (*fp)()) {
+ // SANITIZED: llvm.type.test
+ // UNSANITIZED-NOT: llvm.type.test
+ fp();
+}
diff --git a/test/CodeGen/sse2-builtins.c b/test/CodeGen/sse2-builtins.c
index ca51314d80b8..c2279cb88109 100644
--- a/test/CodeGen/sse2-builtins.c
+++ b/test/CodeGen/sse2-builtins.c
@@ -97,13 +97,25 @@ __m128i test_mm_andnot_si128(__m128i A, __m128i B) {
__m128i test_mm_avg_epu8(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_avg_epu8
- // CHECK: call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-NOT: call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
+ // CHECK: zext <16 x i8> %{{.*}} to <16 x i16>
+ // CHECK: add <16 x i16> %{{.*}}, %{{.*}}
+ // CHECK: add <16 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ // CHECK: lshr <16 x i16> %{{.*}}, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  // CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
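+  // i.e. the pavg intrinsics are now expanded to the generic
+  // (a + b + 1) >> 1 computation in a widened type.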
return _mm_avg_epu8(A, B);
}
__m128i test_mm_avg_epu16(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_avg_epu16
- // CHECK: call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK-NOT: call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
+ // CHECK: zext <8 x i16> %{{.*}} to <8 x i32>
+ // CHECK: add <8 x i32> %{{.*}}, %{{.*}}
+ // CHECK: add <8 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: lshr <8 x i32> %{{.*}}, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ // CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
return _mm_avg_epu16(A, B);
}
diff --git a/test/CodeGen/ssse3-builtins.c b/test/CodeGen/ssse3-builtins.c
index b2279e277cd2..4fd22aa79b46 100644
--- a/test/CodeGen/ssse3-builtins.c
+++ b/test/CodeGen/ssse3-builtins.c
@@ -7,19 +7,25 @@
__m128i test_mm_abs_epi8(__m128i a) {
// CHECK-LABEL: test_mm_abs_epi8
- // CHECK: call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %{{.*}})
+ // CHECK: [[SUB:%.+]] = sub <16 x i8> zeroinitializer, [[A:%.+]]
+ // CHECK: [[CMP:%.+]] = icmp sgt <16 x i8> [[A]], zeroinitializer
+ // CHECK: %{{.*}} = select <16 x i1> [[CMP]], <16 x i8> [[A]], <16 x i8> [[SUB]]
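+  // i.e. the pabs intrinsics are now expanded to the generic
+  // select(x > 0, x, 0 - x) pattern.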
return _mm_abs_epi8(a);
}
__m128i test_mm_abs_epi16(__m128i a) {
// CHECK-LABEL: test_mm_abs_epi16
- // CHECK: call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %{{.*}})
+ // CHECK: [[SUB:%.+]] = sub <8 x i16> zeroinitializer, [[A:%.+]]
+ // CHECK: [[CMP:%.+]] = icmp sgt <8 x i16> [[A]], zeroinitializer
+ // CHECK: %{{.*}} = select <8 x i1> [[CMP]], <8 x i16> [[A]], <8 x i16> [[SUB]]
return _mm_abs_epi16(a);
}
__m128i test_mm_abs_epi32(__m128i a) {
// CHECK-LABEL: test_mm_abs_epi32
- // CHECK: call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %{{.*}})
+ // CHECK: [[SUB:%.+]] = sub <4 x i32> zeroinitializer, [[A:%.+]]
+ // CHECK: [[CMP:%.+]] = icmp sgt <4 x i32> [[A]], zeroinitializer
+ // CHECK: %{{.*}} = select <4 x i1> [[CMP]], <4 x i32> [[A]], <4 x i32> [[SUB]]
return _mm_abs_epi32(a);
}
diff --git a/test/CodeGen/string-literal-short-wstring.c b/test/CodeGen/string-literal-short-wstring.c
index 01de6a4d8027..fb1fe0cad0a8 100644
--- a/test/CodeGen/string-literal-short-wstring.c
+++ b/test/CodeGen/string-literal-short-wstring.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -x c++ -triple %itanium_abi_triple -emit-llvm -fshort-wchar %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=ITANIUM
-// RUN: %clang_cc1 -x c++ -triple %ms_abi_triple -emit-llvm -fshort-wchar %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=MSABI
+// RUN: %clang_cc1 -x c++ -triple %itanium_abi_triple -emit-llvm -fwchar-type=short -fno-signed-wchar %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=ITANIUM
+// RUN: %clang_cc1 -x c++ -triple %ms_abi_triple -emit-llvm -fwchar-type=short -fno-signed-wchar %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=MSABI
// Runs in c++ mode so that wchar_t is available.
// XFAIL: hexagon
diff --git a/test/CodeGen/string-literal-unicode-conversion.c b/test/CodeGen/string-literal-unicode-conversion.c
index 23205b80b027..71286b1fed23 100644
--- a/test/CodeGen/string-literal-unicode-conversion.c
+++ b/test/CodeGen/string-literal-unicode-conversion.c
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-C %s
// RUN: %clang_cc1 -x c++ -std=c++0x -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-CPP0X %s
-// RUN: %clang_cc1 -x c++ -std=c++0x -fshort-wchar -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-SHORTWCHAR %s
+// RUN: %clang_cc1 -x c++ -std=c++0x -fwchar-type=short -fno-signed-wchar -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-SHORTWCHAR %s
// This file contains a mix of ISO-8859-1 and UTF-8 encoded data.
// The literal assigned to 'aa' should be the ISO-8859-1 encoding for the code
diff --git a/test/CodeGen/target-builtin-noerror.c b/test/CodeGen/target-builtin-noerror.c
index 2a7d69f1089f..37820b33bada 100644
--- a/test/CodeGen/target-builtin-noerror.c
+++ b/test/CodeGen/target-builtin-noerror.c
@@ -72,4 +72,42 @@ void verifyfeaturestrings() {
(void)__builtin_cpu_supports("avx512pf");
(void)__builtin_cpu_supports("avx512vbmi");
(void)__builtin_cpu_supports("avx512ifma");
+ (void)__builtin_cpu_supports("avx5124vnniw");
+ (void)__builtin_cpu_supports("avx5124fmaps");
+ (void)__builtin_cpu_supports("avx512vpopcntdq");
+}
+
+void verifycpustrings() {
+ (void)__builtin_cpu_is("amd");
+ (void)__builtin_cpu_is("amdfam10h");
+ (void)__builtin_cpu_is("amdfam15h");
+ (void)__builtin_cpu_is("amdfam17h");
+ (void)__builtin_cpu_is("atom");
+ (void)__builtin_cpu_is("barcelona");
+ (void)__builtin_cpu_is("bdver1");
+ (void)__builtin_cpu_is("bdver2");
+ (void)__builtin_cpu_is("bdver3");
+ (void)__builtin_cpu_is("bdver4");
+ (void)__builtin_cpu_is("bonnell");
+ (void)__builtin_cpu_is("broadwell");
+ (void)__builtin_cpu_is("btver1");
+ (void)__builtin_cpu_is("btver2");
+ (void)__builtin_cpu_is("cannonlake");
+ (void)__builtin_cpu_is("core2");
+ (void)__builtin_cpu_is("corei7");
+ (void)__builtin_cpu_is("haswell");
+ (void)__builtin_cpu_is("intel");
+ (void)__builtin_cpu_is("istanbul");
+ (void)__builtin_cpu_is("ivybridge");
+ (void)__builtin_cpu_is("knl");
+ (void)__builtin_cpu_is("knm");
+ (void)__builtin_cpu_is("nehalem");
+ (void)__builtin_cpu_is("sandybridge");
+ (void)__builtin_cpu_is("shanghai");
+ (void)__builtin_cpu_is("silvermont");
+ (void)__builtin_cpu_is("skylake");
+ (void)__builtin_cpu_is("skylake-avx512");
+ (void)__builtin_cpu_is("slm");
+ (void)__builtin_cpu_is("westmere");
+ (void)__builtin_cpu_is("znver1");
}
diff --git a/test/CodeGen/target-data.c b/test/CodeGen/target-data.c
index 851ce5831fa3..3869afec7858 100644
--- a/test/CodeGen/target-data.c
+++ b/test/CodeGen/target-data.c
@@ -116,11 +116,11 @@
// RUN: %clang_cc1 -triple nvptx-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX
-// NVPTX: target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
+// NVPTX: target datalayout = "e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
// RUN: %clang_cc1 -triple nvptx64-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX64
-// NVPTX64: target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+// NVPTX64: target datalayout = "e-i64:64-i128:128-v16:16-v32:32-n16:32:64"
// RUN: %clang_cc1 -triple r600-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=R600
diff --git a/test/CodeGen/tbaa-array.cpp b/test/CodeGen/tbaa-array.cpp
new file mode 100644
index 000000000000..86ca5ccb40dc
--- /dev/null
+++ b/test/CodeGen/tbaa-array.cpp
@@ -0,0 +1,18 @@
+// RUN: %clang_cc1 -triple x86_64-linux -O1 -disable-llvm-passes %s \
+// RUN: -emit-llvm -o - | FileCheck %s
+//
+// Check that we generate correct TBAA information for accesses to array
+// elements.
+
+struct A { int i; };
+struct B { A a[1]; };
+
+int foo(B *b) {
+// CHECK-LABEL: _Z3fooP1B
+// CHECK: load i32, {{.*}}, !tbaa [[TAG_A_i:!.*]]
+ return b->a->i;
+}
+
+// CHECK-DAG: [[TAG_A_i]] = !{[[TYPE_A:!.*]], [[TYPE_int:!.*]], i64 0}
+// CHECK-DAG: [[TYPE_A]] = !{!"_ZTS1A", !{{.*}}, i64 0}
+// CHECK-DAG: [[TYPE_int]] = !{!"int", !{{.*}}, i64 0}
diff --git a/test/CodeGen/tbaa-cast.cpp b/test/CodeGen/tbaa-cast.cpp
new file mode 100644
index 000000000000..2b9e31086664
--- /dev/null
+++ b/test/CodeGen/tbaa-cast.cpp
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -triple x86_64-linux -O1 -disable-llvm-passes %s \
+// RUN: -emit-llvm -o - | FileCheck %s
+//
+// Check that we generate correct TBAA information for lvalues constructed
+// through casts.
+
+struct V {
+ unsigned n;
+};
+
+struct S {
+ char bytes[4];
+};
+
+void foo(S *p) {
+// CHECK-LABEL: _Z3fooP1S
+// CHECK: store i32 5, {{.*}}, !tbaa [[TAG_V_n:!.*]]
+ ((V*)p->bytes)->n = 5;
+}
+
+// CHECK-DAG: [[TAG_V_n]] = !{[[TYPE_V:!.*]], [[TYPE_int:!.*]], i64 0}
+// CHECK-DAG: [[TYPE_V]] = !{!"_ZTS1V", !{{.*}}, i64 0}
+// CHECK-DAG: [[TYPE_int]] = !{!"int", !{{.*}}, i64 0}
diff --git a/test/CodeGen/tbaa-for-vptr.cpp b/test/CodeGen/tbaa-for-vptr.cpp
index 7b8ae2099e47..6136874cbfcb 100644
--- a/test/CodeGen/tbaa-for-vptr.cpp
+++ b/test/CodeGen/tbaa-for-vptr.cpp
@@ -23,12 +23,12 @@ void CallFoo(A *a, int (A::*fp)() const) {
}
// CHECK-LABEL: @_Z7CallFoo
-// CHECK: %{{.*}} = load {{.*}} !tbaa ![[NUM:[0-9]+]]
+// CHECK: %{{.*}} = load i32 (%struct.A*)**, {{.*}} !tbaa ![[NUM:[0-9]+]]
// CHECK: br i1
-// CHECK: load {{.*}}, !tbaa ![[NUM]]
+// CHECK: load i8*, {{.*}}, !tbaa ![[NUM]]
//
// CHECK-LABEL: @_ZN1AC2Ev
-// CHECK: store {{.*}} !tbaa ![[NUM]]
+// CHECK: store i32 (...)** {{.*}}, !tbaa ![[NUM]]
//
// CHECK: [[NUM]] = !{[[TYPE:!.*]], [[TYPE]], i64 0}
// CHECK: [[TYPE]] = !{!"vtable pointer", !{{.*}}
diff --git a/test/CodeGen/tbaa-reference.cpp b/test/CodeGen/tbaa-reference.cpp
new file mode 100644
index 000000000000..ecdbfbee7f14
--- /dev/null
+++ b/test/CodeGen/tbaa-reference.cpp
@@ -0,0 +1,37 @@
+// RUN: %clang_cc1 -triple x86_64-linux -O1 -disable-llvm-passes %s -emit-llvm -o - | FileCheck %s
+//
+// Check that we generate correct TBAA information for reference accesses.
+
+struct S;
+
+struct B {
+ S &s;
+ B(S &s);
+ S &get();
+};
+
+B::B(S &s) : s(s) {
+// CHECK-LABEL: _ZN1BC2ER1S
+// Check initialization of the reference parameter.
+// CHECK: store %struct.S* {{.*}}, %struct.S** {{.*}}, !tbaa [[TAG_pointer:!.*]]
+
+// Check loading of the reference parameter.
+// CHECK: load %struct.S*, %struct.S** {{.*}}, !tbaa [[TAG_pointer]]
+
+// Check initialization of the reference member.
+// CHECK: store %struct.S* {{.*}}, %struct.S** {{.*}}, !tbaa [[TAG_pointer]]
+}
+
+S &B::get() {
+// CHECK-LABEL: _ZN1B3getEv
+// Check that we access the reference as a structure member.
+// CHECK: load %struct.S*, %struct.S** {{.*}}, !tbaa [[TAG_B_s:!.*]]
+ return s;
+}
+
+// CHECK-DAG: [[TAG_pointer]] = !{[[TYPE_pointer:!.*]], [[TYPE_pointer]], i64 0}
+// CHECK-DAG: [[TAG_B_s]] = !{[[TYPE_B:!.*]], [[TYPE_pointer]], i64 0}
+//
+// CHECK-DAG: [[TYPE_B]] = !{!"_ZTS1B", [[TYPE_pointer]], i64 0}
+// CHECK-DAG: [[TYPE_pointer]] = !{!"any pointer", [[TYPE_char:!.*]], i64 0}
+// CHECK-DAG: [[TYPE_char]] = !{!"omnipotent char", {{!.*}}, i64 0}
diff --git a/test/CodeGen/tbm-builtins.c b/test/CodeGen/tbm-builtins.c
index 8e031408a4b6..136a1d41c4ec 100644
--- a/test/CodeGen/tbm-builtins.c
+++ b/test/CodeGen/tbm-builtins.c
@@ -1,8 +1,4 @@
-// RUN: %clang_cc1 -ffreestanding %s -O3 -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s
-// FIXME: The code generation checks for add/sub and/or are depending on the optimizer.
-// The REQUIRES keyword will be removed when the FIXME is complete.
-// REQUIRES: x86-registered-target
-
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s
#include <x86intrin.h>
@@ -28,134 +24,136 @@ unsigned long long test__bextri_u64_bigint(unsigned long long a) {
unsigned int test__blcfill_u32(unsigned int a) {
// CHECK-LABEL: test__blcfill_u32
- // CHECK: [[TMP:%.*]] = add i32 [[SRC:%.*]], 1
- // CHECK-NEXT: %{{.*}} = and i32 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = add i32 %{{.*}}, 1
+ // CHECK: %{{.*}} = and i32 %{{.*}}, [[TMP]]
return __blcfill_u32(a);
}
unsigned long long test__blcfill_u64(unsigned long long a) {
// CHECK-LABEL: test__blcfill_u64
- // CHECK: [[TMPT:%.*]] = add i64 [[SRC:%.*]], 1
- // CHECK-NEXT: %{{.*}} = and i64 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1
+ // CHECK: %{{.*}} = and i64 %{{.*}}, [[TMP]]
return __blcfill_u64(a);
}
unsigned int test__blci_u32(unsigned int a) {
// CHECK-LABEL: test__blci_u32
- // CHECK: [[TMP:%.*]] = sub i32 -2, [[SRC:%.*]]
- // CHECK-NEXT: %{{.*}} = or i32 [[TMP]], [[SRC]]
+ // CHECK: [[TMP1:%.*]] = add i32 %{{.*}}, 1
+ // CHECK: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
+ // CHECK: %{{.*}} = or i32 %{{.*}}, [[TMP2]]
return __blci_u32(a);
}
unsigned long long test__blci_u64(unsigned long long a) {
// CHECK-LABEL: test__blci_u64
- // CHECK: [[TMP:%.*]] = sub i64 -2, [[SRC:%.*]]
- // CHECK-NEXT: %{{.*}} = or i64 [[TMP]], [[SRC]]
+ // CHECK: [[TMP1:%.*]] = add i64 %{{.*}}, 1
+ // CHECK: [[TMP2:%.*]] = xor i64 [[TMP1]], -1
+ // CHECK: %{{.*}} = or i64 %{{.*}}, [[TMP2]]
return __blci_u64(a);
}
unsigned int test__blcic_u32(unsigned int a) {
// CHECK-LABEL: test__blcic_u32
- // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SRC]], 1
- // CHECK-NEXT: {{.*}} = and i32 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i32 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = add i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = and i32 [[TMP1]], [[TMP2]]
return __blcic_u32(a);
}
unsigned long long test__blcic_u64(unsigned long long a) {
// CHECK-LABEL: test__blcic_u64
- // CHECK: [[TMP1:%.*]] = xor i64 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i64 [[SRC]], 1
- // CHECK-NEXT: {{.*}} = and i64 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = add i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]]
return __blcic_u64(a);
}
unsigned int test__blcmsk_u32(unsigned int a) {
// CHECK-LABEL: test__blcmsk_u32
- // CHECK: [[TMP:%.*]] = add i32 [[SRC:%.*]], 1
- // CHECK-NEXT: {{.*}} = xor i32 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = add i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = xor i32 %{{.*}}, [[TMP]]
return __blcmsk_u32(a);
}
unsigned long long test__blcmsk_u64(unsigned long long a) {
// CHECK-LABEL: test__blcmsk_u64
- // CHECK: [[TMP:%.*]] = add i64 [[SRC:%.*]], 1
- // CHECK-NEXT: {{.*}} = xor i64 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = xor i64 %{{.*}}, [[TMP]]
return __blcmsk_u64(a);
}
unsigned int test__blcs_u32(unsigned int a) {
// CHECK-LABEL: test__blcs_u32
- // CHECK: [[TMP:%.*]] = add i32 [[SRC:%.*]], 1
- // CHECK-NEXT: {{.*}} = or i32 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = add i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i32 %{{.*}}, [[TMP]]
return __blcs_u32(a);
}
unsigned long long test__blcs_u64(unsigned long long a) {
// CHECK-LABEL: test__blcs_u64
- // CHECK: [[TMP:%.*]] = add i64 [[SRC:%.*]], 1
- // CHECK-NEXT: {{.*}} = or i64 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]]
return __blcs_u64(a);
}
unsigned int test__blsfill_u32(unsigned int a) {
// CHECK-LABEL: test__blsfill_u32
- // CHECK: [[TMP:%.*]] = add i32 [[SRC:%.*]], -1
- // CHECK-NEXT: {{.*}} = or i32 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = sub i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i32 %{{.*}}, [[TMP]]
return __blsfill_u32(a);
}
unsigned long long test__blsfill_u64(unsigned long long a) {
// CHECK-LABEL: test__blsfill_u64
- // CHECK: [[TMP:%.*]] = add i64 [[SRC:%.*]], -1
- // CHECK-NEXT: {{.*}} = or i64 [[TMP]], [[SRC]]
+ // CHECK: [[TMP:%.*]] = sub i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]]
return __blsfill_u64(a);
}
unsigned int test__blsic_u32(unsigned int a) {
// CHECK-LABEL: test__blsic_u32
- // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SRC:%.*]], -1
- // CHECK-NEXT: {{.*}} = or i32 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i32 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = sub i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i32 [[TMP1]], [[TMP2]]
return __blsic_u32(a);
}
unsigned long long test__blsic_u64(unsigned long long a) {
// CHECK-LABEL: test__blsic_u64
- // CHECK: [[TMP1:%.*]] = xor i64 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i64 [[SRC:%.*]], -1
- // CHECK-NEXT: {{.*}} = or i64 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = sub i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]]
return __blsic_u64(a);
}
unsigned int test__t1mskc_u32(unsigned int a) {
// CHECK-LABEL: test__t1mskc_u32
- // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SRC:%.*]], 1
- // CHECK-NEXT: {{.*}} = or i32 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i32 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = add i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i32 [[TMP1]], [[TMP2]]
return __t1mskc_u32(a);
}
unsigned long long test__t1mskc_u64(unsigned long long a) {
// CHECK-LABEL: test__t1mskc_u64
- // CHECK: [[TMP1:%.*]] = xor i64 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i64 [[SRC:%.*]], 1
- // CHECK-NEXT: {{.*}} = or i64 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = add i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]]
return __t1mskc_u64(a);
}
unsigned int test__tzmsk_u32(unsigned int a) {
// CHECK-LABEL: test__tzmsk_u32
- // CHECK: [[TMP1:%.*]] = xor i32 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SRC:%.*]], -1
- // CHECK-NEXT: {{.*}} = and i32 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i32 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = sub i32 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = and i32 [[TMP1]], [[TMP2]]
return __tzmsk_u32(a);
}
unsigned long long test__tzmsk_u64(unsigned long long a) {
// CHECK-LABEL: test__tzmsk_u64
- // CHECK: [[TMP1:%.*]] = xor i64 [[SRC:%.*]], -1
- // CHECK-NEXT: [[TMP2:%.*]] = add i64 [[SRC:%.*]], -1
- // CHECK-NEXT: {{.*}} = and i64 [[TMP2]], [[TMP1]]
+ // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1
+ // CHECK: [[TMP2:%.*]] = sub i64 %{{.*}}, 1
+ // CHECK-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]]
return __tzmsk_u64(a);
}
diff --git a/test/CodeGen/thinlto-debug-pm.c b/test/CodeGen/thinlto-debug-pm.c
new file mode 100644
index 000000000000..2accde1f3625
--- /dev/null
+++ b/test/CodeGen/thinlto-debug-pm.c
@@ -0,0 +1,10 @@
+// Test to ensure -fdebug-pass-manager works when invoking the
+// ThinLTO backend path with the new PM.
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -o %t.o -flto=thin -fexperimental-new-pass-manager -triple x86_64-unknown-linux-gnu -emit-llvm-bc %s
+// RUN: llvm-lto -thinlto -o %t %t.o
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-obj -O2 -o %t2.o -x ir %t.o -fthinlto-index=%t.thinlto.bc -fdebug-pass-manager -fexperimental-new-pass-manager 2>&1 | FileCheck %s
+// CHECK: Running pass:
+
+void foo() {
+}
diff --git a/test/CodeGen/thinlto-emit-llvm.c b/test/CodeGen/thinlto-emit-llvm.c
index f611162d1999..d6ef6650243e 100644
--- a/test/CodeGen/thinlto-emit-llvm.c
+++ b/test/CodeGen/thinlto-emit-llvm.c
@@ -5,6 +5,6 @@
// RUN: %clang_cc1 -O2 -x ir %t.o -fthinlto-index=%t.thinlto.bc -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -O2 -x ir %t.o -fthinlto-index=%t.thinlto.bc -emit-llvm-bc -o - | llvm-dis -o - | FileCheck %s
-// CHECK: define void @foo()
+// CHECK: define{{.*}} void @foo()
void foo() {
}
diff --git a/test/CodeGen/ubsan-builtin-checks.c b/test/CodeGen/ubsan-builtin-checks.c
new file mode 100644
index 000000000000..9733b0e04794
--- /dev/null
+++ b/test/CodeGen/ubsan-builtin-checks.c
@@ -0,0 +1,44 @@
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -w -emit-llvm -o - %s -fsanitize=builtin | FileCheck %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -w -emit-llvm -o - %s -fsanitize=builtin | FileCheck %s --check-prefix=NOT-UB
+
+// NOT-UB-NOT: __ubsan_handle_invalid_builtin
+
+// CHECK: define void @check_ctz
+void check_ctz(int n) {
+ // CHECK: [[NOT_ZERO:%.*]] = icmp ne i32 [[N:%.*]], 0, !nosanitize
+ // CHECK-NEXT: br i1 [[NOT_ZERO]]
+ //
+ // Handler block:
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ // CHECK-NEXT: unreachable
+ //
+ // Continuation block:
+ // CHECK: call i32 @llvm.cttz.i32(i32 [[N]], i1 true)
+ __builtin_ctz(n);
+
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ __builtin_ctzl(n);
+
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ __builtin_ctzll(n);
+}
+
+// CHECK: define void @check_clz
+void check_clz(int n) {
+ // CHECK: [[NOT_ZERO:%.*]] = icmp ne i32 [[N:%.*]], 0, !nosanitize
+ // CHECK-NEXT: br i1 [[NOT_ZERO]]
+ //
+ // Handler block:
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ // CHECK-NEXT: unreachable
+ //
+ // Continuation block:
+ // CHECK: call i32 @llvm.ctlz.i32(i32 [[N]], i1 true)
+ __builtin_clz(n);
+
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ __builtin_clzl(n);
+
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ __builtin_clzll(n);
+}
diff --git a/test/CodeGen/ubsan-pass-object-size.c b/test/CodeGen/ubsan-pass-object-size.c
new file mode 100644
index 000000000000..d5d4f5a9e421
--- /dev/null
+++ b/test/CodeGen/ubsan-pass-object-size.c
@@ -0,0 +1,68 @@
+// RUN: %clang_cc1 %s -emit-llvm -w -triple x86_64-apple-darwin10 -fsanitize=array-bounds -o - | FileCheck %s
+
+// CHECK-LABEL: define i32 @foo(
+int foo(int *const p __attribute__((pass_object_size(0))), int n) {
+ int x = (p)[n];
+
+ // CHECK: [[SIZE_ALLOCA:%.*]] = alloca i64, align 8
+ // CHECK: store i64 %{{.*}}, i64* [[SIZE_ALLOCA]], align 8
+ // CHECK: [[LOAD_SIZE:%.*]] = load i64, i64* [[SIZE_ALLOCA]], align 8, !nosanitize
+ // CHECK-NEXT: [[SCALED_SIZE:%.*]] = udiv i64 [[LOAD_SIZE]], 4, !nosanitize
+ // CHECK-NEXT: [[SEXT_N:%.*]] = sext i32 %{{.*}} to i64, !nosanitize
+ // CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[SEXT_N]], [[SCALED_SIZE]], !nosanitize
+ // CHECK-NEXT: br i1 [[ICMP]], {{.*}} !nosanitize
+ // CHECK: __ubsan_handle_out_of_bounds
+
+ {
+ int **p = &p; // Shadow the parameter. The pass_object_size info is lost.
+ // CHECK-NOT: __ubsan_handle_out_of_bounds
+ x = *p[n];
+ }
+
+ // CHECK: ret i32
+ return x;
+}
+
+typedef struct {} ZeroSizedType;
+
+// CHECK-LABEL: define void @bar(
+ZeroSizedType bar(ZeroSizedType *const p __attribute__((pass_object_size(0))), int n) {
+ // CHECK-NOT: __ubsan_handle_out_of_bounds
+ // CHECK: ret void
+ return p[n];
+}
+
+// CHECK-LABEL: define i32 @baz(
+int baz(int *const p __attribute__((pass_object_size(1))), int n) {
+ // CHECK: __ubsan_handle_out_of_bounds
+ // CHECK: ret i32
+ return p[n];
+}
+
+// CHECK-LABEL: define i32 @mat(
+int mat(int *const p __attribute__((pass_object_size(2))), int n) {
+ // CHECK-NOT: __ubsan_handle_out_of_bounds
+ // CHECK: ret i32
+ return p[n];
+}
+
+// CHECK-LABEL: define i32 @pat(
+int pat(int *const p __attribute__((pass_object_size(3))), int n) {
+ // CHECK-NOT: __ubsan_handle_out_of_bounds
+ // CHECK: ret i32
+ return p[n];
+}
+
+// CHECK-LABEL: define i32 @cat(
+int cat(int p[static 10], int n) {
+ // CHECK-NOT: __ubsan_handle_out_of_bounds
+ // CHECK: ret i32
+ return p[n];
+}
+
+// CHECK-LABEL: define i32 @bat(
+int bat(int n, int p[n]) {
+ // CHECK-NOT: __ubsan_handle_out_of_bounds
+ // CHECK: ret i32
+ return p[n];
+}
diff --git a/test/CodeGen/unsigned-overflow-minimal.c b/test/CodeGen/unsigned-overflow-minimal.c
new file mode 100644
index 000000000000..d4b89664f840
--- /dev/null
+++ b/test/CodeGen/unsigned-overflow-minimal.c
@@ -0,0 +1,21 @@
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=unsigned-integer-overflow -fsanitize-minimal-runtime %s -emit-llvm -o - | FileCheck %s
+
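+// With the minimal runtime, each overflow check calls the corresponding
+// *_minimal_abort handler rather than the full diagnostic handler.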
+unsigned long li, lj, lk;
+
+// CHECK-LABEL: define void @testlongadd()
+void testlongadd() {
+ // CHECK: call void @__ubsan_handle_add_overflow_minimal_abort()
+ li = lj + lk;
+}
+
+// CHECK-LABEL: define void @testlongsub()
+void testlongsub() {
+ // CHECK: call void @__ubsan_handle_sub_overflow_minimal_abort()
+ li = lj - lk;
+}
+
+// CHECK-LABEL: define void @testlongmul()
+void testlongmul() {
+ // CHECK: call void @__ubsan_handle_mul_overflow_minimal_abort()
+ li = lj * lk;
+}
diff --git a/test/CodeGen/verify-debuginfo.ll b/test/CodeGen/verify-debuginfo.ll
new file mode 100644
index 000000000000..0a1858544f5c
--- /dev/null
+++ b/test/CodeGen/verify-debuginfo.ll
@@ -0,0 +1,17 @@
+; REQUIRES: x86-registered-target
+; RUN: %clang_cc1 -triple i386-apple-darwin -disable-llvm-optzns -S %s -o - 2>&1 \
+; RUN: | FileCheck %s
+; CHECK: invalid global variable ref
+; CHECK: warning: ignoring invalid debug info in {{.*}}.ll
+
+@global = common global i32 0, align 4, !dbg !2
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!5, !6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "adrian", emissionKind: FullDebug, globals: !{!3})
+!1 = !DIFile(filename: "broken.c", directory: "/")
+!2 = !DIGlobalVariableExpression(var: !3, expr: !DIExpression())
+!3 = !DIGlobalVariable(name: "g", scope: !0, file: !1, line: 1, type: !1, isLocal: false, isDefinition: true)
+!5 = !{i32 2, !"Dwarf Version", i32 4}
+!6 = !{i32 1, !"Debug Info Version", i32 3}
diff --git a/test/CodeGen/wchar-size.c b/test/CodeGen/wchar-size.c
index 38da462d98cb..28cd5d14d77a 100644
--- a/test/CodeGen/wchar-size.c
+++ b/test/CodeGen/wchar-size.c
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s -check-prefix=LONG-WCHAR
// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -emit-llvm -o - %s | FileCheck %s -check-prefix=SHORT-WCHAR
-// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -o - -fshort-wchar %s | FileCheck %s -check-prefix=SHORT-WCHAR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -o - -fwchar-type=short -fno-signed-wchar %s | FileCheck %s -check-prefix=SHORT-WCHAR
// Note: -fno-short-wchar implies the target default is used, so there is no
// need to test this separately here.
diff --git a/test/CodeGen/x86-GCC-inline-asm-Y-constraints.c b/test/CodeGen/x86-GCC-inline-asm-Y-constraints.c
new file mode 100644
index 000000000000..0e1e69cd2459
--- /dev/null
+++ b/test/CodeGen/x86-GCC-inline-asm-Y-constraints.c
@@ -0,0 +1,68 @@
+// RUN: %clang_cc1 -ffreestanding -triple=x86_64-apple-darwin -target-cpu skx %s -emit-llvm -o - | FileCheck %s
+#include <xmmintrin.h>
+// This test is complemented by the .ll test under llvm/test/MC/X86/.
+// At this level we can only check whether the constraints are passed correctly
+// from inline asm to llvm IR.
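+// Each "Y<x>" constraint should reach the IR as the two-character constraint
+// string "^Y<x>", as the CHECK-SAME lines below verify.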
+
+// CHECK-LABEL: @f_Ym
+void f_Ym(__m64 m)
+ {
+ // CHECK: movq $0, %mm1
+ // CHECK-SAME: "=^Ym,~{dirflag},~{fpsr},~{flags}"
+ __asm__ volatile ("movq %0, %%mm1\n\t"
+ :"=Ym" (m));
+}
+
+// CHECK-LABEL: f_Yi
+void f_Yi(__m128 x, __m128 y, __m128 z)
+ {
+ // CHECK: vpaddq
+ // CHECK-SAME: "=^Yi,^Yi,^Yi,~{dirflag},~{fpsr},~{flags}"
+ __asm__ volatile ("vpaddq %0, %1, %2\n\t"
+ :"=Yi" (x)
+ :"Yi" (y),"Yi"(z));
+}
+
+// CHECK-LABEL: f_Yt
+void f_Yt(__m128 x, __m128 y, __m128 z)
+ {
+ // CHECK: vpaddq
+ // CHECK-SAME: "=^Yt,^Yt,^Yt,~{dirflag},~{fpsr},~{flags}"
+ __asm__ volatile ("vpaddq %0, %1, %2\n\t"
+ :"=Yt" (x)
+ :"Yt" (y),"Yt"(z));
+}
+
+// CHECK-LABEL: f_Y2
+void f_Y2(__m128 x, __m128 y, __m128 z)
+ {
+ // CHECK: vpaddq
+ // CHECK-SAME: "=^Y2,^Y2,^Y2,~{dirflag},~{fpsr},~{flags}"
+ __asm__ volatile ("vpaddq %0, %1, %2\n\t"
+ :"=Y2" (x)
+ :"Y2" (y),"Y2"(z));
+}
+
+// CHECK-LABEL: f_Yz
+void f_Yz(__m128 x, __m128 y, __m128 z)
+ {
+ // CHECK: vpaddq
+ // CHECK-SAME: vpaddq
+ // CHECK-SAME: "=^Yi,=^Yz,^Yi,0,~{dirflag},~{fpsr},~{flags}"
+ __asm__ volatile ("vpaddq %0,%2,%1\n\t"
+ "vpaddq %1,%0,%2\n\t"
+ :"+Yi"(z),"=Yz" (x)
+ :"Yi" (y) );
+}
+
+// CHECK-LABEL: f_Y0
+void f_Y0(__m128 x, __m128 y, __m128 z)
+ {
+ // CHECK: vpaddq
+ // CHECK-SAME: "=^Yi,=^Y0,^Yi,0,~{dirflag},~{fpsr},~{flags}"
+ __asm__ volatile ("vpaddq %0,%2,%1\n\t"
+ "vpaddq %1,%0,%2\n\t"
+ :"+Yi"(z),"=Y0" (x)
+ :"Yi" (y) );
+}
+
diff --git a/test/CodeGen/x86_32-xsave.c b/test/CodeGen/x86_32-xsave.c
index b475d63fb357..f5d84e2d920a 100644
--- a/test/CodeGen/x86_32-xsave.c
+++ b/test/CodeGen/x86_32-xsave.c
@@ -15,57 +15,57 @@ void test() {
void* tmp_vp = 0;
#ifdef TEST_XSAVE
-// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
-// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
(void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVEOPT
-// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVEC
-// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVES
-// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
-// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
(void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
#endif
diff --git a/test/CodeGen/x86_64-instrument-functions.c b/test/CodeGen/x86_64-instrument-functions.c
new file mode 100644
index 000000000000..686d9aa14ca7
--- /dev/null
+++ b/test/CodeGen/x86_64-instrument-functions.c
@@ -0,0 +1,38 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -S -finstrument-functions -O2 -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -S -finstrument-functions-after-inlining -O2 -o - %s | FileCheck -check-prefix=NOINLINE %s
+
+// It's not so nice having asm tests in Clang, but we need to check that we set
+// up the pipeline correctly in order to have the instrumentation inserted.
+
+int leaf(int x) {
+ return x;
+// CHECK-LABEL: leaf:
+// CHECK: callq __cyg_profile_func_enter
+// CHECK-NOT: cyg_profile
+// CHECK: callq __cyg_profile_func_exit
+// CHECK-NOT: cyg_profile
+// CHECK: ret
+}
+
+int root(int x) {
+ return leaf(x);
+// CHECK-LABEL: root:
+// CHECK: callq __cyg_profile_func_enter
+// CHECK-NOT: cyg_profile
+
+// Inlined from leaf():
+// CHECK: callq __cyg_profile_func_enter
+// CHECK-NOT: cyg_profile
+// CHECK: callq __cyg_profile_func_exit
+
+// CHECK-NOT: cyg_profile
+// CHECK: callq __cyg_profile_func_exit
+// CHECK: ret
+
+// NOINLINE-LABEL: root:
+// NOINLINE: callq __cyg_profile_func_enter
+// NOINLINE-NOT: cyg_profile
+// NOINLINE: callq __cyg_profile_func_exit
+// NOINLINE: ret
+}
diff --git a/test/CodeGen/x86_64-xsave.c b/test/CodeGen/x86_64-xsave.c
index 496e982b99c5..beb775c0e47f 100644
--- a/test/CodeGen/x86_64-xsave.c
+++ b/test/CodeGen/x86_64-xsave.c
@@ -15,105 +15,105 @@ void test() {
void* tmp_vp = 0;
#ifdef TEST_XSAVE
-// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
-// XSAVE: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVE: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVE: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVE: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVE: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVE: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVE: call void @llvm.x86.xsave64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
(void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);
-// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
(void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
-// XSAVE: [[tmp_vp_4:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
-// XSAVE: [[high32_4:%[0-9a-zA-z]+]] = trunc i64 [[high64_4]] to i32
-// XSAVE: [[low32_4:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVE: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVE: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVE: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
// XSAVE: call void @llvm.x86.xrstor64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
(void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVEOPT
-// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
-// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVEOPT: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVEOPT: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEOPT: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEOPT: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
(void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVEC
-// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
-// XSAVEC: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVEC: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVEC: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEC: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEC: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEC: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVEC: call void @llvm.x86.xsavec64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
(void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);
#endif
#ifdef TEST_XSAVES
-// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
(void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
-// XSAVES: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVES: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVES: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVES: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVES: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVES: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
// XSAVES: call void @llvm.x86.xsaves64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
(void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);
-// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
(void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
-// XSAVES: [[tmp_vp_4:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
-// XSAVES: [[high32_4:%[0-9a-zA-z]+]] = trunc i64 [[high64_4]] to i32
-// XSAVES: [[low32_4:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVES: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVES: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVES: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
// XSAVES: call void @llvm.x86.xrstors64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
(void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);
#endif
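Each five-line CHECK group in this file verifies the same lowering: the 64-bit save mask passed to the builtin is split into 32-bit halves (the EDX:EAX pair the XSAVE family of instructions expects) before the call to the LLVM intrinsic. A hedged C rendering of that split, with a hypothetical helper name:

    #include <stdint.h>

    /* Mirrors the IR checked above: the mask is split into the high and
       low 32-bit halves that feed EDX and EAX respectively. */
    static void split_xsave_mask(uint64_t mask, uint32_t *hi, uint32_t *lo) {
      *hi = (uint32_t)(mask >> 32); /* lshr + trunc -> EDX */
      *lo = (uint32_t)mask;         /* trunc        -> EAX */
    }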
diff --git a/test/CodeGen/xray-always-emit-customevent.cpp b/test/CodeGen/xray-always-emit-customevent.cpp
new file mode 100644
index 000000000000..8ac22f2a1bca
--- /dev/null
+++ b/test/CodeGen/xray-always-emit-customevent.cpp
@@ -0,0 +1,10 @@
+// RUN: %clang_cc1 -fxray-instrument -fxray-always-emit-customevents -x c++ \
+// RUN: -std=c++11 -triple x86_64-unknown-unknown -emit-llvm -o - %s \
+// RUN: | FileCheck %s
+
+// CHECK-LABEL: @_Z15neverInstrumentv
+[[clang::xray_never_instrument]] void neverInstrument() {
+ static constexpr char kPhase[] = "never";
+ __xray_customevent(kPhase, 5);
+ // CHECK: call void @llvm.xray.customevent(i8*{{.*}}, i32 5)
+}
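For the complementary case: without -fxray-always-emit-customevents, Clang drops __xray_customevent calls inside functions that are not XRay-instrumented, such as the never-instrument function above. A function marked always-instrument keeps the event regardless of the flag; the sketch below uses the GNU attribute spelling and a hypothetical function name, and assumes it is compiled with -fxray-instrument:

    /* Hypothetical counterpart to neverInstrument(): the custom event
       here is emitted even without -fxray-always-emit-customevents. */
    __attribute__((xray_always_instrument)) void alwaysInstrument(void) {
      static const char kPhase[] = "always";
      __xray_customevent(kPhase, 6);
    }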