Diffstat (limited to 'test/CodeGen/aarch64-neon-across.c')
-rw-r--r--  test/CodeGen/aarch64-neon-across.c  112
1 file changed, 28 insertions, 84 deletions
diff --git a/test/CodeGen/aarch64-neon-across.c b/test/CodeGen/aarch64-neon-across.c
index 04a7b26e8a27..6d7a0d5bcde4 100644
--- a/test/CodeGen/aarch64-neon-across.c
+++ b/test/CodeGen/aarch64-neon-across.c
@@ -14,9 +14,7 @@ int16_t test_vaddlv_s8(int8x8_t a) {
}
// CHECK-LABEL: define i32 @test_vaddlv_s16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a) #2
// CHECK: ret i32 [[VADDLV_I]]
int32_t test_vaddlv_s16(int16x4_t a) {
return vaddlv_s16(a);
@@ -31,9 +29,7 @@ uint16_t test_vaddlv_u8(uint8x8_t a) {
}
// CHECK-LABEL: define i32 @test_vaddlv_u16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a) #2
// CHECK: ret i32 [[VADDLV_I]]
uint32_t test_vaddlv_u16(uint16x4_t a) {
return vaddlv_u16(a);
@@ -48,18 +44,14 @@ int16_t test_vaddlvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define i32 @test_vaddlvq_s16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a) #2
// CHECK: ret i32 [[VADDLV_I]]
int32_t test_vaddlvq_s16(int16x8_t a) {
return vaddlvq_s16(a);
}
// CHECK-LABEL: define i64 @test_vaddlvq_s32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a) #2
// CHECK: ret i64 [[VADDLVQ_S32_I]]
int64_t test_vaddlvq_s32(int32x4_t a) {
return vaddlvq_s32(a);
@@ -74,18 +66,14 @@ uint16_t test_vaddlvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define i32 @test_vaddlvq_u16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a) #2
// CHECK: ret i32 [[VADDLV_I]]
uint32_t test_vaddlvq_u16(uint16x8_t a) {
return vaddlvq_u16(a);
}
// CHECK-LABEL: define i64 @test_vaddlvq_u32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a) #2
// CHECK: ret i64 [[VADDLVQ_U32_I]]
uint64_t test_vaddlvq_u32(uint32x4_t a) {
return vaddlvq_u32(a);
@@ -100,9 +88,7 @@ int8_t test_vmaxv_s8(int8x8_t a) {
}
// CHECK-LABEL: define i16 @test_vmaxv_s16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
// CHECK: ret i16 [[TMP2]]
int16_t test_vmaxv_s16(int16x4_t a) {
@@ -118,9 +104,7 @@ uint8_t test_vmaxv_u8(uint8x8_t a) {
}
// CHECK-LABEL: define i16 @test_vmaxv_u16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vmaxv_u16(uint16x4_t a) {
@@ -136,9 +120,7 @@ int8_t test_vmaxvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define i16 @test_vmaxvq_s16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
// CHECK: ret i16 [[TMP2]]
int16_t test_vmaxvq_s16(int16x8_t a) {
@@ -146,9 +128,7 @@ int16_t test_vmaxvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define i32 @test_vmaxvq_s32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a) #2
// CHECK: ret i32 [[VMAXVQ_S32_I]]
int32_t test_vmaxvq_s32(int32x4_t a) {
return vmaxvq_s32(a);
@@ -163,9 +143,7 @@ uint8_t test_vmaxvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define i16 @test_vmaxvq_u16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vmaxvq_u16(uint16x8_t a) {
@@ -173,9 +151,7 @@ uint16_t test_vmaxvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define i32 @test_vmaxvq_u32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a) #2
// CHECK: ret i32 [[VMAXVQ_U32_I]]
uint32_t test_vmaxvq_u32(uint32x4_t a) {
return vmaxvq_u32(a);
@@ -190,9 +166,7 @@ int8_t test_vminv_s8(int8x8_t a) {
}
// CHECK-LABEL: define i16 @test_vminv_s16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
// CHECK: ret i16 [[TMP2]]
int16_t test_vminv_s16(int16x4_t a) {
@@ -208,9 +182,7 @@ uint8_t test_vminv_u8(uint8x8_t a) {
}
// CHECK-LABEL: define i16 @test_vminv_u16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vminv_u16(uint16x4_t a) {
@@ -226,9 +198,7 @@ int8_t test_vminvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define i16 @test_vminvq_s16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
// CHECK: ret i16 [[TMP2]]
int16_t test_vminvq_s16(int16x8_t a) {
@@ -236,9 +206,7 @@ int16_t test_vminvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define i32 @test_vminvq_s32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a) #2
// CHECK: ret i32 [[VMINVQ_S32_I]]
int32_t test_vminvq_s32(int32x4_t a) {
return vminvq_s32(a);
@@ -253,9 +221,7 @@ uint8_t test_vminvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define i16 @test_vminvq_u16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vminvq_u16(uint16x8_t a) {
@@ -263,9 +229,7 @@ uint16_t test_vminvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define i32 @test_vminvq_u32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a) #2
// CHECK: ret i32 [[VMINVQ_U32_I]]
uint32_t test_vminvq_u32(uint32x4_t a) {
return vminvq_u32(a);
@@ -280,9 +244,7 @@ int8_t test_vaddv_s8(int8x8_t a) {
}
// CHECK-LABEL: define i16 @test_vaddv_s16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
// CHECK: ret i16 [[TMP2]]
int16_t test_vaddv_s16(int16x4_t a) {
@@ -298,9 +260,7 @@ uint8_t test_vaddv_u8(uint8x8_t a) {
}
// CHECK-LABEL: define i16 @test_vaddv_u16(<4 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> [[TMP1]]) #2
+// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vaddv_u16(uint16x4_t a) {
@@ -316,9 +276,7 @@ int8_t test_vaddvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define i16 @test_vaddvq_s16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
// CHECK: ret i16 [[TMP2]]
int16_t test_vaddvq_s16(int16x8_t a) {
@@ -326,9 +284,7 @@ int16_t test_vaddvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define i32 @test_vaddvq_s32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a) #2
// CHECK: ret i32 [[VADDVQ_S32_I]]
int32_t test_vaddvq_s32(int32x4_t a) {
return vaddvq_s32(a);
@@ -343,9 +299,7 @@ uint8_t test_vaddvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define i16 @test_vaddvq_u16(<8 x i16> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> [[TMP1]]) #2
+// CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a) #2
// CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
// CHECK: ret i16 [[TMP2]]
uint16_t test_vaddvq_u16(uint16x8_t a) {
@@ -353,45 +307,35 @@ uint16_t test_vaddvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define i32 @test_vaddvq_u32(<4 x i32> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> [[TMP1]]) #2
+// CHECK: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a) #2
// CHECK: ret i32 [[VADDVQ_U32_I]]
uint32_t test_vaddvq_u32(uint32x4_t a) {
return vaddvq_u32(a);
}
// CHECK-LABEL: define float @test_vmaxvq_f32(<4 x float> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
-// CHECK: [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> [[TMP1]]) #2
+// CHECK: [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a) #2
// CHECK: ret float [[VMAXVQ_F32_I]]
float32_t test_vmaxvq_f32(float32x4_t a) {
return vmaxvq_f32(a);
}
// CHECK-LABEL: define float @test_vminvq_f32(<4 x float> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
-// CHECK: [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> [[TMP1]]) #2
+// CHECK: [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a) #2
// CHECK: ret float [[VMINVQ_F32_I]]
float32_t test_vminvq_f32(float32x4_t a) {
return vminvq_f32(a);
}
// CHECK-LABEL: define float @test_vmaxnmvq_f32(<4 x float> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
-// CHECK: [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> [[TMP1]]) #2
+// CHECK: [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a) #2
// CHECK: ret float [[VMAXNMVQ_F32_I]]
float32_t test_vmaxnmvq_f32(float32x4_t a) {
return vmaxnmvq_f32(a);
}
// CHECK-LABEL: define float @test_vminnmvq_f32(<4 x float> %a) #0 {
-// CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
-// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
-// CHECK: [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> [[TMP1]]) #2
+// CHECK: [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a) #2
// CHECK: ret float [[VMINNMVQ_F32_I]]
float32_t test_vminnmvq_f32(float32x4_t a) {
return vminnmvq_f32(a);