aboutsummaryrefslogtreecommitdiff
path: root/clang/lib/Headers/smmintrin.h
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2022-07-03 14:10:23 +0000
committerDimitry Andric <dim@FreeBSD.org>2022-07-03 14:10:23 +0000
commit145449b1e420787bb99721a429341fa6be3adfb6 (patch)
tree1d56ae694a6de602e348dd80165cf881a36600ed /clang/lib/Headers/smmintrin.h
parentecbca9f5fb7d7613d2b94982c4825eb0d33d6842 (diff)
Diffstat (limited to 'clang/lib/Headers/smmintrin.h')
-rw-r--r--clang/lib/Headers/smmintrin.h487
1 files changed, 232 insertions, 255 deletions
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 0df59c5fcc59..46fb7bcd4e09 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -17,23 +17,25 @@
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), \
+ __min_vector_width__(128)))
/* SSE4 Rounding macros. */
-#define _MM_FROUND_TO_NEAREST_INT 0x00
-#define _MM_FROUND_TO_NEG_INF 0x01
-#define _MM_FROUND_TO_POS_INF 0x02
-#define _MM_FROUND_TO_ZERO 0x03
-#define _MM_FROUND_CUR_DIRECTION 0x04
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
-#define _MM_FROUND_RAISE_EXC 0x00
-#define _MM_FROUND_NO_EXC 0x08
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
-#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
-#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
-#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
-#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
-#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
/// Rounds up each element of the 128-bit vector of [4 x float] to an
@@ -51,7 +53,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded up.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
/// Rounds up each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -68,7 +70,7 @@
/// \param X
/// A 128-bit vector of [2 x double] values to be rounded up.
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -93,7 +95,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -118,7 +120,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
/// Rounds down each element of the 128-bit vector of [4 x float] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -135,7 +137,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded down.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
/// Rounds down each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -152,7 +154,7 @@
/// \param X
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -177,7 +179,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -202,7 +204,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
/// Rounds each element of the 128-bit vector of [4 x float] to an
/// integer value according to the rounding control specified by the second
@@ -234,7 +236,7 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) \
+#define _mm_round_ps(X, M) \
((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
/// Copies three upper elements of the first 128-bit vector operand to
@@ -275,9 +277,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_round_ss(X, Y, M) \
- ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M)))
+#define _mm_round_ss(X, Y, M) \
+ ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (M)))
/// Rounds each element of the 128-bit vector of [2 x double] to an
/// integer value according to the rounding control specified by the second
@@ -309,7 +311,7 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) \
+#define _mm_round_pd(X, M) \
((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
/// Copies the upper element of the first 128-bit vector operand to the
@@ -350,9 +352,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_round_sd(X, Y, M) \
- ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M)))
+#define _mm_round_sd(X, Y, M) \
+ ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+ (M)))
/* SSE4 Packed Blending Intrinsics. */
/// Returns a 128-bit vector of [2 x double] where the values are
@@ -379,9 +381,9 @@
/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) \
- ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
- (__v2df)(__m128d)(V2), (int)(M)))
+#define _mm_blend_pd(V1, V2, M) \
+ ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), (int)(M)))
/// Returns a 128-bit vector of [4 x float] where the values are selected
/// from either the first or second operand as specified by the third
@@ -407,9 +409,9 @@
/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) \
- ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
- (__v4sf)(__m128)(V2), (int)(M)))
+#define _mm_blend_ps(V1, V2, M) \
+ ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
+ (int)(M)))
/// Returns a 128-bit vector of [2 x double] where the values are
/// selected from either the first or second operand as specified by the
@@ -431,11 +433,11 @@
/// position in the result. When a mask bit is 1, the corresponding 64-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
-{
- return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
- (__v2df)__M);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
+ __m128d __V2,
+ __m128d __M) {
+ return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
}
/// Returns a 128-bit vector of [4 x float] where the values are
@@ -458,11 +460,11 @@ _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
/// position in the result. When a mask bit is 1, the corresponding 32-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
-{
- return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
- (__v4sf)__M);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
+ __m128 __V2,
+ __m128 __M) {
+ return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
}
/// Returns a 128-bit vector of [16 x i8] where the values are selected
@@ -485,11 +487,11 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
/// position in the result. When a mask bit is 1, the corresponding 8-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
-{
- return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
- (__v16qi)__M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
+ __m128i __V2,
+ __m128i __M) {
+ return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
}
/// Returns a 128-bit vector of [8 x i16] where the values are selected
@@ -516,9 +518,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) \
- ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
- (__v8hi)(__m128i)(V2), (int)(M)))
+#define _mm_blend_epi16(V1, V2, M) \
+ ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), (int)(M)))
/* SSE4 Dword Multiply Instructions. */
/// Multiplies corresponding elements of two 128-bit vectors of [4 x i32]
@@ -534,10 +536,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)((__v4su)__V1 * (__v4su)__V2);
}
/// Multiplies corresponding even-indexed elements of two 128-bit
@@ -554,10 +555,9 @@ _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [2 x i64] containing the products of both
/// operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Floating Point Dot Product Instructions. */
@@ -593,9 +593,8 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// each [4 x float] subvector. If a bit is set, the dot product is returned
/// in the corresponding element; otherwise that element is set to zero.
/// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) \
- ((__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M)))
+#define _mm_dp_ps(X, Y, M) \
+ ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
/// Computes the dot product of the two 128-bit vectors of [2 x double]
/// and returns it in the elements of the 128-bit result vector of
@@ -628,9 +627,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// to the lowest element and bit [1] corresponding to the highest element of
/// each [2 x double] vector. If a bit is set, the dot product is returned in
/// the corresponding element; otherwise that element is set to zero.
-#define _mm_dp_pd(X, Y, M) \
- ((__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M)))
+#define _mm_dp_pd(X, Y, M) \
+ ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+ (M)))
/* SSE4 Streaming Load Hint Instruction. */
/// Loads integer values from a 128-bit aligned memory location to a
@@ -645,10 +644,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// values.
/// \returns A 128-bit integer vector containing the data stored at the
/// specified memory location.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i const *__V)
-{
- return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128(__m128i const *__V) {
+ return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
@@ -665,10 +663,9 @@ _mm_stream_load_si128 (__m128i const *__V)
/// \param __V2
/// A 128-bit vector of [16 x i8]
/// \returns A 128-bit vector of [16 x i8] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi8 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v16qs) __V1, (__v16qs) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -684,10 +681,9 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit vector of [16 x i8] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi8 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v16qs) __V1, (__v16qs) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -703,10 +699,9 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu16 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v8hu) __V1, (__v8hu) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -722,10 +717,9 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu16 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v8hu) __V1, (__v8hu) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -741,10 +735,9 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -760,10 +753,9 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -779,10 +771,9 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v4su) __V1, (__v4su) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -798,10 +789,9 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v4su) __V1, (__v4su) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
@@ -869,21 +859,24 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of parameter \a X are returned. \n
/// 11: Bits [127:96] of parameter \a X are returned.
/// \returns A 32-bit integer containing the extracted 32 bits of float data.
-#define _mm_extract_ps(X, N) \
- __builtin_bit_cast(int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
+#define _mm_extract_ps(X, N) \
+ __builtin_bit_cast( \
+ int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
-#define _MM_EXTRACT_FLOAT(D, X, N) \
- do { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); } while (0)
+#define _MM_EXTRACT_FLOAT(D, X, N) \
+ do { \
+ (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \
+ } while (0)
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
/* Extract a float from X at index N into the first index of the return. */
-#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
- _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+#define _MM_PICK_OUT_PS(X, N) \
+ _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
/* Insert int into packed integer array at index. */
/// Constructs a 128-bit vector of [16 x i8] by first making a copy of
@@ -926,9 +919,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 1110: Bits [119:112] of the result are used for insertion. \n
/// 1111: Bits [127:120] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) \
- ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
- (int)(I), (int)(N)))
+#define _mm_insert_epi8(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I), \
+ (int)(N)))
/// Constructs a 128-bit vector of [4 x i32] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 32-bit
@@ -958,9 +951,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of the result are used for insertion. \n
/// 11: Bits [127:96] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) \
- ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
- (int)(I), (int)(N)))
+#define _mm_insert_epi32(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I), \
+ (int)(N)))
#ifdef __x86_64__
/// Constructs a 128-bit vector of [2 x i64] by first making a copy of
@@ -989,9 +982,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] of the result are used for insertion. \n
/// 1: Bits [127:64] of the result are used for insertion. \n
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) \
- ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
- (long long)(I), (int)(N)))
+#define _mm_insert_epi64(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I), \
+ (int)(N)))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
@@ -1032,8 +1025,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \returns An unsigned integer, whose lower 8 bits are selected from the
/// 128-bit integer vector parameter and the remaining bits are assigned
/// zeros.
-#define _mm_extract_epi8(X, N) \
- ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+#define _mm_extract_epi8(X, N) \
+ ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
(int)(N)))
/// Extracts a 32-bit element from the 128-bit integer vector of
@@ -1058,10 +1051,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Bits [127:96] of the parameter \a X are extracted.
/// \returns An integer, whose lower 32 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) \
+#define _mm_extract_epi32(X, N) \
((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
-#ifdef __x86_64__
/// Extracts a 64-bit element from the 128-bit integer vector of
/// [2 x i64], using the immediate value parameter \a N as a selector.
///
@@ -1071,7 +1063,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// long long _mm_extract_epi64(__m128i X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction
+/// in 64-bit mode.
///
/// \param X
/// A 128-bit integer vector.
@@ -1081,9 +1074,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] are returned. \n
/// 1: Bits [127:64] are returned. \n
/// \returns A 64-bit integer.
-#define _mm_extract_epi64(X, N) \
+#define _mm_extract_epi64(X, N) \
((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
-#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
/// Tests whether the specified bits in a 128-bit integer vector are all
@@ -1098,9 +1090,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testz_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
@@ -1116,9 +1107,8 @@ _mm_testz_si128(__m128i __M, __m128i __V)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all ones; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
@@ -1135,9 +1125,8 @@ _mm_testc_si128(__m128i __M, __m128i __V)
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are neither all zeros nor all ones;
/// FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testnzc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}
@@ -1193,7 +1182,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param V
/// A 128-bit integer vector selecting which bits to test in operand \a M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
/// Compares each of the corresponding 64-bit values of the 128-bit
@@ -1208,9 +1197,8 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
+ __m128i __V2) {
return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
@@ -1225,15 +1213,16 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
/// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
-/// extended to 16-bit values.
+/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
+/// sign-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi16(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
+ 7),
+ __v8hi);
}
/// Sign-extends each of the lower four 8-bit integer elements of a
@@ -1249,12 +1238,11 @@ _mm_cvtepi8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi32(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 8-bit integer elements of a
@@ -1270,12 +1258,11 @@ _mm_cvtepi8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi64(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower four 16-bit integer elements of a
@@ -1291,10 +1278,9 @@ _mm_cvtepi8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 16-bit integer elements of a
@@ -1310,10 +1296,9 @@ _mm_cvtepi16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower two 32-bit integer elements of a
@@ -1329,10 +1314,9 @@ _mm_cvtepi16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi32_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}
/* SSE4 Packed Integer Zero-Extension. */
@@ -1349,10 +1333,11 @@ _mm_cvtepi32_epi64(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
/// zero-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi16(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
+ 7),
+ __v8hi);
}
/// Zero-extends each of the lower four 8-bit integer elements of a
@@ -1368,10 +1353,9 @@ _mm_cvtepu8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 8-bit integer elements of a
@@ -1387,10 +1371,9 @@ _mm_cvtepu8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower four 16-bit integer elements of a
@@ -1406,10 +1389,9 @@ _mm_cvtepu8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 16-bit integer elements of a
@@ -1425,10 +1407,9 @@ _mm_cvtepu16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower two 32-bit integer elements of a
@@ -1444,10 +1425,9 @@ _mm_cvtepu16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu32_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
}
/* SSE4 Pack with Unsigned Saturation. */
@@ -1473,10 +1453,9 @@ _mm_cvtepu32_epi64(__m128i __V)
/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi32(__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
@@ -1515,9 +1494,9 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \endcode
/// \returns A 128-bit integer vector containing the sums of the sets of
/// absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) \
- ((__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
- (__v16qi)(__m128i)(Y), (M)))
+#define _mm_mpsadbw_epu8(X, Y, M) \
+ ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)))
/// Finds the minimum unsigned 16-bit element in the input 128-bit
/// vector of [8 x u16] and returns it and along with its index.
@@ -1532,10 +1511,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_minpos_epu16(__m128i __V)
-{
- return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+ return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
/* Handle the sse4.2 definitions here. */
@@ -1544,33 +1521,34 @@ _mm_minpos_epu16(__m128i __V)
so we'll do the same. */
#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
/* These specify the type of data that we're comparing. */
-#define _SIDD_UBYTE_OPS 0x00
-#define _SIDD_UWORD_OPS 0x01
-#define _SIDD_SBYTE_OPS 0x02
-#define _SIDD_SWORD_OPS 0x03
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
/* These specify the type of comparison operation. */
-#define _SIDD_CMP_EQUAL_ANY 0x00
-#define _SIDD_CMP_RANGES 0x04
-#define _SIDD_CMP_EQUAL_EACH 0x08
-#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
/* These macros specify the polarity of the operation. */
-#define _SIDD_POSITIVE_POLARITY 0x00
-#define _SIDD_NEGATIVE_POLARITY 0x10
-#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
-#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
/* These macros are used in _mm_cmpXstri() to specify the return. */
-#define _SIDD_LEAST_SIGNIFICANT 0x00
-#define _SIDD_MOST_SIGNIFICANT 0x40
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
/* These macros are used in _mm_cmpXstri() to specify the return. */
-#define _SIDD_BIT_MASK 0x00
-#define _SIDD_UNIT_MASK 0x40
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1625,8 +1603,8 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times).
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
-#define _mm_cmpistrm(A, B, M) \
- ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrm(A, B, M) \
+ ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1679,9 +1657,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpistri(A, B, M) \
- ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M)))
+#define _mm_cmpistri(A, B, M) \
+ ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -1739,9 +1717,9 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times). \n
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
-#define _mm_cmpestrm(A, LA, B, LB, M) \
- ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1798,9 +1776,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpestri(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
@@ -1850,8 +1828,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpistra(A, B, M) \
- ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistra(A, B, M) \
+ ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1899,8 +1877,8 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.
-#define _mm_cmpistrc(A, B, M) \
- ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrc(A, B, M) \
+ ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1947,8 +1925,8 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpistro(A, B, M) \
- ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistro(A, B, M) \
+ ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1997,8 +1975,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpistrs(A, B, M) \
- ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrs(A, B, M) \
+ ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2047,8 +2025,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpistrz(A, B, M) \
- ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrz(A, B, M) \
+ ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2101,9 +2079,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum, otherwise, returns 0.
-#define _mm_cmpestra(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2155,9 +2133,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.
-#define _mm_cmpestrc(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2208,9 +2186,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpestro(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2263,9 +2241,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpestrs(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2317,9 +2295,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpestrz(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/* SSE4.2 Compare Packed Data -- Greater Than. */
@@ -2336,9 +2314,8 @@ _mm_minpos_epu16(__m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
+ __m128i __V2) {
return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}