author    Dimitry Andric <dim@FreeBSD.org>  2015-01-18 16:23:48 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2015-01-18 16:23:48 +0000
commit    06d4ba388873e6d1cfa9cd715a8935ecc8cd2097 (patch)
tree      3eb853da77d46cc77c4b017525a422f9ddb1385b /lib/Headers/altivec.h
parent    30d791273d07fac9c0c1641a0731191bca6e8606 (diff)
Diffstat (limited to 'lib/Headers/altivec.h')
-rw-r--r--  lib/Headers/altivec.h | 411
1 file changed, 396 insertions(+), 15 deletions(-)
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index f9fc64af3e74..0ac0841ae482 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -1623,6 +1623,21 @@ vec_vctuxs(vector float __a, int __b)
return __builtin_altivec_vctuxs(__a, __b);
}
+/* vec_div */
+#ifdef __VSX__
+static vector float __ATTRS_o_ai
+vec_div(vector float __a, vector float __b)
+{
+ return __builtin_vsx_xvdivsp(__a, __b);
+}
+
+static vector double __ATTRS_o_ai
+vec_div(vector double __a, vector double __b)
+{
+ return __builtin_vsx_xvdivdp(__a, __b);
+}
+#endif
+
/* vec_dss */
static void __attribute__((__always_inline__))
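
A minimal usage sketch for the new vec_div overloads (illustrative, not part of the commit; the helper names are hypothetical and a VSX-enabled build such as clang -maltivec -mvsx is assumed):

#include <altivec.h>

/* Hypothetical helpers exercising the new overloads. */
vector float div_f(vector float a, vector float b)
{
  return vec_div(a, b);   /* lowers to xvdivsp under VSX */
}

vector double div_d(vector double a, vector double b)
{
  return vec_div(a, b);   /* lowers to xvdivdp under VSX */
}
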
@@ -2253,91 +2268,273 @@ vec_vlogefp(vector float __a)
/* vec_lvsl */
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const signed char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const signed char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const float *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const float *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
/* vec_lvsr */
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const signed char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const signed char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const float *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const float *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
/* vec_madd */
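
The deprecations above target the classic lvsl/lvsr unaligned-load idiom, which bakes big-endian element numbering into the permute mask. A minimal sketch of that idiom next to the replacement the warning suggests (illustrative only; the function names are hypothetical):

#include <altivec.h>

/* Classic big-endian idiom: two aligned loads stitched together with a
   permute mask derived from the address. */
vector unsigned char load_unaligned_lvsl(const unsigned char *p)
{
  vector unsigned char lo   = vec_ld(0, p);
  vector unsigned char hi   = vec_ld(15, p);
  vector unsigned char mask = vec_lvsl(0, p);
  return vec_perm(lo, hi, mask);
}

/* What the deprecation message recommends on little endian: a plain
   assignment, letting the compiler emit the unaligned load itself. */
vector unsigned char load_unaligned_assign(const unsigned char *p)
{
  return *(const vector unsigned char *)p;
}
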
@@ -2485,8 +2682,20 @@ vec_max(vector unsigned int __a, vector bool int __b)
static vector float __ATTRS_o_ai
vec_max(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
return __builtin_altivec_vmaxfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_max(vector double __a, vector double __b)
+{
+ return __builtin_vsx_xvmaxdp(__a, __b);
}
+#endif
/* vec_vmaxsb */
@@ -2613,7 +2822,11 @@ vec_vmaxuw(vector unsigned int __a, vector bool int __b)
static vector float __attribute__((__always_inline__))
vec_vmaxfp(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvmaxsp(__a, __b);
+#else
return __builtin_altivec_vmaxfp(__a, __b);
+#endif
}
/* vec_mergeh */
@@ -3117,9 +3330,21 @@ vec_min(vector unsigned int __a, vector bool int __b)
static vector float __ATTRS_o_ai
vec_min(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
return __builtin_altivec_vminfp(__a, __b);
+#endif
}
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_min(vector double __a, vector double __b)
+{
+ return __builtin_vsx_xvmindp(__a, __b);
+}
+#endif
+
/* vec_vminsb */
static vector signed char __ATTRS_o_ai
@@ -3245,7 +3470,11 @@ vec_vminuw(vector unsigned int __a, vector bool int __b)
static vector float __attribute__((__always_inline__))
vec_vminfp(vector float __a, vector float __b)
{
+#ifdef __VSX__
+ return __builtin_vsx_xvminsp(__a, __b);
+#else
return __builtin_altivec_vminfp(__a, __b);
+#endif
}
/* vec_mladd */
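
Under VSX the float min/max now lower to xvminsp/xvmaxsp, and vector double overloads become available. A minimal per-lane clamp sketch (hypothetical function, assuming -mvsx):

#include <altivec.h>

#ifdef __VSX__
/* Clamp each lane of x into [lo, hi]: xvmaxdp followed by xvmindp. */
vector double clamp_lanes(vector double x, vector double lo, vector double hi)
{
  return vec_min(vec_max(x, lo), hi);
}
#endif
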
@@ -4506,7 +4735,7 @@ vec_vpkswus(vector unsigned int __a, vector unsigned int __b)
// in that the vec_xor can be recognized as a vec_nor (and for P8 and
// later, possibly a vec_nand).
-vector signed char __ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_perm(vector signed char __a, vector signed char __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4521,7 +4750,7 @@ vec_perm(vector signed char __a, vector signed char __b, vector unsigned char __
#endif
}
-vector unsigned char __ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_perm(vector unsigned char __a,
vector unsigned char __b,
vector unsigned char __c)
@@ -4538,7 +4767,7 @@ vec_perm(vector unsigned char __a,
#endif
}
-vector bool char __ATTRS_o_ai
+static vector bool char __ATTRS_o_ai
vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4553,7 +4782,7 @@ vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c)
#endif
}
-vector short __ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_perm(vector short __a, vector short __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4568,7 +4797,7 @@ vec_perm(vector short __a, vector short __b, vector unsigned char __c)
#endif
}
-vector unsigned short __ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_perm(vector unsigned short __a,
vector unsigned short __b,
vector unsigned char __c)
@@ -4585,7 +4814,7 @@ vec_perm(vector unsigned short __a,
#endif
}
-vector bool short __ATTRS_o_ai
+static vector bool short __ATTRS_o_ai
vec_perm(vector bool short __a, vector bool short __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4600,7 +4829,7 @@ vec_perm(vector bool short __a, vector bool short __b, vector unsigned char __c)
#endif
}
-vector pixel __ATTRS_o_ai
+static vector pixel __ATTRS_o_ai
vec_perm(vector pixel __a, vector pixel __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4615,7 +4844,7 @@ vec_perm(vector pixel __a, vector pixel __b, vector unsigned char __c)
#endif
}
-vector int __ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_perm(vector int __a, vector int __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4628,7 +4857,7 @@ vec_perm(vector int __a, vector int __b, vector unsigned char __c)
#endif
}
-vector unsigned int __ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_perm(vector unsigned int __a, vector unsigned int __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4643,7 +4872,7 @@ vec_perm(vector unsigned int __a, vector unsigned int __b, vector unsigned char
#endif
}
-vector bool int __ATTRS_o_ai
+static vector bool int __ATTRS_o_ai
vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4658,7 +4887,7 @@ vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c)
#endif
}
-vector float __ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_perm(vector float __a, vector float __b, vector unsigned char __c)
{
#ifdef __LITTLE_ENDIAN__
@@ -4673,6 +4902,52 @@ vec_perm(vector float __a, vector float __b, vector unsigned char __c)
#endif
}
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai
+vec_perm(vector long long __a, vector long long __b, vector unsigned char __c)
+{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector long long)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
+ return (vector long long)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_perm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c)
+{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned long long)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
+ return (vector unsigned long long)
+ __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
+}
+
+static vector double __ATTRS_o_ai
+vec_perm(vector double __a, vector double __b, vector unsigned char __c)
+{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector double)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
+ return (vector double)
+ __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
+}
+#endif
+
/* vec_vperm */
static vector signed char __ATTRS_o_ai
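
On little endian the new overloads complement the mask (the vec_xor with all-ones above) and swap the operands, so vperm, which numbers bytes in big-endian order, still honours the caller's mask. A small sketch with the new vector double overload (hypothetical function, assumes -mvsx); swapping the two doublewords is endian-symmetric, so this mask behaves the same either way:

#include <altivec.h>

#ifdef __VSX__
vector double swap_doublewords(vector double v)
{
  vector unsigned char mask = {  8,  9, 10, 11, 12, 13, 14, 15,
                                 0,  1,  2,  3,  4,  5,  6,  7 };
  return vec_perm(v, v, mask);
}
#endif
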
@@ -4745,6 +5020,27 @@ vec_vperm(vector float __a, vector float __b, vector unsigned char __c)
return vec_perm(__a, __b, __c);
}
+#ifdef __VSX__
+static vector long long __ATTRS_o_ai
+vec_vperm(vector long long __a, vector long long __b, vector unsigned char __c)
+{
+ return vec_perm(__a, __b, __c);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned char __c)
+{
+ return vec_perm(__a, __b, __c);
+}
+
+static vector double __ATTRS_o_ai
+vec_vperm(vector double __a, vector double __b, vector unsigned char __c)
+{
+ return vec_perm(__a, __b, __c);
+}
+#endif
+
/* vec_re */
static vector float __attribute__((__always_inline__))
@@ -8368,11 +8664,11 @@ vec_sum2s(vector int __a, vector int __b)
#ifdef __LITTLE_ENDIAN__
vector int __c = (vector signed int)
vec_perm(__b, __b, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
__c = __builtin_altivec_vsum2sws(__a, __c);
return (vector signed int)
vec_perm(__c, __c, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
#else
return __builtin_altivec_vsum2sws(__a, __b);
#endif
@@ -8386,11 +8682,11 @@ vec_vsum2sws(vector int __a, vector int __b)
#ifdef __LITTLE_ENDIAN__
vector int __c = (vector signed int)
vec_perm(__b, __b, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
__c = __builtin_altivec_vsum2sws(__a, __c);
return (vector signed int)
vec_perm(__c, __c, (vector unsigned char)
- (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
#else
return __builtin_altivec_vsum2sws(__a, __b);
#endif
@@ -8661,6 +8957,91 @@ vec_vupklsh(vector pixel __a)
#endif
}
+/* vec_vsx_ld */
+
+#ifdef __VSX__
+
+static vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed int *__b)
+{
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned int *__b)
+{
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector float *__b)
+{
+ return (vector float)__builtin_vsx_lxvw4x(__a, __b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector signed long long *__b)
+{
+ return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector unsigned long long *__b)
+{
+ return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector double *__b)
+{
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+#endif
+
+/* vec_vsx_st */
+
+#ifdef __VSX__
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector signed int __a, int __b, vector signed int *__c)
+{
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector unsigned int __a, int __b, vector unsigned int *__c)
+{
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector float __a, int __b, vector float *__c)
+{
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector signed long long __a, int __b, vector signed long long *__c)
+{
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector unsigned long long __a, int __b,
+ vector unsigned long long *__c)
+{
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static void __ATTRS_o_ai
+vec_vsx_st(vector double __a, int __b, vector double *__c)
+{
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+#endif
+
/* vec_xor */
#define __builtin_altivec_vxor vec_xor
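
A round-trip sketch through the new vec_vsx_ld/vec_vsx_st intrinsics added above (hypothetical function, assumes -mvsx):

#include <altivec.h>

#ifdef __VSX__
/* Scale two doubles in place: load, element-wise multiply, store. */
void scale_in_place(vector double *p, double factor)
{
  vector double v = vec_vsx_ld(0, p);          /* lxvd2x  */
  v = v * (vector double){ factor, factor };   /* per-lane multiply */
  vec_vsx_st(v, 0, p);                         /* stxvd2x */
}
#endif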