aboutsummaryrefslogtreecommitdiff
path: root/pl/math/test/mathbench_wrappers.h
diff options
context:
space:
mode:
Diffstat (limited to 'pl/math/test/mathbench_wrappers.h')
-rw-r--r--pl/math/test/mathbench_wrappers.h159
1 file changed, 116 insertions, 43 deletions
diff --git a/pl/math/test/mathbench_wrappers.h b/pl/math/test/mathbench_wrappers.h
index eba960eb96ac..fe7f8963cdee 100644
--- a/pl/math/test/mathbench_wrappers.h
+++ b/pl/math/test/mathbench_wrappers.h
@@ -23,87 +23,106 @@ powi_wrap (double x)
return __builtin_powi (x, (int) round (x));
}
-#if WANT_VMATH
-#if __aarch64__
+#if __aarch64__ && defined(__vpcs)
-static double
-__s_atan2_wrap (double x)
+__vpcs static v_double
+_Z_atan2_wrap (v_double x)
{
- return __s_atan2 (5.0, x);
+ return _ZGVnN2vv_atan2 (v_double_dup (5.0), x);
}
-static float
-__s_atan2f_wrap (float x)
+__vpcs static v_float
+_Z_atan2f_wrap (v_float x)
{
- return __s_atan2f (5.0f, x);
+ return _ZGVnN4vv_atan2f (v_float_dup (5.0f), x);
}
-static v_double
-__v_atan2_wrap (v_double x)
+__vpcs static v_float
+_Z_hypotf_wrap (v_float x)
{
- return __v_atan2 (v_double_dup (5.0), x);
+ return _ZGVnN4vv_hypotf (v_float_dup (5.0f), x);
}
-static v_float
-__v_atan2f_wrap (v_float x)
+__vpcs static v_double
+_Z_hypot_wrap (v_double x)
{
- return __v_atan2f (v_float_dup (5.0f), x);
+ return _ZGVnN2vv_hypot (v_double_dup (5.0), x);
}
-#ifdef __vpcs
-
__vpcs static v_double
-__vn_atan2_wrap (v_double x)
+xy_Z_pow (v_double x)
{
- return __vn_atan2 (v_double_dup (5.0), x);
+ return _ZGVnN2vv_pow (x, x);
}
-__vpcs static v_float
-__vn_atan2f_wrap (v_float x)
+__vpcs static v_double
+x_Z_pow (v_double x)
{
- return __vn_atan2f (v_float_dup (5.0f), x);
+ return _ZGVnN2vv_pow (x, v_double_dup (23.4));
}
__vpcs static v_double
-_Z_atan2_wrap (v_double x)
+y_Z_pow (v_double x)
{
- return _ZGVnN2vv_atan2 (v_double_dup (5.0), x);
+ return _ZGVnN2vv_pow (v_double_dup (2.34), x);
}
__vpcs static v_float
-_Z_atan2f_wrap (v_float x)
+_Z_sincosf_wrap (v_float x)
{
- return _ZGVnN4vv_atan2f (v_float_dup (5.0f), x);
+ v_float s, c;
+ _ZGVnN4vl4l4_sincosf (x, &s, &c);
+ return s + c;
}
-#endif // __vpcs
-#endif // __arch64__
-#endif // WANT_VMATH
+__vpcs static v_float
+_Z_cexpif_wrap (v_float x)
+{
+ __f32x4x2_t sc = _ZGVnN4v_cexpif (x);
+ return sc.val[0] + sc.val[1];
+}
-#if WANT_SVE_MATH
+__vpcs static v_double
+_Z_sincos_wrap (v_double x)
+{
+ v_double s, c;
+ _ZGVnN2vl8l8_sincos (x, &s, &c);
+ return s + c;
+}
-static sv_float
-__sv_atan2f_wrap (sv_float x, sv_bool pg)
+__vpcs static v_double
+_Z_cexpi_wrap (v_double x)
{
- return __sv_atan2f_x (x, svdup_n_f32 (5.0f), pg);
+ __f64x2x2_t sc = _ZGVnN2v_cexpi (x);
+ return sc.val[0] + sc.val[1];
}
+#endif // __aarch64__ && __vpcs
+
+#if WANT_SVE_MATH
+
static sv_float
_Z_sv_atan2f_wrap (sv_float x, sv_bool pg)
{
- return _ZGVsMxvv_atan2f (x, svdup_n_f32 (5.0f), pg);
+ return _ZGVsMxvv_atan2f (x, svdup_f32 (5.0f), pg);
}
static sv_double
-__sv_atan2_wrap (sv_double x, sv_bool pg)
+_Z_sv_atan2_wrap (sv_double x, sv_bool pg)
{
- return __sv_atan2_x (x, svdup_n_f64 (5.0), pg);
+ return _ZGVsMxvv_atan2 (x, svdup_f64 (5.0), pg);
+}
+
+static sv_float
+_Z_sv_hypotf_wrap (sv_float x, sv_bool pg)
+{
+ return _ZGVsMxvv_hypotf (x, svdup_f32 (5.0f), pg);
}
static sv_double
-_Z_sv_atan2_wrap (sv_double x, sv_bool pg)
+_Z_sv_hypot_wrap (sv_double x, sv_bool pg)
{
- return _ZGVsMxvv_atan2 (x, svdup_n_f64 (5.0), pg);
+ return _ZGVsMxvv_hypot (x, svdup_f64 (5.0), pg);
}
static sv_float
@@ -112,22 +131,76 @@ _Z_sv_powi_wrap (sv_float x, sv_bool pg)
return _ZGVsMxvv_powi (x, svcvt_s32_f32_x (pg, x), pg);
}
+static sv_double
+_Z_sv_powk_wrap (sv_double x, sv_bool pg)
+{
+ return _ZGVsMxvv_powk (x, svcvt_s64_f64_x (pg, x), pg);
+}
+
+static sv_float
+xy_Z_sv_powf (sv_float x, sv_bool pg)
+{
+ return _ZGVsMxvv_powf (x, x, pg);
+}
+
static sv_float
-__sv_powif_wrap (sv_float x, sv_bool pg)
+x_Z_sv_powf (sv_float x, sv_bool pg)
{
- return __sv_powif_x (x, svcvt_s32_f32_x (pg, x), pg);
+ return _ZGVsMxvv_powf (x, svdup_f32 (23.4f), pg);
+}
+
+static sv_float
+y_Z_sv_powf (sv_float x, sv_bool pg)
+{
+ return _ZGVsMxvv_powf (svdup_f32 (2.34f), x, pg);
}
static sv_double
-_Z_sv_powk_wrap (sv_double x, sv_bool pg)
+xy_Z_sv_pow (sv_double x, sv_bool pg)
{
- return _ZGVsMxvv_powk (x, svcvt_s64_f64_x (pg, x), pg);
+ return _ZGVsMxvv_pow (x, x, pg);
+}
+
+static sv_double
+x_Z_sv_pow (sv_double x, sv_bool pg)
+{
+ return _ZGVsMxvv_pow (x, svdup_f64 (23.4), pg);
+}
+
+static sv_double
+y_Z_sv_pow (sv_double x, sv_bool pg)
+{
+ return _ZGVsMxvv_pow (svdup_f64 (2.34), x, pg);
+}
+
+static sv_float
+_Z_sv_sincosf_wrap (sv_float x, sv_bool pg)
+{
+ float s[svcntw ()], c[svcntw ()];
+ _ZGVsMxvl4l4_sincosf (x, s, c, pg);
+ return svadd_x (pg, svld1 (pg, s), svld1 (pg, c));
+}
+
+static sv_float
+_Z_sv_cexpif_wrap (sv_float x, sv_bool pg)
+{
+ svfloat32x2_t sc = _ZGVsMxv_cexpif (x, pg);
+ return svadd_x (pg, svget2 (sc, 0), svget2 (sc, 1));
+}
+
+static sv_double
+_Z_sv_sincos_wrap (sv_double x, sv_bool pg)
+{
+ double s[svcntd ()], c[svcntd ()];
+ _ZGVsMxvl8l8_sincos (x, s, c, pg);
+ return svadd_x (pg, svld1 (pg, s), svld1 (pg, c));
}
static sv_double
-__sv_powi_wrap (sv_double x, sv_bool pg)
+_Z_sv_cexpi_wrap (sv_double x, sv_bool pg)
{
- return __sv_powi_x (x, svcvt_s64_f64_x (pg, x), pg);
+ svfloat64x2_t sc = _ZGVsMxv_cexpi (x, pg);
+ return svadd_x (pg, svget2 (sc, 0), svget2 (sc, 1));
}
#endif // WANT_SVE_MATH