diff options
author | Andrew Turner <andrew@FreeBSD.org> | 2024-02-20 09:02:15 +0000 |
---|---|---|
committer | Andrew Turner <andrew@FreeBSD.org> | 2024-02-20 09:02:15 +0000 |
commit | edc5c0de794f521eb620d2b6cbaee2434442a8f3 (patch) | |
tree | 64dfc547c0b6398e9cf94bd8175b21db8a74c814 /pl/math/v_cospif_3u2.c | |
parent | 29866ecb89620f1c798b7f5ff6710255f13aa52e (diff) | |
download | src-edc5c0de794f521eb620d2b6cbaee2434442a8f3.tar.gz src-edc5c0de794f521eb620d2b6cbaee2434442a8f3.zip |
Update the Arm Optimized Routines (tag: vendor/arm-optimized-routines/v24.01, branch: vendor/arm-optimized-routines)
Import the v24.01 release of the Arm Optimized Routines [1].
[1] https://github.com/ARM-software/optimized-routines/tree/v24.01
Sponsored by: Arm Ltd
Diffstat (limited to 'pl/math/v_cospif_3u2.c')
-rw-r--r-- | pl/math/v_cospif_3u2.c | 83 |
1 file changed, 83 insertions, 0 deletions
diff --git a/pl/math/v_cospif_3u2.c b/pl/math/v_cospif_3u2.c
new file mode 100644
index 000000000000..d88aa828439d
--- /dev/null
+++ b/pl/math/v_cospif_3u2.c
@@ -0,0 +1,83 @@
+/*
+ * Single-precision vector cospi function.
+ *
+ * Copyright (c) 2023, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+#include "mathlib.h"
+#include "v_math.h"
+#include "poly_advsimd_f32.h"
+#include "pl_sig.h"
+#include "pl_test.h"
+
+static const struct data
+{
+  float32x4_t poly[6];
+  float32x4_t range_val;
+} data = {
+  /* Taylor series coefficients for sin(pi * x).  */
+  .poly = { V4 (0x1.921fb6p1f), V4 (-0x1.4abbcep2f), V4 (0x1.466bc6p1f),
+	    V4 (-0x1.32d2ccp-1f), V4 (0x1.50783p-4f), V4 (-0x1.e30750p-8f) },
+  .range_val = V4 (0x1p31f),
+};
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
+{
+  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
+  return v_call_f32 (cospif, x, y, cmp);
+}
+
+/* Approximation for vector single-precision cospi(x)
+   Maximum Error: 3.17 ULP:
+   _ZGVnN4v_cospif(0x1.d341a8p-5) got 0x1.f7cd56p-1
+				 want 0x1.f7cd5p-1.  */
+float32x4_t VPCS_ATTR V_NAME_F1 (cospi) (float32x4_t x)
+{
+  const struct data *d = ptr_barrier (&data);
+
+#if WANT_SIMD_EXCEPT
+  float32x4_t r = vabsq_f32 (x);
+  uint32x4_t cmp = vcaleq_f32 (v_f32 (0x1p32f), x);
+
+  /* When WANT_SIMD_EXCEPT = 1, special lanes should be zero'd
+     to avoid them overflowing and throwing exceptions.  */
+  r = v_zerofy_f32 (r, cmp);
+  uint32x4_t odd = vshlq_n_u32 (vcvtnq_u32_f32 (r), 31);
+
+#else
+  float32x4_t r = x;
+  uint32x4_t cmp = vcageq_f32 (r, d->range_val);
+
+  uint32x4_t odd
+      = vshlq_n_u32 (vreinterpretq_u32_s32 (vcvtaq_s32_f32 (r)), 31);
+
+#endif
+
+  /* r = x - rint(x).  */
+  r = vsubq_f32 (r, vrndaq_f32 (r));
+
+  /* cospi(x) = sinpi(0.5 - abs(x)) for values -1/2 .. 1/2.  */
+  r = vsubq_f32 (v_f32 (0.5f), vabsq_f32 (r));
+
+  /* Pairwise Horner approximation for y = sin(r * pi).  */
+  float32x4_t r2 = vmulq_f32 (r, r);
+  float32x4_t r4 = vmulq_f32 (r2, r2);
+  float32x4_t y = vmulq_f32 (v_pw_horner_5_f32 (r2, r4, d->poly), r);
+
+  /* Fallback to scalar.  */
+  if (unlikely (v_any_u32 (cmp)))
+    return special_case (x, y, odd, cmp);
+
+  /* Reintroduce the sign bit for inputs which round to odd.  */
+  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
+}
+
+PL_SIG (V, F, 1, cospi, -0.9, 0.9)
+PL_TEST_ULP (V_NAME_F1 (cospi), 2.67)
+PL_TEST_EXPECT_FENV (V_NAME_F1 (cospi), WANT_SIMD_EXCEPT)
+PL_TEST_SYM_INTERVAL (V_NAME_F1 (cospi), 0, 0x1p-31, 5000)
+PL_TEST_SYM_INTERVAL (V_NAME_F1 (cospi), 0x1p-31, 0.5, 10000)
+PL_TEST_SYM_INTERVAL (V_NAME_F1 (cospi), 0.5, 0x1p32f, 10000)
+PL_TEST_SYM_INTERVAL (V_NAME_F1 (cospi), 0x1p32f, inf, 10000)