| author | Andrew Turner <andrew@FreeBSD.org> | 2024-02-20 09:02:15 +0000 |
|---|---|---|
| committer | Andrew Turner <andrew@FreeBSD.org> | 2024-02-20 09:02:15 +0000 |
| commit | edc5c0de794f521eb620d2b6cbaee2434442a8f3 | |
| tree | 64dfc547c0b6398e9cf94bd8175b21db8a74c814 /pl/math/v_log1pf_2u1.c | |
| parent | 29866ecb89620f1c798b7f5ff6710255f13aa52e | |
Diffstat (limited to 'pl/math/v_log1pf_2u1.c')
| -rw-r--r-- | pl/math/v_log1pf_2u1.c | 174 |
|---|---|---|

1 file changed, 70 insertions(+), 104 deletions(-)
diff --git a/pl/math/v_log1pf_2u1.c b/pl/math/v_log1pf_2u1.c
index 4a7732b403ec..153c88da9c88 100644
--- a/pl/math/v_log1pf_2u1.c
+++ b/pl/math/v_log1pf_2u1.c
@@ -8,104 +8,72 @@
 #include "v_math.h"
 #include "pl_sig.h"
 #include "pl_test.h"
+#include "poly_advsimd_f32.h"
 
-#if V_SUPPORTED
-
-#define AbsMask 0x7fffffff
-#define TinyBound 0x340 /* asuint32(0x1p-23). ulp=0.5 at 0x1p-23.  */
-#define MinusOne 0xbf800000
-#define Ln2 (0x1.62e43p-1f)
-#define Four 0x40800000
-#define ThreeQuarters v_u32 (0x3f400000)
-
-#define C(i) v_f32 (__log1pf_data.coeffs[i])
-
-static inline v_f32_t
-eval_poly (v_f32_t m)
+const static struct data
 {
-#ifdef V_LOG1PF_1U3
-
-  /* Approximate log(1+m) on [-0.25, 0.5] using Horner scheme.  */
-  v_f32_t p = v_fma_f32 (C (8), m, C (7));
-  p = v_fma_f32 (p, m, C (6));
-  p = v_fma_f32 (p, m, C (5));
-  p = v_fma_f32 (p, m, C (4));
-  p = v_fma_f32 (p, m, C (3));
-  p = v_fma_f32 (p, m, C (2));
-  p = v_fma_f32 (p, m, C (1));
-  p = v_fma_f32 (p, m, C (0));
-  return v_fma_f32 (m, m * p, m);
-
-#elif defined(V_LOG1PF_2U5)
-
-  /* Approximate log(1+m) on [-0.25, 0.5] using Estrin scheme.  */
-  v_f32_t p_12 = v_fma_f32 (m, C (1), C (0));
-  v_f32_t p_34 = v_fma_f32 (m, C (3), C (2));
-  v_f32_t p_56 = v_fma_f32 (m, C (5), C (4));
-  v_f32_t p_78 = v_fma_f32 (m, C (7), C (6));
-
-  v_f32_t m2 = m * m;
-  v_f32_t p_02 = v_fma_f32 (m2, p_12, m);
-  v_f32_t p_36 = v_fma_f32 (m2, p_56, p_34);
-  v_f32_t p_79 = v_fma_f32 (m2, C (8), p_78);
-
-  v_f32_t m4 = m2 * m2;
-  v_f32_t p_06 = v_fma_f32 (m4, p_36, p_02);
-
-  return v_fma_f32 (m4, m4 * p_79, p_06);
-
-#else
-#error No precision specified for v_log1pf
-#endif
+  float32x4_t poly[8], ln2;
+  uint32x4_t tiny_bound, minus_one, four, thresh;
+  int32x4_t three_quarters;
+} data = {
+  .poly = { /* Generated using FPMinimax in [-0.25, 0.5]. First two coefficients
+               (1, -0.5) are not stored as they can be generated more
+               efficiently.  */
+            V4 (0x1.5555aap-2f), V4 (-0x1.000038p-2f), V4 (0x1.99675cp-3f),
+            V4 (-0x1.54ef78p-3f), V4 (0x1.28a1f4p-3f), V4 (-0x1.0da91p-3f),
+            V4 (0x1.abcb6p-4f), V4 (-0x1.6f0d5ep-5f) },
+  .ln2 = V4 (0x1.62e43p-1f),
+  .tiny_bound = V4 (0x34000000), /* asuint32(0x1p-23). ulp=0.5 at 0x1p-23.  */
+  .thresh = V4 (0x4b800000),     /* asuint32(INFINITY) - tiny_bound.  */
+  .minus_one = V4 (0xbf800000),
+  .four = V4 (0x40800000),
+  .three_quarters = V4 (0x3f400000)
+};
+
+static inline float32x4_t
+eval_poly (float32x4_t m, const float32x4_t *p)
+{
+  /* Approximate log(1+m) on [-0.25, 0.5] using split Estrin scheme.  */
+  float32x4_t p_12 = vfmaq_f32 (v_f32 (-0.5), m, p[0]);
+  float32x4_t p_34 = vfmaq_f32 (p[1], m, p[2]);
+  float32x4_t p_56 = vfmaq_f32 (p[3], m, p[4]);
+  float32x4_t p_78 = vfmaq_f32 (p[5], m, p[6]);
+
+  float32x4_t m2 = vmulq_f32 (m, m);
+  float32x4_t p_02 = vfmaq_f32 (m, m2, p_12);
+  float32x4_t p_36 = vfmaq_f32 (p_34, m2, p_56);
+  float32x4_t p_79 = vfmaq_f32 (p_78, m2, p[7]);
+
+  float32x4_t m4 = vmulq_f32 (m2, m2);
+  float32x4_t p_06 = vfmaq_f32 (p_02, m4, p_36);
+  return vfmaq_f32 (p_06, m4, vmulq_f32 (m4, p_79));
 }
 
-static inline float
-handle_special (float x)
+static float32x4_t NOINLINE VPCS_ATTR
+special_case (float32x4_t x, float32x4_t y, uint32x4_t special)
 {
-  uint32_t ix = asuint (x);
-  uint32_t ia = ix & AbsMask;
-  if (ix == 0xff800000 || ia > 0x7f800000 || ix > 0xbf800000)
-    {
-      /* x == -Inf   => log1pf(x) = NaN.
-         x <  -1.0   => log1pf(x) = NaN.
-         x == +/-NaN => log1pf(x) = NaN.  */
-#if WANT_SIMD_EXCEPT
-      return __math_invalidf (asfloat (ia));
-#else
-      return NAN;
-#endif
-    }
-  if (ix == 0xbf800000)
-    {
-      /* x == -1.0 => log1pf(x) = -Inf.  */
-#if WANT_SIMD_EXCEPT
-      return __math_divzerof (ix);
-#else
-      return -INFINITY;
-#endif
-    }
-  /* |x| < TinyBound => log1p(x) = x.  */
-  return x;
+  return v_call_f32 (log1pf, x, y, special);
 }
 
-/* Vector log1pf approximation using polynomial on reduced interval. Accuracy is
-   the same as for the scalar algorithm, i.e. worst-case error when using Estrin
+/* Vector log1pf approximation using polynomial on reduced interval. Accuracy
    is roughly 2.02 ULP:
    log1pf(0x1.21e13ap-2) got 0x1.fe8028p-3 want 0x1.fe802cp-3.  */
-VPCS_ATTR v_f32_t V_NAME (log1pf) (v_f32_t x)
+VPCS_ATTR float32x4_t V_NAME_F1 (log1p) (float32x4_t x)
 {
-  v_u32_t ix = v_as_u32_f32 (x);
-  v_u32_t ia12 = (ix >> 20) & v_u32 (0x7f8);
-  v_u32_t special_cases
-      = v_cond_u32 (ia12 - v_u32 (TinyBound) >= (0x7f8 - TinyBound))
-        | v_cond_u32 (ix >= MinusOne);
-  v_f32_t special_arg = x;
+  const struct data *d = ptr_barrier (&data);
+
+  uint32x4_t ix = vreinterpretq_u32_f32 (x);
+  uint32x4_t ia = vreinterpretq_u32_f32 (vabsq_f32 (x));
+  uint32x4_t special_cases
+      = vorrq_u32 (vcgeq_u32 (vsubq_u32 (ia, d->tiny_bound), d->thresh),
+                   vcgeq_u32 (ix, d->minus_one));
+  float32x4_t special_arg = x;
 
 #if WANT_SIMD_EXCEPT
   if (unlikely (v_any_u32 (special_cases)))
     /* Side-step special lanes so fenv exceptions are not triggered
       inadvertently.  */
-    x = v_sel_f32 (special_cases, v_f32 (1), x);
+    x = v_zerofy_f32 (x, special_cases);
#endif
 
   /* With x + 1 = t * 2^k (where t = m + 1 and k is chosen such that m
@@ -117,44 +85,42 @@ VPCS_ATTR v_f32_t V_NAME (log1pf) (v_f32_t x)
      scale factor s = 4*k*log(2) to ensure the scale is representable as a
      normalised fp32 number.  */
 
-  v_f32_t m = x + v_f32 (1.0f);
+  float32x4_t m = vaddq_f32 (x, v_f32 (1.0f));
 
   /* Choose k to scale x to the range [-1/4, 1/2].  */
-  v_s32_t k = (v_as_s32_f32 (m) - ThreeQuarters) & v_u32 (0xff800000);
+  int32x4_t k
+      = vandq_s32 (vsubq_s32 (vreinterpretq_s32_f32 (m), d->three_quarters),
+                   v_s32 (0xff800000));
+  uint32x4_t ku = vreinterpretq_u32_s32 (k);
 
   /* Scale x by exponent manipulation.  */
-  v_f32_t m_scale = v_as_f32_u32 (v_as_u32_f32 (x) - v_as_u32_s32 (k));
+  float32x4_t m_scale
+      = vreinterpretq_f32_u32 (vsubq_u32 (vreinterpretq_u32_f32 (x), ku));
 
   /* Scale up to ensure that the scale factor is representable as normalised
      fp32 number, and scale m down accordingly.  */
-  v_f32_t s = v_as_f32_u32 (v_u32 (Four) - k);
-  m_scale = m_scale + v_fma_f32 (v_f32 (0.25f), s, v_f32 (-1.0f));
+  float32x4_t s = vreinterpretq_f32_u32 (vsubq_u32 (d->four, ku));
+  m_scale = vaddq_f32 (m_scale, vfmaq_f32 (v_f32 (-1.0f), v_f32 (0.25f), s));
 
   /* Evaluate polynomial on the reduced interval.  */
-  v_f32_t p = eval_poly (m_scale);
+  float32x4_t p = eval_poly (m_scale, d->poly);
 
   /* The scale factor to be applied back at the end - by multiplying float(k)
      by 2^-23 we get the unbiased exponent of k.  */
-  v_f32_t scale_back = v_to_f32_s32 (k) * v_f32 (0x1p-23f);
+  float32x4_t scale_back = vcvtq_f32_s32 (vshrq_n_s32 (k, 23));
 
   /* Apply the scaling back.  */
-  v_f32_t y = v_fma_f32 (scale_back, v_f32 (Ln2), p);
+  float32x4_t y = vfmaq_f32 (p, scale_back, d->ln2);
 
   if (unlikely (v_any_u32 (special_cases)))
-    return v_call_f32 (handle_special, special_arg, y, special_cases);
+    return special_case (special_arg, y, special_cases);
   return y;
 }
-VPCS_ALIAS
 
 PL_SIG (V, F, 1, log1p, -0.9, 10.0)
-PL_TEST_ULP (V_NAME (log1pf), 1.53)
-PL_TEST_EXPECT_FENV (V_NAME (log1pf), WANT_SIMD_EXCEPT)
-PL_TEST_INTERVAL (V_NAME (log1pf), -10.0, 10.0, 10000)
-PL_TEST_INTERVAL (V_NAME (log1pf), 0.0, 0x1p-23, 30000)
-PL_TEST_INTERVAL (V_NAME (log1pf), 0x1p-23, 0.001, 50000)
-PL_TEST_INTERVAL (V_NAME (log1pf), 0.001, 1.0, 50000)
-PL_TEST_INTERVAL (V_NAME (log1pf), 0.0, -0x1p-23, 30000)
-PL_TEST_INTERVAL (V_NAME (log1pf), -0x1p-23, -0.001, 30000)
-PL_TEST_INTERVAL (V_NAME (log1pf), -0.001, -1.0, 50000)
-PL_TEST_INTERVAL (V_NAME (log1pf), -1.0, inf, 1000)
-#endif
+PL_TEST_ULP (V_NAME_F1 (log1p), 1.53)
+PL_TEST_EXPECT_FENV (V_NAME_F1 (log1p), WANT_SIMD_EXCEPT)
+PL_TEST_SYM_INTERVAL (V_NAME_F1 (log1p), 0.0, 0x1p-23, 30000)
+PL_TEST_SYM_INTERVAL (V_NAME_F1 (log1p), 0x1p-23, 1, 50000)
+PL_TEST_INTERVAL (V_NAME_F1 (log1p), 1, inf, 50000)
+PL_TEST_INTERVAL (V_NAME_F1 (log1p), -1.0, -inf, 1000)
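For readers following the rewrite, the sketch below restates the routine's range reduction and polynomial in scalar C. It covers only the non-special path (finite x > -1 with |x| >= 0x1p-23), uses Horner rather than the Estrin scheme of the vector code, and the helper names asuint32, asfloat, poly_ref and log1pf_ref are illustrative only; they are not part of this commit or of the library's API.

```c
/* Scalar sketch of the reduction used above (illustrative only).
   Assumes the non-special path: finite x > -1 and |x| >= 0x1p-23.  */
#include <math.h>
#include <stdint.h>
#include <string.h>

static inline uint32_t asuint32 (float x) { uint32_t u; memcpy (&u, &x, sizeof u); return u; }
static inline float asfloat (uint32_t u) { float x; memcpy (&x, &u, sizeof x); return x; }

/* log1p(m) on [-0.25, 0.5] as m - m^2/2 + m^2*(c0*m + ... + c7*m^8),
   with the same coefficients as the vector routine.  */
static float
poly_ref (float m)
{
  static const float c[] = { 0x1.5555aap-2f,  -0x1.000038p-2f, 0x1.99675cp-3f,
                             -0x1.54ef78p-3f, 0x1.28a1f4p-3f,  -0x1.0da91p-3f,
                             0x1.abcb6p-4f,   -0x1.6f0d5ep-5f };
  float p = c[7];
  for (int i = 6; i >= 0; i--)
    p = fmaf (p, m, c[i]);
  p = fmaf (p, m, -0.5f);          /* generate the -0.5 term explicitly */
  return fmaf (m * m, p, m);       /* m + m^2 * P(m) */
}

static float
log1pf_ref (float x)
{
  /* k holds the exponent adjustment directly in the exponent bit field:
     it is chosen so that (x + 1) * 2^-e lands in [0.75, 1.5).  */
  float m = x + 1.0f;
  uint32_t k = (asuint32 (m) - 0x3f400000) & 0xff800000;
  /* Scale x rather than m so the low bits of x are not lost.  */
  float m_scale = asfloat (asuint32 (x) - k);
  /* s = 4 * 2^-e stays a normal number even when 2^-e would not.  */
  float s = asfloat (0x40800000 - k);
  m_scale = m_scale + fmaf (0.25f, s, -1.0f);   /* (x + 1) * 2^-e - 1 */
  /* Multiplying the signed bit pattern of k by 2^-23 recovers the unbiased
     exponent e, so the result is e*log(2) + log1p(reduced argument).  */
  float e = (float) (int32_t) k * 0x1p-23f;
  return fmaf (e, 0x1.62e43p-1f, poly_ref (m_scale));
}
```

As a sanity check, x = 3 reduces to e = 2 with a reduced argument of exactly 0, so the sketch returns 2*log(2) = log(4) = log1p(3).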