Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib/builtins/ppc')
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/DD.h            45
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/atomic.exp      41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/divtc3.c        97
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfdi.c       98
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfti.c       38
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfdi.c    57
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfti.c   115
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatditf.c     33
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/floattitf.c     46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatunditf.c   39
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qadd.c      74
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qdiv.c      52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qmul.c      50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qsub.c      74
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/multc3.c        85
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/restFP.S        45
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ppc/saveFP.S        42
17 files changed, 1031 insertions, 0 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/DD.h b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/DD.h
new file mode 100644
index 000000000000..8f31a962fc77
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/DD.h
@@ -0,0 +1,45 @@
+#ifndef COMPILERRT_DD_HEADER
+#define COMPILERRT_DD_HEADER
+
+#include "../int_lib.h"
+
+typedef union {
+ long double ld;
+ struct {
+ double hi;
+ double lo;
+ } s;
+} DD;
+
+typedef union {
+ double d;
+ uint64_t x;
+} doublebits;
+
+#define LOWORDER(xy, xHi, xLo, yHi, yLo) \
+ (((((xHi) * (yHi) - (xy)) + (xHi) * (yLo)) + (xLo) * (yHi)) + (xLo) * (yLo))
+
+static __inline ALWAYS_INLINE double local_fabs(double x) {
+ doublebits result = {.d = x};
+ result.x &= UINT64_C(0x7fffffffffffffff);
+ return result.d;
+}
+
+static __inline ALWAYS_INLINE double high26bits(double x) {
+ doublebits result = {.d = x};
+ result.x &= UINT64_C(0xfffffffff8000000);
+ return result.d;
+}
+
+static __inline ALWAYS_INLINE int different_sign(double x, double y) {
+ doublebits xsignbit = {.d = x}, ysignbit = {.d = y};
+ int result = (int)(xsignbit.x >> 63) ^ (int)(ysignbit.x >> 63);
+ return result;
+}
+
+long double __gcc_qadd(long double, long double);
+long double __gcc_qsub(long double, long double);
+long double __gcc_qmul(long double, long double);
+long double __gcc_qdiv(long double, long double);
+
+#endif // COMPILERRT_DD_HEADER
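
The DD union above encodes the IBM double-double format: the head hi is the long
double rounded to double precision, the tail lo carries the residual, and the
represented value is hi + lo. A minimal standalone sketch of that pair idea (plain
C, not part of the diff):

#include <stdio.h>

int main(void) {
  // A double-double value is a pair (hi, lo) with value == hi + lo, where lo
  // sits far below the last mantissa bit of hi and would be lost in one double.
  double hi = 1.0;
  double lo = 0x1.0p-80;

  printf("hi      = %a\n", hi);
  printf("lo      = %a\n", lo);
  printf("hi + lo = %a  (rounds back to hi in a single double)\n", hi + lo);
  return 0;
}
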
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/atomic.exp b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/atomic.exp
new file mode 100644
index 000000000000..98f759de982f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/atomic.exp
@@ -0,0 +1,41 @@
+__atomic_compare_exchange
+__atomic_compare_exchange_1
+__atomic_compare_exchange_2
+__atomic_compare_exchange_4
+__atomic_compare_exchange_8
+__atomic_exchange
+__atomic_exchange_1
+__atomic_exchange_2
+__atomic_exchange_4
+__atomic_exchange_8
+__atomic_fetch_add_1
+__atomic_fetch_add_2
+__atomic_fetch_add_4
+__atomic_fetch_add_8
+__atomic_fetch_and_1
+__atomic_fetch_and_2
+__atomic_fetch_and_4
+__atomic_fetch_and_8
+__atomic_fetch_or_1
+__atomic_fetch_or_2
+__atomic_fetch_or_4
+__atomic_fetch_or_8
+__atomic_fetch_sub_1
+__atomic_fetch_sub_2
+__atomic_fetch_sub_4
+__atomic_fetch_sub_8
+__atomic_fetch_xor_1
+__atomic_fetch_xor_2
+__atomic_fetch_xor_4
+__atomic_fetch_xor_8
+__atomic_is_lock_free
+__atomic_load
+__atomic_load_1
+__atomic_load_2
+__atomic_load_4
+__atomic_load_8
+__atomic_store
+__atomic_store_1
+__atomic_store_2
+__atomic_store_4
+__atomic_store_8
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/divtc3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/divtc3.c
new file mode 100644
index 000000000000..671bd4ddbbd7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/divtc3.c
@@ -0,0 +1,97 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../int_math.h"
+#include "DD.h"
+// Use DOUBLE_PRECISION because the soft-fp method we use is logb (on the upper
+// half of the long doubles), even though this file defines complex division for
+// 128-bit floats.
+#define DOUBLE_PRECISION
+#include "../fp_lib.h"
+
+#if !defined(CRT_INFINITY) && defined(HUGE_VAL)
+#define CRT_INFINITY HUGE_VAL
+#endif // CRT_INFINITY
+
+#define makeFinite(x) \
+ { \
+ (x).s.hi = crt_copysign(crt_isinf((x).s.hi) ? 1.0 : 0.0, (x).s.hi); \
+ (x).s.lo = 0.0; \
+ }
+
+long double _Complex __divtc3(long double a, long double b, long double c,
+ long double d) {
+ DD cDD = {.ld = c};
+ DD dDD = {.ld = d};
+
+ int ilogbw = 0;
+ const double logbw =
+ __compiler_rt_logb(__compiler_rt_fmax(crt_fabs(cDD.s.hi),
+ crt_fabs(dDD.s.hi)));
+
+ if (crt_isfinite(logbw)) {
+ ilogbw = (int)logbw;
+
+ cDD.s.hi = __compiler_rt_scalbn(cDD.s.hi, -ilogbw);
+ cDD.s.lo = __compiler_rt_scalbn(cDD.s.lo, -ilogbw);
+ dDD.s.hi = __compiler_rt_scalbn(dDD.s.hi, -ilogbw);
+ dDD.s.lo = __compiler_rt_scalbn(dDD.s.lo, -ilogbw);
+ }
+
+ const long double denom =
+ __gcc_qadd(__gcc_qmul(cDD.ld, cDD.ld), __gcc_qmul(dDD.ld, dDD.ld));
+ const long double realNumerator =
+ __gcc_qadd(__gcc_qmul(a, cDD.ld), __gcc_qmul(b, dDD.ld));
+ const long double imagNumerator =
+ __gcc_qsub(__gcc_qmul(b, cDD.ld), __gcc_qmul(a, dDD.ld));
+
+ DD real = {.ld = __gcc_qdiv(realNumerator, denom)};
+ DD imag = {.ld = __gcc_qdiv(imagNumerator, denom)};
+
+ real.s.hi = __compiler_rt_scalbn(real.s.hi, -ilogbw);
+ real.s.lo = __compiler_rt_scalbn(real.s.lo, -ilogbw);
+ imag.s.hi = __compiler_rt_scalbn(imag.s.hi, -ilogbw);
+ imag.s.lo = __compiler_rt_scalbn(imag.s.lo, -ilogbw);
+
+ if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi)) {
+ DD aDD = {.ld = a};
+ DD bDD = {.ld = b};
+ DD rDD = {.ld = denom};
+
+ if ((rDD.s.hi == 0.0) && (!crt_isnan(aDD.s.hi) || !crt_isnan(bDD.s.hi))) {
+ real.s.hi = crt_copysign(CRT_INFINITY, cDD.s.hi) * aDD.s.hi;
+ real.s.lo = 0.0;
+ imag.s.hi = crt_copysign(CRT_INFINITY, cDD.s.hi) * bDD.s.hi;
+ imag.s.lo = 0.0;
+ }
+
+ else if ((crt_isinf(aDD.s.hi) || crt_isinf(bDD.s.hi)) &&
+ crt_isfinite(cDD.s.hi) && crt_isfinite(dDD.s.hi)) {
+ makeFinite(aDD);
+ makeFinite(bDD);
+ real.s.hi = CRT_INFINITY * (aDD.s.hi * cDD.s.hi + bDD.s.hi * dDD.s.hi);
+ real.s.lo = 0.0;
+ imag.s.hi = CRT_INFINITY * (bDD.s.hi * cDD.s.hi - aDD.s.hi * dDD.s.hi);
+ imag.s.lo = 0.0;
+ }
+
+ else if ((crt_isinf(cDD.s.hi) || crt_isinf(dDD.s.hi)) &&
+ crt_isfinite(aDD.s.hi) && crt_isfinite(bDD.s.hi)) {
+ makeFinite(cDD);
+ makeFinite(dDD);
+ real.s.hi =
+ crt_copysign(0.0, (aDD.s.hi * cDD.s.hi + bDD.s.hi * dDD.s.hi));
+ real.s.lo = 0.0;
+ imag.s.hi =
+ crt_copysign(0.0, (bDD.s.hi * cDD.s.hi - aDD.s.hi * dDD.s.hi));
+ imag.s.lo = 0.0;
+ }
+ }
+
+ long double _Complex z;
+ __real__ z = real.ld;
+ __imag__ z = imag.ld;
+
+ return z;
+}
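
__divtc3 above is Smith's complex division with scaling: the denominator is
pre-scaled by 2^-ilogbw so that c*c + d*d can neither overflow nor underflow, and
the scale is undone on the quotient. A minimal double-precision sketch of that
scaling step (illustration only; div_scaled is a made-up helper, and the real
routine performs the same steps in double-double arithmetic via __gcc_qmul,
__gcc_qadd, and __gcc_qdiv):

#include <math.h>
#include <stdio.h>

static void div_scaled(double a, double b, double c, double d,
                       double *re, double *im) {
  // Compute (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d),
  // scaling the denominator towards 1.0 first so c*c + d*d stays finite.
  int ilogbw = 0;
  const double logbw = logb(fmax(fabs(c), fabs(d)));
  if (isfinite(logbw)) {
    ilogbw = (int)logbw;
    c = scalbn(c, -ilogbw);
    d = scalbn(d, -ilogbw);
  }
  const double denom = c * c + d * d;
  *re = scalbn((a * c + b * d) / denom, -ilogbw);
  *im = scalbn((b * c - a * d) / denom, -ilogbw);
}

int main(void) {
  double re, im;
  // Unscaled, c*c + d*d would overflow to infinity and the result would be lost.
  div_scaled(1.0, 2.0, 3.0e300, 4.0e300, &re, &im);
  printf("%g + %gi\n", re, im); // 4.4e-301 + 8e-302i
  return 0;
}
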
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfdi.c
new file mode 100644
index 000000000000..a97aaf095846
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfdi.c
@@ -0,0 +1,98 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// int64_t __fixtfdi(long double x);
+// This file implements the PowerPC 128-bit double-double -> int64_t conversion
+
+#include "../int_math.h"
+#include "DD.h"
+
+uint64_t __fixtfdi(long double input) {
+ const DD x = {.ld = input};
+ const doublebits hibits = {.d = x.s.hi};
+
+ const uint32_t absHighWord =
+ (uint32_t)(hibits.x >> 32) & UINT32_C(0x7fffffff);
+ const uint32_t absHighWordMinusOne = absHighWord - UINT32_C(0x3ff00000);
+
+ // If (1.0 - tiny) <= input < 0x1.0p63:
+ if (UINT32_C(0x03f00000) > absHighWordMinusOne) {
+ // Do an unsigned conversion of the absolute value, then restore the sign.
+ const int unbiasedHeadExponent = absHighWordMinusOne >> 20;
+
+ int64_t result = hibits.x & INT64_C(0x000fffffffffffff); // mantissa(hi)
+ result |= INT64_C(0x0010000000000000); // mantissa(hi) with implicit bit
+ result <<= 10; // mantissa(hi) with one zero preceding bit.
+
+ const int64_t hiNegationMask = ((int64_t)(hibits.x)) >> 63;
+
+ // If the tail is non-zero, we need to patch in the tail bits.
+ if (0.0 != x.s.lo) {
+ const doublebits lobits = {.d = x.s.lo};
+ int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
+ tailMantissa |= INT64_C(0x0010000000000000);
+
+ // At this point we have the mantissa of |tail|
+ // We need to negate it if head and tail have different signs.
+ const int64_t loNegationMask = ((int64_t)(lobits.x)) >> 63;
+ const int64_t negationMask = loNegationMask ^ hiNegationMask;
+ tailMantissa = (tailMantissa ^ negationMask) - negationMask;
+
+ // Now we have the mantissa of tail as a signed 2s-complement integer
+
+ const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
+
+ // Shift the tail mantissa into the right position, accounting for the
+ // bias of 10 that we shifted the head mantissa by.
+ tailMantissa >>=
+ (unbiasedHeadExponent - (biasedTailExponent - (1023 - 10)));
+
+ result += tailMantissa;
+ }
+
+ result >>= (62 - unbiasedHeadExponent);
+
+ // Restore the sign of the result and return
+ result = (result ^ hiNegationMask) - hiNegationMask;
+ return result;
+ }
+
+ // Edge cases handled here:
+
+ // |x| < 1, result is zero.
+ if (1.0 > crt_fabs(x.s.hi))
+ return INT64_C(0);
+
+ // x very close to INT64_MIN, care must be taken to see which side we are on.
+ if (x.s.hi == -0x1.0p63) {
+
+ int64_t result = INT64_MIN;
+
+ if (0.0 < x.s.lo) {
+ // If the tail is positive, the correct result is something other than
+ // INT64_MIN; we'll need to figure out what it is.
+
+ const doublebits lobits = {.d = x.s.lo};
+ int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
+ tailMantissa |= INT64_C(0x0010000000000000);
+
+ // Now we negate the tailMantissa
+ tailMantissa = (tailMantissa ^ INT64_C(-1)) + INT64_C(1);
+
+ // And shift it by the appropriate amount
+ const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
+ tailMantissa >>= 1075 - biasedTailExponent;
+
+ result -= tailMantissa;
+ }
+
+ return result;
+ }
+
+ // Signed overflows, infinities, and NaNs
+ if (x.s.hi > 0.0)
+ return INT64_MAX;
+ else
+ return INT64_MIN;
+}
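
The fast path above works purely on the bit pattern of the head double: isolate
the mantissa, restore the implicit bit, pre-shift it to bit 62, then shift right
by (62 - exponent). A minimal standalone sketch of just that step for a positive,
integral, in-range value (head_to_int64 is a made-up name; the real routine also
folds in the tail double and restores the sign):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int64_t head_to_int64(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);                      // bit pattern of d

  const int unbiasedExponent = (int)((bits >> 52) & 0x7ff) - 1023;
  int64_t mantissa = (int64_t)(bits & UINT64_C(0x000fffffffffffff));
  mantissa |= INT64_C(0x0010000000000000);             // implicit leading 1
  mantissa <<= 10;                                     // leading 1 now at bit 62
  return mantissa >> (62 - unbiasedExponent);
}

int main(void) {
  // 1.5 * 2^40 = 1649267441664
  printf("%lld\n", (long long)head_to_int64(0x1.8p40));
  return 0;
}
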
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfti.c
new file mode 100644
index 000000000000..4180e7494d3b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixtfti.c
@@ -0,0 +1,38 @@
+//===--- lib/builtins/ppc/fixtfti.c - Convert long double->int128 *-C -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements converting the 128-bit IBM/PowerPC long double
+// (double-double) data type to a signed 128-bit integer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../int_math.h"
+
+// Convert long double into a signed 128-bit integer.
+__int128_t __fixtfti(long double input) {
+
+ // If we are trying to convert a NaN, return the NaN bit pattern.
+ if (crt_isnan(input)) {
+ return ((__uint128_t)0x7FF8000000000000ll) << 64 |
+ (__uint128_t)0x0000000000000000ll;
+ }
+
+ // Note: overflow is an undefined behavior for this conversion.
+ // For this reason, overflow is not checked here.
+
+ // If the long double is negative, use unsigned conversion from its absolute
+ // value.
+ if (input < 0.0) {
+ __uint128_t result = (__uint128_t)(-input);
+ return -((__int128_t)result);
+ }
+
+ // Otherwise, use unsigned conversion from the input value.
+ __uint128_t result = (__uint128_t)input;
+ return result;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfdi.c
new file mode 100644
index 000000000000..8d53f372527a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfdi.c
@@ -0,0 +1,57 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// uint64_t __fixunstfdi(long double x);
+// This file implements the PowerPC 128-bit double-double -> uint64_t conversion
+
+#include "DD.h"
+
+uint64_t __fixunstfdi(long double input) {
+ const DD x = {.ld = input};
+ const doublebits hibits = {.d = x.s.hi};
+
+ const uint32_t highWordMinusOne =
+ (uint32_t)(hibits.x >> 32) - UINT32_C(0x3ff00000);
+
+ // If (1.0 - tiny) <= input < 0x1.0p64:
+ if (UINT32_C(0x04000000) > highWordMinusOne) {
+ const int unbiasedHeadExponent = highWordMinusOne >> 20;
+
+ uint64_t result = hibits.x & UINT64_C(0x000fffffffffffff); // mantissa(hi)
+ result |= UINT64_C(0x0010000000000000); // mantissa(hi) with implicit bit
+ result <<= 11; // mantissa(hi) left aligned in the int64 field.
+
+ // If the tail is non-zero, we need to patch in the tail bits.
+ if (0.0 != x.s.lo) {
+ const doublebits lobits = {.d = x.s.lo};
+ int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
+ tailMantissa |= INT64_C(0x0010000000000000);
+
+ // At this point we have the mantissa of |tail|
+
+ const int64_t negationMask = ((int64_t)(lobits.x)) >> 63;
+ tailMantissa = (tailMantissa ^ negationMask) - negationMask;
+
+ // Now we have the mantissa of tail as a signed 2s-complement integer
+
+ const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
+
+ // Shift the tail mantissa into the right position, accounting for the
+ // bias of 11 that we shifted the head mantissa by.
+ tailMantissa >>=
+ (unbiasedHeadExponent - (biasedTailExponent - (1023 - 11)));
+
+ result += tailMantissa;
+ }
+
+ result >>= (63 - unbiasedHeadExponent);
+ return result;
+ }
+
+ // Edge cases are handled here, with saturation.
+ if (1.0 > x.s.hi)
+ return UINT64_C(0);
+ else
+ return UINT64_MAX;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfti.c
new file mode 100644
index 000000000000..2469585369c1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/fixunstfti.c
@@ -0,0 +1,115 @@
+//===-- lib/builtins/ppc/fixunstfti.c - Convert long double->int128 *-C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements converting the 128-bit IBM/PowerPC long double
+// (double-double) data type to an unsigned 128-bit integer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../int_math.h"
+#define BIAS 1023
+
+// Convert long double into an unsigned 128-bit integer.
+__uint128_t __fixunstfti(long double input) {
+
+ // If we are trying to convert a NaN, return the NaN bit pattern.
+ if (crt_isnan(input)) {
+ return ((__uint128_t)0x7FF8000000000000ll) << 64 |
+ (__uint128_t)0x0000000000000000ll;
+ }
+
+ __uint128_t result, hiResult, loResult;
+ int hiExponent, loExponent, shift;
+ // The long double representation, with the high and low portions of
+ // the long double, and the corresponding bit patterns of each double.
+ union {
+ long double ld;
+ double d[2]; // [0] is the high double, [1] is the low double.
+ unsigned long long ull[2]; // High and low doubles as 64-bit integers.
+ } ldUnion;
+
+ // If the long double is less than 1.0 or negative,
+ // return 0.
+ if (input < 1.0)
+ return 0;
+
+ // Retrieve the 64-bit patterns of high and low doubles.
+ // Compute the unbiased exponent of both high and low doubles by
+ // removing the signs, isolating the exponent, and subtracting
+ // the bias from it.
+ ldUnion.ld = input;
+ hiExponent = ((ldUnion.ull[0] & 0x7FFFFFFFFFFFFFFFll) >> 52) - BIAS;
+ loExponent = ((ldUnion.ull[1] & 0x7FFFFFFFFFFFFFFFll) >> 52) - BIAS;
+
+ // Convert each double into int64; they will be added to the int128 result.
+ // CASE 1: High or low double fits in int64
+ // - Convert each double normally into int64.
+ //
+ // CASE 2: High or low double does not fit in int64
+ // - Scale the double to fit within a 64-bit integer
+ // - Calculate the shift (amount to scale the double by in the int128)
+ // - Clear all the bits of the exponent (with 0x800FFFFFFFFFFFFF)
+ // - Add BIAS+53 (0x4350000000000000) to exponent to correct the value
+ // - Scale (move) the double to the correct place in the int128
+ // (Move it by 2^53 places)
+ //
+ // Note: Because the high double is known to be positive here, an unsigned
+ // conversion from double to 64-bit integer is used. The low double can be
+ // either positive or negative, so a signed conversion is needed to retain
+ // its value and to ensure it does not simply get converted to 0.
+
+ // CASE 1 - High double fits in int64.
+ if (hiExponent < 63) {
+ hiResult = (unsigned long long)ldUnion.d[0];
+ } else if (hiExponent < 128) {
+ // CASE 2 - High double does not fit in int64, scale and convert it.
+ shift = hiExponent - 54;
+ ldUnion.ull[0] &= 0x800FFFFFFFFFFFFFll;
+ ldUnion.ull[0] |= 0x4350000000000000ll;
+ hiResult = (unsigned long long)ldUnion.d[0];
+ hiResult <<= shift;
+ } else {
+ // Detect overflow. When the exponent of the high double is 128 or
+ // greater (the value cannot fit in 128 bits) and the long double
+ // input is positive, return the maximum 128-bit integer.
+ // For negative inputs with such exponents, return 1, like gcc.
+ if (ldUnion.d[0] > 0) {
+ return ((__uint128_t)0xFFFFFFFFFFFFFFFFll) << 64 |
+ (__uint128_t)0xFFFFFFFFFFFFFFFFll;
+ } else {
+ return ((__uint128_t)0x0000000000000000ll) << 64 |
+ (__uint128_t)0x0000000000000001ll;
+ }
+ }
+
+ // CASE 1 - Low double fits in int64.
+ if (loExponent < 63) {
+ loResult = (long long)ldUnion.d[1];
+ } else {
+ // CASE 2 - Low double does not fit in int64, scale and convert it.
+ shift = loExponent - 54;
+ ldUnion.ull[1] &= 0x800FFFFFFFFFFFFFll;
+ ldUnion.ull[1] |= 0x4350000000000000ll;
+ loResult = (long long)ldUnion.d[1];
+ loResult <<= shift;
+ }
+
+ // If the low double is negative, it may change the integer value of the
+ // whole number if the absolute value of its fractional part is bigger than
+ // the fractional part of the high double. Because the two doubles cannot
+ // overlap, this situation only occurs when the high double has no
+ // fractional part.
+ ldUnion.ld = input;
+ if ((ldUnion.d[0] == (double)hiResult) &&
+ (ldUnion.d[1] < (double)((__int128_t)loResult)))
+ loResult--;
+
+ // Add the high and low doublewords together to form a 128 bit integer.
+ result = loResult + hiResult;
+ return result;
+}
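
The CASE 2 branches above hinge on rewriting the exponent field: a double too
large for a direct 64-bit conversion is forced to mantissa * 2^54, converted, and
then shifted back up by (exponent - 54). A minimal standalone sketch of that
rewrite for the single value 2^70 (illustration only; positive input assumed):

#include <stdio.h>
#include <string.h>

int main(void) {
  double d = 0x1.0p70;
  unsigned long long bits;
  memcpy(&bits, &d, sizeof bits);

  const int exponent = (int)((bits >> 52) & 0x7ff) - 1023; // 70
  const int shift = exponent - 54;                         // 16

  bits &= 0x800FFFFFFFFFFFFFull; // keep sign and mantissa
  bits |= 0x4350000000000000ull; // force the value to mantissa * 2^54
  memcpy(&d, &bits, sizeof d);   // d is now exactly 2^54

  __uint128_t result = (__uint128_t)(unsigned long long)d << shift;
  printf("matches 2^70: %d\n", result == ((__uint128_t)1 << 70)); // 1
  return 0;
}
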
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatditf.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatditf.c
new file mode 100644
index 000000000000..4c365418f082
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatditf.c
@@ -0,0 +1,33 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __floatditf(long long x);
+// This file implements the PowerPC long long -> long double conversion
+
+#include "DD.h"
+
+long double __floatditf(int64_t a) {
+
+ static const double twop32 = 0x1.0p32;
+ static const double twop52 = 0x1.0p52;
+
+ doublebits low = {.d = twop52};
+ low.x |= a & UINT64_C(0x00000000ffffffff); // 0x1.0p52 + low 32 bits of a.
+
+ const double high_addend = (double)((int32_t)(a >> 32)) * twop32 - twop52;
+
+ // At this point, we have two double precision numbers
+ // high_addend and low.d, and we wish to return their sum
+ // as a canonicalized long double:
+
+ // This implementation sets the inexact flag spuriously.
+ // This could be avoided, but at some substantial cost.
+
+ DD result;
+
+ result.s.hi = high_addend + low.d;
+ result.s.lo = (high_addend - result.s.hi) + low.d;
+
+ return result.ld;
+}
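
The low 32 bits are absorbed with the classic 2^52 trick: the mantissa of 0x1.0p52
is all zeros, so OR-ing a 32-bit value into its low mantissa bits produces exactly
2^52 + value with no rounding. A minimal standalone sketch of just that trick:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  const uint32_t value = 0xdeadbeef;

  double twop52 = 0x1.0p52;
  uint64_t bits;
  memcpy(&bits, &twop52, sizeof bits);
  bits |= value;                  // drop the value into the low mantissa bits
  double sum;
  memcpy(&sum, &bits, sizeof sum);

  printf("%d\n", sum == 0x1.0p52 + (double)value); // 1
  return 0;
}
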
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floattitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floattitf.c
new file mode 100644
index 000000000000..6deac6498128
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floattitf.c
@@ -0,0 +1,46 @@
+//===-- lib/builtins/ppc/floattitf.c - Convert int128->long double -*-C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements converting a signed 128-bit integer to a 128-bit
+// IBM/PowerPC long double (double-double) value.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdint.h>
+
+// Conversions from signed and unsigned 64-bit int to long double.
+long double __floatditf(int64_t);
+long double __floatunditf(uint64_t);
+
+// Convert a signed 128-bit integer to long double.
+// This uses the following property: Let hi and lo be 64-bits each,
+// and let signed_val_k() and unsigned_val_k() be the value of the
+// argument interpreted as a signed or unsigned k-bit integer. Then,
+//
+// signed_val_128(hi,lo) = signed_val_64(hi) * 2^64 + unsigned_val_64(lo)
+// = (long double)hi * 2^64 + (long double)lo,
+//
+// where (long double)hi and (long double)lo are signed and
+// unsigned 64-bit integer to long double conversions, respectively.
+long double __floattitf(__int128_t arg) {
+ // Split the int128 argument into 64-bit high and low int64 parts.
+ int64_t ArgHiPart = (int64_t)(arg >> 64);
+ uint64_t ArgLoPart = (uint64_t)arg;
+
+ // Convert each 64-bit part into long double. The high part
+ // must be a signed conversion and the low part an unsigned conversion
+ // to ensure the correct result.
+ long double ConvertedHiPart = __floatditf(ArgHiPart);
+ long double ConvertedLoPart = __floatunditf(ArgLoPart);
+
+ // The low bit of ArgHiPart corresponds to the 2^64 bit in arg.
+ // Multiply the high part by 2^64 to undo the right shift by 64-bits
+ // done in the splitting. Then, add to the low part to obtain the
+ // final result.
+ return ((ConvertedHiPart * 0x1.0p64) + ConvertedLoPart);
+}
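
The identity the comment relies on, signed_val_128(hi, lo) = signed_val_64(hi) *
2^64 + unsigned_val_64(lo), can be checked directly on the integer side. A minimal
standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const __int128_t arg = -((__int128_t)1 << 100) + 12345;

  const int64_t hi = (int64_t)(arg >> 64); // signed high half
  const uint64_t lo = (uint64_t)arg;       // unsigned low half

  const __int128_t rebuilt =
      (__int128_t)hi * ((__int128_t)1 << 64) + (__int128_t)lo;
  printf("%d\n", rebuilt == arg); // 1
  return 0;
}
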
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatunditf.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatunditf.c
new file mode 100644
index 000000000000..fb4cd3f91d86
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/floatunditf.c
@@ -0,0 +1,39 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __floatunditf(unsigned long long x);
+// This file implements the PowerPC unsigned long long -> long double conversion
+
+#include "DD.h"
+
+long double __floatunditf(uint64_t a) {
+
+ // Begins with an exact copy of the code from __floatundidf
+
+ static const double twop52 = 0x1.0p52;
+ static const double twop84 = 0x1.0p84;
+ static const double twop84_plus_twop52 = 0x1.00000001p84;
+
+ doublebits high = {.d = twop84};
+ doublebits low = {.d = twop52};
+
+ high.x |= a >> 32; // 0x1.0p84 + high 32 bits of a
+ low.x |= a & UINT64_C(0x00000000ffffffff); // 0x1.0p52 + low 32 bits of a
+
+ const double high_addend = high.d - twop84_plus_twop52;
+
+ // At this point, we have two double precision numbers
+ // high_addend and low.d, and we wish to return their sum
+ // as a canonicalized long double:
+
+ // This implementation sets the inexact flag spuriously.
+ // This could be avoided, but at some substantial cost.
+
+ DD result;
+
+ result.s.hi = high_addend + low.d;
+ result.s.lo = (high_addend - result.s.hi) + low.d;
+
+ return result.ld;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qadd.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qadd.c
new file mode 100644
index 000000000000..6e1e63cb530e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qadd.c
@@ -0,0 +1,74 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qadd(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double add operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qadd(long double x, long double y) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+
+ DD dst = {.ld = x}, src = {.ld = y};
+
+ register double A = dst.s.hi, a = dst.s.lo, B = src.s.hi, b = src.s.lo;
+
+ // If both operands are zero:
+ if ((A == 0.0) && (B == 0.0)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If either operand is NaN or infinity:
+ const doublebits abits = {.d = A};
+ const doublebits bbits = {.d = B};
+ if ((((uint32_t)(abits.x >> 32) & infinityHi) == infinityHi) ||
+ (((uint32_t)(bbits.x >> 32) & infinityHi) == infinityHi)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If the computation overflows:
+ // This may be playing things a little bit fast and loose, but it will do for
+ // a start.
+ const double testForOverflow = A + (B + (a + b));
+ const doublebits testbits = {.d = testForOverflow};
+ if (((uint32_t)(testbits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = testForOverflow;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ double H, h;
+ double T, t;
+ double W, w;
+ double Y;
+
+ H = B + (A - (A + B));
+ T = b + (a - (a + b));
+ h = A + (B - (A + B));
+ t = a + (b - (a + b));
+
+ if (local_fabs(A) <= local_fabs(B))
+ w = (a + b) + h;
+ else
+ w = (a + b) + H;
+
+ W = (A + B) + w;
+ Y = (A + B) - W;
+ Y += w;
+
+ if (local_fabs(a) <= local_fabs(b))
+ w = t + Y;
+ else
+ w = T + Y;
+
+ dst.s.hi = Y = W + w;
+ dst.s.lo = (W - Y) + w;
+
+ return dst.ld;
+}
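
The H/h/T/t expressions above are error-free additions: the rounded sum of two
doubles plus a recoverable correction equals their exact sum. A minimal standalone
sketch of that recovery (Knuth's TwoSum, assuming round-to-nearest and no
overflow):

#include <stdio.h>

int main(void) {
  const double A = 0x1.0p0;   // 1.0
  const double B = 0x1.0p-60; // far below A's last mantissa bit

  const double s = A + B;     // rounded sum: equals 1.0, B is lost
  const double bb = s - A;
  const double e = (A - (s - bb)) + (B - bb); // recovered rounding error

  // A + B == s + e exactly: s = 0x1p+0, e = 0x1p-60.
  printf("s = %a\ne = %a\n", s, e);
  return 0;
}
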
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qdiv.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qdiv.c
new file mode 100644
index 000000000000..35a3cbc3d3f8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qdiv.c
@@ -0,0 +1,52 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qdiv(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double division operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qdiv(long double a, long double b) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+ DD dst = {.ld = a}, src = {.ld = b};
+
+ register double x = dst.s.hi, x1 = dst.s.lo, y = src.s.hi, y1 = src.s.lo;
+
+ double yHi, yLo, qHi, qLo;
+ double yq, tmp, q;
+
+ q = x / y;
+
+ // Detect special cases
+ if (q == 0.0) {
+ dst.s.hi = q;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ const doublebits qBits = {.d = q};
+ if (((uint32_t)(qBits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = q;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ yHi = high26bits(y);
+ qHi = high26bits(q);
+
+ yq = y * q;
+ yLo = y - yHi;
+ qLo = q - qHi;
+
+ tmp = LOWORDER(yq, yHi, yLo, qHi, qLo);
+ tmp = (x - yq) - tmp;
+ tmp = ((tmp + x1) - y1 * q) / y;
+ x = q + tmp;
+
+ dst.s.lo = (q - x) + tmp;
+ dst.s.hi = x;
+
+ return dst.ld;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qmul.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qmul.c
new file mode 100644
index 000000000000..75f519aad6f0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qmul.c
@@ -0,0 +1,50 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qmul(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double multiply operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qmul(long double x, long double y) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+ DD dst = {.ld = x}, src = {.ld = y};
+
+ register double A = dst.s.hi, a = dst.s.lo, B = src.s.hi, b = src.s.lo;
+
+ double aHi, aLo, bHi, bLo;
+ double ab, tmp, tau;
+
+ ab = A * B;
+
+ // Detect special cases
+ if (ab == 0.0) {
+ dst.s.hi = ab;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ const doublebits abBits = {.d = ab};
+ if (((uint32_t)(abBits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = ab;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // Generic cases handled here.
+ aHi = high26bits(A);
+ bHi = high26bits(B);
+ aLo = A - aHi;
+ bLo = B - bHi;
+
+ tmp = LOWORDER(ab, aHi, aLo, bHi, bLo);
+ tmp += (A * b + a * B);
+ tau = ab + tmp;
+
+ dst.s.lo = (ab - tau) + tmp;
+ dst.s.hi = tau;
+
+ return dst.ld;
+}
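
high26bits and LOWORDER implement a Dekker-style split: each factor is truncated
to its leading mantissa bits so the partial products are (almost always) exact,
and LOWORDER then recovers the rounding error of the product, giving A*B == ab +
err. A minimal standalone sketch with a case that is easy to check by hand (the
local high26bits simply mirrors the one in DD.h):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double high26bits(double x) {
  uint64_t bits;
  memcpy(&bits, &x, sizeof bits);
  bits &= UINT64_C(0xfffffffff8000000); // sign, exponent, top 25 fraction bits
  memcpy(&x, &bits, sizeof x);
  return x;
}

int main(void) {
  const double A = 1.0 / 3.0, B = 3.0;

  const double ab = A * B; // rounded product: exactly 1.0
  const double aHi = high26bits(A), aLo = A - aHi;
  const double bHi = high26bits(B), bLo = B - bHi;

  // Same expression as the LOWORDER macro in DD.h.
  const double err = (((aHi * bHi - ab) + aHi * bLo) + aLo * bHi) + aLo * bLo;

  // ab = 0x1p+0, err = -0x1p-54; ab + err is the exact product 1 - 2^-54.
  printf("ab  = %a\nerr = %a\n", ab, err);
  return 0;
}
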
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qsub.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qsub.c
new file mode 100644
index 000000000000..ac08120be0bd
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/gcc_qsub.c
@@ -0,0 +1,74 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qsub(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double subtract operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qsub(long double x, long double y) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+
+ DD dst = {.ld = x}, src = {.ld = y};
+
+ register double A = dst.s.hi, a = dst.s.lo, B = -src.s.hi, b = -src.s.lo;
+
+ // If both operands are zero:
+ if ((A == 0.0) && (B == 0.0)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If either operand is NaN or infinity:
+ const doublebits abits = {.d = A};
+ const doublebits bbits = {.d = B};
+ if ((((uint32_t)(abits.x >> 32) & infinityHi) == infinityHi) ||
+ (((uint32_t)(bbits.x >> 32) & infinityHi) == infinityHi)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If the computation overflows:
+ // This may be playing things a little bit fast and loose, but it will do for
+ // a start.
+ const double testForOverflow = A + (B + (a + b));
+ const doublebits testbits = {.d = testForOverflow};
+ if (((uint32_t)(testbits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = testForOverflow;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ double H, h;
+ double T, t;
+ double W, w;
+ double Y;
+
+ H = B + (A - (A + B));
+ T = b + (a - (a + b));
+ h = A + (B - (A + B));
+ t = a + (b - (a + b));
+
+ if (local_fabs(A) <= local_fabs(B))
+ w = (a + b) + h;
+ else
+ w = (a + b) + H;
+
+ W = (A + B) + w;
+ Y = (A + B) - W;
+ Y += w;
+
+ if (local_fabs(a) <= local_fabs(b))
+ w = t + Y;
+ else
+ w = T + Y;
+
+ dst.s.hi = Y = W + w;
+ dst.s.lo = (W - Y) + w;
+
+ return dst.ld;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/multc3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/multc3.c
new file mode 100644
index 000000000000..f1fd6816d6c8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/multc3.c
@@ -0,0 +1,85 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../int_math.h"
+#include "DD.h"
+
+#define makeFinite(x) \
+ { \
+ (x).s.hi = crt_copysign(crt_isinf((x).s.hi) ? 1.0 : 0.0, (x).s.hi); \
+ (x).s.lo = 0.0; \
+ }
+
+#define zeroNaN(x) \
+ { \
+ if (crt_isnan((x).s.hi)) { \
+ (x).s.hi = crt_copysign(0.0, (x).s.hi); \
+ (x).s.lo = 0.0; \
+ } \
+ }
+
+long double _Complex __multc3(long double a, long double b, long double c,
+ long double d) {
+ long double ac = __gcc_qmul(a, c);
+ long double bd = __gcc_qmul(b, d);
+ long double ad = __gcc_qmul(a, d);
+ long double bc = __gcc_qmul(b, c);
+
+ DD real = {.ld = __gcc_qsub(ac, bd)};
+ DD imag = {.ld = __gcc_qadd(ad, bc)};
+
+ if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi)) {
+ int recalc = 0;
+
+ DD aDD = {.ld = a};
+ DD bDD = {.ld = b};
+ DD cDD = {.ld = c};
+ DD dDD = {.ld = d};
+
+ if (crt_isinf(aDD.s.hi) || crt_isinf(bDD.s.hi)) {
+ makeFinite(aDD);
+ makeFinite(bDD);
+ zeroNaN(cDD);
+ zeroNaN(dDD);
+ recalc = 1;
+ }
+
+ if (crt_isinf(cDD.s.hi) || crt_isinf(dDD.s.hi)) {
+ makeFinite(cDD);
+ makeFinite(dDD);
+ zeroNaN(aDD);
+ zeroNaN(bDD);
+ recalc = 1;
+ }
+
+ if (!recalc) {
+ DD acDD = {.ld = ac};
+ DD bdDD = {.ld = bd};
+ DD adDD = {.ld = ad};
+ DD bcDD = {.ld = bc};
+
+ if (crt_isinf(acDD.s.hi) || crt_isinf(bdDD.s.hi) ||
+ crt_isinf(adDD.s.hi) || crt_isinf(bcDD.s.hi)) {
+ zeroNaN(aDD);
+ zeroNaN(bDD);
+ zeroNaN(cDD);
+ zeroNaN(dDD);
+ recalc = 1;
+ }
+ }
+
+ if (recalc) {
+ real.s.hi = CRT_INFINITY * (aDD.s.hi * cDD.s.hi - bDD.s.hi * dDD.s.hi);
+ real.s.lo = 0.0;
+ imag.s.hi = CRT_INFINITY * (aDD.s.hi * dDD.s.hi + bDD.s.hi * cDD.s.hi);
+ imag.s.lo = 0.0;
+ }
+ }
+
+ long double _Complex z;
+ __real__ z = real.ld;
+ __imag__ z = imag.ld;
+
+ return z;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/restFP.S b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/restFP.S
new file mode 100644
index 000000000000..02317bd6a649
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/restFP.S
@@ -0,0 +1,45 @@
+//===-- restFP.S - Implement restFP ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// Helper function used by compiler to restore ppc floating point registers at
+// the end of the function epilog. This function returns to the address
+// in the LR slot, so a function epilog must branch (b), not branch and link
+// (bl), to this function.
+// If the compiler wants to restore f27..f31, it does a "b restFP+52"
+//
+// This function should never be exported by a shared library. Each linkage
+// unit carries its own copy of this function.
+//
+DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(restFP)
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+ lfd f30,-16(r1)
+ lfd f31,-8(r1)
+ lwz r0,8(r1)
+ mtlr r0
+ blr
+
+NO_EXEC_STACK_DIRECTIVE
+
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ppc/saveFP.S b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/saveFP.S
new file mode 100644
index 000000000000..1ef5532c8a83
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ppc/saveFP.S
@@ -0,0 +1,42 @@
+//===-- saveFP.S - Implement saveFP ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// Helper function used by compiler to save ppc floating point registers in
+// function prologs. This routine also saves r0 in the LR slot.
+// If the compiler wants to save f27..f31, it does a "bl saveFP+52"
+//
+// This function should never be exported by a shared library. Each linkage
+// unit carries its own copy of this function.
+//
+DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(saveFP)
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stw r0,8(r1)
+ blr
+
+NO_EXEC_STACK_DIRECTIVE
+