Diffstat (limited to 'lib/libc/powerpc64')
-rw-r--r--  lib/libc/powerpc64/Makefile.inc | 3
-rw-r--r--  lib/libc/powerpc64/Symbol.map | 16
-rw-r--r--  lib/libc/powerpc64/_fpmath.h | 56
-rw-r--r--  lib/libc/powerpc64/arith.h | 22
-rw-r--r--  lib/libc/powerpc64/gd_qnan.h | 33
-rw-r--r--  lib/libc/powerpc64/gen/Makefile.inc | 6
-rw-r--r--  lib/libc/powerpc64/gen/_ctx_start.S | 68
-rw-r--r--  lib/libc/powerpc64/gen/_setjmp.S | 153
-rw-r--r--  lib/libc/powerpc64/gen/fabs.S | 36
-rw-r--r--  lib/libc/powerpc64/gen/flt_rounds.c | 55
-rw-r--r--  lib/libc/powerpc64/gen/fpgetmask.c | 47
-rw-r--r--  lib/libc/powerpc64/gen/fpgetround.c | 47
-rw-r--r--  lib/libc/powerpc64/gen/fpgetsticky.c | 49
-rw-r--r--  lib/libc/powerpc64/gen/fpsetmask.c | 51
-rw-r--r--  lib/libc/powerpc64/gen/fpsetround.c | 51
-rw-r--r--  lib/libc/powerpc64/gen/infinity.c | 28
-rw-r--r--  lib/libc/powerpc64/gen/makecontext.c | 127
-rw-r--r--  lib/libc/powerpc64/gen/setjmp.S | 174
-rw-r--r--  lib/libc/powerpc64/gen/signalcontext.c | 102
-rw-r--r--  lib/libc/powerpc64/gen/sigsetjmp.S | 181
-rw-r--r--  lib/libc/powerpc64/gen/syncicache.c | 100
-rw-r--r--  lib/libc/powerpc64/softfloat/milieu.h | 48
-rw-r--r--  lib/libc/powerpc64/softfloat/powerpc-gcc.h | 91
-rw-r--r--  lib/libc/powerpc64/softfloat/softfloat.h | 306
-rw-r--r--  lib/libc/powerpc64/string/Makefile.inc | 16
-rw-r--r--  lib/libc/powerpc64/string/bcopy.S | 338
-rw-r--r--  lib/libc/powerpc64/string/bcopy_resolver.c | 70
-rw-r--r--  lib/libc/powerpc64/string/bcopy_vsx.S | 59
-rw-r--r--  lib/libc/powerpc64/string/memcpy.S | 131
-rw-r--r--  lib/libc/powerpc64/string/memcpy_resolver.c | 3
-rw-r--r--  lib/libc/powerpc64/string/memcpy_vsx.S | 64
-rw-r--r--  lib/libc/powerpc64/string/memmove.S | 3
-rw-r--r--  lib/libc/powerpc64/string/memmove_resolver.c | 3
-rw-r--r--  lib/libc/powerpc64/string/memmove_vsx.S | 4
-rw-r--r--  lib/libc/powerpc64/string/strcpy.c | 30
-rw-r--r--  lib/libc/powerpc64/string/strcpy_arch_2_05.S | 202
-rw-r--r--  lib/libc/powerpc64/string/strcpy_resolver.c | 44
-rw-r--r--  lib/libc/powerpc64/string/strncpy.c | 30
-rw-r--r--  lib/libc/powerpc64/string/strncpy_arch_2_05.S | 129
-rw-r--r--  lib/libc/powerpc64/string/strncpy_resolver.c | 45
40 files changed, 3021 insertions, 0 deletions
diff --git a/lib/libc/powerpc64/Makefile.inc b/lib/libc/powerpc64/Makefile.inc
new file mode 100644
index 000000000000..734afe95f243
--- /dev/null
+++ b/lib/libc/powerpc64/Makefile.inc
@@ -0,0 +1,3 @@
+# Long double is 64-bits
+SRCS+=machdep_ldisd.c
+SYM_MAPS+=${LIBC_SRCTOP}/powerpc64/Symbol.map
diff --git a/lib/libc/powerpc64/Symbol.map b/lib/libc/powerpc64/Symbol.map
new file mode 100644
index 000000000000..62f20e7f352c
--- /dev/null
+++ b/lib/libc/powerpc64/Symbol.map
@@ -0,0 +1,16 @@
+/*
+ * This only needs to contain symbols that are not listed in
+ * symbol maps from other parts of libc (i.e., not found in
+ * stdlib/Symbol.map, string/Symbol.map, sys/Symbol.map, ...).
+ */
+FBSD_1.0 {
+ _mcount;
+ __flt_rounds;
+ brk;
+ fpgetmask;
+ fpgetround;
+ fpgetsticky;
+ fpsetmask;
+ fpsetround;
+ sbrk;
+};
diff --git a/lib/libc/powerpc64/_fpmath.h b/lib/libc/powerpc64/_fpmath.h
new file mode 100644
index 000000000000..9bc7450aacaf
--- /dev/null
+++ b/lib/libc/powerpc64/_fpmath.h
@@ -0,0 +1,56 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2003 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+union IEEEl2bits {
+ long double e;
+ struct {
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+ unsigned int manl :32;
+ unsigned int manh :20;
+ unsigned int exp :11;
+ unsigned int sign :1;
+#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
+ unsigned int sign :1;
+ unsigned int exp :11;
+ unsigned int manh :20;
+ unsigned int manl :32;
+#endif
+ } bits;
+};
+
+#define mask_nbit_l(u) ((void)0)
+#define LDBL_IMPLICIT_NBIT
+#define LDBL_NBIT 0
+
+#define LDBL_MANH_SIZE 20
+#define LDBL_MANL_SIZE 32
+
+#define LDBL_TO_ARRAY32(u, a) do { \
+ (a)[0] = (uint32_t)(u).bits.manl; \
+ (a)[1] = (uint32_t)(u).bits.manh; \
+} while(0)
diff --git a/lib/libc/powerpc64/arith.h b/lib/libc/powerpc64/arith.h
new file mode 100644
index 000000000000..8ac7860d6137
--- /dev/null
+++ b/lib/libc/powerpc64/arith.h
@@ -0,0 +1,22 @@
+/*
+ * MD header for contrib/gdtoa
+ */
+
+/*
+ * NOTE: The definitions in this file must be correct or strtod(3) and
+ * floating point formats in printf(3) will break! The file can be
+ * generated by running contrib/gdtoa/arithchk.c on the target
+ * architecture. See contrib/gdtoa/gdtoaimp.h for details.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define IEEE_8087
+#define Arith_Kind_ASL 1
+#else
+#define IEEE_MC68k
+#define Arith_Kind_ASL 2
+#endif
+#define Long int
+#define Intcast (int)(long)
+#define Double_Align
+#define X64_bit_pointers
diff --git a/lib/libc/powerpc64/gd_qnan.h b/lib/libc/powerpc64/gd_qnan.h
new file mode 100644
index 000000000000..3e78a83adc71
--- /dev/null
+++ b/lib/libc/powerpc64/gd_qnan.h
@@ -0,0 +1,33 @@
+/*
+ * MD header for contrib/gdtoa
+ *
+ * This file can be generated by compiling and running contrib/gdtoa/qnan.c
+ * on the target architecture after arith.h has been generated.
+ */
+
+#define f_QNAN 0x7fc00000
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define d_QNAN0 0x0
+#define d_QNAN1 0x7ff80000
+#define ld_QNAN0 0x0
+#define ld_QNAN1 0x7ff80000
+#define ld_QNAN2 0x0
+#define ld_QNAN3 0x0
+#define ldus_QNAN0 0x0
+#define ldus_QNAN1 0x0
+#define ldus_QNAN2 0x0
+#define ldus_QNAN3 0x7ff8
+#define ldus_QNAN4 0x0
+#else
+#define d_QNAN0 0x7ff80000
+#define d_QNAN1 0x0
+#define ld_QNAN0 0x7ff80000
+#define ld_QNAN1 0x0
+#define ld_QNAN2 0x0
+#define ld_QNAN3 0x0
+#define ldus_QNAN0 0x7ff8
+#define ldus_QNAN1 0x0
+#define ldus_QNAN2 0x0
+#define ldus_QNAN3 0x0
+#define ldus_QNAN4 0x0
+#endif
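
The d_QNAN words above encode the default quiet NaN for an IEEE double, 0x7ff8000000000000, split into two 32-bit halves whose order depends on endianness. A minimal sketch (illustrative only, not part of the commit) that reassembles the little-endian pair and checks that it really is a NaN:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* Little-endian case: d_QNAN0 is the low word, d_QNAN1 the high word. */
        uint64_t bits = ((uint64_t)0x7ff80000 << 32) | 0x0;
        double d;

        memcpy(&d, &bits, sizeof(d));        /* type-pun safely via memcpy */
        printf("isnan(d) = %d\n", isnan(d)); /* expected: 1 */
        return (0);
    }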
diff --git a/lib/libc/powerpc64/gen/Makefile.inc b/lib/libc/powerpc64/gen/Makefile.inc
new file mode 100644
index 000000000000..c48ff05ae552
--- /dev/null
+++ b/lib/libc/powerpc64/gen/Makefile.inc
@@ -0,0 +1,6 @@
+SRCS += _ctx_start.S fabs.S flt_rounds.c fpgetmask.c fpgetround.c \
+ fpgetsticky.c fpsetmask.c fpsetround.c \
+ infinity.c ldexp.c makecontext.c _setjmp.S \
+ setjmp.S sigsetjmp.S signalcontext.c syncicache.c \
+ trivial-getcontextx.c
+
diff --git a/lib/libc/powerpc64/gen/_ctx_start.S b/lib/libc/powerpc64/gen/_ctx_start.S
new file mode 100644
index 000000000000..98225f9c1138
--- /dev/null
+++ b/lib/libc/powerpc64/gen/_ctx_start.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2004 Suleiman Souhlal
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+ #include <machine/asm.h>
+ .globl CNAME(_ctx_done)
+ .globl CNAME(abort)
+
+ ENTRY(_ctx_start)
+#if !defined(_CALL_ELF) || _CALL_ELF == 1
+ /* Load values from function descriptor */
+ ld %r2,8(%r14)
+ ld %r14,0(%r14)
+#else
+ /*
+ * The stack frame was already set up in makecontext(),
+ * so we can safely use the guaranteed fields here.
+ *
+ * Note we do step on the allocated stack frame's TOC,
+ * but since we never return from this function (i.e.
+ * never restore the stack frame) this should be safe.
+ */
+ std %r2,24(%r1) /* save TOC */
+
+ /* Load global entry point */
+ mr %r12,%r14
+#endif
+ mtlr %r14
+ blrl /* branch to start function */
+ mr %r3,%r15 /* pass pointer to ucontext as argument */
+ nop
+#if defined(_CALL_ELF) && _CALL_ELF != 1
+ /* Restore TOC */
+ ld %r2,24(%r1)
+#endif
+ bl CNAME(_ctx_done) /* branch to ctxt completion func */
+ /*
+ * we should never return from the
+ * above branch.
+ */
+ nop
+ bl CNAME(abort) /* abort */
+ nop
+ END(_ctx_start)
+
+ .section .note.GNU-stack,"",%progbits
diff --git a/lib/libc/powerpc64/gen/_setjmp.S b/lib/libc/powerpc64/gen/_setjmp.S
new file mode 100644
index 000000000000..94a744b4fa28
--- /dev/null
+++ b/lib/libc/powerpc64/gen/_setjmp.S
@@ -0,0 +1,153 @@
+/*-
+ * Copyright (c) 2002 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/* $NetBSD: _setjmp.S,v 1.1 1997/03/29 20:55:53 thorpej Exp $ */
+
+#include <machine/asm.h>
+/*
+ * C library -- _setjmp, _longjmp
+ *
+ * _longjmp(a,v)
+ * will generate a "return(v?v:1)" from the last call to
+ * _setjmp(a)
+ * by restoring registers from the stack.
+ * The previous signal state is NOT restored.
+ *
+ * jmpbuf layout:
+ * +------------+
+ * | unused |
+ * +------------+
+ * | unused |
+ * | |
+ * | (4 words) |
+ * | |
+ * +------------+
+ * | saved regs |
+ * | ... |
+ */
+
+ENTRY(_setjmp)
+ mflr %r11
+ mfcr %r12
+ mr %r10,%r1
+ mr %r9,%r2
+ std %r9,40 + 0*8(%r3)
+ stfd %f14,40 + 23*8(%r3)
+ std %r10,40 + 1*8(%r3)
+ stfd %f15,40 + 24*8(%r3)
+ std %r11,40 + 2*8(%r3)
+ stfd %f16,40 + 25*8(%r3)
+ std %r12,40 + 3*8(%r3)
+ stfd %f17,40 + 26*8(%r3)
+ std %r13,40 + 4*8(%r3)
+ stfd %f18,40 + 27*8(%r3)
+ std %r14,40 + 5*8(%r3)
+ stfd %f19,40 + 28*8(%r3)
+ std %r15,40 + 6*8(%r3)
+ stfd %f20,40 + 29*8(%r3)
+ std %r16,40 + 7*8(%r3)
+ stfd %f21,40 + 30*8(%r3)
+ std %r17,40 + 8*8(%r3)
+ stfd %f22,40 + 31*8(%r3)
+ std %r18,40 + 9*8(%r3)
+ stfd %f23,40 + 32*8(%r3)
+ std %r19,40 + 10*8(%r3)
+ stfd %f24,40 + 33*8(%r3)
+ std %r20,40 + 11*8(%r3)
+ stfd %f25,40 + 34*8(%r3)
+ std %r21,40 + 12*8(%r3)
+ stfd %f26,40 + 35*8(%r3)
+ std %r22,40 + 13*8(%r3)
+ stfd %f27,40 + 36*8(%r3)
+ std %r23,40 + 14*8(%r3)
+ stfd %f28,40 + 37*8(%r3)
+ std %r24,40 + 15*8(%r3)
+ stfd %f29,40 + 38*8(%r3)
+ std %r25,40 + 16*8(%r3)
+ stfd %f30,40 + 39*8(%r3)
+ std %r26,40 + 17*8(%r3)
+ stfd %f31,40 + 40*8(%r3)
+ std %r27,40 + 18*8(%r3)
+ std %r28,40 + 19*8(%r3)
+ std %r29,40 + 20*8(%r3)
+ std %r30,40 + 21*8(%r3)
+ std %r31,40 + 22*8(%r3)
+ li %r3,0
+ blr
+END(_setjmp)
+
+ENTRY(_longjmp)
+ ld %r9,40 + 0*8(%r3)
+ lfd %f14,40 + 23*8(%r3)
+ ld %r10,40 + 1*8(%r3)
+ lfd %f15,40 + 24*8(%r3)
+ ld %r11,40 + 2*8(%r3)
+ lfd %f16,40 + 25*8(%r3)
+ ld %r12,40 + 3*8(%r3)
+ lfd %f17,40 + 26*8(%r3)
+ ld %r14,40 + 5*8(%r3)
+ lfd %f18,40 + 27*8(%r3)
+ ld %r15,40 + 6*8(%r3)
+ lfd %f19,40 + 28*8(%r3)
+ ld %r16,40 + 7*8(%r3)
+ lfd %f20,40 + 29*8(%r3)
+ ld %r17,40 + 8*8(%r3)
+ lfd %f21,40 + 30*8(%r3)
+ ld %r18,40 + 9*8(%r3)
+ lfd %f22,40 + 31*8(%r3)
+ ld %r19,40 + 10*8(%r3)
+ lfd %f23,40 + 32*8(%r3)
+ ld %r20,40 + 11*8(%r3)
+ lfd %f24,40 + 33*8(%r3)
+ ld %r21,40 + 12*8(%r3)
+ lfd %f25,40 + 34*8(%r3)
+ ld %r22,40 + 13*8(%r3)
+ lfd %f26,40 + 35*8(%r3)
+ ld %r23,40 + 14*8(%r3)
+ lfd %f27,40 + 36*8(%r3)
+ ld %r24,40 + 15*8(%r3)
+ lfd %f28,40 + 37*8(%r3)
+ ld %r25,40 + 16*8(%r3)
+ lfd %f29,40 + 38*8(%r3)
+ ld %r26,40 + 17*8(%r3)
+ lfd %f30,40 + 39*8(%r3)
+ ld %r27,40 + 18*8(%r3)
+ lfd %f31,40 + 40*8(%r3)
+ ld %r28,40 + 19*8(%r3)
+ ld %r29,40 + 20*8(%r3)
+ ld %r30,40 + 21*8(%r3)
+ ld %r31,40 + 22*8(%r3)
+
+ mtlr %r11
+ mtcr %r12
+ mr %r2,%r9
+ mr %r1,%r10
+ or. %r3,%r4,%r4
+ bnelr
+ li %r3,1
+ blr
+END(_longjmp)
+
+ .section .note.GNU-stack,"",%progbits
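
As the comment block above notes, _setjmp()/_longjmp() save and restore the callee-saved GPRs, FPRs, LR, and CR but leave the signal mask alone, and a longjmp value of 0 is promoted to 1. A short usage sketch (illustrative, not part of the commit):

    #include <setjmp.h>
    #include <stdio.h>

    int
    main(void)
    {
        jmp_buf env;
        int rv;

        rv = _setjmp(env);            /* returns 0 on the direct call */
        printf("rv = %d\n", rv);
        if (rv == 0)
            _longjmp(env, 0);         /* v == 0 is promoted to 1 */
        return (0);                   /* prints "rv = 0" then "rv = 1" */
    }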
diff --git a/lib/libc/powerpc64/gen/fabs.S b/lib/libc/powerpc64/gen/fabs.S
new file mode 100644
index 000000000000..d9dbf9d86fe0
--- /dev/null
+++ b/lib/libc/powerpc64/gen/fabs.S
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2004 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+/*
+ * double fabs(double)
+ */
+ENTRY(fabs)
+ fabs %f1,%f1
+ blr
+END(fabs)
+
+ .section .note.GNU-stack,"",%progbits
diff --git a/lib/libc/powerpc64/gen/flt_rounds.c b/lib/libc/powerpc64/gen/flt_rounds.c
new file mode 100644
index 000000000000..800ec6944d79
--- /dev/null
+++ b/lib/libc/powerpc64/gen/flt_rounds.c
@@ -0,0 +1,55 @@
+/* $NetBSD: flt_rounds.c,v 1.4.10.3 2002/03/22 20:41:53 nathanw Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1996 Mark Brinicombe
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <machine/float.h>
+
+#ifndef _SOFT_FLOAT
+static const int map[] = {
+ 1, /* round to nearest */
+ 0, /* round to zero */
+ 2, /* round to positive infinity */
+ 3 /* round to negative infinity */
+};
+
+int
+__flt_rounds()
+{
+ uint64_t fpscr;
+
+ __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ return map[(fpscr & 0x03)];
+}
+#endif
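
The map[] table above translates the FPSCR RN field (0 = to nearest, 1 = toward zero, 2 = toward +infinity, 3 = toward -infinity) into the C FLT_ROUNDS encoding (1, 0, 2, 3). On FreeBSD, FLT_ROUNDS is expected to expand to a call to __flt_rounds(), so it tracks the dynamic rounding mode; a hedged sketch using the standard <fenv.h> interface (link against libm if fesetround() is not inlined):

    #include <fenv.h>
    #include <float.h>
    #include <stdio.h>

    int
    main(void)
    {
        printf("default:       FLT_ROUNDS = %d\n", FLT_ROUNDS); /* 1 */
        fesetround(FE_TOWARDZERO);
        printf("FE_TOWARDZERO: FLT_ROUNDS = %d\n", FLT_ROUNDS); /* 0 */
        fesetround(FE_UPWARD);
        printf("FE_UPWARD:     FLT_ROUNDS = %d\n", FLT_ROUNDS); /* 2 */
        fesetround(FE_TONEAREST);
        return (0);
    }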
diff --git a/lib/libc/powerpc64/gen/fpgetmask.c b/lib/libc/powerpc64/gen/fpgetmask.c
new file mode 100644
index 000000000000..6817a32bdc65
--- /dev/null
+++ b/lib/libc/powerpc64/gen/fpgetmask.c
@@ -0,0 +1,47 @@
+/* $NetBSD: fpgetmask.c,v 1.3 2002/01/13 21:45:47 thorpej Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dan Winship.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <ieeefp.h>
+
+#ifndef _SOFT_FLOAT
+fp_except_t
+fpgetmask()
+{
+ u_int64_t fpscr;
+
+ __asm__("mffs %0" : "=f"(fpscr));
+ return ((fp_except_t)((fpscr >> 3) & 0x1f));
+}
+#endif
diff --git a/lib/libc/powerpc64/gen/fpgetround.c b/lib/libc/powerpc64/gen/fpgetround.c
new file mode 100644
index 000000000000..b135807b613f
--- /dev/null
+++ b/lib/libc/powerpc64/gen/fpgetround.c
@@ -0,0 +1,47 @@
+/* $NetBSD: fpgetround.c,v 1.3 2002/01/13 21:45:47 thorpej Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dan Winship.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <ieeefp.h>
+
+#ifndef _SOFT_FLOAT
+fp_rnd_t
+fpgetround()
+{
+ u_int64_t fpscr;
+
+ __asm__("mffs %0" : "=f"(fpscr));
+ return ((fp_rnd_t)(fpscr & 0x3));
+}
+#endif
diff --git a/lib/libc/powerpc64/gen/fpgetsticky.c b/lib/libc/powerpc64/gen/fpgetsticky.c
new file mode 100644
index 000000000000..3512c97f8cf9
--- /dev/null
+++ b/lib/libc/powerpc64/gen/fpgetsticky.c
@@ -0,0 +1,49 @@
+/* $NetBSD: fpgetsticky.c,v 1.3 2002/01/13 21:45:48 thorpej Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dan Winship.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "namespace.h"
+
+#include <sys/types.h>
+#include <ieeefp.h>
+
+#ifndef _SOFT_FLOAT
+fp_except_t
+fpgetsticky()
+{
+ u_int64_t fpscr;
+
+ __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ return ((fp_except_t)((fpscr >> 25) & 0x1f));
+}
+#endif
diff --git a/lib/libc/powerpc64/gen/fpsetmask.c b/lib/libc/powerpc64/gen/fpsetmask.c
new file mode 100644
index 000000000000..4170b385e9e4
--- /dev/null
+++ b/lib/libc/powerpc64/gen/fpsetmask.c
@@ -0,0 +1,51 @@
+/* $NetBSD: fpsetmask.c,v 1.3 2002/01/13 21:45:48 thorpej Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dan Winship.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <ieeefp.h>
+
+#ifndef _SOFT_FLOAT
+fp_except_t
+fpsetmask(fp_except_t mask)
+{
+ u_int64_t fpscr;
+ fp_except_t old;
+
+ __asm__("mffs %0" : "=f"(fpscr));
+ old = (fp_except_t)((fpscr >> 3) & 0x1f);
+ fpscr = (fpscr & 0xffffff07) | ((mask & 0x1f) << 3);
+ __asm__ __volatile("mtfsf 0xff,%0" :: "f"(fpscr));
+ return (old);
+}
+#endif
diff --git a/lib/libc/powerpc64/gen/fpsetround.c b/lib/libc/powerpc64/gen/fpsetround.c
new file mode 100644
index 000000000000..2a70fd781474
--- /dev/null
+++ b/lib/libc/powerpc64/gen/fpsetround.c
@@ -0,0 +1,51 @@
+/* $NetBSD: fpsetround.c,v 1.3 2002/01/13 21:45:48 thorpej Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dan Winship.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <ieeefp.h>
+
+#ifndef _SOFT_FLOAT
+fp_rnd_t
+fpsetround(fp_rnd_t rnd_dir)
+{
+ u_int64_t fpscr;
+ fp_rnd_t old;
+
+ __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ old = (fp_rnd_t)(fpscr & 0x3);
+ fpscr = (fpscr & 0xfffffffc) | rnd_dir;
+ __asm__ __volatile("mtfsf 0xff,%0" :: "f"(fpscr));
+ return (old);
+}
+#endif
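
The fpget*/fpset* routines above implement the BSD <ieeefp.h> control interface by reading the FPSCR with mffs and rewriting it with mtfsf. A usage sketch of that interface (illustrative; the constant names are the standard <ieeefp.h> ones):

    #include <ieeefp.h>
    #include <stdio.h>

    int
    main(void)
    {
        fp_rnd_t ornd;
        fp_except_t omask;

        ornd = fpsetround(FP_RZ);                 /* round toward zero */
        omask = fpsetmask(fpgetmask() | FP_X_DZ); /* unmask divide-by-zero */

        printf("old mode %d, current mode %d\n", (int)ornd, (int)fpgetround());

        fpsetround(ornd);                         /* restore previous state */
        fpsetmask(omask);
        return (0);
    }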
diff --git a/lib/libc/powerpc64/gen/infinity.c b/lib/libc/powerpc64/gen/infinity.c
new file mode 100644
index 000000000000..6e11371e6eb3
--- /dev/null
+++ b/lib/libc/powerpc64/gen/infinity.c
@@ -0,0 +1,28 @@
+#include <sys/cdefs.h>
+#if 0
+#if defined(LIBC_SCCS) && !defined(lint)
+__RCSID("$NetBSD: infinity.c,v 1.2 1998/11/14 19:31:02 christos Exp $");
+#endif /* LIBC_SCCS and not lint */
+#endif
+/* infinity.c */
+
+#include <math.h>
+
+/* bytes for +Infinity on powerpc */
+const union __infinity_un __infinity = {
+#if BYTE_ORDER == BIG_ENDIAN
+ { 0x7f, 0xf0, 0, 0, 0, 0, 0, 0 }
+#else
+ { 0, 0, 0, 0, 0, 0, 0xf0, 0x7f }
+#endif
+};
+
+/* bytes for NaN */
+const union __nan_un __nan = {
+#if BYTE_ORDER == BIG_ENDIAN
+ {0xff, 0xc0, 0, 0}
+#else
+ { 0, 0, 0xc0, 0xff }
+#endif
+};
+
diff --git a/lib/libc/powerpc64/gen/makecontext.c b/lib/libc/powerpc64/gen/makecontext.c
new file mode 100644
index 000000000000..9e3a976fa1bd
--- /dev/null
+++ b/lib/libc/powerpc64/gen/makecontext.c
@@ -0,0 +1,127 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2004 Suleiman Souhlal
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <ucontext.h>
+
+__weak_reference(__makecontext, makecontext);
+
+void _ctx_done(ucontext_t *ucp);
+void _ctx_start(void);
+
+void
+_ctx_done(ucontext_t *ucp)
+{
+ if (ucp->uc_link == NULL)
+ exit(0);
+ else {
+ /* invalidate context */
+ ucp->uc_mcontext.mc_len = 0;
+
+ setcontext((const ucontext_t *)ucp->uc_link);
+
+ abort(); /* should never return from above call */
+ }
+}
+
+void
+__makecontext(ucontext_t *ucp, void (*start)(void), int argc, ...)
+{
+ mcontext_t *mc;
+ char *sp;
+ va_list ap;
+ int i, regargs, stackargs;
+
+ /* Sanity checks */
+ if ((ucp == NULL) || (argc < 0)
+ || (ucp->uc_stack.ss_sp == NULL)
+ || (ucp->uc_stack.ss_size < MINSIGSTKSZ)) {
+ /* invalidate context */
+ ucp->uc_mcontext.mc_len = 0;
+ return;
+ }
+
+ /*
+ * The stack must have space for the frame pointer, saved
+ * link register, overflow arguments, and be 16-byte
+ * aligned.
+ */
+ stackargs = (argc > 8) ? argc - 8 : 0;
+ sp = (char *) ucp->uc_stack.ss_sp + ucp->uc_stack.ss_size
+ - sizeof(uintptr_t)*(stackargs + 6);
+ sp = (char *)((uintptr_t)sp & ~0x1f);
+
+ mc = &ucp->uc_mcontext;
+
+ /*
+ * Up to 8 register args. Assumes all args are 64-bit and
+ * integer only. Not sure how to cater for floating point.
+ */
+ regargs = (argc > 8) ? 8 : argc;
+ va_start(ap, argc);
+ for (i = 0; i < regargs; i++)
+ mc->mc_gpr[3 + i] = va_arg(ap, uint64_t);
+
+ /*
+ * Overflow args go onto the stack
+ */
+ if (argc > 8) {
+ uint64_t *argp;
+
+ /* Skip past frame pointer and saved LR */
+#if !defined(_CALL_ELF) || _CALL_ELF == 1
+ argp = (uint64_t *)sp + 6;
+#else
+ argp = (uint64_t *)sp + 4;
+#endif
+
+ for (i = 0; i < stackargs; i++)
+ *argp++ = va_arg(ap, uint64_t);
+ }
+ va_end(ap);
+
+ /*
+ * Use caller-saved regs 14/15 to hold params that _ctx_start
+ * will use to invoke the user-supplied func
+ */
+#if !defined(_CALL_ELF) || _CALL_ELF == 1
+ /* Cast to ensure this is treated as a function descriptor. */
+ mc->mc_srr0 = *(uintptr_t *)_ctx_start;
+#else
+ mc->mc_srr0 = (uintptr_t) _ctx_start;
+ mc->mc_gpr[12] = (uintptr_t) _ctx_start;/* required for prologue */
+#endif
+ mc->mc_gpr[1] = (uintptr_t) sp; /* new stack pointer */
+ mc->mc_gpr[14] = (uintptr_t) start; /* r14 <- start */
+ mc->mc_gpr[15] = (uintptr_t) ucp; /* r15 <- ucp */
+}
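
__makecontext() above loads up to eight integer arguments into r3-r10, spills the rest to the new stack, and points r14/r15 at the start routine and the ucontext so that _ctx_start can invoke them. From the caller's side this is just the POSIX ucontext API; a minimal sketch (illustrative, not part of the commit):

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, work_ctx;

    static void
    worker(int a, int b)                /* makecontext() passes int arguments */
    {
        printf("worker: %d + %d = %d\n", a, b, a + b);
        /* Falling off the end reaches _ctx_done(), which follows uc_link. */
    }

    int
    main(void)
    {
        getcontext(&work_ctx);
        work_ctx.uc_stack.ss_sp = malloc(SIGSTKSZ);
        work_ctx.uc_stack.ss_size = SIGSTKSZ;
        work_ctx.uc_link = &main_ctx;   /* resume here when worker returns */
        makecontext(&work_ctx, (void (*)(void))worker, 2, 3, 4);

        swapcontext(&main_ctx, &work_ctx);  /* run worker, then come back */
        printf("back in main\n");
        free(work_ctx.uc_stack.ss_sp);
        return (0);
    }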
diff --git a/lib/libc/powerpc64/gen/setjmp.S b/lib/libc/powerpc64/gen/setjmp.S
new file mode 100644
index 000000000000..7cf48d63ec82
--- /dev/null
+++ b/lib/libc/powerpc64/gen/setjmp.S
@@ -0,0 +1,174 @@
+/*-
+ * Copyright (c) 2002 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/* $NetBSD: setjmp.S,v 1.3 1998/10/03 12:30:38 tsubai Exp $ */
+
+#include <machine/asm.h>
+#include <sys/syscall.h>
+
+/*
+ * C library -- setjmp, longjmp
+ *
+ * longjmp(a,v)
+ * will generate a "return(v?v:1)" from the last call to
+ * setjmp(a)
+ * by restoring registers from the stack.
+ * The previous signal state is restored.
+ *
+ * jmpbuf layout:
+ * +------------+
+ * | unused |
+ * +------------+
+ * | sig state |
+ * | |
+ * | (4 words) |
+ * | |
+ * +------------+
+ * | saved regs |
+ * | ... |
+ */
+
+ENTRY(setjmp)
+ mr %r6,%r3
+ li %r3,1 /* SIG_BLOCK, but doesn't matter */
+ /* since set == NULL */
+ li %r4,0 /* set = NULL */
+ mr %r5,%r6 /* &oset */
+ addi %r5,%r5,4
+ li %r0, SYS_sigprocmask /*sigprocmask(SIG_BLOCK, NULL, &oset)*/
+ sc /*assume no error XXX */
+ mflr %r11 /* r11 <- link reg */
+ mfcr %r12 /* r12 <- condition reg */
+ mr %r10,%r1 /* r10 <- stackptr */
+ mr %r9,%r2 /* r9 <- global ptr */
+
+ std %r9,40 + 0*8(%r6)
+ stfd %f14,40 + 23*8(%r6)
+ std %r10,40 + 1*8(%r6)
+ stfd %f15,40 + 24*8(%r6)
+ std %r11,40 + 2*8(%r6)
+ stfd %f16,40 + 25*8(%r6)
+ std %r12,40 + 3*8(%r6)
+ stfd %f17,40 + 26*8(%r6)
+ std %r13,40 + 4*8(%r6)
+ stfd %f18,40 + 27*8(%r6)
+ std %r14,40 + 5*8(%r6)
+ stfd %f19,40 + 28*8(%r6)
+ std %r15,40 + 6*8(%r6)
+ stfd %f20,40 + 29*8(%r6)
+ std %r16,40 + 7*8(%r6)
+ stfd %f21,40 + 30*8(%r6)
+ std %r17,40 + 8*8(%r6)
+ stfd %f22,40 + 31*8(%r6)
+ std %r18,40 + 9*8(%r6)
+ stfd %f23,40 + 32*8(%r6)
+ std %r19,40 + 10*8(%r6)
+ stfd %f24,40 + 33*8(%r6)
+ std %r20,40 + 11*8(%r6)
+ stfd %f25,40 + 34*8(%r6)
+ std %r21,40 + 12*8(%r6)
+ stfd %f26,40 + 35*8(%r6)
+ std %r22,40 + 13*8(%r6)
+ stfd %f27,40 + 36*8(%r6)
+ std %r23,40 + 14*8(%r6)
+ stfd %f28,40 + 37*8(%r6)
+ std %r24,40 + 15*8(%r6)
+ stfd %f29,40 + 38*8(%r6)
+ std %r25,40 + 16*8(%r6)
+ stfd %f30,40 + 39*8(%r6)
+ std %r26,40 + 17*8(%r6)
+ stfd %f31,40 + 40*8(%r6)
+ std %r27,40 + 18*8(%r6)
+ std %r28,40 + 19*8(%r6)
+ std %r29,40 + 20*8(%r6)
+ std %r30,40 + 21*8(%r6)
+ std %r31,40 + 22*8(%r6)
+
+ /* XXX Altivec regs */
+
+ li %r3,0 /* return (0) */
+ blr
+END(setjmp)
+
+ WEAK_REFERENCE(__longjmp, longjmp)
+ENTRY(__longjmp)
+ ld %r9,40 + 0*8(%r3)
+ lfd %f14,40 + 23*8(%r3)
+ ld %r10,40 + 1*8(%r3)
+ lfd %f15,40 + 24*8(%r3)
+ ld %r11,40 + 2*8(%r3)
+ lfd %f16,40 + 25*8(%r3)
+ ld %r12,40 + 3*8(%r3)
+ lfd %f17,40 + 26*8(%r3)
+ ld %r14,40 + 5*8(%r3)
+ lfd %f18,40 + 27*8(%r3)
+ ld %r15,40 + 6*8(%r3)
+ lfd %f19,40 + 28*8(%r3)
+ ld %r16,40 + 7*8(%r3)
+ lfd %f20,40 + 29*8(%r3)
+ ld %r17,40 + 8*8(%r3)
+ lfd %f21,40 + 30*8(%r3)
+ ld %r18,40 + 9*8(%r3)
+ lfd %f22,40 + 31*8(%r3)
+ ld %r19,40 + 10*8(%r3)
+ lfd %f23,40 + 32*8(%r3)
+ ld %r20,40 + 11*8(%r3)
+ lfd %f24,40 + 33*8(%r3)
+ ld %r21,40 + 12*8(%r3)
+ lfd %f25,40 + 34*8(%r3)
+ ld %r22,40 + 13*8(%r3)
+ lfd %f26,40 + 35*8(%r3)
+ ld %r23,40 + 14*8(%r3)
+ lfd %f27,40 + 36*8(%r3)
+ ld %r24,40 + 15*8(%r3)
+ lfd %f28,40 + 37*8(%r3)
+ ld %r25,40 + 16*8(%r3)
+ lfd %f29,40 + 38*8(%r3)
+ ld %r26,40 + 17*8(%r3)
+ lfd %f30,40 + 39*8(%r3)
+ ld %r27,40 + 18*8(%r3)
+ lfd %f31,40 + 40*8(%r3)
+ ld %r28,40 + 19*8(%r3)
+ ld %r29,40 + 20*8(%r3)
+ ld %r30,40 + 21*8(%r3)
+ ld %r31,40 + 22*8(%r3)
+ mr %r6,%r4 /* save val param */
+ mtlr %r11 /* r11 -> link reg */
+ mtcr %r12 /* r12 -> condition reg */
+ mr %r2,%r9 /* r9 -> global ptr */
+ mr %r1,%r10 /* r10 -> stackptr */
+ mr %r4,%r3
+ li %r3,3 /* SIG_SETMASK */
+ addi %r4,%r4,4 /* &set */
+ li %r5,0 /* oset = NULL */
+ li %r0,SYS_sigprocmask /* sigprocmask(SIG_SET, &set, NULL) */
+ sc /* assume no error XXX */
+ or. %r3,%r6,%r6
+ bnelr
+ li %r3,1
+ blr
+END(__longjmp)
+
+ .section .note.GNU-stack,"",%progbits
diff --git a/lib/libc/powerpc64/gen/signalcontext.c b/lib/libc/powerpc64/gen/signalcontext.c
new file mode 100644
index 000000000000..de0b2109bba4
--- /dev/null
+++ b/lib/libc/powerpc64/gen/signalcontext.c
@@ -0,0 +1,102 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2004 Marcel Moolenaar, Peter Grehan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/ucontext.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <strings.h>
+
+typedef void (*handler_t)(uint32_t, uint32_t, uint32_t);
+
+/* Prototypes */
+static void ctx_wrapper(ucontext_t *ucp, handler_t func, uint32_t sig,
+ uint32_t sig_si, uint32_t sig_uc);
+
+__weak_reference(__signalcontext, signalcontext);
+
+int
+__signalcontext(ucontext_t *ucp, int sig, __sighandler_t *func)
+{
+ siginfo_t *sig_si;
+ ucontext_t *sig_uc;
+ uintptr_t sp;
+
+ /* Bail out if we don't have a valid ucontext pointer. */
+ if (ucp == NULL)
+ abort();
+
+ /*
+ * Build a 16-byte-aligned signal frame
+ */
+ sp = (ucp->uc_mcontext.mc_gpr[1] - sizeof(ucontext_t)) & ~15UL;
+ sig_uc = (ucontext_t *)sp;
+ bcopy(ucp, sig_uc, sizeof(*sig_uc));
+ sp = (sp - sizeof(siginfo_t)) & ~15UL;
+ sig_si = (siginfo_t *)sp;
+ bzero(sig_si, sizeof(*sig_si));
+ sig_si->si_signo = sig;
+
+ /*
+ * Subtract 48 bytes from stack to allow for frameptr
+ */
+ sp -= 6*sizeof(uint64_t);
+ sp &= ~15UL;
+
+ /*
+ * Setup the ucontext of the signal handler.
+ */
+ bzero(&ucp->uc_mcontext, sizeof(ucp->uc_mcontext));
+ ucp->uc_link = sig_uc;
+ sigdelset(&ucp->uc_sigmask, sig);
+
+ ucp->uc_mcontext.mc_vers = _MC_VERSION;
+ ucp->uc_mcontext.mc_len = sizeof(struct __mcontext);
+ ucp->uc_mcontext.mc_srr0 = (uint64_t) ctx_wrapper;
+ ucp->uc_mcontext.mc_gpr[1] = (uint64_t) sp;
+ ucp->uc_mcontext.mc_gpr[3] = (uint64_t) func;
+ ucp->uc_mcontext.mc_gpr[4] = (uint64_t) sig;
+ ucp->uc_mcontext.mc_gpr[5] = (uint64_t) sig_si;
+ ucp->uc_mcontext.mc_gpr[6] = (uint64_t) sig_uc;
+
+ return (0);
+}
+
+static void
+ctx_wrapper(ucontext_t *ucp, handler_t func, uint32_t sig, uint32_t sig_si,
+ uint32_t sig_uc)
+{
+
+ (*func)(sig, sig_si, sig_uc);
+ if (ucp->uc_link == NULL)
+ exit(0);
+ setcontext((const ucontext_t *)ucp->uc_link);
+ /* should never get here */
+ abort();
+ /* NOTREACHED */
+}
diff --git a/lib/libc/powerpc64/gen/sigsetjmp.S b/lib/libc/powerpc64/gen/sigsetjmp.S
new file mode 100644
index 000000000000..58fa188daaa4
--- /dev/null
+++ b/lib/libc/powerpc64/gen/sigsetjmp.S
@@ -0,0 +1,181 @@
+/*-
+ * Copyright (c) 2002 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/* $NetBSD: sigsetjmp.S,v 1.4 1998/10/03 12:30:38 tsubai Exp $ */
+
+#include <machine/asm.h>
+/*
+ * C library -- sigsetjmp, siglongjmp
+ *
+ * siglongjmp(a,v)
+ * will generate a "return(v?v:1)" from the last call to
+ * sigsetjmp(a, savemask)
+ * by restoring registers from the stack.
+ * The previous signal state is restored if savemask is non-zero
+ *
+ * jmpbuf layout:
+ * +------------+
+ * | savemask |
+ * +------------+
+ * | sig state |
+ * | |
+ * | (4 words) |
+ * | |
+ * +------------+
+ * | saved regs |
+ * | ... |
+ */
+
+
+#include <sys/syscall.h>
+
+ENTRY(sigsetjmp)
+ mr %r6,%r3
+ stw %r4,0(%r3)
+ or. %r7,%r4,%r4
+ beq 1f
+ li %r3,1 /* SIG_BLOCK, but doesn't matter */
+ /* since set == NULL */
+ li %r4,0 /* set = NULL */
+ mr %r5,%r6 /* &oset */
+ addi %r5,%r5,4
+ li %r0, SYS_sigprocmask /* sigprocmask(SIG_BLOCK, NULL, &oset)*/
+ sc /* assume no error XXX */
+1:
+ mflr %r11
+ mfcr %r12
+ mr %r10,%r1
+ mr %r9,%r2
+
+ std %r9,40 + 0*8(%r6)
+ stfd %f14,40 + 23*8(%r6)
+ std %r10,40 + 1*8(%r6)
+ stfd %f15,40 + 24*8(%r6)
+ std %r11,40 + 2*8(%r6)
+ stfd %f16,40 + 25*8(%r6)
+ std %r12,40 + 3*8(%r6)
+ stfd %f17,40 + 26*8(%r6)
+ std %r13,40 + 4*8(%r6)
+ stfd %f18,40 + 27*8(%r6)
+ std %r14,40 + 5*8(%r6)
+ stfd %f19,40 + 28*8(%r6)
+ std %r15,40 + 6*8(%r6)
+ stfd %f20,40 + 29*8(%r6)
+ std %r16,40 + 7*8(%r6)
+ stfd %f21,40 + 30*8(%r6)
+ std %r17,40 + 8*8(%r6)
+ stfd %f22,40 + 31*8(%r6)
+ std %r18,40 + 9*8(%r6)
+ stfd %f23,40 + 32*8(%r6)
+ std %r19,40 + 10*8(%r6)
+ stfd %f24,40 + 33*8(%r6)
+ std %r20,40 + 11*8(%r6)
+ stfd %f25,40 + 34*8(%r6)
+ std %r21,40 + 12*8(%r6)
+ stfd %f26,40 + 35*8(%r6)
+ std %r22,40 + 13*8(%r6)
+ stfd %f27,40 + 36*8(%r6)
+ std %r23,40 + 14*8(%r6)
+ stfd %f28,40 + 37*8(%r6)
+ std %r24,40 + 15*8(%r6)
+ stfd %f29,40 + 38*8(%r6)
+ std %r25,40 + 16*8(%r6)
+ stfd %f30,40 + 39*8(%r6)
+ std %r26,40 + 17*8(%r6)
+ stfd %f31,40 + 40*8(%r6)
+ std %r27,40 + 18*8(%r6)
+ std %r28,40 + 19*8(%r6)
+ std %r29,40 + 20*8(%r6)
+ std %r30,40 + 21*8(%r6)
+ std %r31,40 + 22*8(%r6)
+
+ li %r3,0
+ blr
+END(sigsetjmp)
+
+ENTRY(siglongjmp)
+ ld %r9,40 + 0*8(%r3)
+ lfd %f14,40 + 23*8(%r3)
+ ld %r10,40 + 1*8(%r3)
+ lfd %f15,40 + 24*8(%r3)
+ ld %r11,40 + 2*8(%r3)
+ lfd %f16,40 + 25*8(%r3)
+ ld %r12,40 + 3*8(%r3)
+ lfd %f17,40 + 26*8(%r3)
+ ld %r14,40 + 5*8(%r3)
+ lfd %f18,40 + 27*8(%r3)
+ ld %r15,40 + 6*8(%r3)
+ lfd %f19,40 + 28*8(%r3)
+ ld %r16,40 + 7*8(%r3)
+ lfd %f20,40 + 29*8(%r3)
+ ld %r17,40 + 8*8(%r3)
+ lfd %f21,40 + 30*8(%r3)
+ ld %r18,40 + 9*8(%r3)
+ lfd %f22,40 + 31*8(%r3)
+ ld %r19,40 + 10*8(%r3)
+ lfd %f23,40 + 32*8(%r3)
+ ld %r20,40 + 11*8(%r3)
+ lfd %f24,40 + 33*8(%r3)
+ ld %r21,40 + 12*8(%r3)
+ lfd %f25,40 + 34*8(%r3)
+ ld %r22,40 + 13*8(%r3)
+ lfd %f26,40 + 35*8(%r3)
+ ld %r23,40 + 14*8(%r3)
+ lfd %f27,40 + 36*8(%r3)
+ ld %r24,40 + 15*8(%r3)
+ lfd %f28,40 + 37*8(%r3)
+ ld %r25,40 + 16*8(%r3)
+ lfd %f29,40 + 38*8(%r3)
+ ld %r26,40 + 17*8(%r3)
+ lfd %f30,40 + 39*8(%r3)
+ ld %r27,40 + 18*8(%r3)
+ lfd %f31,40 + 40*8(%r3)
+ ld %r28,40 + 19*8(%r3)
+ ld %r29,40 + 20*8(%r3)
+ ld %r30,40 + 21*8(%r3)
+ ld %r31,40 + 22*8(%r3)
+
+ lwz %r7,0(%r3)
+ mr %r6,%r4
+ mtlr %r11
+ mtcr %r12
+ mr %r2,%r9
+ mr %r1,%r10
+ or. %r7,%r7,%r7
+ beq 1f
+ mr %r4,%r3
+ li %r3,3 /* SIG_SETMASK */
+ addi %r4,%r4,4 /* &set */
+ li %r5,0 /* oset = NULL */
+ li %r0,SYS_sigprocmask /* sigprocmask(SIG_SET, &set, NULL) */
+ sc /* assume no error XXX */
+1:
+ or. %r3,%r6,%r6
+ bnelr
+ li %r3,1
+ blr
+END(siglongjmp)
+
+ .section .note.GNU-stack,"",%progbits
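
sigsetjmp() above stores the savemask flag in the first word of the jmp_buf and captures the signal mask only when it is non-zero; siglongjmp() checks the same word before calling sigprocmask(SIG_SETMASK, ...). A small sketch of that behaviour (illustrative, not part of the commit):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    int
    main(void)
    {
        sigjmp_buf env;
        sigset_t set, cur;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);         /* block SIGUSR1 */

        if (sigsetjmp(env, 1) == 0) {               /* savemask != 0 */
            sigprocmask(SIG_UNBLOCK, &set, NULL);   /* change the mask... */
            siglongjmp(env, 1);                     /* ...and jump back */
        }

        /* savemask was non-zero, so SIGUSR1 is blocked again here. */
        sigprocmask(SIG_BLOCK, NULL, &cur);
        printf("SIGUSR1 blocked: %d\n", sigismember(&cur, SIGUSR1)); /* 1 */
        return (0);
    }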
diff --git a/lib/libc/powerpc64/gen/syncicache.c b/lib/libc/powerpc64/gen/syncicache.c
new file mode 100644
index 000000000000..7885a36bd1d1
--- /dev/null
+++ b/lib/libc/powerpc64/gen/syncicache.c
@@ -0,0 +1,100 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (C) 1995-1997, 1999 Wolfgang Solfrank.
+ * Copyright (C) 1995-1997, 1999 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: syncicache.c,v 1.2 1999/05/05 12:36:40 tsubai Exp $
+ */
+
+#include <sys/param.h>
+#if defined(_KERNEL) || defined(_STANDALONE)
+#include <sys/time.h>
+#include <sys/proc.h>
+#include <vm/vm.h>
+#endif
+#include <sys/sysctl.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+
+#ifdef _STANDALONE
+int cacheline_size = 32;
+#endif
+
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+#include <stdlib.h>
+
+int cacheline_size = 0;
+
+static void getcachelinesize(void);
+
+static void
+getcachelinesize()
+{
+ static int cachemib[] = { CTL_MACHDEP, CPU_CACHELINE };
+ long clen;
+
+ clen = sizeof(cacheline_size);
+
+ if (sysctl(cachemib, nitems(cachemib), &cacheline_size, &clen,
+ NULL, 0) < 0 || !cacheline_size) {
+ abort();
+ }
+}
+#endif
+
+void
+__syncicache(void *from, int len)
+{
+ off_t l, off;
+ char *p;
+
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+ if (!cacheline_size)
+ getcachelinesize();
+#endif
+
+ off = (uintptr_t)from & (cacheline_size - 1);
+ l = len += off;
+ p = (char *)from - off;
+
+ do {
+ __asm __volatile ("dcbst 0,%0" :: "r"(p));
+ p += cacheline_size;
+ } while ((l -= cacheline_size) > 0);
+ __asm __volatile ("sync");
+ p = (char *)from - off;
+ do {
+ __asm __volatile ("icbi 0,%0" :: "r"(p));
+ p += cacheline_size;
+ } while ((len -= cacheline_size) > 0);
+ __asm __volatile ("sync; isync");
+}
+
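
__syncicache() pushes dirty data-cache lines to memory (dcbst ... sync) and then invalidates the corresponding instruction-cache lines (icbi ... sync; isync), which is required after writing instructions and before executing them on PowerPC. A hedged sketch of the usual calling pattern (the extern declaration is repeated locally here rather than naming a specific header):

    #include <sys/mman.h>
    #include <string.h>

    /* Defined in libc on FreeBSD/powerpc64 (syncicache.c above). */
    extern void __syncicache(void *from, int len);

    /*
     * Copy already-encoded instructions into an executable buffer and make
     * them visible to the instruction-fetch path before jumping to them.
     */
    void *
    install_code(const void *insns, size_t len)
    {
        void *p;

        p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
            return (NULL);
        memcpy(p, insns, len);
        __syncicache(p, (int)len);
        return (p);
    }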
diff --git a/lib/libc/powerpc64/softfloat/milieu.h b/lib/libc/powerpc64/softfloat/milieu.h
new file mode 100644
index 000000000000..6139aa58b982
--- /dev/null
+++ b/lib/libc/powerpc64/softfloat/milieu.h
@@ -0,0 +1,48 @@
+/* $NetBSD: milieu.h,v 1.1 2000/12/29 20:13:54 bjh21 Exp $ */
+
+/*
+===============================================================================
+
+This C header file is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2a.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
+has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
+TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
+PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
+AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) they include prominent notice that the work is derivative, and (2) they
+include prominent notice akin to these four paragraphs for those parts of
+this code that are retained.
+
+===============================================================================
+*/
+
+/*
+-------------------------------------------------------------------------------
+Include common integer types and flags.
+-------------------------------------------------------------------------------
+*/
+#include "powerpc-gcc.h"
+
+/*
+-------------------------------------------------------------------------------
+Symbolic Boolean literals.
+-------------------------------------------------------------------------------
+*/
+enum {
+ FALSE = 0,
+ TRUE = 1
+};
diff --git a/lib/libc/powerpc64/softfloat/powerpc-gcc.h b/lib/libc/powerpc64/softfloat/powerpc-gcc.h
new file mode 100644
index 000000000000..d11198866e39
--- /dev/null
+++ b/lib/libc/powerpc64/softfloat/powerpc-gcc.h
@@ -0,0 +1,91 @@
+/* $NetBSD: arm-gcc.h,v 1.2 2001/02/21 18:09:25 bjh21 Exp $ */
+
+/*
+-------------------------------------------------------------------------------
+One of the macros `BIGENDIAN' or `LITTLEENDIAN' must be defined.
+-------------------------------------------------------------------------------
+*/
+#define BIGENDIAN
+
+/*
+-------------------------------------------------------------------------------
+The macro `BITS64' can be defined to indicate that 64-bit integer types are
+supported by the compiler.
+-------------------------------------------------------------------------------
+*/
+#define BITS64
+
+/*
+-------------------------------------------------------------------------------
+Each of the following `typedef's defines the most convenient type that holds
+integers of at least as many bits as specified. For example, `uint8' should
+be the most convenient type that can hold unsigned integers of as many as
+8 bits. The `flag' type must be able to hold either a 0 or 1. For most
+implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed
+to the same as `int'.
+-------------------------------------------------------------------------------
+*/
+typedef int flag;
+typedef unsigned int uint8;
+typedef int int8;
+typedef unsigned int uint16;
+typedef int int16;
+typedef unsigned int uint32;
+typedef signed int int32;
+#ifdef BITS64
+typedef unsigned long long int uint64;
+typedef signed long long int int64;
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Each of the following `typedef's defines a type that holds integers
+of _exactly_ the number of bits specified. For instance, for most
+implementation of C, `bits16' and `sbits16' should be `typedef'ed to
+`unsigned short int' and `signed short int' (or `short int'), respectively.
+-------------------------------------------------------------------------------
+*/
+typedef unsigned char bits8;
+typedef signed char sbits8;
+typedef unsigned short int bits16;
+typedef signed short int sbits16;
+typedef unsigned int bits32;
+typedef signed int sbits32;
+#ifdef BITS64
+typedef unsigned long long int bits64;
+typedef signed long long int sbits64;
+#endif
+
+#ifdef BITS64
+/*
+-------------------------------------------------------------------------------
+The `LIT64' macro takes as its argument a textual integer literal and
+if necessary ``marks'' the literal as having a 64-bit integer type.
+For example, the GNU C Compiler (`gcc') requires that 64-bit literals be
+appended with the letters `LL' standing for `long long', which is `gcc's
+name for the 64-bit integer type. Some compilers may allow `LIT64' to be
+defined as the identity macro: `#define LIT64( a ) a'.
+-------------------------------------------------------------------------------
+*/
+#define LIT64( a ) a##LL
+#endif
+
+/*
+-------------------------------------------------------------------------------
+The macro `INLINE' can be used before functions that should be inlined. If
+a compiler does not support explicit inlining, this macro should be defined
+to be `static'.
+-------------------------------------------------------------------------------
+*/
+#define INLINE static __inline
+
+/*
+-------------------------------------------------------------------------------
+The ARM FPA is odd in that it stores doubles high-order word first, no matter
+what the endianness of the CPU. VFP is sane.
+-------------------------------------------------------------------------------
+*/
+#if defined(SOFTFLOAT_FOR_GCC)
+#define FLOAT64_DEMANGLE(a) (a)
+#define FLOAT64_MANGLE(a) (a)
+#endif
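
Illustration only (not part of this commit): the LIT64 macro above is how softfloat spells 64-bit literals, and the width typedefs give them a home. A minimal, self-contained C sketch, with the two relevant definitions repeated inline so it compiles on its own:

#include <stdio.h>

typedef unsigned long long int bits64;	/* as defined above under BITS64 */
#define LIT64( a ) a##LL		/* mark the literal as 64-bit for gcc */

int
main(void)
{
	bits64 sign_mask = LIT64( 0x8000000000000000 );

	printf("binary64 sign mask = %016llx\n", (unsigned long long)sign_mask);
	return (0);
}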
diff --git a/lib/libc/powerpc64/softfloat/softfloat.h b/lib/libc/powerpc64/softfloat/softfloat.h
new file mode 100644
index 000000000000..b20cb3e7aa00
--- /dev/null
+++ b/lib/libc/powerpc64/softfloat/softfloat.h
@@ -0,0 +1,306 @@
+/* $NetBSD: softfloat.h,v 1.6 2002/05/12 13:12:46 bjh21 Exp $ */
+
+/* This is a derivative work. */
+
+/*
+===============================================================================
+
+This C header file is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2a.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
+has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
+TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
+PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
+AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) they include prominent notice that the work is derivative, and (2) they
+include prominent notice akin to these four paragraphs for those parts of
+this code that are retained.
+
+===============================================================================
+*/
+
+/*
+-------------------------------------------------------------------------------
+The macro `FLOATX80' must be defined to enable the extended double-precision
+floating-point format `floatx80'. If this macro is not defined, the
+`floatx80' type will not be defined, and none of the functions that either
+input or output the `floatx80' type will be defined. The same applies to
+the `FLOAT128' macro and the quadruple-precision format `float128'.
+-------------------------------------------------------------------------------
+*/
+/* #define FLOATX80 */
+/* #define FLOAT128 */
+
+#include <machine/ieeefp.h>
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE floating-point types.
+-------------------------------------------------------------------------------
+*/
+typedef unsigned int float32;
+typedef unsigned long long float64;
+#ifdef FLOATX80
+typedef struct {
+ unsigned short high;
+ unsigned long long low;
+} floatx80;
+#endif
+#ifdef FLOAT128
+typedef struct {
+ unsigned long long high, low;
+} float128;
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE floating-point underflow tininess-detection mode.
+-------------------------------------------------------------------------------
+*/
+#ifndef SOFTFLOAT_FOR_GCC
+extern int8 float_detect_tininess;
+#endif
+enum {
+ float_tininess_after_rounding = 0,
+ float_tininess_before_rounding = 1
+};
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE floating-point rounding mode.
+-------------------------------------------------------------------------------
+*/
+extern fp_rnd_t float_rounding_mode;
+enum {
+ float_round_nearest_even = FP_RN,
+ float_round_to_zero = FP_RZ,
+ float_round_down = FP_RM,
+ float_round_up = FP_RP
+};
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE floating-point exception flags.
+-------------------------------------------------------------------------------
+*/
+typedef fp_except_t fp_except;
+
+extern fp_except float_exception_flags;
+extern fp_except float_exception_mask;
+enum {
+ float_flag_inexact = FP_X_IMP,
+ float_flag_underflow = FP_X_UFL,
+ float_flag_overflow = FP_X_OFL,
+ float_flag_divbyzero = FP_X_DZ,
+ float_flag_invalid = FP_X_INV
+};
+
+/*
+-------------------------------------------------------------------------------
+Routine to raise any or all of the software IEC/IEEE floating-point
+exception flags.
+-------------------------------------------------------------------------------
+*/
+void float_raise( fp_except );
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE integer-to-floating-point conversion routines.
+-------------------------------------------------------------------------------
+*/
+float32 int32_to_float32( int );
+float64 int32_to_float64( int );
+#ifdef FLOATX80
+floatx80 int32_to_floatx80( int );
+#endif
+#ifdef FLOAT128
+float128 int32_to_float128( int );
+#endif
+float32 int64_to_float32( long long );
+float64 int64_to_float64( long long );
+#ifdef FLOATX80
+floatx80 int64_to_floatx80( long long );
+#endif
+#ifdef FLOAT128
+float128 int64_to_float128( long long );
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE single-precision conversion routines.
+-------------------------------------------------------------------------------
+*/
+int float32_to_int32( float32 );
+int float32_to_int32_round_to_zero( float32 );
+unsigned int float32_to_uint32_round_to_zero( float32 );
+long long float32_to_int64( float32 );
+long long float32_to_int64_round_to_zero( float32 );
+float64 float32_to_float64( float32 );
+#ifdef FLOATX80
+floatx80 float32_to_floatx80( float32 );
+#endif
+#ifdef FLOAT128
+float128 float32_to_float128( float32 );
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE single-precision operations.
+-------------------------------------------------------------------------------
+*/
+float32 float32_round_to_int( float32 );
+float32 float32_add( float32, float32 );
+float32 float32_sub( float32, float32 );
+float32 float32_mul( float32, float32 );
+float32 float32_div( float32, float32 );
+float32 float32_rem( float32, float32 );
+float32 float32_sqrt( float32 );
+int float32_eq( float32, float32 );
+int float32_le( float32, float32 );
+int float32_lt( float32, float32 );
+int float32_eq_signaling( float32, float32 );
+int float32_le_quiet( float32, float32 );
+int float32_lt_quiet( float32, float32 );
+#ifndef SOFTFLOAT_FOR_GCC
+int float32_is_signaling_nan( float32 );
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE double-precision conversion routines.
+-------------------------------------------------------------------------------
+*/
+int float64_to_int32( float64 );
+int float64_to_int32_round_to_zero( float64 );
+unsigned int float64_to_uint32_round_to_zero( float64 );
+long long float64_to_int64( float64 );
+long long float64_to_int64_round_to_zero( float64 );
+float32 float64_to_float32( float64 );
+#ifdef FLOATX80
+floatx80 float64_to_floatx80( float64 );
+#endif
+#ifdef FLOAT128
+float128 float64_to_float128( float64 );
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE double-precision operations.
+-------------------------------------------------------------------------------
+*/
+float64 float64_round_to_int( float64 );
+float64 float64_add( float64, float64 );
+float64 float64_sub( float64, float64 );
+float64 float64_mul( float64, float64 );
+float64 float64_div( float64, float64 );
+float64 float64_rem( float64, float64 );
+float64 float64_sqrt( float64 );
+int float64_eq( float64, float64 );
+int float64_le( float64, float64 );
+int float64_lt( float64, float64 );
+int float64_eq_signaling( float64, float64 );
+int float64_le_quiet( float64, float64 );
+int float64_lt_quiet( float64, float64 );
+#ifndef SOFTFLOAT_FOR_GCC
+int float64_is_signaling_nan( float64 );
+#endif
+
+#ifdef FLOATX80
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE extended double-precision conversion routines.
+-------------------------------------------------------------------------------
+*/
+int floatx80_to_int32( floatx80 );
+int floatx80_to_int32_round_to_zero( floatx80 );
+long long floatx80_to_int64( floatx80 );
+long long floatx80_to_int64_round_to_zero( floatx80 );
+float32 floatx80_to_float32( floatx80 );
+float64 floatx80_to_float64( floatx80 );
+#ifdef FLOAT128
+float128 floatx80_to_float128( floatx80 );
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE extended double-precision rounding precision. Valid
+values are 32, 64, and 80.
+-------------------------------------------------------------------------------
+*/
+extern int floatx80_rounding_precision;
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE extended double-precision operations.
+-------------------------------------------------------------------------------
+*/
+floatx80 floatx80_round_to_int( floatx80 );
+floatx80 floatx80_add( floatx80, floatx80 );
+floatx80 floatx80_sub( floatx80, floatx80 );
+floatx80 floatx80_mul( floatx80, floatx80 );
+floatx80 floatx80_div( floatx80, floatx80 );
+floatx80 floatx80_rem( floatx80, floatx80 );
+floatx80 floatx80_sqrt( floatx80 );
+int floatx80_eq( floatx80, floatx80 );
+int floatx80_le( floatx80, floatx80 );
+int floatx80_lt( floatx80, floatx80 );
+int floatx80_eq_signaling( floatx80, floatx80 );
+int floatx80_le_quiet( floatx80, floatx80 );
+int floatx80_lt_quiet( floatx80, floatx80 );
+int floatx80_is_signaling_nan( floatx80 );
+
+#endif
+
+#ifdef FLOAT128
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE quadruple-precision conversion routines.
+-------------------------------------------------------------------------------
+*/
+int float128_to_int32( float128 );
+int float128_to_int32_round_to_zero( float128 );
+long long float128_to_int64( float128 );
+long long float128_to_int64_round_to_zero( float128 );
+float32 float128_to_float32( float128 );
+float64 float128_to_float64( float128 );
+#ifdef FLOATX80
+floatx80 float128_to_floatx80( float128 );
+#endif
+
+/*
+-------------------------------------------------------------------------------
+Software IEC/IEEE quadruple-precision operations.
+-------------------------------------------------------------------------------
+*/
+float128 float128_round_to_int( float128 );
+float128 float128_add( float128, float128 );
+float128 float128_sub( float128, float128 );
+float128 float128_mul( float128, float128 );
+float128 float128_div( float128, float128 );
+float128 float128_rem( float128, float128 );
+float128 float128_sqrt( float128 );
+int float128_eq( float128, float128 );
+int float128_le( float128, float128 );
+int float128_lt( float128, float128 );
+int float128_eq_signaling( float128, float128 );
+int float128_le_quiet( float128, float128 );
+int float128_lt_quiet( float128, float128 );
+int float128_is_signaling_nan( float128 );
+
+#endif
+
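Illustration only (not part of this commit): the float32/float64 types declared above are plain integers carrying raw IEEE-754 bit patterns, so moving a value between a hardware double and a float64 is just a bit copy; entry points such as float64_add() then operate on those bits. A minimal sketch of the representation:

#include <stdio.h>
#include <string.h>

typedef unsigned long long float64;	/* same layout as in softfloat.h above */

static float64
double_to_float64_bits(double d)
{
	float64 bits;

	memcpy(&bits, &d, sizeof(bits));	/* raw IEEE-754 bit pattern */
	return (bits);
}

int
main(void)
{
	/* 1.0 is 0x3ff0000000000000 in IEEE-754 binary64. */
	printf("%016llx\n", (unsigned long long)double_to_float64_bits(1.0));
	return (0);
}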
diff --git a/lib/libc/powerpc64/string/Makefile.inc b/lib/libc/powerpc64/string/Makefile.inc
new file mode 100644
index 000000000000..c9d7c9d71676
--- /dev/null
+++ b/lib/libc/powerpc64/string/Makefile.inc
@@ -0,0 +1,16 @@
+MDSRCS+= \
+ bcopy.S \
+ bcopy_vsx.S \
+ bcopy_resolver.c \
+ memcpy.S \
+ memcpy_vsx.S \
+ memcpy_resolver.c \
+ memmove.S \
+ memmove_vsx.S \
+ memmove_resolver.c \
+ strncpy_arch_2_05.S \
+ strncpy.c \
+ strncpy_resolver.c \
+ strcpy_arch_2_05.S \
+ strcpy.c \
+ strcpy_resolver.c
diff --git a/lib/libc/powerpc64/string/bcopy.S b/lib/libc/powerpc64/string/bcopy.S
new file mode 100644
index 000000000000..6f6223214e26
--- /dev/null
+++ b/lib/libc/powerpc64/string/bcopy.S
@@ -0,0 +1,338 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#define BLOCK_SIZE_BITS 6
+#define BLOCK_SIZE (1 << BLOCK_SIZE_BITS)
+#define BLOCK_SIZE_MASK (BLOCK_SIZE - 1)
+
+/* Minimum 8 byte alignment, to avoid cache-inhibited alignment faults. */
+#ifndef ALIGN_MASK
+#define ALIGN_MASK 0x7
+#endif
+
+#define MULTI_PHASE_THRESHOLD 512
+
+#ifndef FN_NAME
+#ifdef MEMMOVE
+#define FN_NAME __memmove
+WEAK_REFERENCE(__memmove, memmove);
+#else
+#define FN_NAME __bcopy
+WEAK_REFERENCE(__bcopy, bcopy);
+#endif
+#endif
+
+/*
+ * r3: dst
+ * r4: src
+ * r5: len
+ */
+
+ENTRY(FN_NAME)
+ cmpld %r3, %r4 /* src == dst? nothing to do */
+ beqlr-
+ cmpdi %r5, 0 /* len == 0? nothing to do */
+ beqlr-
+
+#ifdef MEMMOVE
+ std %r3, -8(%r1) /* save dst */
+#else /* bcopy: swap src/dst */
+ mr %r0, %r3
+ mr %r3, %r4
+ mr %r4, %r0
+#endif
+
+	/* First check for relative alignment; if unaligned, copy one byte at a time */
+ andi. %r8, %r3, ALIGN_MASK
+ andi. %r7, %r4, ALIGN_MASK
+ cmpd %r7, %r8
+ bne .Lunaligned
+
+
+ cmpldi %r5, MULTI_PHASE_THRESHOLD
+ bge .Lmulti_phase
+ b .Lfast_copy
+
+.Lunaligned:
+ /* forward or backward copy? */
+ cmpd %r4, %r3
+ blt .Lbackward_unaligned
+
+ /* Just need to setup increment and jump to copy */
+ li %r0, 1
+ mtctr %r5
+ b .Lsingle_1_loop
+
+.Lbackward_unaligned:
+ /* advance src and dst to last byte, set decrement and jump to copy */
+ add %r3, %r3, %r5
+ addi %r3, %r3, -1
+ add %r4, %r4, %r5
+ addi %r4, %r4, -1
+ li %r0, -1
+ mtctr %r5
+ b .Lsingle_1_loop
+
+.Lfast_copy:
+ /* align src */
+ cmpd %r4, %r3 /* forward or backward copy? */
+ blt .Lbackward_align
+
+ .align 5
+.Lalign:
+ andi. %r0, %r4, 15
+ beq .Lsingle_copy
+ lbz %r0, 0(%r4)
+ addi %r4, %r4, 1
+ stb %r0, 0(%r3)
+ addi %r3, %r3, 1
+ addi %r5, %r5, -1
+ cmpdi %r5, 0
+ beq- .Ldone
+ b .Lalign
+
+.Lbackward_align:
+ /* advance src and dst to end (past last byte) */
+ add %r3, %r3, %r5
+ add %r4, %r4, %r5
+ .align 5
+.Lbackward_align_loop:
+ andi. %r0, %r4, 15
+ beq .Lbackward_single_copy
+ lbzu %r0, -1(%r4)
+ addi %r5, %r5, -1
+ stbu %r0, -1(%r3)
+ cmpdi %r5, 0
+ beq- .Ldone
+ b .Lbackward_align_loop
+
+.Lsingle_copy:
+ /* forward copy */
+ li %r0, 1
+ li %r8, 16
+ li %r9, 0
+ b .Lsingle_phase
+
+.Lbackward_single_copy:
+ /* backward copy */
+ li %r0, -1
+ li %r8, -16
+ li %r9, -15
+ /* point src and dst to last byte */
+ addi %r3, %r3, -1
+ addi %r4, %r4, -1
+
+.Lsingle_phase:
+ srdi. %r6, %r5, 4 /* number of 16-bytes */
+ beq .Lsingle_1
+
+ /* pre-adjustment */
+ add %r3, %r3, %r9
+ add %r4, %r4, %r9
+
+ mtctr %r6
+ .align 5
+.Lsingle_16_loop:
+ ld %r6, 0(%r4)
+ ld %r7, 8(%r4)
+ add %r4, %r4, %r8
+ std %r6, 0(%r3)
+ std %r7, 8(%r3)
+ add %r3, %r3, %r8
+ bdnz .Lsingle_16_loop
+
+ /* post-adjustment */
+ sub %r3, %r3, %r9
+ sub %r4, %r4, %r9
+
+.Lsingle_1:
+ andi. %r6, %r5, 0x0f /* number of 1-bytes */
+ beq .Ldone /* 1-bytes == 0? done */
+
+ mtctr %r6
+ .align 5
+.Lsingle_1_loop:
+ lbz %r6, 0(%r4)
+ add %r4, %r4, %r0 /* increment */
+ stb %r6, 0(%r3)
+ add %r3, %r3, %r0 /* increment */
+ bdnz .Lsingle_1_loop
+
+.Ldone:
+#ifdef MEMMOVE
+ ld %r3, -8(%r1) /* restore dst */
+#endif
+ blr
+
+
+.Lmulti_phase:
+ /* set up multi-phase copy parameters */
+
+ /* r7 = bytes before the aligned section of the buffer */
+ andi. %r6, %r4, 15
+ subfic %r7, %r6, 16
+ /* r8 = bytes in and after the aligned section of the buffer */
+ sub %r8, %r5, %r7
+ /* r9 = bytes after the aligned section of the buffer */
+ andi. %r9, %r8, BLOCK_SIZE_MASK
+ /* r10 = BLOCKS in the aligned section of the buffer */
+ srdi %r10, %r8, BLOCK_SIZE_BITS
+
+ /* forward or backward copy? */
+ cmpd %r4, %r3
+ blt .Lbackward_multi_copy
+
+ /* set up forward copy parameters */
+ std %r7, -32(%r1) /* bytes to copy in phase 1 */
+ std %r10, -40(%r1) /* BLOCKS to copy in phase 2 */
+ std %r9, -48(%r1) /* bytes to copy in phase 3 */
+
+ li %r0, 1 /* increment for phases 1 and 3 */
+ li %r5, BLOCK_SIZE /* increment for phase 2 */
+
+ /* op offsets for phase 2 */
+ li %r7, 0
+ li %r8, 16
+ li %r9, 32
+ li %r10, 48
+
+ std %r8, -16(%r1) /* 16-byte increment (16) */
+ std %r7, -24(%r1) /* 16-byte pre/post adjustment (0) */
+
+ b .Lphase1
+
+.Lbackward_multi_copy:
+ /* set up backward copy parameters */
+ std %r9, -32(%r1) /* bytes to copy in phase 1 */
+ std %r10, -40(%r1) /* BLOCKS to copy in phase 2 */
+ std %r7, -48(%r1) /* bytes to copy in phase 3 */
+
+ li %r0, -1 /* increment for phases 1 and 3 */
+ add %r6, %r5, %r0 /* r6 = len - 1 */
+ li %r5, -BLOCK_SIZE /* increment for phase 2 */
+ /* advance src and dst to the last position */
+ add %r3, %r3, %r6
+ add %r4, %r4, %r6
+
+ /* op offsets for phase 2 */
+ li %r7, -15
+ li %r8, -31
+ li %r9, -47
+ li %r10, -63
+
+ add %r6, %r7, %r0 /* r6 = -16 */
+ std %r6, -16(%r1) /* 16-byte increment (-16) */
+ std %r7, -24(%r1) /* 16-byte pre/post adjustment (-15) */
+
+.Lphase1:
+ ld %r6, -32(%r1) /* bytes to copy in phase 1 */
+ cmpldi %r6, 0 /* r6 == 0? skip phase 1 */
+ beq+ .Lphase2
+
+ mtctr %r6
+ .align 5
+.Lphase1_loop:
+ lbz %r6, 0(%r4)
+ add %r4, %r4, %r0 /* phase 1 increment */
+ stb %r6, 0(%r3)
+ add %r3, %r3, %r0 /* phase 1 increment */
+ bdnz .Lphase1_loop
+
+.Lphase2:
+ ld %r6, -40(%r1) /* BLOCKS to copy in phase 2 */
+ cmpldi %r6, 0 /* %r6 == 0? skip phase 2 */
+ beq .Lphase3
+
+#ifdef FN_PHASE2
+FN_PHASE2
+#else
+ /* save registers */
+ std %r14, -56(%r1)
+ std %r15, -64(%r1)
+ std %r16, -72(%r1)
+ std %r17, -80(%r1)
+ std %r18, -88(%r1)
+ std %r19, -96(%r1)
+ std %r20, -104(%r1)
+ std %r21, -112(%r1)
+
+ addi %r18, %r7, 8
+ addi %r19, %r8, 8
+ addi %r20, %r9, 8
+ addi %r21, %r10, 8
+
+ mtctr %r6
+ .align 5
+.Lphase2_loop:
+ ldx %r14, %r7, %r4
+ ldx %r15, %r18, %r4
+ ldx %r16, %r8, %r4
+ ldx %r17, %r19, %r4
+ stdx %r14, %r7, %r3
+ stdx %r15, %r18, %r3
+ stdx %r16, %r8, %r3
+ stdx %r17, %r19, %r3
+
+ ldx %r14, %r9, %r4
+ ldx %r15, %r20, %r4
+ ldx %r16, %r10, %r4
+ ldx %r17, %r21, %r4
+ stdx %r14, %r9, %r3
+ stdx %r15, %r20, %r3
+ stdx %r16, %r10, %r3
+ stdx %r17, %r21, %r3
+
+ add %r4, %r4, %r5 /* phase 2 increment */
+ add %r3, %r3, %r5 /* phase 2 increment */
+
+ bdnz .Lphase2_loop
+
+ /* restore registers */
+ ld %r14, -56(%r1)
+ ld %r15, -64(%r1)
+ ld %r16, -72(%r1)
+ ld %r17, -80(%r1)
+ ld %r18, -88(%r1)
+ ld %r19, -96(%r1)
+ ld %r20, -104(%r1)
+ ld %r21, -112(%r1)
+#endif
+
+.Lphase3:
+ /* load registers for transitioning into the single-phase logic */
+ ld %r5, -48(%r1) /* bytes to copy in phase 3 */
+ ld %r8, -16(%r1) /* 16-byte increment */
+ ld %r9, -24(%r1) /* 16-byte pre/post adjustment */
+ b .Lsingle_phase
+
+END(FN_NAME)
+
+ .section .note.GNU-stack,"",%progbits
+
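A rough C sketch (illustration only, not part of this commit) of the strategy the multi-phase path above uses for large, mutually aligned buffers: phase 1 copies bytes until the source reaches 16-byte alignment, phase 2 moves BLOCK_SIZE-byte blocks, and phase 3 falls through to the single-phase tail copy. The assembly also handles overlapping backward copies and the bcopy argument order, which this forward-only sketch omits:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_SIZE	64	/* matches BLOCK_SIZE_BITS = 6 above */

static void
forward_copy_sketch(unsigned char *dst, const unsigned char *src, size_t len)
{
	/* Phase 1: byte copies until src reaches 16-byte alignment. */
	while (len > 0 && ((uintptr_t)src & 15) != 0) {
		*dst++ = *src++;
		len--;
	}

	/* Phase 2: whole BLOCK_SIZE-byte blocks. */
	while (len >= BLOCK_SIZE) {
		for (int i = 0; i < BLOCK_SIZE; i++)
			dst[i] = src[i];
		dst += BLOCK_SIZE;
		src += BLOCK_SIZE;
		len -= BLOCK_SIZE;
	}

	/* Phase 3: remaining tail bytes. */
	while (len-- > 0)
		*dst++ = *src++;
}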
diff --git a/lib/libc/powerpc64/string/bcopy_resolver.c b/lib/libc/powerpc64/string/bcopy_resolver.c
new file mode 100644
index 000000000000..c99c53b2f9b3
--- /dev/null
+++ b/lib/libc/powerpc64/string/bcopy_resolver.c
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/cpu.h>
+#include <machine/ifunc.h>
+
+#define _CAT(a,b) a##b
+#define CAT(a,b) _CAT(a,b)
+#define CAT3(a,b,c) CAT(CAT(a,b),c)
+
+#ifdef MEMCOPY
+#define FN_NAME memcpy
+#define FN_RET void *
+#define FN_PARAMS (void *dst, const void *src, size_t len)
+
+#elif defined(MEMMOVE)
+#define FN_NAME memmove
+#define FN_RET void *
+#define FN_PARAMS (void *dst, const void *src, size_t len)
+
+#else
+#define FN_NAME bcopy
+#define FN_RET void
+#define FN_PARAMS (const void *src, void *dst, size_t len)
+#endif
+
+#define FN_NAME_NOVSX CAT(__, FN_NAME)
+#define FN_NAME_VSX CAT3(__, FN_NAME, _vsx)
+
+FN_RET FN_NAME_NOVSX FN_PARAMS;
+FN_RET FN_NAME_VSX FN_PARAMS;
+
+DEFINE_UIFUNC(, FN_RET, FN_NAME, FN_PARAMS)
+{
+	/* VSX instructions were added in POWER ISA 2.06;
+	 * however, they require data to be word-aligned.
+	 * Since POWER ISA 2.07B this is handled transparently
+	 * by the hardware.
+	 */
+ if (cpu_features & PPC_FEATURE_HAS_VSX)
+ return (FN_NAME_VSX);
+ else
+ return (FN_NAME_NOVSX);
+}
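Conceptually (illustration only, not part of this commit), the DEFINE_UIFUNC resolver above just picks one of two implementations once, based on a CPU feature bit, and the dynamic linker binds the chosen function at load time. A plain function-pointer version of the same selection, with hypothetical names and a stand-in feature constant:

#include <stdio.h>

#define FAKE_FEATURE_HAS_VSX	0x1	/* stand-in for PPC_FEATURE_HAS_VSX */

static void copy_scalar(void) { printf("scalar copy\n"); }
static void copy_vsx(void)    { printf("VSX copy\n"); }

typedef void (*copy_fn)(void);

/* Selects the implementation exactly once, as the resolver does. */
static copy_fn
select_copy(unsigned long cpu_features)
{
	return ((cpu_features & FAKE_FEATURE_HAS_VSX) ? copy_vsx : copy_scalar);
}

int
main(void)
{
	copy_fn copy = select_copy(FAKE_FEATURE_HAS_VSX);

	copy();
	return (0);
}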
diff --git a/lib/libc/powerpc64/string/bcopy_vsx.S b/lib/libc/powerpc64/string/bcopy_vsx.S
new file mode 100644
index 000000000000..f1740d3db7b5
--- /dev/null
+++ b/lib/libc/powerpc64/string/bcopy_vsx.S
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef FN_NAME
+#define FN_NAME __bcopy_vsx
+#endif
+
+/*
+ * r3: dst
+ * r4: src
+ * r5: block increment
+ * r6: blocks to copy
+ * r7/r8/r9/r10: 16-byte offsets to copy
+ */
+
+#define FN_PHASE2 \
+ mtctr %r6 ;\
+ .align 5 ;\
+.Lphase2_loop: ;\
+ lxvd2x %vs6, %r7, %r4 ;\
+ lxvd2x %vs7, %r8, %r4 ;\
+ lxvd2x %vs8, %r9, %r4 ;\
+ lxvd2x %vs9, %r10, %r4 ;\
+ stxvd2x %vs6, %r7, %r3 ;\
+ stxvd2x %vs7, %r8, %r3 ;\
+ stxvd2x %vs8, %r9, %r3 ;\
+ stxvd2x %vs9, %r10, %r3 ;\
+ /* phase 2 increment */ ;\
+ add %r4, %r4, %r5 ;\
+ add %r3, %r3, %r5 ;\
+ \
+ bdnz .Lphase2_loop ;\
+
+#include "bcopy.S"
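For reference (illustration only, not part of this commit): the FN_PHASE2 block above performs four unaligned 16-byte VSX loads and stores per iteration. Roughly the same 64-byte block move expressed with the vector built-ins from <altivec.h>, assuming a PowerPC compiler built with VSX support (e.g. -mvsx) that provides vec_xl/vec_xst:

#include <altivec.h>

static void
copy_block64_vsx(unsigned char *dst, const unsigned char *src)
{
	/* Four unaligned 16-byte loads... */
	vector unsigned char v0 = vec_xl(0, src);
	vector unsigned char v1 = vec_xl(16, src);
	vector unsigned char v2 = vec_xl(32, src);
	vector unsigned char v3 = vec_xl(48, src);

	/* ...and the matching stores. */
	vec_xst(v0, 0, dst);
	vec_xst(v1, 16, dst);
	vec_xst(v2, 32, dst);
	vec_xst(v3, 48, dst);
}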
diff --git a/lib/libc/powerpc64/string/memcpy.S b/lib/libc/powerpc64/string/memcpy.S
new file mode 100644
index 000000000000..28f3f2bb0ab4
--- /dev/null
+++ b/lib/libc/powerpc64/string/memcpy.S
@@ -0,0 +1,131 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#ifndef FN_NAME
+#define FN_NAME __memcpy
+WEAK_REFERENCE(__memcpy, memcpy);
+#define BLOCK_BITS 4
+#endif
+
+#define BLOCK_BYTES (1 << BLOCK_BITS)
+#define BLOCK_MASK (BLOCK_BYTES - 1)
+
+/* Minimum 8 byte alignment, to avoid cache-inhibited alignment faults. */
+#ifndef ALIGN_MASK
+#define ALIGN_MASK 0x7
+#endif
+
+/*
+ * r3: dst
+ * r4: src
+ * r5: len
+ */
+ENTRY(FN_NAME)
+ cmpdi %r5, 0 /* len == 0? nothing to do */
+ beqlr-
+
+ /* If src and dst are relatively misaligned, do byte copies. */
+ andi. %r8, %r3, ALIGN_MASK
+ andi. %r7, %r4, ALIGN_MASK
+ cmpd %r8, %r7
+ mr %r7, %r5
+ mr %r8, %r3 /* save dst */
+ bne .Lcopy_remaining_fix_index_byte
+
+ /* align src */
+.Lalignment_loop:
+ lbz %r6, 0(%r4)
+ stb %r6, 0(%r3)
+ addi %r3, %r3, 1
+ addi %r4, %r4, 1
+ addi %r5, %r5, -1
+ cmpdi %r5, 0
+ beq .Lexit
+ andi. %r0, %r4, BLOCK_MASK
+ bne .Lalignment_loop
+
+ /* r7 = remaining, non-block, bytes */
+ andi. %r7, %r5, BLOCK_MASK
+
+ /* Check if there are blocks of BLOCK_BYTES to be copied */
+ xor. %r5, %r5, %r7
+ beq .Lcopy_remaining_fix_index_byte
+
+#ifdef FN_COPY_LOOP
+FN_COPY_LOOP
+#else
+ /* Setup to copy word with ldu and stdu */
+ ld %r6, 0(%r4)
+ ld %r9, 8(%r4)
+ std %r6, 0(%r3)
+ std %r9, 8(%r3)
+ addi %r5, %r5, -BLOCK_BYTES
+ cmpd %r5, 0
+ beq .Lcopy_remaining_fix_index_word
+
+ srdi %r5, %r5, BLOCK_BITS
+ mtctr %r5
+.Lcopy_word:
+ ldu %r6, 16(%r4)
+ ld %r9, 8(%r4)
+ stdu %r6, 16(%r3)
+ std %r9, 8(%r3)
+ bdnz .Lcopy_word
+
+.Lcopy_remaining_fix_index_word:
+ /* Check if there are remaining bytes */
+ cmpd %r7, 0
+ beq .Lexit
+ addi %r3, %r3, BLOCK_MASK
+ addi %r4, %r4, BLOCK_MASK
+ b .Lcopy_remaining
+#endif
+
+.Lcopy_remaining_fix_index_byte:
+ addi %r4, %r4, -1
+ addi %r3, %r3, -1
+
+ /* Copy remaining bytes */
+.Lcopy_remaining:
+ mtctr %r7
+.Lcopy_remaining_loop:
+ lbzu %r6, 1(%r4)
+ stbu %r6, 1(%r3)
+ bdnz .Lcopy_remaining_loop
+
+.Lexit:
+ /* Restore dst */
+ mr %r3, %r8
+ blr
+
+END(FN_NAME)
+
+ .section .note.GNU-stack,"",%progbits
+
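A small sketch (illustration only, not part of this commit) of the length bookkeeping done above: the non-block tail count is split off with BLOCK_MASK and kept in r7, and the rest of the length becomes the number of whole blocks fed to the CTR-driven copy loop:

#include <stddef.h>

#define BLOCK_BITS	4
#define BLOCK_BYTES	(1 << BLOCK_BITS)
#define BLOCK_MASK	(BLOCK_BYTES - 1)

static void
split_len(size_t len, size_t *blocks, size_t *tail)
{
	*tail = len & BLOCK_MASK;		/* r7 in the assembly above */
	*blocks = (len - *tail) >> BLOCK_BITS;	/* whole 16-byte blocks to copy */
}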
diff --git a/lib/libc/powerpc64/string/memcpy_resolver.c b/lib/libc/powerpc64/string/memcpy_resolver.c
new file mode 100644
index 000000000000..d1ac75edf5b4
--- /dev/null
+++ b/lib/libc/powerpc64/string/memcpy_resolver.c
@@ -0,0 +1,3 @@
+
+#define MEMCOPY
+#include "bcopy_resolver.c"
diff --git a/lib/libc/powerpc64/string/memcpy_vsx.S b/lib/libc/powerpc64/string/memcpy_vsx.S
new file mode 100644
index 000000000000..69554e026f35
--- /dev/null
+++ b/lib/libc/powerpc64/string/memcpy_vsx.S
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define FN_NAME __memcpy_vsx
+#define BLOCK_BITS 6
+#define ALIGN_MASK 0xf
+
+/*
+ * r5: bytes to copy (multiple of BLOCK_BYTES)
+ *
+ */
+#define FN_COPY_LOOP \
+ /* Load CTR with number of blocks */ \
+ srdi %r5, %r5, BLOCK_BITS ;\
+ mtctr %r5 ;\
+ /* Prepare indexes to load and store data */ \
+ xor %r6, %r6, %r6 ;\
+ li %r9, 16 ;\
+ li %r10, 32 ;\
+ li %r11, 48 ;\
+.Lcopy_vsx_loop: \
+ lxvd2x %vs6, %r6, %r4 ;\
+ lxvd2x %vs7, %r9, %r4 ;\
+ lxvd2x %vs8, %r10, %r4 ;\
+ lxvd2x %vs9, %r11, %r4 ;\
+ stxvd2x %vs6, %r6, %r3 ;\
+ stxvd2x %vs7, %r9, %r3 ;\
+ stxvd2x %vs8, %r10, %r3 ;\
+ stxvd2x %vs9, %r11, %r3 ;\
+ \
+ addi %r3, %r3, BLOCK_BYTES ;\
+ addi %r4, %r4, BLOCK_BYTES ;\
+ bdnz .Lcopy_vsx_loop ;\
+ \
+	/* Check if there are remaining bytes */	\
+ cmpd %r7, 0 ;\
+ beq .Lexit ;\
+
+#include "memcpy.S"
diff --git a/lib/libc/powerpc64/string/memmove.S b/lib/libc/powerpc64/string/memmove.S
new file mode 100644
index 000000000000..3d49c57100df
--- /dev/null
+++ b/lib/libc/powerpc64/string/memmove.S
@@ -0,0 +1,3 @@
+
+#define MEMMOVE
+#include "bcopy.S"
diff --git a/lib/libc/powerpc64/string/memmove_resolver.c b/lib/libc/powerpc64/string/memmove_resolver.c
new file mode 100644
index 000000000000..fcb11ee0ae43
--- /dev/null
+++ b/lib/libc/powerpc64/string/memmove_resolver.c
@@ -0,0 +1,3 @@
+
+#define MEMMOVE
+#include "bcopy_resolver.c"
diff --git a/lib/libc/powerpc64/string/memmove_vsx.S b/lib/libc/powerpc64/string/memmove_vsx.S
new file mode 100644
index 000000000000..9e7d51ce4683
--- /dev/null
+++ b/lib/libc/powerpc64/string/memmove_vsx.S
@@ -0,0 +1,4 @@
+
+#define MEMMOVE
+#define FN_NAME __memmove_vsx
+#include "bcopy_vsx.S"
diff --git a/lib/libc/powerpc64/string/strcpy.c b/lib/libc/powerpc64/string/strcpy.c
new file mode 100644
index 000000000000..d0be3ca468a0
--- /dev/null
+++ b/lib/libc/powerpc64/string/strcpy.c
@@ -0,0 +1,30 @@
+/*-
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#define WEAK_STRCPY
+#include "../../string/strcpy.c"
diff --git a/lib/libc/powerpc64/string/strcpy_arch_2_05.S b/lib/libc/powerpc64/string/strcpy_arch_2_05.S
new file mode 100644
index 000000000000..36728fa0b40f
--- /dev/null
+++ b/lib/libc/powerpc64/string/strcpy_arch_2_05.S
@@ -0,0 +1,202 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <machine/asm.h>
+#if 0
+ RCSID("$NetBSD: strcpy.S,v 1.0 2018/05/08 13:00:49 lbianc Exp $")
+#endif
+
+ENTRY(__strcpy_arch_2_05)
+ mr %r8, %r3
+
+/*
+ * Align the read address.  This is done even when it is already aligned, to
+ * avoid performance degradation with strings of 8 bytes or less.
+ */
+.Lalignment:
+ lbz %r0,0(%r4)
+ cmpdi cr7,%r0,0
+ stb %r0,0(%r8)
+ beq cr7,.Lexit
+ addi %r4,%r4,1
+ addi %r8,%r8,1
+ andi. %r0,%r4,0x7
+ bne .Lalignment
+
+/* Copy by double word with aligned address. */
+.Lcopy_dw:
+ ld %r0,0(%r4)
+ xor %r6,%r6,%r6
+ cmpb %r5,%r0,%r6
+ cmpdi cr7,%r5,0
+ bne cr7,.Lcheck_zero
+ /* Backward r8 to use stdu instruction in Lcopy_dw_loop */
+ addi %r8,%r8,-8
+.Lcopy_dw_loop:
+ stdu %r0,8(%r8)
+ ldu %r0,8(%r4)
+ cmpb %r5,%r0,%r6
+ cmpdi cr7,%r5,0
+ beq cr7,.Lcopy_dw_loop
+
+ addi %r8,%r8,8 /* Forward r8 to use std instruction. */
+#if defined(__BIG_ENDIAN__)
+/* Find where the zero is located. */
+.Lcheck_zero:
+ rldicr. %r5,%r0,0,7
+ beq .Lfound_on_byte_0
+ rldicr. %r7,%r0,8,7
+ beq .Lfound_on_byte_1
+ rldicr. %r7,%r0,16,7
+ beq .Lfound_on_byte_2
+ rldicr. %r7,%r0,24,7
+ beq .Lfound_on_byte_3
+ andis. %r7,%r0,0xff00
+ beq .Lfound_on_byte_4
+ andis. %r7,%r0,0xff
+ beq .Lfound_on_byte_5
+ andi. %r7,%r0,0xff00
+ beq .Lfound_on_byte_6
+
+/* Copy the last string bytes according to the string end position. */
+.Lfound_on_byte_7:
+ std %r0,0(%r8)
+ b .Lexit
+
+.Lfound_on_byte_6:
+ srdi %r6,%r0,32
+ stw %r6,0(%r8)
+ srdi %r6,%r0,16
+ sth %r6,4(%r8)
+ srdi %r6,%r0,8
+ stb %r6,6(%r8)
+ b .Lexit
+
+.Lfound_on_byte_5:
+ srdi %r6,%r0,32
+ stw %r6,0(%r8)
+ srdi %r6,%r0,16
+ sth %r6,4(%r8)
+ b .Lexit
+
+.Lfound_on_byte_4:
+ srdi %r6,%r0,32
+ stw %r6,0(%r8)
+ srdi %r6,%r0,24
+ stb %r6,4(%r8)
+ b .Lexit
+
+.Lfound_on_byte_3:
+ srdi %r6,%r0,32
+ stw %r6,0(%r8)
+ b .Lexit
+
+.Lfound_on_byte_2:
+ srdi %r6,%r0,48
+ sth %r6,0(%r8)
+ srdi %r6,%r0,40
+ stb %r6,2(%r8)
+ b .Lexit
+
+.Lfound_on_byte_1:
+ srdi %r6,%r0,48
+ sth %r6,0(%r8)
+ b .Lexit
+
+.Lfound_on_byte_0:
+ srdi %r6,%r0,56
+ stb %r6,0(%r8)
+#elif defined(__LITTLE_ENDIAN__)
+/* Find where the zero is located. */
+.Lcheck_zero:
+ andi. %r7,%r0,0xff
+ beq .Lfound_on_byte_0
+ andi. %r7,%r0,0xff00
+ beq .Lfound_on_byte_1
+ andis. %r7,%r0,0xff
+ beq .Lfound_on_byte_2
+ andis. %r7,%r0,0xff00
+ beq .Lfound_on_byte_3
+ rldicr. %r7,%r0,24,7
+ beq .Lfound_on_byte_4
+ rldicr. %r7,%r0,16,7
+ beq .Lfound_on_byte_5
+ rldicr. %r7,%r0,8,7
+ beq .Lfound_on_byte_6
+
+/* Copy the last string bytes according to the string end position. */
+.Lfound_on_byte_7:
+ std %r0,0(%r8)
+ b .Lexit
+
+.Lfound_on_byte_6:
+ stw %r0,0(%r8)
+ srdi %r6,%r0,32
+ sth %r6,4(%r8)
+ srdi %r6,%r0,48
+ stb %r6,6(%r8)
+ b .Lexit
+
+.Lfound_on_byte_5:
+ stw %r0,0(%r8)
+ srdi %r6,%r0,32
+ sth %r6,4(%r8)
+ b .Lexit
+
+.Lfound_on_byte_4:
+ stw %r0,0(%r8)
+ srdi %r6,%r0,32
+ stb %r6,4(%r8)
+ b .Lexit
+
+.Lfound_on_byte_3:
+ stw %r0,0(%r8)
+ b .Lexit
+
+.Lfound_on_byte_2:
+ sth %r0,0(%r8)
+ srdi %r6,%r0,16
+ stb %r6,2(%r8)
+ b .Lexit
+
+.Lfound_on_byte_1:
+ sth %r0,0(%r8)
+ b .Lexit
+
+.Lfound_on_byte_0:
+ stb %r0,0(%r8)
+#else
+#error "Unable to determine Endianness"
+#endif
+.Lexit:
+ blr
+
+END(__strcpy_arch_2_05)
+
+ .section .note.GNU-stack,"",%progbits
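For clarity (illustration only, not part of this commit): the cmpb instruction used in the loop above sets each byte of its result to 0xff where the corresponding bytes of the two operands are equal and to 0x00 otherwise, so comparing the result against zero detects whether the loaded doubleword contains the string terminator. A small C emulation of that check:

#include <stdint.h>
#include <stdio.h>

static uint64_t
cmpb_emulated(uint64_t a, uint64_t b)
{
	uint64_t r = 0;

	for (int i = 0; i < 8; i++) {
		uint64_t mask = (uint64_t)0xff << (i * 8);
		if ((a & mask) == (b & mask))
			r |= mask;
	}
	return (r);
}

int
main(void)
{
	uint64_t word = 0x68656c6c6f000000ULL;	/* "hello\0..." as a big-endian doubleword */

	/* Non-zero result against 0 means the doubleword contains a NUL byte. */
	printf("cmpb vs 0 = %016jx\n", (uintmax_t)cmpb_emulated(word, 0));
	return (0);
}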
diff --git a/lib/libc/powerpc64/string/strcpy_resolver.c b/lib/libc/powerpc64/string/strcpy_resolver.c
new file mode 100644
index 000000000000..7a64ce41c7e4
--- /dev/null
+++ b/lib/libc/powerpc64/string/strcpy_resolver.c
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <machine/cpu.h>
+#include <machine/ifunc.h>
+
+char *
+__strcpy_arch_2_05(char * restrict dst, const char * restrict src);
+
+char *
+__strcpy(char * restrict dst, const char * restrict src);
+
+DEFINE_UIFUNC(, char *, strcpy, (char * restrict, const char * restrict))
+{
+ if (cpu_features & PPC_FEATURE_ARCH_2_05)
+ return (__strcpy_arch_2_05);
+ else
+ return (__strcpy);
+}
diff --git a/lib/libc/powerpc64/string/strncpy.c b/lib/libc/powerpc64/string/strncpy.c
new file mode 100644
index 000000000000..aef3fb88724a
--- /dev/null
+++ b/lib/libc/powerpc64/string/strncpy.c
@@ -0,0 +1,30 @@
+/*-
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#define WEAK_STRNCPY
+#include "../../string/strncpy.c"
diff --git a/lib/libc/powerpc64/string/strncpy_arch_2_05.S b/lib/libc/powerpc64/string/strncpy_arch_2_05.S
new file mode 100644
index 000000000000..f78d1c49c62a
--- /dev/null
+++ b/lib/libc/powerpc64/string/strncpy_arch_2_05.S
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2018 Instituto de Pesquisas Eldorado
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+ENTRY(__strncpy_arch_2_05)
+ stdu %r1,-40(%r1)
+ mflr %r0
+ std %r0,16(%r1)
+ std %r3,32(%r1)
+
+ xor %r6,%r6,%r6 /* fixed 0 reg */
+
+/* align loop */
+ addi %r3,%r3,-1
+.Lalign_loop:
+ /* len? */
+ cmpdi %r5,0
+ beq .Lexit
+ /* aligned? */
+ andi. %r0,%r4,7
+ beq .Ldw_copy
+ /* copy */
+ lbz %r7,0(%r4)
+ stbu %r7,1(%r3)
+ addi %r4,%r4,1
+ addi %r5,%r5,-1
+ /* zero? */
+ cmpdi %r7,0
+ beq .Lzero
+ b .Lalign_loop
+
+/* dword copy loop */
+.Ldw_copy:
+ /* prepare src and dst to use load/store and update */
+ addi %r3,%r3,-7
+ addi %r4,%r4,-8
+.Ldw_copy_loop:
+ cmpdi %r5,8
+ blt .Lbyte_copy
+
+ ldu %r0,8(%r4)
+ /* check for 0 */
+ cmpb %r7,%r0,%r6
+ cmpdi %r7,0
+ bne .Lbyte_copy_and_zero
+ /* copy to dst */
+ stdu %r0,8(%r3)
+ addi %r5,%r5,-8
+ b .Ldw_copy_loop
+
+/* Copy remaining src bytes, then zero out the rest of the buffer.
+ * Note: r5 will be >= 8.
+ */
+.Lbyte_copy_and_zero:
+ addi %r3,%r3,7
+ addi %r4,%r4,-1
+.Lbyte_copy_and_zero_loop:
+ lbzu %r7,1(%r4)
+ stbu %r7,1(%r3)
+ addi %r5,%r5,-1
+ cmpdi %r7,0
+ beq .Lzero
+ b .Lbyte_copy_and_zero_loop
+
+/* zero-out remaining dst bytes */
+.Lzero:
+ addi %r3,%r3,1
+ li %r4,0
+ /* r5 has len already */
+ bl memset
+ nop
+ b .Lexit
+
+/* copy remaining (< 8) bytes */
+.Lbyte_copy:
+ cmpdi %r5,0
+ beq .Lexit
+ addi %r3,%r3,7
+ addi %r4,%r4,7
+ mtctr %r5
+.Lbyte_copy_loop:
+ lbzu %r7,1(%r4)
+ stbu %r7,1(%r3)
+ cmpdi %r7,0
+ /* 0 found: zero out remaining bytes */
+ beq .Lbyte_copy_zero
+ bdnz .Lbyte_copy_loop
+ b .Lexit
+.Lbyte_copy_zero_loop:
+ stbu %r6,1(%r3)
+.Lbyte_copy_zero:
+ bdnz .Lbyte_copy_zero_loop
+
+.Lexit:
+ /* epilogue */
+ ld %r3,32(%r1)
+ ld %r0,16(%r1)
+ mtlr %r0
+ addi %r1,%r1,40
+ blr
+
+END(__strncpy_arch_2_05)
+
+ .section .note.GNU-stack,"",%progbits
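Behaviourally (illustration only, not part of this commit), the routine above implements standard strncpy(3) semantics: copy at most len bytes from src, and if src is shorter than len, zero-fill the rest of dst, which is what the tail call to memset does. A plain C reference sketch:

#include <stddef.h>
#include <string.h>

static char *
strncpy_sketch(char * restrict dst, const char * restrict src, size_t n)
{
	size_t i;

	for (i = 0; i < n && src[i] != '\0'; i++)
		dst[i] = src[i];
	if (i < n)
		memset(dst + i, 0, n - i);	/* zero-fill the remainder */
	return (dst);
}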
diff --git a/lib/libc/powerpc64/string/strncpy_resolver.c b/lib/libc/powerpc64/string/strncpy_resolver.c
new file mode 100644
index 000000000000..402b5c5226d0
--- /dev/null
+++ b/lib/libc/powerpc64/string/strncpy_resolver.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <machine/cpu.h>
+#include <machine/ifunc.h>
+
+char *
+__strncpy_arch_2_05(char * restrict dst, const char * restrict src, size_t len);
+
+char *
+__strncpy(char * restrict dst, const char * restrict src, size_t len);
+
+DEFINE_UIFUNC(, char *, strncpy,
+ (char * restrict, const char * restrict, size_t))
+{
+ if (cpu_features & PPC_FEATURE_ARCH_2_05)
+ return (__strncpy_arch_2_05);
+ else
+ return (__strncpy);
+}