author    Andrew Turner <andrew@FreeBSD.org>    2016-09-16 15:19:31 +0000
committer Andrew Turner <andrew@FreeBSD.org>    2016-09-16 15:19:31 +0000
commit    162378196a8674c75b4e93addb18ef42cb9b7737 (patch)
tree      35a65fc34832b001eb50dd981f9db6866797e750 /reference
Diffstat (limited to 'reference')
-rw-r--r--  reference/bionic-a15/.deps/libbionic_a15_a-strlen.Po  1
-rw-r--r--  reference/bionic-a15/.deps/memcmp.Po  1
-rw-r--r--  reference/bionic-a15/.deps/memcpy.Po  1
-rw-r--r--  reference/bionic-a15/.deps/memset.Po  1
-rw-r--r--  reference/bionic-a15/.deps/strcmp.Po  1
-rw-r--r--  reference/bionic-a15/.deps/strcpy.Po  1
-rw-r--r--  reference/bionic-a15/memcmp.S  341
-rw-r--r--  reference/bionic-a15/memcpy.S  325
-rw-r--r--  reference/bionic-a15/memset.S  159
-rw-r--r--  reference/bionic-a15/strcmp.S  378
-rw-r--r--  reference/bionic-a15/strcpy.S  136
-rw-r--r--  reference/bionic-a15/strlen.c  132
-rw-r--r--  reference/bionic-a9/.deps/libbionic_a9_a-strlen.Po  1
-rw-r--r--  reference/bionic-a9/.deps/memcmp.Po  1
-rw-r--r--  reference/bionic-a9/.deps/memcpy.Po  1
-rw-r--r--  reference/bionic-a9/.deps/memset.Po  1
-rw-r--r--  reference/bionic-a9/.deps/strcmp.Po  1
-rw-r--r--  reference/bionic-a9/.deps/strcpy.Po  1
-rw-r--r--  reference/bionic-a9/memcmp.S  341
-rw-r--r--  reference/bionic-a9/memcpy.S  212
-rw-r--r--  reference/bionic-a9/memset.S  149
-rw-r--r--  reference/bionic-a9/strcmp.S  545
-rw-r--r--  reference/bionic-a9/strcpy.S  136
-rw-r--r--  reference/bionic-a9/strlen.c  132
-rw-r--r--  reference/bionic-c/.deps/.dirstamp  0
-rw-r--r--  reference/bionic-c/.deps/bcopy.Po  25
-rw-r--r--  reference/bionic-c/.deps/memchr.Po  27
-rw-r--r--  reference/bionic-c/.deps/memcmp.Po  25
-rw-r--r--  reference/bionic-c/.deps/memcpy.Po  27
-rw-r--r--  reference/bionic-c/.deps/memset.Po  38
-rw-r--r--  reference/bionic-c/.deps/strchr.Po  25
-rw-r--r--  reference/bionic-c/.deps/strcmp.Po  25
-rw-r--r--  reference/bionic-c/.deps/strcpy.Po  25
-rw-r--r--  reference/bionic-c/.deps/strlen.Po  25
-rw-r--r--  reference/bionic-c/.dirstamp  0
-rw-r--r--  reference/bionic-c/bcopy.c  128
-rw-r--r--  reference/bionic-c/memchr.c  46
-rw-r--r--  reference/bionic-c/memcmp.c  51
-rw-r--r--  reference/bionic-c/memcpy.c  29
-rw-r--r--  reference/bionic-c/memset.c  44
-rw-r--r--  reference/bionic-c/strchr.c  44
-rw-r--r--  reference/bionic-c/strcmp.c  52
-rw-r--r--  reference/bionic-c/strcpy.c  41
-rw-r--r--  reference/bionic-c/strlen.c  47
-rw-r--r--  reference/csl/.deps/memcpy.Po  1
-rw-r--r--  reference/csl/.deps/memset.Po  1
-rw-r--r--  reference/csl/arm_asm.h  82
-rw-r--r--  reference/csl/memcpy.c  291
-rw-r--r--  reference/csl/memset.c  214
-rw-r--r--  reference/glibc-c/.deps/.dirstamp  0
-rw-r--r--  reference/glibc-c/.deps/memchr.Po  45
-rw-r--r--  reference/glibc-c/.deps/memcmp.Po  1
-rw-r--r--  reference/glibc-c/.deps/memcpy.Po  1
-rw-r--r--  reference/glibc-c/.deps/memset.Po  1
-rw-r--r--  reference/glibc-c/.deps/strchr.Po  1
-rw-r--r--  reference/glibc-c/.deps/strcmp.Po  1
-rw-r--r--  reference/glibc-c/.deps/strcpy.Po  1
-rw-r--r--  reference/glibc-c/.deps/strlen.Po  1
-rw-r--r--  reference/glibc-c/.deps/wordcopy.Po  1
-rw-r--r--  reference/glibc-c/.dirstamp  0
-rw-r--r--  reference/glibc-c/memchr.c  204
-rw-r--r--  reference/glibc-c/memcmp.c  369
-rw-r--r--  reference/glibc-c/memcopy.h  146
-rw-r--r--  reference/glibc-c/memcpy.c  63
-rw-r--r--  reference/glibc-c/memset.c  89
-rw-r--r--  reference/glibc-c/pagecopy.h  74
-rw-r--r--  reference/glibc-c/strchr.c  184
-rw-r--r--  reference/glibc-c/strcmp.c  45
-rw-r--r--  reference/glibc-c/strcpy.c  39
-rw-r--r--  reference/glibc-c/strlen.c  105
-rw-r--r--  reference/glibc-c/wordcopy.c  412
-rw-r--r--  reference/glibc/.deps/memcpy.Po  1
-rw-r--r--  reference/glibc/.deps/memset.Po  1
-rw-r--r--  reference/glibc/.deps/strchr.Po  1
-rw-r--r--  reference/glibc/.deps/strlen.Po  1
-rw-r--r--  reference/glibc/memcpy.S  229
-rw-r--r--  reference/glibc/memset.S  64
-rw-r--r--  reference/glibc/strchr.S  132
-rw-r--r--  reference/glibc/strlen.S  99
-rw-r--r--  reference/helpers/bounce.c  34
-rw-r--r--  reference/helpers/spawn.c  44
-rw-r--r--  reference/newlib-c/.deps/memchr.Po  1
-rw-r--r--  reference/newlib-c/.deps/memcmp.Po  1
-rw-r--r--  reference/newlib-c/.deps/memcpy.Po  1
-rw-r--r--  reference/newlib-c/.deps/memset.Po  1
-rw-r--r--  reference/newlib-c/.deps/strchr.Po  1
-rw-r--r--  reference/newlib-c/.deps/strcmp.Po  1
-rw-r--r--  reference/newlib-c/.deps/strcpy.Po  1
-rw-r--r--  reference/newlib-c/.deps/strlen.Po  1
-rw-r--r--  reference/newlib-c/memchr.c  134
-rw-r--r--  reference/newlib-c/memcmp.c  114
-rw-r--r--  reference/newlib-c/memcpy.c  110
-rw-r--r--  reference/newlib-c/memset.c  103
-rw-r--r--  reference/newlib-c/shim.h  5
-rw-r--r--  reference/newlib-c/strchr.c  126
-rw-r--r--  reference/newlib-c/strcmp.c  109
-rw-r--r--  reference/newlib-c/strcpy.c  100
-rw-r--r--  reference/newlib-c/strlen.c  88
-rw-r--r--  reference/newlib-xscale/.deps/memchr.Po  1
-rw-r--r--  reference/newlib-xscale/.deps/memcpy.Po  1
-rw-r--r--  reference/newlib-xscale/.deps/memset.Po  1
-rw-r--r--  reference/newlib-xscale/.deps/strchr.Po  1
-rw-r--r--  reference/newlib-xscale/.deps/strcmp.Po  1
-rw-r--r--  reference/newlib-xscale/.deps/strcpy.Po  1
-rw-r--r--  reference/newlib-xscale/.deps/strlen.Po  1
-rw-r--r--  reference/newlib-xscale/memchr.c  95
-rw-r--r--  reference/newlib-xscale/memcpy.c  94
-rw-r--r--  reference/newlib-xscale/memset.c  81
-rw-r--r--  reference/newlib-xscale/strchr.c  66
-rw-r--r--  reference/newlib-xscale/strcmp.c  100
-rw-r--r--  reference/newlib-xscale/strcpy.c  55
-rw-r--r--  reference/newlib-xscale/strlen.c  94
-rw-r--r--  reference/newlib-xscale/xscale.h  26
-rw-r--r--  reference/newlib/.deps/memcpy.Po  1
-rw-r--r--  reference/newlib/.deps/strcmp.Po  1
-rw-r--r--  reference/newlib/.deps/strcpy.Po  1
-rw-r--r--  reference/newlib/.deps/strlen.Po  1
-rw-r--r--  reference/newlib/arm_asm.h  98
-rw-r--r--  reference/newlib/memcpy.S  423
-rw-r--r--  reference/newlib/shim.h  5
-rw-r--r--  reference/newlib/strcmp.S  777
-rw-r--r--  reference/newlib/strcpy.c  167
-rw-r--r--  reference/newlib/strlen.c  179
-rw-r--r--  reference/plain/.deps/memcpy.Po  1
-rw-r--r--  reference/plain/.deps/memset.Po  1
-rw-r--r--  reference/plain/.deps/strcmp.Po  1
-rw-r--r--  reference/plain/.deps/strcpy.Po  1
-rw-r--r--  reference/plain/memcpy.c  44
-rw-r--r--  reference/plain/memset.c  42
-rw-r--r--  reference/plain/strcmp.c  38
-rw-r--r--  reference/plain/strcpy.c  38
131 files changed, 10034 insertions, 0 deletions
diff --git a/reference/bionic-a15/.deps/libbionic_a15_a-strlen.Po b/reference/bionic-a15/.deps/libbionic_a15_a-strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a15/.deps/libbionic_a15_a-strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a15/.deps/memcmp.Po b/reference/bionic-a15/.deps/memcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a15/.deps/memcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a15/.deps/memcpy.Po b/reference/bionic-a15/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a15/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a15/.deps/memset.Po b/reference/bionic-a15/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a15/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a15/.deps/strcmp.Po b/reference/bionic-a15/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a15/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a15/.deps/strcpy.Po b/reference/bionic-a15/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a15/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a15/memcmp.S b/reference/bionic-a15/memcmp.S
new file mode 100644
index 000000000000..8876a98b4f63
--- /dev/null
+++ b/reference/bionic-a15/memcmp.S
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_32_BYTE_CACHE_LINE
+#define CACHE_LINE_SIZE 32
+#else
+#define CACHE_LINE_SIZE 64
+#endif
+
+/*
+ * Optimized memcmp() for Cortex-A9.
+ */
+ .text
+ .globl memcmp
+ .type memcmp,%function
+memcmp:
+ .fnstart
+ pld [r0, #(CACHE_LINE_SIZE * 0)]
+ pld [r0, #(CACHE_LINE_SIZE * 1)]
+
+ /* take care of the case where length is 0 or the buffers are the same */
+ cmp r0, r1
+ moveq r0, #0
+ bxeq lr
+
+ pld [r1, #(CACHE_LINE_SIZE * 0)]
+ pld [r1, #(CACHE_LINE_SIZE * 1)]
+
+ /* make sure we have at least 8+4 bytes; this simplifies things below
+ * and avoids some overhead for small blocks
+ */
+ cmp r2, #(8+4)
+ bmi 10f
+/*
+ * Neon optimization
+ * Comparing 32 bytes at a time
+ */
+#if defined(__ARM_NEON__) && defined(NEON_UNALIGNED_ACCESS)
+ subs r2, r2, #32
+ blo 3f
+
+ /* preload all the cache lines we need. */
+ pld [r0, #(CACHE_LINE_SIZE * 2)]
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+
+1: /* The main loop compares 32 bytes at a time */
+ vld1.8 {d0 - d3}, [r0]!
+ pld [r0, #(CACHE_LINE_SIZE * 2)]
+ vld1.8 {d4 - d7}, [r1]!
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+
+ /* Start subtracting the values and merge results */
+ vsub.i8 q0, q2
+ vsub.i8 q1, q3
+ vorr q2, q0, q1
+ vorr d4, d5
+ vmov r3, ip, d4
+ /* Check if there are any differences among the 32 bytes */
+ orrs r3, ip
+ bne 2f
+ subs r2, r2, #32
+ bhs 1b
+ b 3f
+2:
+ /* Check if the difference was in the first or last 16 bytes */
+ sub r0, #32
+ vorr d0, d1
+ sub r1, #32
+ vmov r3, ip, d0
+ orrs r3, ip
+ /* if the first 16 bytes are equal, we only have to rewind 16 bytes */
+ ittt eq
+ subeq r2, #16
+ addeq r0, #16
+ addeq r1, #16
+
+3: /* fix-up the remaining count */
+ add r2, r2, #32
+
+ cmp r2, #(8+4)
+ bmi 10f
+#endif
+
+ .save {r4, lr}
+ /* save registers */
+ stmfd sp!, {r4, lr}
+
+ /* since r0 holds the result, move the first source
+ * pointer somewhere else
+ */
+ mov r4, r0
+
+ /* align first pointer to word boundary
+ * offset = -src & 3
+ */
+ rsb r3, r4, #0
+ ands r3, r3, #3
+ beq 0f
+
+ /* align first pointer */
+ sub r2, r2, r3
+1: ldrb r0, [r4], #1
+ ldrb ip, [r1], #1
+ subs r0, r0, ip
+ bne 9f
+ subs r3, r3, #1
+ bne 1b
+
+
+0: /* here the first pointer is aligned, and we have at least 4 bytes
+ * to process.
+ */
+
+ /* see if the pointers are congruent */
+ eor r0, r4, r1
+ ands r0, r0, #3
+ bne 5f
+
+ /* congruent case, 32 bytes per iteration
+ * We need to make sure there are at least 32+4 bytes left
+ * because we effectively read ahead one word, and we could
+ * read past the buffer (and segfault) if we're not careful.
+ */
+
+ ldr ip, [r1]
+ subs r2, r2, #(32 + 4)
+ bmi 1f
+
+0: pld [r4, #(CACHE_LINE_SIZE * 2)]
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+ ldr r0, [r4], #4
+ ldr lr, [r1, #4]!
+ eors r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ ldreq r0, [r4], #4
+ ldreq lr, [r1, #4]!
+ eoreqs r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ ldreq r0, [r4], #4
+ ldreq lr, [r1, #4]!
+ eoreqs r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ ldreq r0, [r4], #4
+ ldreq lr, [r1, #4]!
+ eoreqs r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ bne 2f
+ subs r2, r2, #32
+ bhs 0b
+
+ /* do we have at least 4 bytes left? */
+1: adds r2, r2, #(32 - 4 + 4)
+ bmi 4f
+
+ /* finish off 4 bytes at a time */
+3: ldr r0, [r4], #4
+ ldr ip, [r1], #4
+ eors r0, r0, ip
+ bne 2f
+ subs r2, r2, #4
+ bhs 3b
+
+ /* are we done? */
+4: adds r2, r2, #4
+ moveq r0, #0
+ beq 9f
+
+ /* finish off the remaining bytes */
+ b 8f
+
+2: /* the last 4 bytes are different, restart them */
+ sub r4, r4, #4
+ sub r1, r1, #4
+ mov r2, #4
+
+ /* process the last few bytes */
+8: ldrb r0, [r4], #1
+ ldrb ip, [r1], #1
+ // stall
+ subs r0, r0, ip
+ bne 9f
+ subs r2, r2, #1
+ bne 8b
+
+9: /* restore registers and return */
+ ldmfd sp!, {r4, lr}
+ bx lr
+
+10: /* process less than 12 bytes */
+ cmp r2, #0
+ moveq r0, #0
+ bxeq lr
+ mov r3, r0
+11:
+ ldrb r0, [r3], #1
+ ldrb ip, [r1], #1
+ subs r0, ip
+ bxne lr
+ subs r2, r2, #1
+ bne 11b
+ bx lr
+
+5: /*************** non-congruent case ***************/
+ and r0, r1, #3
+ cmp r0, #2
+ bne 4f
+
+ /* here, offset is 2 (16-bits aligned, special cased) */
+
+ /* make sure we have at least 16 bytes to process */
+ subs r2, r2, #16
+ addmi r2, r2, #16
+ bmi 8b
+
+ /* align the unaligned pointer */
+ bic r1, r1, #3
+ ldr lr, [r1], #4
+
+6: pld [r1, #(CACHE_LINE_SIZE * 2)]
+ pld [r4, #(CACHE_LINE_SIZE * 2)]
+ mov ip, lr, lsr #16
+ ldr lr, [r1], #4
+ ldr r0, [r4], #4
+ orr ip, ip, lr, lsl #16
+ eors r0, r0, ip
+ moveq ip, lr, lsr #16
+ ldreq lr, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, lr, lsl #16
+ eoreqs r0, r0, ip
+ moveq ip, lr, lsr #16
+ ldreq lr, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, lr, lsl #16
+ eoreqs r0, r0, ip
+ moveq ip, lr, lsr #16
+ ldreq lr, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, lr, lsl #16
+ eoreqs r0, r0, ip
+ bne 7f
+ subs r2, r2, #16
+ bhs 6b
+ sub r1, r1, #2
+ /* are we done? */
+ adds r2, r2, #16
+ moveq r0, #0
+ beq 9b
+ /* finish off the remaining bytes */
+ b 8b
+
+7: /* fix up the 2 pointers and fallthrough... */
+ sub r1, r1, #(4+2)
+ sub r4, r4, #4
+ mov r2, #4
+ b 8b
+
+
+4: /*************** offset is 1 or 3 (less optimized) ***************/
+
+ stmfd sp!, {r5, r6, r7}
+
+ // r5 = rhs
+ // r6 = lhs
+ // r7 = scratch
+
+ mov r5, r0, lsl #3 /* r5 = right shift */
+ rsb r6, r5, #32 /* r6 = left shift */
+
+ /* align the unaligned pointer */
+ bic r1, r1, #3
+ ldr r7, [r1], #4
+ sub r2, r2, #8
+
+6: mov ip, r7, lsr r5
+ ldr r7, [r1], #4
+ ldr r0, [r4], #4
+ orr ip, ip, r7, lsl r6
+ eors r0, r0, ip
+ moveq ip, r7, lsr r5
+ ldreq r7, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, r7, lsl r6
+ eoreqs r0, r0, ip
+ bne 7f
+ subs r2, r2, #8
+ bhs 6b
+
+ sub r1, r1, r6, lsr #3
+ ldmfd sp!, {r5, r6, r7}
+
+ /* are we done? */
+ adds r2, r2, #8
+ moveq r0, #0
+ beq 9b
+
+ /* finish off the remaining bytes */
+ b 8b
+
+7: /* fix up the 2 pointers and fallthrough... */
+ sub r1, r1, #4
+ sub r1, r1, r6, lsr #3
+ sub r4, r4, #4
+ mov r2, #4
+ ldmfd sp!, {r5, r6, r7}
+ b 8b
+ .fnend
+ .size memcmp, .-memcmp
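
The routine above follows a common word-at-a-time shape: byte-compare until the first pointer is word aligned, run a word loop when the two pointers are congruent modulo 4, and re-scan the differing word byte-wise. For reference, a minimal C sketch of that shape (memcmp_sketch is an illustrative name, not a file in this tree; the assembly's non-congruent shift-queue paths are simplified here to plain byte comparison):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

int memcmp_sketch(const void *a, const void *b, size_t n)
{
    const unsigned char *p = a;
    const unsigned char *q = b;

    /* Byte-compare until the first pointer is word aligned. */
    while (n > 0 && ((uintptr_t)p & 3)) {
        if (*p != *q)
            return *p - *q;
        p++; q++; n--;
    }

    /* Congruent case: both pointers now word aligned, so compare
       4 bytes per iteration; a mismatching word falls through and
       is re-scanned byte-wise below. */
    if (((uintptr_t)q & 3) == 0) {
        while (n >= 4) {
            uint32_t x, y;
            memcpy(&x, p, 4);
            memcpy(&y, q, 4);
            if (x != y)
                break;
            p += 4; q += 4; n -= 4;
        }
    }

    /* Tail, or the word that differed: byte by byte. */
    while (n > 0) {
        if (*p != *q)
            return *p - *q;
        p++; q++; n--;
    }
    return 0;
}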
diff --git a/reference/bionic-a15/memcpy.S b/reference/bionic-a15/memcpy.S
new file mode 100644
index 000000000000..921b1df0a309
--- /dev/null
+++ b/reference/bionic-a15/memcpy.S
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /* Prototype: void *memcpy (void *dst, const void *src, size_t count). */
+
+ // This version is tuned for the Cortex-A15 processor.
+
+ .text
+ .syntax unified
+ .fpu neon
+
+#define CACHE_LINE_SIZE 64
+
+ .globl memcpy
+ .type memcpy,%function
+memcpy:
+ .fnstart
+ // Assumes that n >= 0, and dst, src are valid pointers.
+ // For any sizes less than 832 use the neon code that doesn't
+ // care about the src alignment. This avoids any checks
+ // for src alignment, and offers the best improvement since
+ // smaller sized copies are dominated by the overhead of
+ // the pre and post main loop.
+ // For larger copies, if src and dst cannot both be aligned to
+ // word boundaries, use the neon code.
+ // For all other copies, align dst to a double word boundary
+ // and copy using LDRD/STRD instructions.
+
+ // Save registers (r0 holds the return value):
+ // optimized push {r0, lr}.
+ .save {r0, lr}
+ pld [r1, #(CACHE_LINE_SIZE*16)]
+ push {r0, lr}
+
+ cmp r2, #16
+ blo copy_less_than_16_unknown_align
+
+ cmp r2, #832
+ bge check_alignment
+
+copy_unknown_alignment:
+ // Unknown alignment of src and dst.
+ // Assumes that the first few bytes have already been prefetched.
+
+ // Align destination to 128 bits. The mainloop store instructions
+ // require this alignment or they will throw an exception.
+ rsb r3, r0, #0
+ ands r3, r3, #0xF
+ beq 2f
+
+ // Copy up to 15 bytes (count in r3).
+ sub r2, r2, r3
+ movs ip, r3, lsl #31
+
+ itt mi
+ ldrbmi lr, [r1], #1
+ strbmi lr, [r0], #1
+ itttt cs
+ ldrbcs ip, [r1], #1
+ ldrbcs lr, [r1], #1
+ strbcs ip, [r0], #1
+ strbcs lr, [r0], #1
+
+ movs ip, r3, lsl #29
+ bge 1f
+ // Copies 4 bytes, dst 32 bits aligned before, at least 64 bits after.
+ vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
+1: bcc 2f
+ // Copies 8 bytes, dst 64 bits aligned before, at least 128 bits after.
+ vld1.8 {d0}, [r1]!
+ vst1.8 {d0}, [r0, :64]!
+
+2: // Make sure we have at least 64 bytes to copy.
+ subs r2, r2, #64
+ blo 2f
+
+1: // The main loop copies 64 bytes at a time.
+ vld1.8 {d0 - d3}, [r1]!
+ vld1.8 {d4 - d7}, [r1]!
+ pld [r1, #(CACHE_LINE_SIZE*4)]
+ subs r2, r2, #64
+ vst1.8 {d0 - d3}, [r0, :128]!
+ vst1.8 {d4 - d7}, [r0, :128]!
+ bhs 1b
+
+2: // Fix-up the remaining count and make sure we have >= 32 bytes left.
+ adds r2, r2, #32
+ blo 3f
+
+ // 32 bytes. These cache lines were already preloaded.
+ vld1.8 {d0 - d3}, [r1]!
+ sub r2, r2, #32
+ vst1.8 {d0 - d3}, [r0, :128]!
+3: // Less than 32 left.
+ add r2, r2, #32
+ tst r2, #0x10
+ beq copy_less_than_16_unknown_align
+ // Copies 16 bytes, destination 128 bits aligned.
+ vld1.8 {d0, d1}, [r1]!
+ vst1.8 {d0, d1}, [r0, :128]!
+
+copy_less_than_16_unknown_align:
+ // Copy up to 15 bytes (count in r2).
+ movs ip, r2, lsl #29
+ bcc 1f
+ vld1.8 {d0}, [r1]!
+ vst1.8 {d0}, [r0]!
+1: bge 2f
+ vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
+
+2: // Copy 0 to 4 bytes.
+ lsls r2, r2, #31
+ itt ne
+ ldrbne lr, [r1], #1
+ strbne lr, [r0], #1
+ itttt cs
+ ldrbcs ip, [r1], #1
+ ldrbcs lr, [r1]
+ strbcs ip, [r0], #1
+ strbcs lr, [r0]
+
+ pop {r0, pc}
+
+check_alignment:
+ // If src and dst cannot both be aligned to a word boundary,
+ // use the unaligned copy version.
+ eor r3, r0, r1
+ ands r3, r3, #0x3
+ bne copy_unknown_alignment
+
+ // To try to improve performance, the stack layout is changed,
+ // i.e., the stack does not look the way users expect
+ // (highest numbered register at highest address).
+ // TODO: Add debug frame directives.
+ // We don't need exception unwind directives, because the code below
+ // does not throw any exceptions and does not call any other functions.
+ // Generally, newlib functions like this lack debug information for
+ // assembler source.
+ .save {r4, r5}
+ strd r4, r5, [sp, #-8]!
+ .save {r6, r7}
+ strd r6, r7, [sp, #-8]!
+ .save {r8, r9}
+ strd r8, r9, [sp, #-8]!
+
+ // Optimized for already aligned dst code.
+ ands ip, r0, #3
+ bne dst_not_word_aligned
+
+word_aligned:
+ // Align the destination buffer to 8 bytes, to make sure double
+ // loads and stores don't cross a cache line boundary,
+ // as they are then more expensive even if the data is in the cache
+ // (require two load/store issue cycles instead of one).
+ // If only one of the buffers is not 8 bytes aligned,
+ // then it's more important to align dst than src,
+ // because there is more penalty for stores
+ // than loads that cross a cacheline boundary.
+ // This check and realignment are only done if there is >= 832
+ // bytes to copy.
+
+ // Dst is word aligned, but check if it is already double word aligned.
+ ands r3, r0, #4
+ beq 1f
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ sub r2, #4
+
+1: // Can only get here if there are > 64 bytes to copy, so no need to check r2.
+ sub r2, #64
+
+2: // Every loop iteration copies 64 bytes.
+ .irp offset, #0, #8, #16, #24, #32
+ ldrd r4, r5, [r1, \offset]
+ strd r4, r5, [r0, \offset]
+ .endr
+
+ ldrd r4, r5, [r1, #40]
+ ldrd r6, r7, [r1, #48]
+ ldrd r8, r9, [r1, #56]
+
+ // Keep the pld as far from the next load as possible.
+ // The amount to prefetch was determined experimentally using
+ // large sizes, and verifying the prefetch size does not affect
+ // the smaller copies too much.
+ // WARNING: If the ldrd and strd instructions get too far away
+ // from each other, performance suffers. Three loads
+ // in a row is the best tradeoff.
+ pld [r1, #(CACHE_LINE_SIZE*16)]
+ strd r4, r5, [r0, #40]
+ strd r6, r7, [r0, #48]
+ strd r8, r9, [r0, #56]
+
+ add r0, r0, #64
+ add r1, r1, #64
+ subs r2, r2, #64
+ bge 2b
+
+ // Fix-up the remaining count and make sure we have >= 32 bytes left.
+ adds r2, r2, #32
+ blo 4f
+
+ // Copy 32 bytes. These cache lines were already preloaded.
+ .irp offset, #0, #8, #16, #24
+ ldrd r4, r5, [r1, \offset]
+ strd r4, r5, [r0, \offset]
+ .endr
+ add r1, r1, #32
+ add r0, r0, #32
+ sub r2, r2, #32
+4: // Less than 32 left.
+ add r2, r2, #32
+ tst r2, #0x10
+ beq 5f
+ // Copy 16 bytes.
+ .irp offset, #0, #8
+ ldrd r4, r5, [r1, \offset]
+ strd r4, r5, [r0, \offset]
+ .endr
+ add r1, r1, #16
+ add r0, r0, #16
+
+5: // Copy up to 15 bytes (count in r2).
+ movs ip, r2, lsl #29
+ bcc 1f
+ // Copy 8 bytes.
+ ldrd r4, r5, [r1], #8
+ strd r4, r5, [r0], #8
+1: bge 2f
+ // Copy 4 bytes.
+ ldr r4, [r1], #4
+ str r4, [r0], #4
+2: // Copy 0 to 4 bytes.
+ lsls r2, r2, #31
+ itt ne
+ ldrbne lr, [r1], #1
+ strbne lr, [r0], #1
+ itttt cs
+ ldrbcs ip, [r1], #1
+ ldrbcs lr, [r1]
+ strbcs ip, [r0], #1
+ strbcs lr, [r0]
+
+ // Restore registers: optimized pop {r0, pc}
+ ldrd r8, r9, [sp], #8
+ ldrd r6, r7, [sp], #8
+ ldrd r4, r5, [sp], #8
+ pop {r0, pc}
+
+dst_not_word_aligned:
+ // Align dst to word.
+ rsb ip, ip, #4
+ cmp ip, #2
+
+ itt gt
+ ldrbgt lr, [r1], #1
+ strbgt lr, [r0], #1
+
+ itt ge
+ ldrbge lr, [r1], #1
+ strbge lr, [r0], #1
+
+ ldrb lr, [r1], #1
+ strb lr, [r0], #1
+
+ sub r2, r2, ip
+
+ // Src is guaranteed to be at least word aligned by this point.
+ b word_aligned
+ .fnend
+ .size memcpy, .-memcpy
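
The mutually-aligned path above aligns dst to a double word and then moves 64 bytes per iteration with ldrd/strd pairs while prefetching far ahead. Its shape in C, under the assumption that src and dst are congruent (memcpy_aligned_sketch is an illustrative name; the Neon path handles the small and misaligned cases):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void *memcpy_aligned_sketch(void *dst, const void *src, size_t n)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    /* Byte-copy until dst is double-word aligned, so the wide stores
       never straddle a cache line (cf. dst_not_word_aligned above). */
    while (n > 0 && ((uintptr_t)d & 7)) {
        *d++ = *s++;
        n--;
    }

    /* Main loop: 64 bytes per iteration, mirroring the ldrd/strd runs. */
    while (n >= 64) {
        memcpy(d, s, 64);   /* compilers lower this to wide paired loads/stores */
        d += 64; s += 64; n -= 64;
    }

    /* Tail. */
    memcpy(d, s, n);
    return dst;
}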
diff --git a/reference/bionic-a15/memset.S b/reference/bionic-a15/memset.S
new file mode 100644
index 000000000000..6fd7c8efb080
--- /dev/null
+++ b/reference/bionic-a15/memset.S
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+ /*
+ * Optimized memset() for ARM.
+ *
+ * memset() returns its first argument.
+ */
+
+ .fpu neon
+ .syntax unified
+
+ .globl memset
+ .type memset,%function
+memset:
+ .fnstart
+ .save {r0}
+ stmfd sp!, {r0}
+
+ // The new algorithm is slower for sizes < 16, so use the old
+ // neon code in that case.
+ cmp r2, #16
+ blo set_less_than_16_unknown_align
+
+ // Use strd, which requires an even/odd register pair, so move the
+ // values so that:
+ // r0 and r1 contain the memset value
+ // r2 is the number of bytes to set
+ // r3 is the destination pointer
+ mov r3, r0
+
+ // Copy the byte value into every byte of r1.
+ mov r1, r1, lsl #24
+ orr r1, r1, r1, lsr #8
+ orr r1, r1, r1, lsr #16
+
+check_alignment:
+ // Align destination to a double word to avoid the strd crossing
+ // a cache line boundary.
+ ands ip, r3, #7
+ bne do_double_word_align
+
+double_word_aligned:
+ mov r0, r1
+
+ subs r2, #64
+ blo set_less_than_64
+
+1: // Main loop sets 64 bytes at a time.
+ .irp offset, #0, #8, #16, #24, #32, #40, #48, #56
+ strd r0, r1, [r3, \offset]
+ .endr
+
+ add r3, #64
+ subs r2, #64
+ bge 1b
+
+set_less_than_64:
+ // Restore r2 to the count of bytes left to set.
+ add r2, #64
+ lsls ip, r2, #27
+ bcc set_less_than_32
+ // Set 32 bytes.
+ .irp offset, #0, #8, #16, #24
+ strd r0, r1, [r3, \offset]
+ .endr
+ add r3, #32
+
+set_less_than_32:
+ bpl set_less_than_16
+ // Set 16 bytes.
+ .irp offset, #0, #8
+ strd r0, r1, [r3, \offset]
+ .endr
+ add r3, #16
+
+set_less_than_16:
+ // Less than 16 bytes to set.
+ lsls ip, r2, #29
+ bcc set_less_than_8
+
+ // Set 8 bytes.
+ strd r0, r1, [r3], #8
+
+set_less_than_8:
+ bpl set_less_than_4
+ // Set 4 bytes
+ str r1, [r3], #4
+
+set_less_than_4:
+ lsls ip, r2, #31
+ it ne
+ strbne r1, [r3], #1
+ itt cs
+ strbcs r1, [r3], #1
+ strbcs r1, [r3]
+
+ ldmfd sp!, {r0}
+ bx lr
+
+do_double_word_align:
+ rsb ip, ip, #8
+ sub r2, r2, ip
+ movs r0, ip, lsl #31
+ it mi
+ strbmi r1, [r3], #1
+ itt cs
+ strbcs r1, [r3], #1
+ strbcs r1, [r3], #1
+
+ // Dst is at least word aligned by this point.
+ cmp ip, #4
+ blo double_word_aligned
+ str r1, [r3], #4
+ b double_word_aligned
+
+set_less_than_16_unknown_align:
+ // Set up to 15 bytes.
+ vdup.8 d0, r1
+ movs ip, r2, lsl #29
+ bcc 1f
+ vst1.8 {d0}, [r0]!
+1: bge 2f
+ vst1.32 {d0[0]}, [r0]!
+2: movs ip, r2, lsl #31
+ it mi
+ strbmi r1, [r0], #1
+ itt cs
+ strbcs r1, [r0], #1
+ strbcs r1, [r0], #1
+ ldmfd sp!, {r0}
+ bx lr
+ .fnend
+ .size memset, .-memset
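
The three-instruction splat at the top of the routine (lsl #24, then two orr ... lsr) replicates the fill byte into all four lanes of a word so the loop can issue double-word stores. The same idea in C, down to word granularity (memset_sketch is an illustrative name; the shifts run the other direction but produce the same word):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void *memset_sketch(void *dst, int c, size_t n)
{
    unsigned char *d = dst;
    uint32_t v = (unsigned char)c;
    v |= v << 8;    /* 0x000000cc -> 0x0000cccc */
    v |= v << 16;   /* 0x0000cccc -> 0xcccccccc */

    while (n > 0 && ((uintptr_t)d & 3)) {   /* align to a word boundary */
        *d++ = (unsigned char)c;
        n--;
    }
    while (n >= 4) {                        /* word-sized stores */
        memcpy(d, &v, 4);
        d += 4; n -= 4;
    }
    while (n > 0) {                         /* tail bytes */
        *d++ = (unsigned char)c;
        n--;
    }
    return dst;
}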
diff --git a/reference/bionic-a15/strcmp.S b/reference/bionic-a15/strcmp.S
new file mode 100644
index 000000000000..9787e2556f3e
--- /dev/null
+++ b/reference/bionic-a15/strcmp.S
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __ARMEB__
+#define S2LOMEM lsl
+#define S2LOMEMEQ lsleq
+#define S2HIMEM lsr
+#define MSB 0x000000ff
+#define LSB 0xff000000
+#define BYTE0_OFFSET 24
+#define BYTE1_OFFSET 16
+#define BYTE2_OFFSET 8
+#define BYTE3_OFFSET 0
+#else /* not __ARMEB__ */
+#define S2LOMEM lsr
+#define S2LOMEMEQ lsreq
+#define S2HIMEM lsl
+#define BYTE0_OFFSET 0
+#define BYTE1_OFFSET 8
+#define BYTE2_OFFSET 16
+#define BYTE3_OFFSET 24
+#define MSB 0xff000000
+#define LSB 0x000000ff
+#endif /* not __ARMEB__ */
+
+.syntax unified
+
+#if defined (__thumb__)
+ .thumb
+ .thumb_func
+#endif
+
+ .globl strcmp
+ .type strcmp,%function
+strcmp:
+ .fnstart
+ /* Use LDRD whenever possible. */
+
+/* The main thing to look out for when comparing large blocks is that
+ the loads do not cross a page boundary when loading past the index
+ of the byte with the first difference or the first string-terminator.
+
+ For example, if the strings are identical and the string-terminator
+ is at index k, byte by byte comparison will not load beyond address
+ s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
+ k; double word - up to 7 bytes. If the load of these bytes crosses
+ a page boundary, it might cause a memory fault (if the page is not mapped)
+ that would not have happened in byte by byte comparison.
+
+ If an address is (double) word aligned, then a load of a (double) word
+ from that address will not cross a page boundary.
+ Therefore, the algorithm below considers word and double-word alignment
+ of strings separately. */
+
+/* High-level description of the algorithm.
+
+ * The fast path: if both strings are double-word aligned,
+ use LDRD to load two words from each string in every loop iteration.
+ * If the strings have the same offset from a word boundary,
+ use LDRB to load and compare byte by byte until
+ the first string is aligned to a word boundary (at most 3 bytes).
+ This is optimized for quick return on short unaligned strings.
+ * If the strings have the same offset from a double-word boundary,
+ use LDRD to load two words from each string in every loop iteration, as in the fast path.
+ * If the strings do not have the same offset from a double-word boundary,
+ load a word from the second string before the loop to initialize the queue.
+ Use LDRD to load two words from every string in every loop iteration.
+ Inside the loop, load the second word from the second string only after comparing
+ the first word, using the queued value, to guarantee safety across page boundaries.
+ * If the strings do not have the same offset from a word boundary,
+ use LDR and a shift queue. Order of loads and comparisons matters,
+ similarly to the previous case.
+
+ * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
+ * The only difference between ARM and Thumb modes is the use of the CBZ instruction.
+ * The only difference between big and little endian is the use of REV in little endian
+ to compute the return value, instead of MOV.
+*/
+
+ .macro m_cbz reg label
+#ifdef __thumb2__
+ cbz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ beq \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbz */
+
+ .macro m_cbnz reg label
+#ifdef __thumb2__
+ cbnz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ bne \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbnz */
+
+ .macro init
+ /* Macro to save temporary registers and prepare magic values. */
+ subs sp, sp, #16
+ strd r4, r5, [sp, #8]
+ strd r6, r7, [sp]
+ mvn r6, #0 /* all F */
+ mov r7, #0 /* all 0 */
+ .endm /* init */
+
+ .macro magic_compare_and_branch w1 w2 label
+ /* Macro to compare registers w1 and w2 and conditionally branch to label. */
+ cmp \w1, \w2 /* Are w1 and w2 the same? */
+ magic_find_zero_bytes \w1
+ it eq
+ cmpeq ip, #0 /* Is there a zero byte in w1? */
+ bne \label
+ .endm /* magic_compare_and_branch */
+
+ .macro magic_find_zero_bytes w1
+ /* Macro to find all-zero bytes in w1, result is in ip. */
+#if (defined (__ARM_FEATURE_DSP))
+ uadd8 ip, \w1, r6
+ sel ip, r7, r6
+#else /* not defined (__ARM_FEATURE_DSP) */
+ /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
+ Coincidentally, these processors only have Thumb-2 mode, where we can use
+ the (large) magic constant available directly as an immediate in instructions.
+ Note that we cannot use the magic constant in ARM mode, where we need
+ to create the constant in a register. */
+ sub ip, \w1, #0x01010101
+ bic ip, ip, \w1
+ and ip, ip, #0x80808080
+#endif /* not defined (__ARM_FEATURE_DSP) */
+ .endm /* magic_find_zero_bytes */
+
+ .macro setup_return w1 w2
+#ifdef __ARMEB__
+ mov r1, \w1
+ mov r2, \w2
+#else /* not __ARMEB__ */
+ rev r1, \w1
+ rev r2, \w2
+#endif /* not __ARMEB__ */
+ .endm /* setup_return */
+
+ pld [r0, #0]
+ pld [r1, #0]
+
+ /* Are both strings double-word aligned? */
+ orr ip, r0, r1
+ tst ip, #7
+ bne do_align
+
+ /* Fast path. */
+ init
+
+doubleword_aligned:
+
+ /* Get here when the strings to compare are double-word aligned. */
+ /* Compare two words in every iteration. */
+ .p2align 2
+2:
+ pld [r0, #16]
+ pld [r1, #16]
+
+ /* Load the next double-word from each string. */
+ ldrd r2, r3, [r0], #8
+ ldrd r4, r5, [r1], #8
+
+ magic_compare_and_branch w1=r2, w2=r4, label=return_24
+ magic_compare_and_branch w1=r3, w2=r5, label=return_35
+ b 2b
+
+do_align:
+ /* Is the first string word-aligned? */
+ ands ip, r0, #3
+ beq word_aligned_r0
+
+ /* Fast compare byte by byte until the first string is word-aligned. */
+ /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
+ to read until the next word boundary is 4-ip. */
+ bic r0, r0, #3
+ ldr r2, [r0], #4
+ lsls ip, ip, #31
+ beq byte2
+ bcs byte3
+
+byte1:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE1_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte2:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE2_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte3:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE3_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbnz reg=r3, label=word_aligned_r0
+
+fast_return:
+ mov r0, ip
+ bx lr
+
+word_aligned_r0:
+ init
+ /* The first string is word-aligned. */
+ /* Is the second string word-aligned? */
+ ands ip, r1, #3
+ bne strcmp_unaligned
+
+word_aligned:
+ /* The strings are word-aligned. */
+ /* Is the first string double-word aligned? */
+ tst r0, #4
+ beq doubleword_aligned_r0
+
+ /* If r0 is not double-word aligned yet, align it by loading
+ and comparing the next word from each string. */
+ ldr r2, [r0], #4
+ ldr r4, [r1], #4
+ magic_compare_and_branch w1=r2 w2=r4 label=return_24
+
+doubleword_aligned_r0:
+ /* Get here when r0 is double-word aligned. */
+ /* Is r1 doubleword_aligned? */
+ tst r1, #4
+ beq doubleword_aligned
+
+ /* Get here when the strings to compare are word-aligned,
+ r0 is double-word aligned, but r1 is not double-word aligned. */
+
+ /* Initialize the queue. */
+ ldr r5, [r1], #4
+
+ /* Compare two words in every iteration. */
+ .p2align 2
+3:
+ pld [r0, #16]
+ pld [r1, #16]
+
+ /* Load the next double-word from each string and compare. */
+ ldrd r2, r3, [r0], #8
+ magic_compare_and_branch w1=r2 w2=r5 label=return_25
+ ldrd r4, r5, [r1], #8
+ magic_compare_and_branch w1=r3 w2=r4 label=return_34
+ b 3b
+
+ .macro miscmp_word offsetlo offsethi
+ /* Macro to compare misaligned strings. */
+ /* r0, r1 are word-aligned, and at least one of the strings
+ is not double-word aligned. */
+ /* Compare one word in every loop iteration. */
+ /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
+ OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
+
+ /* Initialize the shift queue. */
+ ldr r5, [r1], #4
+
+ /* Compare one word from each string in every loop iteration. */
+ .p2align 2
+7:
+ ldr r3, [r0], #4
+ S2LOMEM r5, r5, #\offsetlo
+ magic_find_zero_bytes w1=r3
+ cmp r7, ip, S2HIMEM #\offsetlo
+ and r2, r3, r6, S2LOMEM #\offsetlo
+ it eq
+ cmpeq r2, r5
+ bne return_25
+ ldr r5, [r1], #4
+ cmp ip, #0
+ eor r3, r2, r3
+ S2HIMEM r2, r5, #\offsethi
+ it eq
+ cmpeq r3, r2
+ bne return_32
+ b 7b
+ .endm /* miscmp_word */
+
+strcmp_unaligned:
+ /* r0 is word-aligned, r1 is at offset ip from a word. */
+ /* Align r1 to the (previous) word-boundary. */
+ bic r1, r1, #3
+
+ /* Unaligned comparison word by word using LDRs. */
+ cmp ip, #2
+ beq miscmp_word_16 /* If ip == 2. */
+ bge miscmp_word_24 /* If ip == 3. */
+ miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
+miscmp_word_16: miscmp_word offsetlo=16 offsethi=16
+miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
+
+
+return_32:
+ setup_return w1=r3, w2=r2
+ b do_return
+return_34:
+ setup_return w1=r3, w2=r4
+ b do_return
+return_25:
+ setup_return w1=r2, w2=r5
+ b do_return
+return_35:
+ setup_return w1=r3, w2=r5
+ b do_return
+return_24:
+ setup_return w1=r2, w2=r4
+
+do_return:
+
+#ifdef __ARMEB__
+ mov r0, ip
+#else /* not __ARMEB__ */
+ rev r0, ip
+#endif /* not __ARMEB__ */
+
+ /* Restore temporaries early, before computing the return value. */
+ ldrd r6, r7, [sp]
+ ldrd r4, r5, [sp, #8]
+ adds sp, sp, #16
+
+ /* There is a zero or a different byte between r1 and r2. */
+ /* r0 contains a mask of all-zero bytes in r1. */
+ /* Using r0 and not ip here because cbz requires a low register. */
+ m_cbz reg=r0, label=compute_return_value
+ clz r0, r0
+ /* r0 contains the number of bits on the left of the first all-zero byte in r1. */
+ rsb r0, r0, #24
+ /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
+ lsr r1, r1, r0
+ lsr r2, r2, r0
+
+compute_return_value:
+ movs r0, #1
+ cmp r1, r2
+ /* The return value is computed as follows.
+ If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
+ If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
+ which means r0:=r0-r0-1 and r0 is #-1 at return.
+ If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
+ which means r0:=r0-r0 and r0 is #0 at return.
+ (C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
+ it ls
+ sbcls r0, r0, r0
+ bx lr
+ .fnend
+ .size strcmp, .-strcmp
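
The fallback arm of magic_find_zero_bytes above is the classic zero-in-word test; in C it reads (sketch, zero_byte_mask is an illustrative name):

#include <stdint.h>

/* Nonzero exactly when some byte lane of w is zero. */
static inline uint32_t zero_byte_mask(uint32_t w)
{
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

Subtracting 0x01 from each byte borrows into bit 7 when the byte is zero, and the & ~w term clears lanes whose own high bit was already set, so the result is nonzero exactly when the word contains a zero byte; because borrows only propagate toward higher lanes, the lowest set 0x80 flag marks the first zero byte. On cores with the DSP extension, the UADD8/SEL pair above computes an equivalent per-byte mask in two instructions.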
diff --git a/reference/bionic-a15/strcpy.S b/reference/bionic-a15/strcpy.S
new file mode 100644
index 000000000000..9925378a157d
--- /dev/null
+++ b/reference/bionic-a15/strcpy.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2008 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Android adaptation and tweak by Jim Huang <jserv@0xlab.org>.
+ */
+
+ .globl strcpy
+ .type strcpy, %function
+ .text
+
+strcpy:
+ .fnstart
+ PLD [r1, #0]
+ eor r2, r0, r1
+ mov ip, r0
+ tst r2, #3
+ bne 4f
+ tst r1, #3
+ bne 3f
+5:
+ str r5, [sp, #-4]!
+ mov r5, #0x01
+ orr r5, r5, r5, lsl #8
+ orr r5, r5, r5, lsl #16
+
+ str r4, [sp, #-4]!
+ tst r1, #4
+ ldr r3, [r1], #4
+ beq 2f
+ sub r2, r3, r5
+ bics r2, r2, r3
+ tst r2, r5, lsl #7
+ itt eq
+ streq r3, [ip], #4
+ ldreq r3, [r1], #4
+ bne 1f
+ /* Inner loop. We now know that r1 is 64-bit aligned, so we
+ can safely fetch up to two words. This allows us to avoid
+ load stalls. */
+ .p2align 2
+2:
+ PLD [r1, #8]
+ ldr r4, [r1], #4
+ sub r2, r3, r5
+ bics r2, r2, r3
+ tst r2, r5, lsl #7
+ sub r2, r4, r5
+ bne 1f
+ str r3, [ip], #4
+ bics r2, r2, r4
+ tst r2, r5, lsl #7
+ itt eq
+ ldreq r3, [r1], #4
+ streq r4, [ip], #4
+ beq 2b
+ mov r3, r4
+1:
+#ifdef __ARMEB__
+ rors r3, r3, #24
+#endif
+ strb r3, [ip], #1
+ tst r3, #0xff
+#ifdef __ARMEL__
+ ror r3, r3, #8
+#endif
+ bne 1b
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ bx lr
+
+ /* Strings have the same offset from word alignment, but it's
+ not zero. */
+3:
+ tst r1, #1
+ beq 1f
+ ldrb r2, [r1], #1
+ strb r2, [ip], #1
+ cmp r2, #0
+ it eq
+ bxeq lr
+1:
+ tst r1, #2
+ beq 5b
+ ldrh r2, [r1], #2
+#ifdef __ARMEB__
+ tst r2, #0xff00
+ iteet ne
+ strneh r2, [ip], #2
+ lsreq r2, r2, #8
+ streqb r2, [ip]
+ tstne r2, #0xff
+#else
+ tst r2, #0xff
+ itet ne
+ strneh r2, [ip], #2
+ streqb r2, [ip]
+ tstne r2, #0xff00
+#endif
+ bne 5b
+ bx lr
+
+ /* src and dst do not have a common word-alignment. Fall back to
+ byte copying. */
+4:
+ ldrb r2, [r1], #1
+ strb r2, [ip], #1
+ cmp r2, #0
+ bne 4b
+ bx lr
+
+ .fnend
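
The inner loop above stores a word only after proving it holds no NUL, using the same 0x01010101/0x80808080 test as the other routines. A compact C sketch of that structure (strcpy_sketch is an illustrative name; the assembly additionally unrolls by two words to hide load latency):

#include <stdint.h>
#include <string.h>

char *strcpy_sketch(char *dst, const char *src)
{
    char *d = dst;

    /* Byte-copy until src is word aligned. */
    while ((uintptr_t)src & 3)
        if ((*d++ = *src++) == '\0')
            return dst;

    /* Word loop, congruent case only; otherwise fall through to bytes. */
    if (((uintptr_t)d & 3) == 0) {
        for (;;) {
            uint32_t w;
            memcpy(&w, src, 4);                 /* aligned load, same page */
            if ((w - 0x01010101u) & ~w & 0x80808080u)
                break;                          /* this word holds a NUL */
            memcpy(d, &w, 4);                   /* safe to store the word */
            d += 4;
            src += 4;
        }
    }

    /* Tail: copy up to and including the NUL. */
    while ((*d++ = *src++) != '\0')
        ;
    return dst;
}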
diff --git a/reference/bionic-a15/strlen.c b/reference/bionic-a15/strlen.c
new file mode 100644
index 000000000000..8781d760566d
--- /dev/null
+++ b/reference/bionic-a15/strlen.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#undef strlen
+
+#define __ARM_HAVE_PLD 1
+
+size_t strlen(const char *s)
+{
+ __builtin_prefetch(s);
+ __builtin_prefetch(s+32);
+
+ union {
+ const char *b;
+ const uint32_t *w;
+ uintptr_t i;
+ } u;
+
+ // these are some scratch variables for the asm code below
+ uint32_t v, t;
+
+ // initialize the string length to zero
+ size_t l = 0;
+
+ // align the pointer to a 32-bit word boundary
+ u.b = s;
+ while (u.i & 0x3) {
+ if (__builtin_expect(*u.b++ == 0, 0)) {
+ goto done;
+ }
+ l++;
+ }
+
+ // loop for each word, testing if it contains a zero byte
+ // if so, exit the loop and update the length.
+ // We need to process 32 bytes per loop to schedule PLD properly
+ // and achieve the maximum bus speed.
+ asm(
+ "ldr %[v], [%[s]], #4 \n"
+ "sub %[l], %[l], %[s] \n"
+ "0: \n"
+#if __ARM_HAVE_PLD
+ "pld [%[s], #64] \n"
+#endif
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+#if !defined(__OPTIMIZE_SIZE__)
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]] , #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+#endif
+ "beq 0b \n"
+ "1: \n"
+ "add %[l], %[l], %[s] \n"
+ "tst %[v], #0xFF \n"
+ "beq 2f \n"
+ "add %[l], %[l], #1 \n"
+ "tst %[v], #0xFF00 \n"
+ "beq 2f \n"
+ "add %[l], %[l], #1 \n"
+ "tst %[v], #0xFF0000 \n"
+ "addne %[l], %[l], #1 \n"
+ "2: \n"
+ : [l]"=&r"(l), [v]"=&r"(v), [t]"=&r"(t), [s]"=&r"(u.b)
+ : "%[l]"(l), "%[s]"(u.b), [mask]"r"(0x80808080UL)
+ : "cc"
+ );
+
+done:
+ return l;
+}
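
As a cross-check on the inline assembly, the same computation as a portable C sketch (strlen_sketch is an illustrative name; little-endian lane order is assumed, matching the #0xFF/#0xFF00 tests after label 1):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

size_t strlen_sketch(const char *s)
{
    const char *p = s;

    /* Align to a word boundary byte by byte, as the C prologue does. */
    while ((uintptr_t)p & 3)
        if (*p++ == '\0')
            return (size_t)(p - s - 1);

    for (;;) {
        uint32_t w;
        memcpy(&w, p, 4);   /* aligned word load; never crosses a page */
        if ((w - 0x01010101u) & ~w & 0x80808080u) {
            /* A NUL is in these 4 bytes; locate it, low lane first. */
            if ((w & 0x000000ffu) == 0) return (size_t)(p - s);
            if ((w & 0x0000ff00u) == 0) return (size_t)(p - s) + 1;
            if ((w & 0x00ff0000u) == 0) return (size_t)(p - s) + 2;
            return (size_t)(p - s) + 3;
        }
        p += 4;
    }
}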
diff --git a/reference/bionic-a9/.deps/libbionic_a9_a-strlen.Po b/reference/bionic-a9/.deps/libbionic_a9_a-strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a9/.deps/libbionic_a9_a-strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a9/.deps/memcmp.Po b/reference/bionic-a9/.deps/memcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a9/.deps/memcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a9/.deps/memcpy.Po b/reference/bionic-a9/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a9/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a9/.deps/memset.Po b/reference/bionic-a9/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a9/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a9/.deps/strcmp.Po b/reference/bionic-a9/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a9/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a9/.deps/strcpy.Po b/reference/bionic-a9/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/bionic-a9/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/bionic-a9/memcmp.S b/reference/bionic-a9/memcmp.S
new file mode 100644
index 000000000000..8876a98b4f63
--- /dev/null
+++ b/reference/bionic-a9/memcmp.S
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_32_BYTE_CACHE_LINE
+#define CACHE_LINE_SIZE 32
+#else
+#define CACHE_LINE_SIZE 64
+#endif
+
+/*
+ * Optimized memcmp() for Cortex-A9.
+ */
+ .text
+ .globl memcmp
+ .type memcmp,%function
+memcmp:
+ .fnstart
+ pld [r0, #(CACHE_LINE_SIZE * 0)]
+ pld [r0, #(CACHE_LINE_SIZE * 1)]
+
+ /* take care of the case where length is 0 or the buffers are the same */
+ cmp r0, r1
+ moveq r0, #0
+ bxeq lr
+
+ pld [r1, #(CACHE_LINE_SIZE * 0)]
+ pld [r1, #(CACHE_LINE_SIZE * 1)]
+
+ /* make sure we have at least 8+4 bytes; this simplifies things below
+ * and avoids some overhead for small blocks
+ */
+ cmp r2, #(8+4)
+ bmi 10f
+/*
+ * Neon optimization
+ * Comparing 32 bytes at a time
+ */
+#if defined(__ARM_NEON__) && defined(NEON_UNALIGNED_ACCESS)
+ subs r2, r2, #32
+ blo 3f
+
+ /* preload all the cache lines we need. */
+ pld [r0, #(CACHE_LINE_SIZE * 2)]
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+
+1: /* The main loop compares 32 bytes at a time */
+ vld1.8 {d0 - d3}, [r0]!
+ pld [r0, #(CACHE_LINE_SIZE * 2)]
+ vld1.8 {d4 - d7}, [r1]!
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+
+ /* Start subtracting the values and merge results */
+ vsub.i8 q0, q2
+ vsub.i8 q1, q3
+ vorr q2, q0, q1
+ vorr d4, d5
+ vmov r3, ip, d4
+ /* Check if there are any differences among the 32 bytes */
+ orrs r3, ip
+ bne 2f
+ subs r2, r2, #32
+ bhs 1b
+ b 3f
+2:
+ /* Check if the difference was in the first or last 16 bytes */
+ sub r0, #32
+ vorr d0, d1
+ sub r1, #32
+ vmov r3, ip, d0
+ orrs r3, ip
+ /* if the first 16 bytes are equal, we only have to rewind 16 bytes */
+ ittt eq
+ subeq r2, #16
+ addeq r0, #16
+ addeq r1, #16
+
+3: /* fix-up the remaining count */
+ add r2, r2, #32
+
+ cmp r2, #(8+4)
+ bmi 10f
+#endif
+
+ .save {r4, lr}
+ /* save registers */
+ stmfd sp!, {r4, lr}
+
+ /* since r0 holds the result, move the first source
+ * pointer somewhere else
+ */
+ mov r4, r0
+
+ /* align first pointer to word boundary
+ * offset = -src & 3
+ */
+ rsb r3, r4, #0
+ ands r3, r3, #3
+ beq 0f
+
+ /* align first pointer */
+ sub r2, r2, r3
+1: ldrb r0, [r4], #1
+ ldrb ip, [r1], #1
+ subs r0, r0, ip
+ bne 9f
+ subs r3, r3, #1
+ bne 1b
+
+
+0: /* here the first pointer is aligned, and we have at least 4 bytes
+ * to process.
+ */
+
+ /* see if the pointers are congruent */
+ eor r0, r4, r1
+ ands r0, r0, #3
+ bne 5f
+
+ /* congruent case, 32 bytes per iteration
+ * We need to make sure there are at least 32+4 bytes left
+ * because we effectively read ahead one word, and we could
+ * read past the buffer (and segfault) if we're not careful.
+ */
+
+ ldr ip, [r1]
+ subs r2, r2, #(32 + 4)
+ bmi 1f
+
+0: pld [r4, #(CACHE_LINE_SIZE * 2)]
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+ ldr r0, [r4], #4
+ ldr lr, [r1, #4]!
+ eors r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ ldreq r0, [r4], #4
+ ldreq lr, [r1, #4]!
+ eoreqs r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ ldreq r0, [r4], #4
+ ldreq lr, [r1, #4]!
+ eoreqs r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ ldreq r0, [r4], #4
+ ldreq lr, [r1, #4]!
+ eoreqs r0, r0, ip
+ ldreq r0, [r4], #4
+ ldreq ip, [r1, #4]!
+ eoreqs r0, r0, lr
+ bne 2f
+ subs r2, r2, #32
+ bhs 0b
+
+ /* do we have at least 4 bytes left? */
+1: adds r2, r2, #(32 - 4 + 4)
+ bmi 4f
+
+ /* finish off 4 bytes at a time */
+3: ldr r0, [r4], #4
+ ldr ip, [r1], #4
+ eors r0, r0, ip
+ bne 2f
+ subs r2, r2, #4
+ bhs 3b
+
+ /* are we done? */
+4: adds r2, r2, #4
+ moveq r0, #0
+ beq 9f
+
+ /* finish off the remaining bytes */
+ b 8f
+
+2: /* the last 4 bytes are different, restart them */
+ sub r4, r4, #4
+ sub r1, r1, #4
+ mov r2, #4
+
+ /* process the last few bytes */
+8: ldrb r0, [r4], #1
+ ldrb ip, [r1], #1
+ // stall
+ subs r0, r0, ip
+ bne 9f
+ subs r2, r2, #1
+ bne 8b
+
+9: /* restore registers and return */
+ ldmfd sp!, {r4, lr}
+ bx lr
+
+10: /* process less than 12 bytes */
+ cmp r2, #0
+ moveq r0, #0
+ bxeq lr
+ mov r3, r0
+11:
+ ldrb r0, [r3], #1
+ ldrb ip, [r1], #1
+ subs r0, ip
+ bxne lr
+ subs r2, r2, #1
+ bne 11b
+ bx lr
+
+5: /*************** non-congruent case ***************/
+ and r0, r1, #3
+ cmp r0, #2
+ bne 4f
+
+ /* here, offset is 2 (16-bits aligned, special cased) */
+
+ /* make sure we have at least 16 bytes to process */
+ subs r2, r2, #16
+ addmi r2, r2, #16
+ bmi 8b
+
+ /* align the unaligned pointer */
+ bic r1, r1, #3
+ ldr lr, [r1], #4
+
+6: pld [r1, #(CACHE_LINE_SIZE * 2)]
+ pld [r4, #(CACHE_LINE_SIZE * 2)]
+ mov ip, lr, lsr #16
+ ldr lr, [r1], #4
+ ldr r0, [r4], #4
+ orr ip, ip, lr, lsl #16
+ eors r0, r0, ip
+ moveq ip, lr, lsr #16
+ ldreq lr, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, lr, lsl #16
+ eoreqs r0, r0, ip
+ moveq ip, lr, lsr #16
+ ldreq lr, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, lr, lsl #16
+ eoreqs r0, r0, ip
+ moveq ip, lr, lsr #16
+ ldreq lr, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, lr, lsl #16
+ eoreqs r0, r0, ip
+ bne 7f
+ subs r2, r2, #16
+ bhs 6b
+ sub r1, r1, #2
+ /* are we done? */
+ adds r2, r2, #16
+ moveq r0, #0
+ beq 9b
+ /* finish off the remaining bytes */
+ b 8b
+
+7: /* fix up the 2 pointers and fallthrough... */
+ sub r1, r1, #(4+2)
+ sub r4, r4, #4
+ mov r2, #4
+ b 8b
+
+
+4: /*************** offset is 1 or 3 (less optimized) ***************/
+
+ stmfd sp!, {r5, r6, r7}
+
+ // r5 = right-shift amount (in bits)
+ // r6 = left-shift amount (in bits)
+ // r7 = scratch
+
+ mov r5, r0, lsl #3 /* r5 = right shift */
+ rsb r6, r5, #32 /* r6 = left shift */
+
+ /* align the unaligned pointer */
+ bic r1, r1, #3
+ ldr r7, [r1], #4
+ sub r2, r2, #8
+
+6: mov ip, r7, lsr r5
+ ldr r7, [r1], #4
+ ldr r0, [r4], #4
+ orr ip, ip, r7, lsl r6
+ eors r0, r0, ip
+ moveq ip, r7, lsr r5
+ ldreq r7, [r1], #4
+ ldreq r0, [r4], #4
+ orreq ip, ip, r7, lsl r6
+ eoreqs r0, r0, ip
+ bne 7f
+ subs r2, r2, #8
+ bhs 6b
+
+ sub r1, r1, r6, lsr #3
+ ldmfd sp!, {r5, r6, r7}
+
+ /* are we done? */
+ adds r2, r2, #8
+ moveq r0, #0
+ beq 9b
+
+ /* finish off the remaining bytes */
+ b 8b
+
+7: /* fix up the 2 pointers and fallthrough... */
+ sub r1, r1, #4
+ sub r1, r1, r6, lsr #3
+ sub r4, r4, #4
+ mov r2, #4
+ ldmfd sp!, {r5, r6, r7}
+ b 8b
+ .fnend
+ .size memcmp, .-memcmp
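
The two non-congruent paths above rebuild each misaligned word of the second buffer from a pair of aligned loads joined by complementary shifts, keeping one word queued so only a single new load is needed per compare; that queue is also why the code guards against reading one word past the end. A minimal C sketch of the idea (illustrative only, little-endian like the asm; the name and signature are hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Compare nwords words of an aligned buffer against a misaligned one.
 * offset (1..3) is p2's distance past its word boundary. As in the asm,
 * the aligned loads run one word ahead of the bytes actually compared. */
static int compare_misaligned(const uint32_t *w1, const uint8_t *p2,
                              size_t nwords, unsigned offset)
{
    const uint32_t *w2 = (const uint32_t *)(p2 - offset); /* aligned base */
    unsigned rsh = offset * 8, lsh = 32 - rsh;
    uint32_t queued = *w2++;                              /* prime the queue */

    while (nwords--) {
        uint32_t lo = queued >> rsh;        /* bytes still owed by the queue */
        queued = *w2++;                     /* exactly one aligned load/word */
        uint32_t v2 = lo | (queued << lsh); /* reconstructed stream word */
        if (*w1++ != v2)
            return 1;                       /* asm falls back to a byte loop */
    }
    return 0;
}

The offset-2 case gets its own loop above presumably because its fixed #16 shifts schedule better than the register-counted shifts of the general path.
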
diff --git a/reference/bionic-a9/memcpy.S b/reference/bionic-a9/memcpy.S
new file mode 100644
index 000000000000..b9d3c9b84e66
--- /dev/null
+++ b/reference/bionic-a9/memcpy.S
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This code assumes it is running on a processor that supports all
+ * ARMv7 instructions, that supports NEON instructions, and that has a
+ * 32-byte cache line.
+ */
+
+ .text
+ .fpu neon
+
+#define CACHE_LINE_SIZE 32
+
+ .globl memcpy
+ .type memcpy,%function
+memcpy:
+ .fnstart
+ .save {r0, lr}
+ /* start preloading as early as possible */
+ pld [r1, #(CACHE_LINE_SIZE * 0)]
+ stmfd sp!, {r0, lr}
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+
+ // Check that the byte count is at least 16; the alignment code below requires it.
+ cmp r2, #16
+ blo 5f
+
+
+ /* check if buffers are aligned. If so, run arm-only version */
+ eor r3, r0, r1
+ ands r3, r3, #0x3
+ beq 11f
+
+ /* Check the upper size limit for Neon unaligned memory access in memcpy */
+ cmp r2, #224
+ blo 3f
+
+ /* align destination to 16 bytes for the write-buffer */
+ rsb r3, r0, #0
+ ands r3, r3, #0xF
+ beq 3f
+
+ /* copy up to 15 bytes (count in r3) */
+ sub r2, r2, r3
+ movs ip, r3, lsl #31
+ ldrmib lr, [r1], #1
+ strmib lr, [r0], #1
+ ldrcsb ip, [r1], #1
+ ldrcsb lr, [r1], #1
+ strcsb ip, [r0], #1
+ strcsb lr, [r0], #1
+ movs ip, r3, lsl #29
+ bge 1f
+ // copies 4 bytes, destination 32-bits aligned
+ vld1.32 {d0[0]}, [r1]!
+ vst1.32 {d0[0]}, [r0, :32]!
+1: bcc 2f
+ // copies 8 bytes, destination 64-bits aligned
+ vld1.8 {d0}, [r1]!
+ vst1.8 {d0}, [r0, :64]!
+2:
+ /* preload immediately the next cache line, which we may need */
+ pld [r1, #(CACHE_LINE_SIZE * 0)]
+ pld [r1, #(CACHE_LINE_SIZE * 2)]
+3:
+ /* make sure we have at least 64 bytes to copy */
+ subs r2, r2, #64
+ blo 2f
+
+ /* preload all the cache lines we need */
+ pld [r1, #(CACHE_LINE_SIZE * 4)]
+ pld [r1, #(CACHE_LINE_SIZE * 6)]
+
+1: /* The main loop copies 64 bytes at a time */
+ vld1.8 {d0 - d3}, [r1]!
+ vld1.8 {d4 - d7}, [r1]!
+ pld [r1, #(CACHE_LINE_SIZE * 6)]
+ subs r2, r2, #64
+ vst1.8 {d0 - d3}, [r0]!
+ vst1.8 {d4 - d7}, [r0]!
+ bhs 1b
+
+2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
+ add r2, r2, #64
+ subs r2, r2, #32
+ blo 4f
+
+3: /* 32 bytes at a time. These cache lines were already preloaded */
+ vld1.8 {d0 - d3}, [r1]!
+ subs r2, r2, #32
+ vst1.8 {d0 - d3}, [r0]!
+ bhs 3b
+
+4: /* less than 32 left */
+ add r2, r2, #32
+ tst r2, #0x10
+ beq 5f
+ // copies 16 bytes, 128-bits aligned
+ vld1.8 {d0, d1}, [r1]!
+ vst1.8 {d0, d1}, [r0]!
+5: /* copy up to 15 bytes (count in r2) */
+ movs ip, r2, lsl #29
+ bcc 1f
+ vld1.8 {d0}, [r1]!
+ vst1.8 {d0}, [r0]!
+1: bge 2f
+ vld1.32 {d0[0]}, [r1]!
+ vst1.32 {d0[0]}, [r0]!
+2: movs ip, r2, lsl #31
+ ldrmib r3, [r1], #1
+ ldrcsb ip, [r1], #1
+ ldrcsb lr, [r1], #1
+ strmib r3, [r0], #1
+ strcsb ip, [r0], #1
+ strcsb lr, [r0], #1
+
+ ldmfd sp!, {r0, lr}
+ bx lr
+11:
+ /* Simple arm-only copy loop to handle aligned copy operations */
+ stmfd sp!, {r4, r5, r6, r7, r8}
+ pld [r1, #(CACHE_LINE_SIZE * 4)]
+
+ /* Check alignment */
+ rsb r3, r1, #0
+ ands r3, #3
+ beq 2f
+
+ /* align source to 32 bits. We need to insert 2 instructions between
+ * an ldr[b|h] and the matching str[b|h] because byte and half-word
+ * loads stall for 2 cycles.
+ */
+ movs r12, r3, lsl #31
+ sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
+ ldrmib r3, [r1], #1
+ ldrcsb r4, [r1], #1
+ ldrcsb r5, [r1], #1
+ strmib r3, [r0], #1
+ strcsb r4, [r0], #1
+ strcsb r5, [r0], #1
+
+2:
+ subs r2, r2, #64
+ blt 4f
+
+3: /* Main copy loop, copying 64 bytes at a time */
+ pld [r1, #(CACHE_LINE_SIZE * 8)]
+ ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
+ ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
+ subs r2, r2, #64
+ bge 3b
+
+4: /* Check if there are > 32 bytes left */
+ adds r2, r2, #64
+ subs r2, r2, #32
+ blt 5f
+
+ /* Copy 32 bytes */
+ ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
+ subs r2, #32
+
+5: /* Handle any remaining bytes */
+ adds r2, #32
+ beq 6f
+
+ movs r12, r2, lsl #28
+ ldmcsia r1!, {r3, r4, r5, r6} /* 16 bytes */
+ ldmmiia r1!, {r7, r8} /* 8 bytes */
+ stmcsia r0!, {r3, r4, r5, r6}
+ stmmiia r0!, {r7, r8}
+ movs r12, r2, lsl #30
+ ldrcs r3, [r1], #4 /* 4 bytes */
+ ldrmih r4, [r1], #2 /* 2 bytes */
+ strcs r3, [r0], #4
+ strmih r4, [r0], #2
+ tst r2, #0x1
+ ldrneb r3, [r1] /* last byte */
+ strneb r3, [r0]
+6:
+ ldmfd sp!, {r4, r5, r6, r7, r8}
+ ldmfd sp!, {r0, pc}
+ .fnend
+ .size memcpy, .-memcpy
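
Two address idioms above are easy to miss: "rsb r3, r0, #0; ands r3, r3, #0xF" computes the byte distance to the next 16-byte boundary as -dst & 15, and the tail code tests individual bits of the residual count (via the flags set by "movs ..., lsl #29/#31") instead of looping. A C sketch of both, illustrative only, with hypothetical names:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Byte distance from p to the next align-byte boundary, for align a
 * power of two; 0 when p is already aligned. */
static size_t bytes_to_align(const void *p, size_t align)
{
    return (size_t)(-(uintptr_t)p) & (align - 1);
}

/* Dispatch a sub-16-byte tail by testing bits of the count, one chunk
 * per power of two, instead of a byte loop. */
static void copy_tail(uint8_t *d, const uint8_t *s, size_t rem)
{
    if (rem & 8) { memcpy(d, s, 8); d += 8; s += 8; }
    if (rem & 4) { memcpy(d, s, 4); d += 4; s += 4; }
    if (rem & 2) { memcpy(d, s, 2); d += 2; s += 2; }
    if (rem & 1) { *d = *s; }
}

"movs ip, r2, lsl #31" performs the two low-bit tests at once: bit 0 lands in the N flag and bit 1 in the C flag, feeding the ldrmib/ldrcsb pairs.
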
diff --git a/reference/bionic-a9/memset.S b/reference/bionic-a9/memset.S
new file mode 100644
index 000000000000..104098642d96
--- /dev/null
+++ b/reference/bionic-a9/memset.S
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This code assumes it is running on a processor that supports all
+ * ARMv7 instructions and that supports NEON instructions.
+ */
+
+ .text
+ .fpu neon
+
+/* memset() returns its first argument. */
+ .globl memset
+ .type memset,%function
+memset:
+ .fnstart
+ # The NEON memset only wins for counts below 132 bytes.
+ cmp r2, #132
+ bhi 11f
+
+ .save {r0}
+ stmfd sp!, {r0}
+
+ vdup.8 q0, r1
+
+ /* make sure we have at least 32 bytes to write */
+ subs r2, r2, #32
+ blo 2f
+ vmov q1, q0
+
+1: /* The main loop writes 32 bytes at a time */
+ subs r2, r2, #32
+ vst1.8 {d0 - d3}, [r0]!
+ bhs 1b
+
+2: /* less than 32 left */
+ add r2, r2, #32
+ tst r2, #0x10
+ beq 3f
+
+ // writes 16 bytes, 128-bits aligned
+ vst1.8 {d0, d1}, [r0]!
+3: /* write up to 15 bytes (count in r2) */
+ movs ip, r2, lsl #29
+ bcc 1f
+ vst1.8 {d0}, [r0]!
+1: bge 2f
+ vst1.32 {d0[0]}, [r0]!
+2: movs ip, r2, lsl #31
+ strmib r1, [r0], #1
+ strcsb r1, [r0], #1
+ strcsb r1, [r0], #1
+ ldmfd sp!, {r0}
+ bx lr
+11:
+ /* compute the offset to align the destination
+ * offset = (4-(dst&3))&3 = -dst & 3
+ */
+
+ .save {r0, r4-r7, lr}
+ stmfd sp!, {r0, r4-r7, lr}
+ rsb r3, r0, #0
+ ands r3, r3, #3
+ cmp r3, r2
+ movhi r3, r2
+
+ /* splat r1 */
+ mov r1, r1, lsl #24
+ orr r1, r1, r1, lsr #8
+ orr r1, r1, r1, lsr #16
+
+ movs r12, r3, lsl #31
+ strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
+ strcsb r1, [r0], #1
+ strmib r1, [r0], #1
+ subs r2, r2, r3
+ ldmlsfd sp!, {r0, r4-r7, lr} /* return */
+ bxls lr
+
+ /* align the destination to a cache-line */
+ mov r12, r1
+ mov lr, r1
+ mov r4, r1
+ mov r5, r1
+ mov r6, r1
+ mov r7, r1
+
+ rsb r3, r0, #0
+ ands r3, r3, #0x1C
+ beq 3f
+ cmp r3, r2
+ andhi r3, r2, #0x1C
+ sub r2, r2, r3
+
+ /* conditionally writes 0 to 7 words (length in r3) */
+ movs r3, r3, lsl #28
+ stmcsia r0!, {r1, lr}
+ stmcsia r0!, {r1, lr}
+ stmmiia r0!, {r1, lr}
+ movs r3, r3, lsl #2
+ strcs r1, [r0], #4
+
+3:
+ subs r2, r2, #32
+ mov r3, r1
+ bmi 2f
+1: subs r2, r2, #32
+ stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
+ bhs 1b
+2: add r2, r2, #32
+
+ /* conditionally stores 0 to 31 bytes */
+ movs r2, r2, lsl #28
+ stmcsia r0!, {r1,r3,r12,lr}
+ stmmiia r0!, {r1, lr}
+ movs r2, r2, lsl #2
+ strcs r1, [r0], #4
+ strmih r1, [r0], #2
+ movs r2, r2, lsl #2
+ strcsb r1, [r0]
+ ldmfd sp!, {r0, r4-r7, lr}
+ bx lr
+ .fnend
+ .size memset, .-memset
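
The ARM path above splats the fill byte across a word ("lsl #24", then two "orr ... lsr" steps) so the main loop can store eight registers of replicated bytes per iteration. A C rendition of the splat plus a basic word loop; a sketch only, name hypothetical:

#include <stddef.h>
#include <stdint.h>

/* Fill n bytes at dst with c, a word at a time after a byte-wise
 * prologue. The splat mirrors the asm's shift/orr sequence. */
static void word_memset(void *dst, int c, size_t n)
{
    uint8_t *q = dst;
    uint32_t w = (uint8_t)c;
    w |= w << 8;                  /* 0x000000cc -> 0x0000cccc */
    w |= w << 16;                 /* 0x0000cccc -> 0xcccccccc */

    while (n && ((uintptr_t)q & 3)) { *q++ = (uint8_t)c; n--; } /* align */
    for (; n >= 4; n -= 4) { *(uint32_t *)q = w; q += 4; }      /* words */
    while (n--) *q++ = (uint8_t)c;                              /* tail  */
}
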
diff --git a/reference/bionic-a9/strcmp.S b/reference/bionic-a9/strcmp.S
new file mode 100644
index 000000000000..7e48079d850b
--- /dev/null
+++ b/reference/bionic-a9/strcmp.S
@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __ARMEB__
+#define S2LOMEM lsl
+#define S2LOMEMEQ lsleq
+#define S2HIMEM lsr
+#define MSB 0x000000ff
+#define LSB 0xff000000
+#define BYTE0_OFFSET 24
+#define BYTE1_OFFSET 16
+#define BYTE2_OFFSET 8
+#define BYTE3_OFFSET 0
+#else /* not __ARMEB__ */
+#define S2LOMEM lsr
+#define S2LOMEMEQ lsreq
+#define S2HIMEM lsl
+#define BYTE0_OFFSET 0
+#define BYTE1_OFFSET 8
+#define BYTE2_OFFSET 16
+#define BYTE3_OFFSET 24
+#define MSB 0xff000000
+#define LSB 0x000000ff
+#endif /* not __ARMEB__ */
+
+.syntax unified
+
+#if defined (__thumb__)
+ .thumb
+ .thumb_func
+#endif
+
+ .globl strcmp
+ .type strcmp,%function
+strcmp:
+ .fnstart
+ /* Use LDRD whenever possible. */
+
+/* The main thing to look out for when comparing large blocks is that
+ the loads do not cross a page boundary when loading past the index
+ of the byte with the first difference or the first string-terminator.
+
+ For example, if the strings are identical and the string-terminator
+ is at index k, byte by byte comparison will not load beyond address
+ s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
+ k; double word - up to 7 bytes. If the load of these bytes crosses
+ a page boundary, it might cause a memory fault (if the page is not mapped)
+ that would not have happened in byte by byte comparison.
+
+ If an address is (double) word aligned, then a load of a (double) word
+ from that address will not cross a page boundary.
+ Therefore, the algorithm below considers word and double-word alignment
+ of strings separately. */
+
+/* High-level description of the algorithm.
+
+ * The fast path: if both strings are double-word aligned,
+ use LDRD to load two words from each string in every loop iteration.
+ * If the strings have the same offset from a word boundary,
+ use LDRB to load and compare byte by byte until
+ the first string is aligned to a word boundary (at most 3 bytes).
+ This is optimized for quick return on short unaligned strings.
+ * If the strings have the same offset from a double-word boundary,
+ use LDRD to load two words from each string in every loop iteration, as in the fast path.
+ * If the strings do not have the same offset from a double-word boundary,
+ load a word from the second string before the loop to initialize the queue.
+ Use LDRD to load two words from each string in every loop iteration.
+ Inside the loop, load the second word from the second string only after comparing
+ the first word, using the queued value, to guarantee safety across page boundaries.
+ * If the strings do not have the same offset from a word boundary,
+ use LDR and a shift queue. Order of loads and comparisons matters,
+ similarly to the previous case.
+
+ * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
+ * The only difference between ARM and Thumb modes is the use of CBZ instruction.
+ * The only difference between big and little endian is the use of REV in little endian
+ to compute the return value, instead of MOV.
+*/
+
+ .macro m_cbz reg label
+#ifdef __thumb2__
+ cbz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ beq \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbz */
+
+ .macro m_cbnz reg label
+#ifdef __thumb2__
+ cbnz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ bne \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbnz */
+
+ .macro init
+ /* Macro to save temporary registers and prepare magic values. */
+ subs sp, sp, #16
+ strd r4, r5, [sp, #8]
+ strd r6, r7, [sp]
+ mvn r6, #0 /* all F */
+ mov r7, #0 /* all 0 */
+ .endm /* init */
+
+ .macro magic_compare_and_branch w1 w2 label
+ /* Macro to compare registers w1 and w2 and conditionally branch to label. */
+ cmp \w1, \w2 /* Are w1 and w2 the same? */
+ magic_find_zero_bytes \w1
+ it eq
+ cmpeq ip, #0 /* Is there a zero byte in w1? */
+ bne \label
+ .endm /* magic_compare_and_branch */
+
+ .macro magic_find_zero_bytes w1
+ /* Macro to find all-zero bytes in w1, result is in ip. */
+#if (defined (__ARM_FEATURE_DSP))
+ uadd8 ip, \w1, r6
+ sel ip, r7, r6
+#else /* not defined (__ARM_FEATURE_DSP) */
+ /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
+ Coincidentally, these processors only have Thumb-2 mode, where we can use
+ the (large) magic constant directly as an immediate in instructions.
+ Note that we cannot use the magic constant in ARM mode, where we need
+ to create the constant in a register. */
+ sub ip, \w1, #0x01010101
+ bic ip, ip, \w1
+ and ip, ip, #0x80808080
+#endif /* not defined (__ARM_FEATURE_DSP) */
+ .endm /* magic_find_zero_bytes */
+
+ .macro setup_return w1 w2
+#ifdef __ARMEB__
+ mov r1, \w1
+ mov r2, \w2
+#else /* not __ARMEB__ */
+ rev r1, \w1
+ rev r2, \w2
+#endif /* not __ARMEB__ */
+ .endm /* setup_return */
+
+ pld [r0, #0]
+ pld [r1, #0]
+
+ /* Are both strings double-word aligned? */
+ orr ip, r0, r1
+ tst ip, #7
+ bne do_align
+
+ /* Fast path. */
+ init
+
+doubleword_aligned:
+
+ /* Get here when the strings to compare are double-word aligned. */
+ /* Compare two words in every iteration. */
+ .p2align 2
+2:
+ pld [r0, #16]
+ pld [r1, #16]
+
+ /* Load the next double-word from each string. */
+ ldrd r2, r3, [r0], #8
+ ldrd r4, r5, [r1], #8
+
+ magic_compare_and_branch w1=r2, w2=r4, label=return_24
+ magic_compare_and_branch w1=r3, w2=r5, label=return_35
+ b 2b
+
+do_align:
+ /* Is the first string word-aligned? */
+ ands ip, r0, #3
+ beq word_aligned_r0
+
+ /* Fast compare byte by byte until the first string is word-aligned. */
+ /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
+ to read until the next word boundary is 4-ip. */
+ bic r0, r0, #3
+ ldr r2, [r0], #4
+ lsls ip, ip, #31
+ beq byte2
+ bcs byte3
+
+byte1:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE1_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte2:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE2_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte3:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE3_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbnz reg=r3, label=word_aligned_r0
+
+fast_return:
+ mov r0, ip
+ bx lr
+
+word_aligned_r0:
+ init
+ /* The first string is word-aligned. */
+ /* Is the second string word-aligned? */
+ ands ip, r1, #3
+ bne strcmp_unaligned
+
+word_aligned:
+ /* The strings are word-aligned. */
+ /* Is the first string double-word aligned? */
+ tst r0, #4
+ beq doubleword_aligned_r0
+
+ /* If r0 is not double-word aligned yet, align it by loading
+ and comparing the next word from each string. */
+ ldr r2, [r0], #4
+ ldr r4, [r1], #4
+ magic_compare_and_branch w1=r2 w2=r4 label=return_24
+
+doubleword_aligned_r0:
+ /* Get here when r0 is double-word aligned. */
+ /* Is r1 doubleword_aligned? */
+ tst r1, #4
+ beq doubleword_aligned
+
+ /* Get here when the strings to compare are word-aligned,
+ r0 is double-word aligned, but r1 is not double-word aligned. */
+
+ /* Initialize the queue. */
+ ldr r5, [r1], #4
+
+ /* Compare two words in every iteration. */
+ .p2align 2
+3:
+ pld [r0, #16]
+ pld [r1, #16]
+
+ /* Load the next double-word from each string and compare. */
+ ldrd r2, r3, [r0], #8
+ magic_compare_and_branch w1=r2 w2=r5 label=return_25
+ ldrd r4, r5, [r1], #8
+ magic_compare_and_branch w1=r3 w2=r4 label=return_34
+ b 3b
+
+ .macro miscmp_word offsetlo offsethi
+ /* Macro to compare misaligned strings. */
+ /* r0, r1 are word-aligned, and at least one of the strings
+ is not double-word aligned. */
+ /* Compare one word in every loop iteration. */
+ /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
+ OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
+
+ /* Initialize the shift queue. */
+ ldr r5, [r1], #4
+
+ /* Compare one word from each string in every loop iteration. */
+ .p2align 2
+7:
+ ldr r3, [r0], #4
+ S2LOMEM r5, r5, #\offsetlo
+ magic_find_zero_bytes w1=r3
+ cmp r7, ip, S2HIMEM #\offsetlo
+ and r2, r3, r6, S2LOMEM #\offsetlo
+ it eq
+ cmpeq r2, r5
+ bne return_25
+ ldr r5, [r1], #4
+ cmp ip, #0
+ eor r3, r2, r3
+ S2HIMEM r2, r5, #\offsethi
+ it eq
+ cmpeq r3, r2
+ bne return_32
+ b 7b
+ .endm /* miscmp_word */
+
+return_32:
+ setup_return w1=r3, w2=r2
+ b do_return
+return_34:
+ setup_return w1=r3, w2=r4
+ b do_return
+return_25:
+ setup_return w1=r2, w2=r5
+ b do_return
+return_35:
+ setup_return w1=r3, w2=r5
+ b do_return
+return_24:
+ setup_return w1=r2, w2=r4
+
+do_return:
+
+#ifdef __ARMEB__
+ mov r0, ip
+#else /* not __ARMEB__ */
+ rev r0, ip
+#endif /* not __ARMEB__ */
+
+ /* Restore temporaries early, before computing the return value. */
+ ldrd r6, r7, [sp]
+ ldrd r4, r5, [sp, #8]
+ adds sp, sp, #16
+
+ /* There is a zero or a different byte between r1 and r2. */
+ /* r0 contains a mask of all-zero bytes in r1. */
+ /* Using r0 and not ip here because cbz requires a low register. */
+ m_cbz reg=r0, label=compute_return_value
+ clz r0, r0
+ /* r0 contains the number of bits on the left of the first all-zero byte in r1. */
+ rsb r0, r0, #24
+ /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
+ lsr r1, r1, r0
+ lsr r2, r2, r0
+
+compute_return_value:
+ movs r0, #1
+ cmp r1, r2
+ /* The return value is computed as follows.
+ If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
+ If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
+ which means r0:=r0-r0-1 and r0 is #-1 at return.
+ If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
+ which means r0:=r0-r0 and r0 is #0 at return.
+ (C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
+ it ls
+ sbcls r0, r0, r0
+ bx lr
+
+ /* The code from the previous version of strcmp.S handles all of the
+ * cases where the first and second strings cannot both be
+ * aligned to a word boundary faster than the new algorithm. See
+ * bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S for the unedited
+ * version of the code.
+ */
+strcmp_unaligned:
+ wp1 .req r0
+ wp2 .req r1
+ b1 .req r2
+ w1 .req r4
+ w2 .req r5
+ t1 .req ip
+ @ r3 is scratch
+
+2:
+ mov b1, #1
+ orr b1, b1, b1, lsl #8
+ orr b1, b1, b1, lsl #16
+
+ and t1, wp2, #3
+ bic wp2, wp2, #3
+ ldr w1, [wp1], #4
+ ldr w2, [wp2], #4
+ cmp t1, #2
+ beq 2f
+ bhi 3f
+
+ /* Critical inner Loop: Block with 3 bytes initial overlap */
+ .p2align 2
+1:
+ bic t1, w1, #MSB
+ cmp t1, w2, S2LOMEM #8
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #24
+ bne 6f
+ ldr w1, [wp1], #4
+ b 1b
+4:
+ S2LOMEM w2, w2, #8
+ b 8f
+
+5:
+#ifdef __ARMEB__
+ /* The syndrome value may contain false ones if the string ends
+ * with the bytes 0x01 0x00
+ */
+ tst w1, #0xff000000
+ itt ne
+ tstne w1, #0x00ff0000
+ tstne w1, #0x0000ff00
+ beq 7f
+#else
+ bics r3, r3, #0xff000000
+ bne 7f
+#endif
+ ldrb w2, [wp2]
+ S2LOMEM t1, w1, #24
+#ifdef __ARMEB__
+ lsl w2, w2, #24
+#endif
+ b 8f
+
+6:
+ S2LOMEM t1, w1, #24
+ and w2, w2, #LSB
+ b 8f
+
+ /* Critical inner Loop: Block with 2 bytes initial overlap */
+ .p2align 2
+2:
+ S2HIMEM t1, w1, #16
+ sub r3, w1, b1
+ S2LOMEM t1, t1, #16
+ bic r3, r3, w1
+ cmp t1, w2, S2LOMEM #16
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #16
+ bne 6f
+ ldr w1, [wp1], #4
+ b 2b
+
+5:
+#ifdef __ARMEB__
+ /* The syndrome value may contain false ones if the string ends
+ * with the bytes 0x01 0x00
+ */
+ tst w1, #0xff000000
+ it ne
+ tstne w1, #0x00ff0000
+ beq 7f
+#else
+ lsls r3, r3, #16
+ bne 7f
+#endif
+ ldrh w2, [wp2]
+ S2LOMEM t1, w1, #16
+#ifdef __ARMEB__
+ lsl w2, w2, #16
+#endif
+ b 8f
+
+6:
+ S2HIMEM w2, w2, #16
+ S2LOMEM t1, w1, #16
+4:
+ S2LOMEM w2, w2, #16
+ b 8f
+
+ /* Critical inner Loop: Block with 1 byte initial overlap */
+ .p2align 2
+3:
+ and t1, w1, #LSB
+ cmp t1, w2, S2LOMEM #24
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #8
+ bne 6f
+ ldr w1, [wp1], #4
+ b 3b
+4:
+ S2LOMEM w2, w2, #24
+ b 8f
+5:
+ /* The syndrome value may contain false ones if the string ends
+ * with the bytes 0x01 0x00
+ */
+ tst w1, #LSB
+ beq 7f
+ ldr w2, [wp2], #4
+6:
+ S2LOMEM t1, w1, #8
+ bic w2, w2, #MSB
+ b 8f
+7:
+ mov r0, #0
+
+ /* Restore registers and stack. */
+ ldrd r6, r7, [sp]
+ ldrd r4, r5, [sp, #8]
+ adds sp, sp, #16
+
+ bx lr
+
+8:
+ and r2, t1, #LSB
+ and r0, w2, #LSB
+ cmp r0, #1
+ it cs
+ cmpcs r0, r2
+ itt eq
+ S2LOMEMEQ t1, t1, #8
+ S2LOMEMEQ w2, w2, #8
+ beq 8b
+ sub r0, r2, r0
+
+ /* Restore registers and stack. */
+ ldrd r6, r7, [sp]
+ ldrd r4, r5, [sp, #8]
+ adds sp, sp, #16
+
+ bx lr
+ .fnend
+ .size strcmp, .-strcmp
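
magic_find_zero_bytes and the hand-scheduled sub/bic/ands sequences in strcmp_unaligned lean on the same carry trick for spotting a NUL inside a word. A self-contained C statement of it (illustrative, not part of the commit):

#include <stdint.h>

/* Nonzero iff some byte of w is 0x00: subtracting 1 from each byte
 * borrows through a zero byte and sets its top bit; "& ~w" rejects bytes
 * whose top bit was already set. E.g. zero_byte_mask(0x61006262) yields
 * 0x00800000, flagging exactly the 0x00 byte. */
static uint32_t zero_byte_mask(uint32_t w)
{
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

The existence test is exact, but borrow propagation can also set bits for non-zero bytes above the first zero byte; that is the "false ones" case the handlers above recheck before trusting the syndrome.
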
diff --git a/reference/bionic-a9/strcpy.S b/reference/bionic-a9/strcpy.S
new file mode 100644
index 000000000000..9925378a157d
--- /dev/null
+++ b/reference/bionic-a9/strcpy.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2008 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Android adaptation and tweak by Jim Huang <jserv@0xlab.org>.
+ */
+
+ .globl strcpy
+ .type strcpy, %function
+ .text
+
+strcpy:
+ .fnstart
+ PLD [r1, #0]
+ eor r2, r0, r1
+ mov ip, r0
+ tst r2, #3
+ bne 4f
+ tst r1, #3
+ bne 3f
+5:
+ str r5, [sp, #-4]!
+ mov r5, #0x01
+ orr r5, r5, r5, lsl #8
+ orr r5, r5, r5, lsl #16
+
+ str r4, [sp, #-4]!
+ tst r1, #4
+ ldr r3, [r1], #4
+ beq 2f
+ sub r2, r3, r5
+ bics r2, r2, r3
+ tst r2, r5, lsl #7
+ itt eq
+ streq r3, [ip], #4
+ ldreq r3, [r1], #4
+ bne 1f
+ /* Inner loop. We now know that r1 is 64-bit aligned, so we
+ can safely fetch up to two words. This allows us to avoid
+ load stalls. */
+ .p2align 2
+2:
+ PLD [r1, #8]
+ ldr r4, [r1], #4
+ sub r2, r3, r5
+ bics r2, r2, r3
+ tst r2, r5, lsl #7
+ sub r2, r4, r5
+ bne 1f
+ str r3, [ip], #4
+ bics r2, r2, r4
+ tst r2, r5, lsl #7
+ itt eq
+ ldreq r3, [r1], #4
+ streq r4, [ip], #4
+ beq 2b
+ mov r3, r4
+1:
+#ifdef __ARMEB__
+ rors r3, r3, #24
+#endif
+ strb r3, [ip], #1
+ tst r3, #0xff
+#ifdef __ARMEL__
+ ror r3, r3, #8
+#endif
+ bne 1b
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ bx lr
+
+ /* Strings have the same offset from word alignment, but it's
+ not zero. */
+3:
+ tst r1, #1
+ beq 1f
+ ldrb r2, [r1], #1
+ strb r2, [ip], #1
+ cmp r2, #0
+ it eq
+ bxeq lr
+1:
+ tst r1, #2
+ beq 5b
+ ldrh r2, [r1], #2
+#ifdef __ARMEB__
+ tst r2, #0xff00
+ iteet ne
+ strneh r2, [ip], #2
+ lsreq r2, r2, #8
+ streqb r2, [ip]
+ tstne r2, #0xff
+#else
+ tst r2, #0xff
+ itet ne
+ strneh r2, [ip], #2
+ streqb r2, [ip]
+ tstne r2, #0xff00
+#endif
+ bne 5b
+ bx lr
+
+ /* src and dst do not have a common word alignment. Fall back to
+ byte copying. */
+4:
+ ldrb r2, [r1], #1
+ strb r2, [ip], #1
+ cmp r2, #0
+ bne 4b
+ bx lr
+
+ .fnend
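
In C terms, the aligned inner loop above is "copy a word whenever the NUL test stays clear, then finish the flagged word byte by byte". A little-endian sketch, assuming both pointers are word-aligned as they are on that path; the name is hypothetical:

#include <stdint.h>

/* Copy src to dst word by word until a word contains a NUL, then finish
 * that word one byte at a time. Both pointers must be word-aligned. */
static char *word_strcpy(char *dst, const char *src)
{
    char *d = dst;
    const uint32_t *s = (const uint32_t *)src;
    uint32_t w;

    while (w = *s++, ((w - 0x01010101u) & ~w & 0x80808080u) == 0) {
        *(uint32_t *)d = w;      /* word is NUL-free: copy it whole */
        d += 4;
    }
    for (;;) {                   /* at most four bytes remain */
        *d++ = (char)(w & 0xff);
        if ((w & 0xff) == 0)
            break;
        w >>= 8;                 /* little-endian: next byte up */
    }
    return dst;
}
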
diff --git a/reference/bionic-a9/strlen.c b/reference/bionic-a9/strlen.c
new file mode 100644
index 000000000000..8781d760566d
--- /dev/null
+++ b/reference/bionic-a9/strlen.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#undef strlen
+
+#define __ARM_HAVE_PLD 1
+
+size_t strlen(const char *s)
+{
+ __builtin_prefetch(s);
+ __builtin_prefetch(s+32);
+
+ union {
+ const char *b;
+ const uint32_t *w;
+ uintptr_t i;
+ } u;
+
+ // these are some scratch variables for the asm code below
+ uint32_t v, t;
+
+ // initialize the string length to zero
+ size_t l = 0;
+
+ // align the pointer to a 32-bit word boundary
+ u.b = s;
+ while (u.i & 0x3) {
+ if (__builtin_expect(*u.b++ == 0, 0)) {
+ goto done;
+ }
+ l++;
+ }
+
+ // loop for each word, testing if it contains a zero byte
+ // if so, exit the loop and update the length.
+ // We need to process 32 bytes per loop to schedule PLD properly
+ // and achieve the maximum bus speed.
+ asm(
+ "ldr %[v], [%[s]], #4 \n"
+ "sub %[l], %[l], %[s] \n"
+ "0: \n"
+#if __ARM_HAVE_PLD
+ "pld [%[s], #64] \n"
+#endif
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+#if !defined(__OPTIMIZE_SIZE__)
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]] , #4 \n"
+ "bne 1f \n"
+ "sub %[t], %[v], %[mask], lsr #7\n"
+ "and %[t], %[t], %[mask] \n"
+ "bics %[t], %[t], %[v] \n"
+ "ldreq %[v], [%[s]], #4 \n"
+#endif
+ "beq 0b \n"
+ "1: \n"
+ "add %[l], %[l], %[s] \n"
+ "tst %[v], #0xFF \n"
+ "beq 2f \n"
+ "add %[l], %[l], #1 \n"
+ "tst %[v], #0xFF00 \n"
+ "beq 2f \n"
+ "add %[l], %[l], #1 \n"
+ "tst %[v], #0xFF0000 \n"
+ "addne %[l], %[l], #1 \n"
+ "2: \n"
+ : [l]"=&r"(l), [v]"=&r"(v), [t]"=&r"(t), [s]"=&r"(u.b)
+ : "%[l]"(l), "%[s]"(u.b), [mask]"r"(0x80808080UL)
+ : "cc"
+ );
+
+done:
+ return l;
+}
diff --git a/reference/bionic-c/.deps/.dirstamp b/reference/bionic-c/.deps/.dirstamp
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/reference/bionic-c/.deps/.dirstamp
diff --git a/reference/bionic-c/.deps/bcopy.Po b/reference/bionic-c/.deps/bcopy.Po
new file mode 100644
index 000000000000..96be4c851dbc
--- /dev/null
+++ b/reference/bionic-c/.deps/bcopy.Po
@@ -0,0 +1,25 @@
+reference/bionic-c/bcopy.o: reference/bionic-c/bcopy.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/memchr.Po b/reference/bionic-c/.deps/memchr.Po
new file mode 100644
index 000000000000..fa2ee69f9b28
--- /dev/null
+++ b/reference/bionic-c/.deps/memchr.Po
@@ -0,0 +1,27 @@
+reference/bionic-c/memchr.o: reference/bionic-c/memchr.c \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include/stddef.h \
+ /usr/include/sys/_types.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/string.h /usr/include/sys/_null.h /usr/include/strings.h \
+ /usr/include/xlocale/_strings.h /usr/include/xlocale/_string.h
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include/stddef.h:
+
+/usr/include/sys/_types.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/string.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/memcmp.Po b/reference/bionic-c/.deps/memcmp.Po
new file mode 100644
index 000000000000..4baeaf394465
--- /dev/null
+++ b/reference/bionic-c/.deps/memcmp.Po
@@ -0,0 +1,25 @@
+reference/bionic-c/memcmp.o: reference/bionic-c/memcmp.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/memcpy.Po b/reference/bionic-c/.deps/memcpy.Po
new file mode 100644
index 000000000000..0c54c7640795
--- /dev/null
+++ b/reference/bionic-c/.deps/memcpy.Po
@@ -0,0 +1,27 @@
+reference/bionic-c/memcpy.o: reference/bionic-c/memcpy.c \
+ reference/bionic-c/bcopy.c /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+reference/bionic-c/bcopy.c:
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/memset.Po b/reference/bionic-c/.deps/memset.Po
new file mode 100644
index 000000000000..ccdad2a911bc
--- /dev/null
+++ b/reference/bionic-c/.deps/memset.Po
@@ -0,0 +1,38 @@
+reference/bionic-c/memset.o: reference/bionic-c/memset.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include/stdint.h \
+ /usr/include/stdint.h /usr/include/machine/_stdint.h \
+ /usr/include/x86/_stdint.h /usr/include/sys/_stdint.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include/stdint.h:
+
+/usr/include/stdint.h:
+
+/usr/include/machine/_stdint.h:
+
+/usr/include/x86/_stdint.h:
+
+/usr/include/sys/_stdint.h:
diff --git a/reference/bionic-c/.deps/strchr.Po b/reference/bionic-c/.deps/strchr.Po
new file mode 100644
index 000000000000..6acbe8a36574
--- /dev/null
+++ b/reference/bionic-c/.deps/strchr.Po
@@ -0,0 +1,25 @@
+reference/bionic-c/strchr.o: reference/bionic-c/strchr.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/strcmp.Po b/reference/bionic-c/.deps/strcmp.Po
new file mode 100644
index 000000000000..f8bf758e01a9
--- /dev/null
+++ b/reference/bionic-c/.deps/strcmp.Po
@@ -0,0 +1,25 @@
+reference/bionic-c/strcmp.o: reference/bionic-c/strcmp.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/strcpy.Po b/reference/bionic-c/.deps/strcpy.Po
new file mode 100644
index 000000000000..0409ee3a65c4
--- /dev/null
+++ b/reference/bionic-c/.deps/strcpy.Po
@@ -0,0 +1,25 @@
+reference/bionic-c/strcpy.o: reference/bionic-c/strcpy.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.deps/strlen.Po b/reference/bionic-c/.deps/strlen.Po
new file mode 100644
index 000000000000..ea51a4bd63c1
--- /dev/null
+++ b/reference/bionic-c/.deps/strlen.Po
@@ -0,0 +1,25 @@
+reference/bionic-c/strlen.o: reference/bionic-c/strlen.c \
+ /usr/include/string.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/include/strings.h /usr/include/xlocale/_strings.h \
+ /usr/include/xlocale/_string.h
+
+/usr/include/string.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/include/strings.h:
+
+/usr/include/xlocale/_strings.h:
+
+/usr/include/xlocale/_string.h:
diff --git a/reference/bionic-c/.dirstamp b/reference/bionic-c/.dirstamp
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/reference/bionic-c/.dirstamp
diff --git a/reference/bionic-c/bcopy.c b/reference/bionic-c/bcopy.c
new file mode 100644
index 000000000000..4308c6484ab8
--- /dev/null
+++ b/reference/bionic-c/bcopy.c
@@ -0,0 +1,128 @@
+/* $OpenBSD: bcopy.c,v 1.5 2005/08/08 08:05:37 espie Exp $ */
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef long word; /* "word" used for optimal copy speed */
+
+#define wsize sizeof(word)
+#define wmask (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+#ifdef MEMCOPY
+void *
+memcpy(void *dst0, const void *src0, size_t length)
+#else
+#ifdef MEMMOVE
+void *
+memmove(void *dst0, const void *src0, size_t length)
+#else
+void
+bcopy(const void *src0, void *dst0, size_t length)
+#endif
+#endif
+{
+ char *dst = dst0;
+ const char *src = src0;
+ size_t t;
+
+ if (length == 0 || dst == src) /* nothing to do */
+ goto done;
+
+ /*
+ * Macros: loop-t-times; and loop-t-times, t>0
+ */
+#define TLOOP(s) if (t) TLOOP1(s)
+#define TLOOP1(s) do { s; } while (--t)
+
+ if ((unsigned long)dst < (unsigned long)src) {
+ /*
+ * Copy forward.
+ */
+ t = (long)src; /* only need low bits */
+ if ((t | (long)dst) & wmask) {
+ /*
+ * Try to align operands. This cannot be done
+ * unless the low bits match.
+ */
+ if ((t ^ (long)dst) & wmask || length < wsize)
+ t = length;
+ else
+ t = wsize - (t & wmask);
+ length -= t;
+ TLOOP1(*dst++ = *src++);
+ }
+ /*
+ * Copy whole words, then mop up any trailing bytes.
+ */
+ t = length / wsize;
+ TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
+ t = length & wmask;
+ TLOOP(*dst++ = *src++);
+ } else {
+ /*
+ * Copy backwards. Otherwise essentially the same.
+ * Alignment works as before, except that it takes
+ * (t&wmask) bytes to align, not wsize-(t&wmask).
+ */
+ src += length;
+ dst += length;
+ t = (long)src;
+ if ((t | (long)dst) & wmask) {
+ if ((t ^ (long)dst) & wmask || length <= wsize)
+ t = length;
+ else
+ t &= wmask;
+ length -= t;
+ TLOOP1(*--dst = *--src);
+ }
+ t = length / wsize;
+ TLOOP(src -= wsize; dst -= wsize; *(word *)dst = *(word *)src);
+ t = length & wmask;
+ TLOOP(*--dst = *--src);
+ }
+done:
+#if defined(MEMCOPY) || defined(MEMMOVE)
+ return (dst0);
+#else
+ return;
+#endif
+}
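
Since the memcpy and memmove entry points are generated from this file, the forward/backward split above is what provides the overlap guarantee. A quick illustration, assuming the usual memmove semantics:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[8] = "abcdef";
    memmove(buf + 1, buf, 6);   /* dst > src and the ranges overlap */
    printf("%s\n", buf);        /* "aabcdef": backward copy kept the data */
    return 0;
}
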
diff --git a/reference/bionic-c/memchr.c b/reference/bionic-c/memchr.c
new file mode 100644
index 000000000000..b14167abd68e
--- /dev/null
+++ b/reference/bionic-c/memchr.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stddef.h>
+#include <string.h>
+
+void *memchr(const void *s, int c, size_t n)
+{
+ const unsigned char* p = s;
+ const unsigned char* end = p + n;
+
+ for (;;) {
+ if (p >= end || p[0] == (unsigned char) c) break; p++;
+ if (p >= end || p[0] == (unsigned char) c) break; p++;
+ if (p >= end || p[0] == (unsigned char) c) break; p++;
+ if (p >= end || p[0] == (unsigned char) c) break; p++;
+ }
+ if (p >= end)
+ return NULL;
+ else
+ return (void*) p;
+}
diff --git a/reference/bionic-c/memcmp.c b/reference/bionic-c/memcmp.c
new file mode 100644
index 000000000000..864095497754
--- /dev/null
+++ b/reference/bionic-c/memcmp.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <string.h>
+
+int memcmp(const void *s1, const void *s2, size_t n)
+{
+ const unsigned char* p1 = s1;
+ const unsigned char* end1 = p1 + n;
+ const unsigned char* p2 = s2;
+ int d = 0;
+
+ for (;;) {
+ if (d || p1 >= end1) break;
+ d = (int)*p1++ - (int)*p2++;
+
+ if (d || p1 >= end1) break;
+ d = (int)*p1++ - (int)*p2++;
+
+ if (d || p1 >= end1) break;
+ d = (int)*p1++ - (int)*p2++;
+
+ if (d || p1 >= end1) break;
+ d = (int)*p1++ - (int)*p2++;
+ }
+ return d;
+}
diff --git a/reference/bionic-c/memcpy.c b/reference/bionic-c/memcpy.c
new file mode 100644
index 000000000000..dea78b2d9189
--- /dev/null
+++ b/reference/bionic-c/memcpy.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#define MEMCOPY
+#include "bcopy.c"
diff --git a/reference/bionic-c/memset.c b/reference/bionic-c/memset.c
new file mode 100644
index 000000000000..41dafb289b29
--- /dev/null
+++ b/reference/bionic-c/memset.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdint.h>
+
+void* memset(void* dst, int c, size_t n)
+{
+ char* q = dst;
+ char* end = q + n;
+
+ for (;;) {
+ if (q >= end) break; *q++ = (char) c;
+ if (q >= end) break; *q++ = (char) c;
+ if (q >= end) break; *q++ = (char) c;
+ if (q >= end) break; *q++ = (char) c;
+ }
+
+ return dst;
+}
diff --git a/reference/bionic-c/strchr.c b/reference/bionic-c/strchr.c
new file mode 100644
index 000000000000..3f9aec5c3b21
--- /dev/null
+++ b/reference/bionic-c/strchr.c
@@ -0,0 +1,44 @@
+/* $OpenBSD: index.c,v 1.5 2005/08/08 08:05:37 espie Exp $ */
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+#undef strchr
+
+char *
+strchr(const char *p, int ch)
+{
+ for (;; ++p) {
+ if (*p == (char) ch)
+ return((char *)p);
+ if (!*p)
+ return((char *)NULL);
+ }
+ /* NOTREACHED */
+}
diff --git a/reference/bionic-c/strcmp.c b/reference/bionic-c/strcmp.c
new file mode 100644
index 000000000000..c4e4783fa61e
--- /dev/null
+++ b/reference/bionic-c/strcmp.c
@@ -0,0 +1,52 @@
+/* $OpenBSD: strcmp.c,v 1.7 2005/08/08 08:05:37 espie Exp $ */
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+#include <string.h>
+#undef strcmp
+#else
+#include <lib/libkern/libkern.h>
+#endif
+
+/*
+ * Compare strings.
+ */
+int
+strcmp(const char *s1, const char *s2)
+{
+ while (*s1 == *s2++)
+ if (*s1++ == 0)
+ return (0);
+ return (*(unsigned char *)s1 - *(unsigned char *)--s2);
+}
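The casts to unsigned char in the return statement are what make the result well-defined for bytes above 0x7F: the C standard specifies that strcmp compares characters as unsigned char, so on platforms where plain char is signed the casts cannot be dropped. A small illustration with invented values:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char a[] = "a";      /* byte 0x61 */
        const char b[] = "\xe9";   /* byte 0xE9, negative as signed char */

        /* Comparing as unsigned char forces 0x61 < 0xE9, so the
           result is negative regardless of char's signedness. */
        printf("%d\n", strcmp(a, b) < 0);   /* prints 1 */
        return 0;
    }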
diff --git a/reference/bionic-c/strcpy.c b/reference/bionic-c/strcpy.c
new file mode 100644
index 000000000000..eb21d678032d
--- /dev/null
+++ b/reference/bionic-c/strcpy.c
@@ -0,0 +1,41 @@
+/* $OpenBSD: strcpy.c,v 1.8 2005/08/08 08:05:37 espie Exp $ */
+
+/*
+ * Copyright (c) 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+char *
+strcpy(char *to, const char *from)
+{
+ char *save = to;
+
+ for (; (*to = *from) != '\0'; ++from, ++to);
+ return(save);
+}
diff --git a/reference/bionic-c/strlen.c b/reference/bionic-c/strlen.c
new file mode 100644
index 000000000000..12d9ec4dadbb
--- /dev/null
+++ b/reference/bionic-c/strlen.c
@@ -0,0 +1,47 @@
+/* $OpenBSD: strlen.c,v 1.7 2005/08/08 08:05:37 espie Exp $ */
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+#include <string.h>
+#else
+#include <lib/libkern/libkern.h>
+#endif
+
+size_t
+strlen(const char *str)
+{
+ const char *s;
+
+ for (s = str; *s; ++s)
+ ;
+ return (s - str);
+}
+
diff --git a/reference/csl/.deps/memcpy.Po b/reference/csl/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/csl/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/csl/.deps/memset.Po b/reference/csl/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/csl/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/csl/arm_asm.h b/reference/csl/arm_asm.h
new file mode 100644
index 000000000000..2e0b1dd80915
--- /dev/null
+++ b/reference/csl/arm_asm.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2009 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM_ASM__H
+#define ARM_ASM__H
+
+/* First define some macros that keep everything else sane. */
+#if defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__)
+#define _ISA_ARM_7
+#endif
+
+#if defined (_ISA_ARM_7) || defined (__ARM_ARCH_6__) || \
+ defined (__ARM_ARCH_6J__) || defined (__ARM_ARCH_6T2__) || \
+ defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6ZK__) || \
+ defined (__ARM_ARCH_6Z__)
+#define _ISA_ARM_6
+#endif
+
+#if defined (_ISA_ARM_6) || defined (__ARM_ARCH_5__) || \
+ defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5TE__) || \
+ defined (__ARM_ARCH_5TEJ__)
+#define _ISA_ARM_5
+#endif
+
+#if defined (_ISA_ARM_5) || defined (__ARM_ARCH_4T__)
+#define _ISA_ARM_4T
+#endif
+
+#if defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7__) || \
+ defined (__ARM_ARCH_7EM__)
+#define _ISA_THUMB_2
+#endif
+
+#if defined (_ISA_THUMB_2) || defined (__ARM_ARCH_6M__)
+#define _ISA_THUMB_1
+#endif
+
+
+/* Now some macros for common instruction sequences. */
+
+asm(".macro RETURN cond=\n\t"
+#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1)
+ "bx\\cond lr\n\t"
+#else
+ "mov\\cond pc, lr\n\t"
+#endif
+ ".endm"
+ );
+
+asm(".macro optpld base, offset=#0\n\t"
+#if defined (_ISA_ARM_7)
+ "pld [\\base, \\offset]\n\t"
+#endif
+ ".endm"
+ );
+
+#endif /* ARM_ASM__H */
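Because the macros are emitted with top-level asm() statements, they become visible to the assembler for the rest of the translation unit and can be invoked from later inline assembly. A hypothetical sketch, assuming an ARM target and GCC (the function name and constant are invented for the example):

    #include "arm_asm.h"

    /* A made-up leaf routine that returns via the RETURN macro,
       which expands to "bx lr" or "mov pc, lr" as appropriate. */
    asm(".global answer\n"
        "answer:\n\t"
        "mov r0, #42\n\t"   /* return value in r0 per the AAPCS */
        "RETURN");

    extern int answer(void);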
diff --git a/reference/csl/memcpy.c b/reference/csl/memcpy.c
new file mode 100644
index 000000000000..9c8270b6a6d0
--- /dev/null
+++ b/reference/csl/memcpy.c
@@ -0,0 +1,291 @@
+/* Copyright (c) 2009 CodeSourcery, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of CodeSourcery nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CODESOURCERY, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CODESOURCERY BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arm_asm.h"
+#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
+
+/* Standard operations for word-sized values. */
+#define WORD_REF(ADDRESS, OFFSET) \
+ *((WORD_TYPE*)((char*)(ADDRESS) + (OFFSET)))
+#define WORD_COPY(OUT, IN, OFFSET) \
+ WORD_REF(OUT, OFFSET) = WORD_REF(IN, OFFSET)
+
+/* On processors with NEON, we use 128-bit vectors. Also,
+ we need to include arm_neon.h to use these. */
+#if defined(__ARM_NEON__)
+ #include <arm_neon.h>
+
+ #define WORD_TYPE uint8x16_t
+ #define WORD_SIZE 16
+ #define MAYBE_PREFETCH(IN) __builtin_prefetch((IN), 0, 0)
+
+/* On ARM processors with 64-bit ldrd instructions, we use those,
+ except on Cortex-M* where benchmarking has shown them to
+ be slower. */
+#elif defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__) || defined(_ISA_ARM_6)
+ #define WORD_TYPE uint64_t
+ #define WORD_SIZE 8
+ #define MAYBE_PREFETCH(IN) __builtin_prefetch((IN), 0, 0)
+
+/* On everything else, we use 32-bit loads and stores, and
+ do not use prefetching. */
+#else
+ #define WORD_TYPE uint32_t
+ #define WORD_SIZE 4
+ #define MAYBE_PREFETCH(IN)
+#endif
+
+/* On all ARM platforms, 'SHORTWORD' is a 32-bit value. */
+#define SHORTWORD_TYPE uint32_t
+#define SHORTWORD_SIZE 4
+#define SHORTWORD_REF(ADDRESS, OFFSET) \
+ *((SHORTWORD_TYPE*)((char*)(ADDRESS) + (OFFSET)))
+#define SHORTWORD_COPY(OUT, IN, OFFSET) \
+ SHORTWORD_REF(OUT, OFFSET) = SHORTWORD_REF(IN, OFFSET)
+
+/* Shifting directionality depends on endianness. */
+#ifdef __ARMEB__
+#define SHORTWORD_SHIFT(IN0, IN1, OFFSET) \
+ ((IN0) << ((OFFSET)*8)) | ((IN1) >> (SHORTWORD_SIZE*8 - (OFFSET)*8))
+#else
+#define SHORTWORD_SHIFT(IN0, IN1, OFFSET) \
+ ((IN0) >> ((OFFSET)*8)) | ((IN1) << (SHORTWORD_SIZE*8 - (OFFSET)*8))
+#endif
+
+void *memcpy(void *OUT, const void *IN, size_t N)
+{
+ void* OUT0 = OUT;
+
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ const char* OUT_end = (char*)OUT + N;
+ while ((char*)OUT < OUT_end) {
+ *((char*)OUT) = *((char*)IN);
+ OUT++;
+ IN++;
+ }
+
+ return OUT0;
+#else
+ /* Handle short strings and immediately return. */
+ if (__builtin_expect(N < SHORTWORD_SIZE, 1)) {
+ size_t i = 0;
+ while (i < N) {
+ ((char*)OUT)[i] = ((char*)IN)[i];
+ i++;
+ }
+ return OUT;
+ }
+
+ const char* OUT_end = (char*)OUT + N;
+
+ /* Align OUT to SHORTWORD_SIZE. */
+ while ((uintptr_t)OUT % SHORTWORD_SIZE != 0) {
+ *(char*) (OUT++) = *(char*) (IN++);
+ }
+
+ if ((uintptr_t) IN % SHORTWORD_SIZE == 0) {
+
+#if WORD_SIZE > SHORTWORD_SIZE
+ /* Align OUT to WORD_SIZE in steps of SHORTWORD_SIZE. */
+ if (__builtin_expect(OUT_end - (char*)OUT >= WORD_SIZE, 0)) {
+ while ((uintptr_t)OUT % WORD_SIZE != 0) {
+ SHORTWORD_COPY(OUT, IN, 0);
+ OUT += SHORTWORD_SIZE;
+ IN += SHORTWORD_SIZE;
+ }
+
+ if ((uintptr_t) IN % WORD_SIZE == 0) {
+#endif /* WORD_SIZE > SHORTWORD_SIZE */
+
+#if defined(__ARM_NEON__)
+ /* Testing on Cortex-A8 indicates that the following idiom
+ produces faster assembly code when doing vector copies,
+ but not when doing regular copies. */
+ size_t i = 0;
+ N = OUT_end - (char*)OUT;
+ MAYBE_PREFETCH(IN + 64);
+ MAYBE_PREFETCH(IN + 128);
+ MAYBE_PREFETCH(IN + 192);
+ if (N >= 640) {
+ MAYBE_PREFETCH(IN + 256);
+ MAYBE_PREFETCH(IN + 320);
+ MAYBE_PREFETCH(IN + 384);
+ MAYBE_PREFETCH(IN + 448);
+ MAYBE_PREFETCH(IN + 512);
+ MAYBE_PREFETCH(IN + 576);
+ MAYBE_PREFETCH(IN + 640);
+ MAYBE_PREFETCH(IN + 704);
+ /* We phrase the loop condition in this way so that the
+ i + WORD_SIZE * 16 value can be reused to increment i. */
+ while (i + WORD_SIZE * 16 <= N - 640) {
+ MAYBE_PREFETCH(IN + 768);
+ MAYBE_PREFETCH(IN + 832);
+ MAYBE_PREFETCH(IN + 896);
+ MAYBE_PREFETCH(IN + 960);
+ WORD_COPY(OUT, IN, i);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 1);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 2);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 3);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 4);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 5);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 6);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 7);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 8);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 9);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 10);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 11);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 12);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 13);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 14);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 15);
+ i += WORD_SIZE * 16;
+ }
+ }
+ while (i + WORD_SIZE * 16 <= N) {
+ WORD_COPY(OUT, IN, i);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 1);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 2);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 3);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 4);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 5);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 6);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 7);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 8);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 9);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 10);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 11);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 12);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 13);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 14);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 15);
+ i += WORD_SIZE * 16;
+ }
+ while (i + WORD_SIZE * 4 <= N) {
+ WORD_COPY(OUT, IN, i);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 1);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 2);
+ WORD_COPY(OUT, IN, i + WORD_SIZE * 3);
+ i += WORD_SIZE * 4;
+ }
+ while (i + WORD_SIZE <= N) {
+ WORD_COPY(OUT, IN, i);
+ i += WORD_SIZE;
+ }
+ OUT += i;
+ IN += i;
+#else /* not defined(__ARM_NEON__) */
+ /* Note: 16-times unrolling is about 20% faster than 4-times
+ unrolling on both ARM Cortex-A8 and Cortex-M3. */
+ MAYBE_PREFETCH(IN + 64);
+ MAYBE_PREFETCH(IN + 128);
+ MAYBE_PREFETCH(IN + 192);
+ while (OUT_end - (char*)OUT >= WORD_SIZE * 16) {
+ MAYBE_PREFETCH(IN + 256);
+ MAYBE_PREFETCH(IN + 320);
+ WORD_COPY(OUT, IN, 0);
+ WORD_COPY(OUT, IN, WORD_SIZE * 1);
+ WORD_COPY(OUT, IN, WORD_SIZE * 2);
+ WORD_COPY(OUT, IN, WORD_SIZE * 3);
+ WORD_COPY(OUT, IN, WORD_SIZE * 4);
+ WORD_COPY(OUT, IN, WORD_SIZE * 5);
+ WORD_COPY(OUT, IN, WORD_SIZE * 6);
+ WORD_COPY(OUT, IN, WORD_SIZE * 7);
+ WORD_COPY(OUT, IN, WORD_SIZE * 8);
+ WORD_COPY(OUT, IN, WORD_SIZE * 9);
+ WORD_COPY(OUT, IN, WORD_SIZE * 10);
+ WORD_COPY(OUT, IN, WORD_SIZE * 11);
+ WORD_COPY(OUT, IN, WORD_SIZE * 12);
+ WORD_COPY(OUT, IN, WORD_SIZE * 13);
+ WORD_COPY(OUT, IN, WORD_SIZE * 14);
+ WORD_COPY(OUT, IN, WORD_SIZE * 15);
+ OUT += WORD_SIZE * 16;
+ IN += WORD_SIZE * 16;
+ }
+ while (WORD_SIZE * 4 <= OUT_end - (char*)OUT) {
+ WORD_COPY(OUT, IN, 0);
+ WORD_COPY(OUT, IN, WORD_SIZE * 1);
+ WORD_COPY(OUT, IN, WORD_SIZE * 2);
+ WORD_COPY(OUT, IN, WORD_SIZE * 3);
+ OUT += WORD_SIZE * 4;
+ IN += WORD_SIZE * 4;
+ }
+ while (WORD_SIZE <= OUT_end - (char*)OUT) {
+ WORD_COPY(OUT, IN, 0);
+ OUT += WORD_SIZE;
+ IN += WORD_SIZE;
+ }
+#endif /* not defined(__ARM_NEON__) */
+
+#if WORD_SIZE > SHORTWORD_SIZE
+ } else { /* if IN is not WORD_SIZE aligned */
+ while (SHORTWORD_SIZE * 4 <= OUT_end - (char*)OUT) {
+ SHORTWORD_COPY(OUT, IN, 0);
+ SHORTWORD_COPY(OUT, IN, SHORTWORD_SIZE * 1);
+ SHORTWORD_COPY(OUT, IN, SHORTWORD_SIZE * 2);
+ SHORTWORD_COPY(OUT, IN, SHORTWORD_SIZE * 3);
+ OUT += SHORTWORD_SIZE * 4;
+ IN += SHORTWORD_SIZE * 4;
+ }
+ } /* end if IN is not WORD_SIZE aligned */
+ } /* end if N >= WORD_SIZE */
+
+ while (SHORTWORD_SIZE <= OUT_end - (char*)OUT) {
+ SHORTWORD_COPY(OUT, IN, 0);
+ OUT += SHORTWORD_SIZE;
+ IN += SHORTWORD_SIZE;
+ }
+#endif /* WORD_SIZE > SHORTWORD_SIZE */
+
+ } else { /* if IN is not SHORTWORD_SIZE aligned */
+ ptrdiff_t misalign = (uintptr_t)IN % SHORTWORD_SIZE;
+
+ SHORTWORD_TYPE temp1, temp2;
+ temp1 = SHORTWORD_REF(IN, -misalign);
+
+ /* Benchmarking indicates that unrolling this loop doesn't
+ produce a measurable performance improvement on ARM. */
+ while (SHORTWORD_SIZE <= OUT_end - (char*)OUT) {
+ IN += SHORTWORD_SIZE;
+ temp2 = SHORTWORD_REF(IN, -misalign);
+ SHORTWORD_REF(OUT, 0) = SHORTWORD_SHIFT(temp1, temp2, misalign);
+ temp1 = temp2;
+ OUT += SHORTWORD_SIZE;
+ }
+
+ } /* end if IN is not SHORTWORD_SIZE aligned */
+
+ while ((char*)OUT < OUT_end) {
+ *((char*)OUT) = *((char*)IN);
+ OUT++;
+ IN++;
+ }
+
+ return OUT0;
+#endif
+}
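The misaligned-source path above never issues an unaligned load: it reads two adjacent aligned words and fuses them with SHORTWORD_SHIFT. A minimal standalone sketch of that merge for a little-endian machine with a source misaligned by one byte (values invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t in0 = 0x44332211;   /* memory bytes: 11 22 33 44 */
        uint32_t in1 = 0x88776655;   /* memory bytes: 55 66 77 88 */
        unsigned misalign = 1;       /* copy starts at the 22 byte */

        /* Same expression as SHORTWORD_SHIFT in the !__ARMEB__ case. */
        uint32_t out = (in0 >> (misalign * 8))
                     | (in1 << (32 - misalign * 8));

        printf("0x%08x\n", out);     /* prints 0x55443322 */
        return 0;
    }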
diff --git a/reference/csl/memset.c b/reference/csl/memset.c
new file mode 100644
index 000000000000..3986fa97cf2a
--- /dev/null
+++ b/reference/csl/memset.c
@@ -0,0 +1,214 @@
+/* Copyright (c) 2009 CodeSourcery, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of CodeSourcery nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CODESOURCERY, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CODESOURCERY BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arm_asm.h"
+#include <string.h>
+#include <stdint.h>
+
+/* Standard operations for word-sized values. */
+#define WORD_REF(ADDRESS, OFFSET) \
+ *((WORD_TYPE*)((char*)(ADDRESS) + (OFFSET)))
+
+/* On processors with NEON, we use 128-bit vectors. Also,
+ we need to include arm_neon.h to use these. */
+#if defined(__ARM_NEON__)
+ #include <arm_neon.h>
+
+ #define WORD_TYPE uint8x16_t
+ #define WORD_SIZE 16
+
+ #define WORD_DUPLICATE(VALUE) \
+ vdupq_n_u8(VALUE)
+
+/* On ARM processors with 64-bit ldrd instructions, we use those,
+ except on Cortex-M* where benchmarking has shown them to
+ be slower. */
+#elif defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__) || defined(_ISA_ARM_6)
+ #define WORD_TYPE uint64_t
+ #define WORD_SIZE 8
+
+ /* ARM stores 64-bit values in two 32-bit registers and does not
+ have 64-bit multiply or bitwise-or instructions, so this union
+ operation results in optimal code. */
+ static inline uint64_t splat8(int value) {
+ union { uint32_t ints[2]; uint64_t result; } quad;
+ quad.ints[0] = (unsigned char)(value) * 0x01010101;
+ quad.ints[1] = quad.ints[0];
+ return quad.result;
+ }
+ #define WORD_DUPLICATE(VALUE) \
+ splat8(VALUE)
+
+/* On everything else, we use 32-bit loads and stores. */
+#else
+ #define WORD_TYPE uint32_t
+ #define WORD_SIZE 4
+ #define WORD_DUPLICATE(VALUE) \
+ (unsigned char)(VALUE) * 0x01010101
+#endif
+
+/* On all ARM platforms, 'SHORTWORD' is a 32-bit value. */
+#define SHORTWORD_TYPE uint32_t
+#define SHORTWORD_SIZE 4
+#define SHORTWORD_REF(ADDRESS, OFFSET) \
+ *((SHORTWORD_TYPE*)((char*)(ADDRESS) + (OFFSET)))
+#define SHORTWORD_DUPLICATE(VALUE) \
+ (uint32_t)(unsigned char)(VALUE) * 0x01010101
+
+void *memset(void *DST, int C, size_t LENGTH)
+{
+ void* DST0 = DST;
+ unsigned char C_BYTE = C;
+
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ const char* DST_end = (char*)DST + LENGTH;
+ while ((char*)DST < DST_end) {
+ *((char*)DST) = C_BYTE;
+ DST++;
+ }
+
+ return DST0;
+#else /* not PREFER_SIZE_OVER_SPEED */
+ /* Handle short strings and immediately return. */
+ if (__builtin_expect(LENGTH < SHORTWORD_SIZE, 1)) {
+ size_t i = 0;
+ while (i < LENGTH) {
+ ((char*)DST)[i] = C_BYTE;
+ i++;
+ }
+ return DST;
+ }
+
+ const char* DST_end = (char*)DST + LENGTH;
+
+ /* Align DST to SHORTWORD_SIZE. */
+ while ((uintptr_t)DST % SHORTWORD_SIZE != 0) {
+ *(char*) (DST++) = C_BYTE;
+ }
+
+#if WORD_SIZE > SHORTWORD_SIZE
+ SHORTWORD_TYPE C_SHORTWORD = SHORTWORD_DUPLICATE(C_BYTE);
+
+ /* Align DST to WORD_SIZE in steps of SHORTWORD_SIZE. */
+ if (__builtin_expect(DST_end - (char*)DST >= WORD_SIZE, 0)) {
+ while ((uintptr_t)DST % WORD_SIZE != 0) {
+ SHORTWORD_REF(DST, 0) = C_SHORTWORD;
+ DST += SHORTWORD_SIZE;
+ }
+#endif /* WORD_SIZE > SHORTWORD_SIZE */
+
+ WORD_TYPE C_WORD = WORD_DUPLICATE(C_BYTE);
+
+#if defined(__ARM_NEON__)
+ /* Testing on Cortex-A8 indicates that the following idiom
+ produces faster assembly code when doing vector copies,
+ but not when doing regular copies. */
+ size_t i = 0;
+ LENGTH = DST_end - (char*)DST;
+ while (i + WORD_SIZE * 16 <= LENGTH) {
+ WORD_REF(DST, i) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 1) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 2) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 3) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 4) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 5) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 6) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 7) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 8) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 9) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 10) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 11) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 12) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 13) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 14) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 15) = C_WORD;
+ i += WORD_SIZE * 16;
+ }
+ while (i + WORD_SIZE * 4 <= LENGTH) {
+ WORD_REF(DST, i) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 1) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 2) = C_WORD;
+ WORD_REF(DST, i + WORD_SIZE * 3) = C_WORD;
+ i += WORD_SIZE * 4;
+ }
+ while (i + WORD_SIZE <= LENGTH) {
+ WORD_REF(DST, i) = C_WORD;
+ i += WORD_SIZE;
+ }
+ DST += i;
+#else /* not defined(__ARM_NEON__) */
+ /* Note: 16-times unrolling is about 50% faster than 4-times
+ unrolling on both ARM Cortex-A8 and Cortex-M3. */
+ while (DST_end - (char*) DST >= WORD_SIZE * 16) {
+ WORD_REF(DST, 0) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 1) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 2) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 3) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 4) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 5) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 6) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 7) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 8) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 9) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 10) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 11) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 12) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 13) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 14) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 15) = C_WORD;
+ DST += WORD_SIZE * 16;
+ }
+ while (WORD_SIZE * 4 <= DST_end - (char*) DST) {
+ WORD_REF(DST, 0) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 1) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 2) = C_WORD;
+ WORD_REF(DST, WORD_SIZE * 3) = C_WORD;
+ DST += WORD_SIZE * 4;
+ }
+ while (WORD_SIZE <= DST_end - (char*) DST) {
+ WORD_REF(DST, 0) = C_WORD;
+ DST += WORD_SIZE;
+ }
+#endif /* not defined(__ARM_NEON__) */
+
+#if WORD_SIZE > SHORTWORD_SIZE
+ } /* end if LENGTH >= WORD_SIZE */
+
+ while (SHORTWORD_SIZE <= DST_end - (char*)DST) {
+ SHORTWORD_REF(DST, 0) = C_SHORTWORD;
+ DST += SHORTWORD_SIZE;
+ }
+#endif /* WORD_SIZE > SHORTWORD_SIZE */
+
+ while ((char*)DST < DST_end) {
+ *((char*)DST) = C_BYTE;
+ DST++;
+ }
+
+ return DST0;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
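WORD_DUPLICATE and splat8 rely on the same trick: multiplying an 8-bit value by 0x01010101 replicates it into every byte of a 32-bit word, which the 64-bit path then mirrors into both halves of a union. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 0x5A * 0x01010101 places 0x5A in all four byte lanes. */
        uint32_t word = (uint32_t)(unsigned char)0x5A * 0x01010101u;
        printf("0x%08x\n", word);    /* prints 0x5a5a5a5a */
        return 0;
    }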
diff --git a/reference/glibc-c/.deps/.dirstamp b/reference/glibc-c/.deps/.dirstamp
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/reference/glibc-c/.deps/.dirstamp
diff --git a/reference/glibc-c/.deps/memchr.Po b/reference/glibc-c/.deps/memchr.Po
new file mode 100644
index 000000000000..86a51a09dfb4
--- /dev/null
+++ b/reference/glibc-c/.deps/memchr.Po
@@ -0,0 +1,45 @@
+reference/glibc-c/memchr.o: reference/glibc-c/memchr.c config.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/stdlib.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h \
+ /usr/include/sys/_null.h /usr/include/sys/_types.h \
+ /usr/include/machine/_types.h /usr/include/x86/_types.h \
+ /usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/types.h \
+ /usr/include/machine/endian.h /usr/include/x86/endian.h \
+ /usr/include/sys/_pthreadtypes.h /usr/include/sys/_stdint.h \
+ /usr/include/sys/select.h /usr/include/sys/_sigset.h \
+ /usr/include/sys/_timeval.h /usr/include/sys/timespec.h \
+ /usr/include/sys/_timespec.h
+
+config.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/stdlib.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/cdefs.h:
+
+/usr/include/sys/_null.h:
+
+/usr/include/sys/_types.h:
+
+/usr/include/machine/_types.h:
+
+/usr/include/x86/_types.h:
+
+/usr/local/lib/gcc48/gcc/x86_64-portbld-freebsd11.0/4.8.5/include-fixed/sys/types.h:
+
+/usr/include/machine/endian.h:
+
+/usr/include/x86/endian.h:
+
+/usr/include/sys/_pthreadtypes.h:
+
+/usr/include/sys/_stdint.h:
+
+/usr/include/sys/select.h:
+
+/usr/include/sys/_sigset.h:
+
+/usr/include/sys/_timeval.h:
+
+/usr/include/sys/timespec.h:
+
+/usr/include/sys/_timespec.h:
diff --git a/reference/glibc-c/.deps/memcmp.Po b/reference/glibc-c/.deps/memcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/memcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/memcpy.Po b/reference/glibc-c/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/memset.Po b/reference/glibc-c/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/strchr.Po b/reference/glibc-c/.deps/strchr.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/strchr.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/strcmp.Po b/reference/glibc-c/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/strcpy.Po b/reference/glibc-c/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/strlen.Po b/reference/glibc-c/.deps/strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.deps/wordcopy.Po b/reference/glibc-c/.deps/wordcopy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc-c/.deps/wordcopy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc-c/.dirstamp b/reference/glibc-c/.dirstamp
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/reference/glibc-c/.dirstamp
diff --git a/reference/glibc-c/memchr.c b/reference/glibc-c/memchr.c
new file mode 100644
index 000000000000..bc606b25ade7
--- /dev/null
+++ b/reference/glibc-c/memchr.c
@@ -0,0 +1,204 @@
+/* Copyright (C) 1991,93,96,97,99,2000,2003,2012 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Based on strlen implementation by Torbjorn Granlund (tege@sics.se),
+ with help from Dan Sahlin (dan@sics.se) and
+ commentary by Jim Blandy (jimb@ai.mit.edu);
+ adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu),
+ and implemented by Roland McGrath (roland@ai.mit.edu).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#undef __ptr_t
+#define __ptr_t void *
+
+#if defined _LIBC
+# include <string.h>
+# include <memcopy.h>
+#endif
+
+#if HAVE_STDLIB_H || defined _LIBC
+# include <stdlib.h>
+#endif
+
+#if HAVE_LIMITS_H || defined _LIBC
+# include <limits.h>
+#endif
+
+#define LONG_MAX_32_BITS 2147483647
+
+#ifndef LONG_MAX
+#define LONG_MAX LONG_MAX_32_BITS
+#endif
+
+#include <sys/types.h>
+#if HAVE_BP_SYM_H || defined _LIBC
+#include <bp-sym.h>
+#else
+# define BP_SYM(sym) sym
+#endif
+
+#undef memchr
+#undef __memchr
+
+/* Search no more than N bytes of S for C. */
+__ptr_t
+memchr (s, c_in, n)
+ const __ptr_t s;
+ int c_in;
+ size_t n;
+{
+ const unsigned char *char_ptr;
+ const unsigned long int *longword_ptr;
+ unsigned long int longword, magic_bits, charmask;
+ unsigned char c;
+
+ c = (unsigned char) c_in;
+
+ /* Handle the first few characters by reading one character at a time.
+ Do this until CHAR_PTR is aligned on a longword boundary. */
+ for (char_ptr = (const unsigned char *) s;
+ n > 0 && ((unsigned long int) char_ptr
+ & (sizeof (longword) - 1)) != 0;
+ --n, ++char_ptr)
+ if (*char_ptr == c)
+ return (__ptr_t) char_ptr;
+
+ /* All these elucidatory comments refer to 4-byte longwords,
+ but the theory applies equally well to 8-byte longwords. */
+
+ longword_ptr = (unsigned long int *) char_ptr;
+
+ /* Bits 31, 24, 16, and 8 of this number are zero. Call these bits
+ the "holes." Note that there is a hole just to the left of
+ each byte, with an extra at the end:
+
+ bits: 01111110 11111110 11111110 11111111
+ bytes: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
+
+ The 1-bits make sure that carries propagate to the next 0-bit.
+ The 0-bits provide holes for carries to fall into. */
+
+ if (sizeof (longword) != 4 && sizeof (longword) != 8)
+ abort ();
+
+#if LONG_MAX <= LONG_MAX_32_BITS
+ magic_bits = 0x7efefeff;
+#else
+ magic_bits = ((unsigned long int) 0x7efefefe << 32) | 0xfefefeff;
+#endif
+
+ /* Set up a longword, each of whose bytes is C. */
+ charmask = c | (c << 8);
+ charmask |= charmask << 16;
+#if LONG_MAX > LONG_MAX_32_BITS
+ charmask |= charmask << 32;
+#endif
+
+ /* Instead of the traditional loop which tests each character,
+ we will test a longword at a time. The tricky part is testing
+ if *any of the four* bytes in the longword in question are zero. */
+ while (n >= sizeof (longword))
+ {
+ /* We tentatively exit the loop if adding MAGIC_BITS to
+ LONGWORD fails to change any of the hole bits of LONGWORD.
+
+ 1) Is this safe? Will it catch all the zero bytes?
+ Suppose there is a byte with all zeros. Any carry bits
+ propagating from its left will fall into the hole at its
+ least significant bit and stop. Since there will be no
+ carry from its most significant bit, the LSB of the
+ byte to the left will be unchanged, and the zero will be
+ detected.
+
+ 2) Is this worthwhile? Will it ignore everything except
+ zero bytes? Suppose every byte of LONGWORD has a bit set
+ somewhere. There will be a carry into bit 8. If bit 8
+ is set, this will carry into bit 16. If bit 8 is clear,
+ one of bits 9-15 must be set, so there will be a carry
+ into bit 16. Similarly, there will be a carry into bit
+ 24. If one of bits 24-30 is set, there will be a carry
+ into bit 31, so all of the hole bits will be changed.
+
+ The one misfire occurs when bits 24-30 are clear and bit
+ 31 is set; in this case, the hole at bit 31 is not
+ changed. If we had access to the processor carry flag,
+ we could close this loophole by putting the fourth hole
+ at bit 32!
+
+ So it ignores everything except 128's, when they're aligned
+ properly.
+
+ 3) But wait! Aren't we looking for C, not zero?
+ Good point. So what we do is XOR LONGWORD with a longword,
+ each of whose bytes is C. This turns each byte that is C
+ into a zero. */
+
+ longword = *longword_ptr++ ^ charmask;
+
+ /* Add MAGIC_BITS to LONGWORD. */
+ if ((((longword + magic_bits)
+
+ /* Set those bits that were unchanged by the addition. */
+ ^ ~longword)
+
+ /* Look at only the hole bits. If any of the hole bits
+ are unchanged, most likely one of the bytes was a
+ zero. */
+ & ~magic_bits) != 0)
+ {
+ /* Which of the bytes was C? If none of them were, it was
+ a misfire; continue the search. */
+
+ const unsigned char *cp = (const unsigned char *) (longword_ptr - 1);
+
+ if (cp[0] == c)
+ return (__ptr_t) cp;
+ if (cp[1] == c)
+ return (__ptr_t) &cp[1];
+ if (cp[2] == c)
+ return (__ptr_t) &cp[2];
+ if (cp[3] == c)
+ return (__ptr_t) &cp[3];
+#if LONG_MAX > 2147483647
+ if (cp[4] == c)
+ return (__ptr_t) &cp[4];
+ if (cp[5] == c)
+ return (__ptr_t) &cp[5];
+ if (cp[6] == c)
+ return (__ptr_t) &cp[6];
+ if (cp[7] == c)
+ return (__ptr_t) &cp[7];
+#endif
+ }
+
+ n -= sizeof (longword);
+ }
+
+ char_ptr = (const unsigned char *) longword_ptr;
+
+ while (n-- > 0)
+ {
+ if (*char_ptr == c)
+ return (__ptr_t) char_ptr;
+ else
+ ++char_ptr;
+ }
+
+ return 0;
+}
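The hole-bit test in the loop above can be checked in isolation. The sketch below applies the same expression to a 32-bit word; as the comments in memchr explain, it may report a false positive for 0x80 bytes (hence "maybe"), which the bytewise rescan then filters out. Values are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Same test as memchr's inner loop: any unchanged hole bit after
       adding the magic constant suggests a zero byte somewhere. */
    static int maybe_has_zero_byte(uint32_t w)
    {
        uint32_t magic = 0x7efefeffu;
        return (((w + magic) ^ ~w) & ~magic) != 0;
    }

    int main(void)
    {
        printf("%d\n", maybe_has_zero_byte(0x41424344));  /* 0: "ABCD" */
        printf("%d\n", maybe_has_zero_byte(0x41420044));  /* 1: has 00 */
        return 0;
    }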
diff --git a/reference/glibc-c/memcmp.c b/reference/glibc-c/memcmp.c
new file mode 100644
index 000000000000..8ea364c2dfb5
--- /dev/null
+++ b/reference/glibc-c/memcmp.c
@@ -0,0 +1,369 @@
+/* Copyright (C) 1991,1993,1995,1997,1998,2003,2004,2012
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Torbjorn Granlund (tege@sics.se).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#undef __ptr_t
+#define __ptr_t void *
+
+#if defined HAVE_STRING_H || defined _LIBC
+# include <string.h>
+#endif
+
+#undef memcmp
+
+#ifdef _LIBC
+
+# include <memcopy.h>
+# include <endian.h>
+
+# if __BYTE_ORDER == __BIG_ENDIAN
+# define WORDS_BIGENDIAN
+# endif
+
+#else /* Not in the GNU C library. */
+
+# include <sys/types.h>
+
+/* Type to use for aligned memory operations.
+ This should normally be the biggest type supported by a single load
+ and store. Must be an unsigned type. */
+# define op_t unsigned long int
+# define OPSIZ (sizeof(op_t))
+
+/* Threshold value for when to enter the unrolled loops. */
+# define OP_T_THRES 16
+
+/* Type to use for unaligned operations. */
+typedef unsigned char byte;
+
+# ifndef WORDS_BIGENDIAN
+# define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
+# else
+# define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
+# endif
+
+#endif /* In the GNU C library. */
+
+#ifdef WORDS_BIGENDIAN
+# define CMP_LT_OR_GT(a, b) ((a) > (b) ? 1 : -1)
+#else
+# define CMP_LT_OR_GT(a, b) memcmp_bytes ((a), (b))
+#endif
+
+/* BE VERY CAREFUL IF YOU CHANGE THIS CODE! */
+
+/* The strategy of this memcmp is:
+
+ 1. Compare bytes until one of the block pointers is aligned.
+
+ 2. Compare using memcmp_common_alignment or
+ memcmp_not_common_alignment, regarding the alignment of the other
+ block after the initial byte operations. The maximum number of
+ full words (of type op_t) are compared in this way.
+
+ 3. Compare the few remaining bytes. */
+
+#ifndef WORDS_BIGENDIAN
+/* memcmp_bytes -- Compare A and B bytewise in the byte order of the machine.
+ A and B are known to be different.
+ This is needed only on little-endian machines. */
+
+static int memcmp_bytes (op_t, op_t) __THROW;
+
+# ifdef __GNUC__
+__inline
+# endif
+static int
+memcmp_bytes (a, b)
+ op_t a, b;
+{
+ long int srcp1 = (long int) &a;
+ long int srcp2 = (long int) &b;
+ op_t a0, b0;
+
+ do
+ {
+ a0 = ((byte *) srcp1)[0];
+ b0 = ((byte *) srcp2)[0];
+ srcp1 += 1;
+ srcp2 += 1;
+ }
+ while (a0 == b0);
+ return a0 - b0;
+}
+#endif
+
+static int memcmp_common_alignment (long, long, size_t) __THROW;
+
+/* memcmp_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN `op_t'
+ objects (not LEN bytes!). Both SRCP1 and SRCP2 should be aligned for
+ memory operations on `op_t's. */
+static int
+memcmp_common_alignment (srcp1, srcp2, len)
+ long int srcp1;
+ long int srcp2;
+ size_t len;
+{
+ op_t a0, a1;
+ op_t b0, b1;
+
+ switch (len % 4)
+ {
+ default: /* Avoid warning about uninitialized local variables. */
+ case 2:
+ a0 = ((op_t *) srcp1)[0];
+ b0 = ((op_t *) srcp2)[0];
+ srcp1 -= 2 * OPSIZ;
+ srcp2 -= 2 * OPSIZ;
+ len += 2;
+ goto do1;
+ case 3:
+ a1 = ((op_t *) srcp1)[0];
+ b1 = ((op_t *) srcp2)[0];
+ srcp1 -= OPSIZ;
+ srcp2 -= OPSIZ;
+ len += 1;
+ goto do2;
+ case 0:
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ return 0;
+ a0 = ((op_t *) srcp1)[0];
+ b0 = ((op_t *) srcp2)[0];
+ goto do3;
+ case 1:
+ a1 = ((op_t *) srcp1)[0];
+ b1 = ((op_t *) srcp2)[0];
+ srcp1 += OPSIZ;
+ srcp2 += OPSIZ;
+ len -= 1;
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ goto do0;
+ /* Fall through. */
+ }
+
+ do
+ {
+ a0 = ((op_t *) srcp1)[0];
+ b0 = ((op_t *) srcp2)[0];
+ if (a1 != b1)
+ return CMP_LT_OR_GT (a1, b1);
+
+ do3:
+ a1 = ((op_t *) srcp1)[1];
+ b1 = ((op_t *) srcp2)[1];
+ if (a0 != b0)
+ return CMP_LT_OR_GT (a0, b0);
+
+ do2:
+ a0 = ((op_t *) srcp1)[2];
+ b0 = ((op_t *) srcp2)[2];
+ if (a1 != b1)
+ return CMP_LT_OR_GT (a1, b1);
+
+ do1:
+ a1 = ((op_t *) srcp1)[3];
+ b1 = ((op_t *) srcp2)[3];
+ if (a0 != b0)
+ return CMP_LT_OR_GT (a0, b0);
+
+ srcp1 += 4 * OPSIZ;
+ srcp2 += 4 * OPSIZ;
+ len -= 4;
+ }
+ while (len != 0);
+
+ /* This is the right position for do0. Please don't move
+ it into the loop. */
+ do0:
+ if (a1 != b1)
+ return CMP_LT_OR_GT (a1, b1);
+ return 0;
+}
+
+static int memcmp_not_common_alignment (long, long, size_t) __THROW;
+
+/* memcmp_not_common_alignment -- Compare blocks at SRCP1 and SRCP2 with LEN
+ `op_t' objects (not LEN bytes!). SRCP2 should be aligned for memory
+ operations on `op_t', but SRCP1 *should be unaligned*. */
+static int
+memcmp_not_common_alignment (srcp1, srcp2, len)
+ long int srcp1;
+ long int srcp2;
+ size_t len;
+{
+ op_t a0, a1, a2, a3;
+ op_t b0, b1, b2, b3;
+ op_t x;
+ int shl, shr;
+
+ /* Calculate how to shift a word read at the memory operation
+ aligned srcp1 to make it aligned for comparison. */
+
+ shl = 8 * (srcp1 % OPSIZ);
+ shr = 8 * OPSIZ - shl;
+
+ /* Make SRCP1 aligned by rounding it down to the beginning of the `op_t'
+ it points in the middle of. */
+ srcp1 &= -OPSIZ;
+
+ switch (len % 4)
+ {
+ default: /* Avoid warning about uninitialized local variables. */
+ case 2:
+ a1 = ((op_t *) srcp1)[0];
+ a2 = ((op_t *) srcp1)[1];
+ b2 = ((op_t *) srcp2)[0];
+ srcp1 -= 1 * OPSIZ;
+ srcp2 -= 2 * OPSIZ;
+ len += 2;
+ goto do1;
+ case 3:
+ a0 = ((op_t *) srcp1)[0];
+ a1 = ((op_t *) srcp1)[1];
+ b1 = ((op_t *) srcp2)[0];
+ srcp2 -= 1 * OPSIZ;
+ len += 1;
+ goto do2;
+ case 0:
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ return 0;
+ a3 = ((op_t *) srcp1)[0];
+ a0 = ((op_t *) srcp1)[1];
+ b0 = ((op_t *) srcp2)[0];
+ srcp1 += 1 * OPSIZ;
+ goto do3;
+ case 1:
+ a2 = ((op_t *) srcp1)[0];
+ a3 = ((op_t *) srcp1)[1];
+ b3 = ((op_t *) srcp2)[0];
+ srcp1 += 2 * OPSIZ;
+ srcp2 += 1 * OPSIZ;
+ len -= 1;
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ goto do0;
+ /* Fall through. */
+ }
+
+ do
+ {
+ a0 = ((op_t *) srcp1)[0];
+ b0 = ((op_t *) srcp2)[0];
+ x = MERGE(a2, shl, a3, shr);
+ if (x != b3)
+ return CMP_LT_OR_GT (x, b3);
+
+ do3:
+ a1 = ((op_t *) srcp1)[1];
+ b1 = ((op_t *) srcp2)[1];
+ x = MERGE(a3, shl, a0, shr);
+ if (x != b0)
+ return CMP_LT_OR_GT (x, b0);
+
+ do2:
+ a2 = ((op_t *) srcp1)[2];
+ b2 = ((op_t *) srcp2)[2];
+ x = MERGE(a0, shl, a1, shr);
+ if (x != b1)
+ return CMP_LT_OR_GT (x, b1);
+
+ do1:
+ a3 = ((op_t *) srcp1)[3];
+ b3 = ((op_t *) srcp2)[3];
+ x = MERGE(a1, shl, a2, shr);
+ if (x != b2)
+ return CMP_LT_OR_GT (x, b2);
+
+ srcp1 += 4 * OPSIZ;
+ srcp2 += 4 * OPSIZ;
+ len -= 4;
+ }
+ while (len != 0);
+
+ /* This is the right position for do0. Please don't move
+ it into the loop. */
+ do0:
+ x = MERGE(a2, shl, a3, shr);
+ if (x != b3)
+ return CMP_LT_OR_GT (x, b3);
+ return 0;
+}
+
+int
+memcmp (s1, s2, len)
+ const __ptr_t s1;
+ const __ptr_t s2;
+ size_t len;
+{
+ op_t a0;
+ op_t b0;
+ long int srcp1 = (long int) s1;
+ long int srcp2 = (long int) s2;
+ op_t res;
+
+ if (len >= OP_T_THRES)
+ {
+ /* There are at least some bytes to compare. No need to test
+ for LEN == 0 in this alignment loop. */
+ while (srcp2 % OPSIZ != 0)
+ {
+ a0 = ((byte *) srcp1)[0];
+ b0 = ((byte *) srcp2)[0];
+ srcp1 += 1;
+ srcp2 += 1;
+ res = a0 - b0;
+ if (res != 0)
+ return res;
+ len -= 1;
+ }
+
+ /* SRCP2 is now aligned for memory operations on `op_t'.
+ SRCP1 alignment determines if we can do a simple,
+ aligned compare or need to shuffle bits. */
+
+ if (srcp1 % OPSIZ == 0)
+ res = memcmp_common_alignment (srcp1, srcp2, len / OPSIZ);
+ else
+ res = memcmp_not_common_alignment (srcp1, srcp2, len / OPSIZ);
+ if (res != 0)
+ return res;
+
+ /* Number of bytes remaining in the interval [0..OPSIZ-1]. */
+ srcp1 += len & -OPSIZ;
+ srcp2 += len & -OPSIZ;
+ len %= OPSIZ;
+ }
+
+ /* There are just a few bytes to compare. Use byte memory operations. */
+ while (len != 0)
+ {
+ a0 = ((byte *) srcp1)[0];
+ b0 = ((byte *) srcp2)[0];
+ srcp1 += 1;
+ srcp2 += 1;
+ res = a0 - b0;
+ if (res != 0)
+ return res;
+ len -= 1;
+ }
+
+ return 0;
+}
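CMP_LT_OR_GT falls back to memcmp_bytes on little-endian targets because the numeric order of two words can disagree with their byte-by-byte order, which is what memcmp must report. An illustration with invented values (assumes a little-endian host):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Little-endian memory: a = 02 00 00 00, b = 01 01 00 00. */
        uint32_t a = 0x00000002, b = 0x00000101;

        printf("numeric: a %s b\n", a < b ? "<" : ">");  /* a < b  */
        printf("memcmp:  %d\n", memcmp(&a, &b, 4) > 0);  /* 1 (>)  */
        return 0;
    }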
diff --git a/reference/glibc-c/memcopy.h b/reference/glibc-c/memcopy.h
new file mode 100644
index 000000000000..a8e5a44fe5dd
--- /dev/null
+++ b/reference/glibc-c/memcopy.h
@@ -0,0 +1,146 @@
+/* memcopy.h -- definitions for memory copy functions. Generic C version.
+ Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Torbjorn Granlund (tege@sics.se).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* The strategy of the memory functions is:
+
+ 1. Copy bytes until the destination pointer is aligned.
+
+ 2. Copy words in unrolled loops. If the source and destination
+ are not aligned in the same way, use word memory operations,
+ but shift and merge two read words before writing.
+
+ 3. Copy the few remaining bytes.
+
+ This is fast on processors that have at least 10 registers for
+ allocation by GCC, and that can access memory at reg+const in one
+ instruction.
+
+ I made an "exhaustive" test of this memmove when I wrote it,
+ exhaustive in the sense that I tried all alignment and length
+ combinations, with and without overlap. */
+
+#include <sys/cdefs.h>
+#include <endian.h>
+
+/* The macros defined in this file are:
+
+ BYTE_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy)
+
+ BYTE_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy)
+
+ WORD_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_remaining, nbytes_to_copy)
+
+ WORD_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_remaining, nbytes_to_copy)
+
+ MERGE(old_word, sh_1, new_word, sh_2)
+ [I fail to understand. I feel stupid. --roland]
+*/
+
+/* Type to use for aligned memory operations.
+ This should normally be the biggest type supported by a single load
+ and store. */
+#define op_t unsigned long int
+#define OPSIZ (sizeof(op_t))
+
+/* Type to use for unaligned operations. */
+typedef unsigned char byte;
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
+#endif
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
+#endif
+
+/* Copy exactly NBYTES bytes from SRC_BP to DST_BP,
+ without any assumptions about alignment of the pointers. */
+#define BYTE_COPY_FWD(dst_bp, src_bp, nbytes) \
+ do \
+ { \
+ size_t __nbytes = (nbytes); \
+ while (__nbytes > 0) \
+ { \
+ byte __x = ((byte *) src_bp)[0]; \
+ src_bp += 1; \
+ __nbytes -= 1; \
+ ((byte *) dst_bp)[0] = __x; \
+ dst_bp += 1; \
+ } \
+ } while (0)
+
+/* Copy exactly NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR,
+ beginning at the bytes right before the pointers and continuing towards
+ smaller addresses. Don't assume anything about alignment of the
+ pointers. */
+#define BYTE_COPY_BWD(dst_ep, src_ep, nbytes) \
+ do \
+ { \
+ size_t __nbytes = (nbytes); \
+ while (__nbytes > 0) \
+ { \
+ byte __x; \
+ src_ep -= 1; \
+ __x = ((byte *) src_ep)[0]; \
+ dst_ep -= 1; \
+ __nbytes -= 1; \
+ ((byte *) dst_ep)[0] = __x; \
+ } \
+ } while (0)
+
+/* Copy *up to* NBYTES bytes from SRC_BP to DST_BP, with
+ the assumption that DST_BP is aligned on an OPSIZ multiple. If
+ not all bytes could be easily copied, store remaining number of bytes
+ in NBYTES_LEFT, otherwise store 0. */
+extern void _wordcopy_fwd_aligned (long int, long int, size_t) __THROW;
+extern void _wordcopy_fwd_dest_aligned (long int, long int, size_t) __THROW;
+#define WORD_COPY_FWD(dst_bp, src_bp, nbytes_left, nbytes) \
+ do \
+ { \
+ if (src_bp % OPSIZ == 0) \
+ _wordcopy_fwd_aligned (dst_bp, src_bp, (nbytes) / OPSIZ); \
+ else \
+ _wordcopy_fwd_dest_aligned (dst_bp, src_bp, (nbytes) / OPSIZ); \
+ src_bp += (nbytes) & -OPSIZ; \
+ dst_bp += (nbytes) & -OPSIZ; \
+ (nbytes_left) = (nbytes) % OPSIZ; \
+ } while (0)
+
+/* Copy *up to* NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR,
+ beginning at the words (of type op_t) right before the pointers and
+ continuing towards smaller addresses. May take advantage of that
+ DST_END_PTR is aligned on an OPSIZ multiple. If not all bytes could be
+ easily copied, store remaining number of bytes in NBYTES_REMAINING,
+ otherwise store 0. */
+extern void _wordcopy_bwd_aligned (long int, long int, size_t) __THROW;
+extern void _wordcopy_bwd_dest_aligned (long int, long int, size_t) __THROW;
+#define WORD_COPY_BWD(dst_ep, src_ep, nbytes_left, nbytes) \
+ do \
+ { \
+ if (src_ep % OPSIZ == 0) \
+ _wordcopy_bwd_aligned (dst_ep, src_ep, (nbytes) / OPSIZ); \
+ else \
+ _wordcopy_bwd_dest_aligned (dst_ep, src_ep, (nbytes) / OPSIZ); \
+ src_ep -= (nbytes) & -OPSIZ; \
+ dst_ep -= (nbytes) & -OPSIZ; \
+ (nbytes_left) = (nbytes) % OPSIZ; \
+ } while (0)
+
+
+/* Threshold value for when to enter the unrolled loops. */
+#define OP_T_THRES 16
diff --git a/reference/glibc-c/memcpy.c b/reference/glibc-c/memcpy.c
new file mode 100644
index 000000000000..77e521644a11
--- /dev/null
+++ b/reference/glibc-c/memcpy.c
@@ -0,0 +1,63 @@
+/* Copy memory to memory until the specified number of bytes
+ has been copied. Overlap is NOT handled correctly.
+ Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Torbjorn Granlund (tege@sics.se).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include "memcopy.h"
+#include "pagecopy.h"
+
+#undef memcpy
+
+void *
+memcpy (dstpp, srcpp, len)
+ void *dstpp;
+ const void *srcpp;
+ size_t len;
+{
+ unsigned long int dstp = (long int) dstpp;
+ unsigned long int srcp = (long int) srcpp;
+
+ /* Copy from the beginning to the end. */
+
+ /* If there are not too few bytes to copy, use word copy. */
+ if (len >= OP_T_THRES)
+ {
+ /* Copy just a few bytes to make DSTP aligned. */
+ len -= (-dstp) % OPSIZ;
+ BYTE_COPY_FWD (dstp, srcp, (-dstp) % OPSIZ);
+
+ /* Copy whole pages from SRCP to DSTP by virtual address manipulation,
+ as much as possible. */
+
+ PAGE_COPY_FWD_MAYBE (dstp, srcp, len, len);
+
+ /* Copy from SRCP to DSTP taking advantage of the known alignment of
+ DSTP. Number of bytes remaining is put in the third argument,
+ i.e. in LEN. This number may vary from machine to machine. */
+
+ WORD_COPY_FWD (dstp, srcp, len, len);
+
+ /* Fall out and copy the tail. */
+ }
+
+ /* There are just a few bytes to copy. Use byte memory operations. */
+ BYTE_COPY_FWD (dstp, srcp, len);
+
+ return dstpp;
+}
diff --git a/reference/glibc-c/memset.c b/reference/glibc-c/memset.c
new file mode 100644
index 000000000000..207e7c4d401f
--- /dev/null
+++ b/reference/glibc-c/memset.c
@@ -0,0 +1,89 @@
+/* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include "memcopy.h"
+
+#undef memset
+
+void *
+memset (dstpp, c, len)
+ void *dstpp;
+ int c;
+ size_t len;
+{
+ long int dstp = (long int) dstpp;
+
+ if (len >= 8)
+ {
+ size_t xlen;
+ op_t cccc;
+
+ cccc = (unsigned char) c;
+ cccc |= cccc << 8;
+ cccc |= cccc << 16;
+ if (OPSIZ > 4)
+ /* Do the shift in two steps to avoid warning if long has 32 bits. */
+ cccc |= (cccc << 16) << 16;
+
+ /* There are at least some bytes to set.
+ No need to test for LEN == 0 in this alignment loop. */
+ while (dstp % OPSIZ != 0)
+ {
+ ((byte *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
+ xlen = len / (OPSIZ * 8);
+ while (xlen > 0)
+ {
+ ((op_t *) dstp)[0] = cccc;
+ ((op_t *) dstp)[1] = cccc;
+ ((op_t *) dstp)[2] = cccc;
+ ((op_t *) dstp)[3] = cccc;
+ ((op_t *) dstp)[4] = cccc;
+ ((op_t *) dstp)[5] = cccc;
+ ((op_t *) dstp)[6] = cccc;
+ ((op_t *) dstp)[7] = cccc;
+ dstp += 8 * OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ * 8;
+
+ /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
+ xlen = len / OPSIZ;
+ while (xlen > 0)
+ {
+ ((op_t *) dstp)[0] = cccc;
+ dstp += OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ;
+ }
+
+ /* Write the last few bytes. */
+ while (len > 0)
+ {
+ ((byte *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ return dstpp;
+}
diff --git a/reference/glibc-c/pagecopy.h b/reference/glibc-c/pagecopy.h
new file mode 100644
index 000000000000..89f392cb43c4
--- /dev/null
+++ b/reference/glibc-c/pagecopy.h
@@ -0,0 +1,74 @@
+/* Macros for copying by pages; used in memcpy, memmove. Generic macros.
+ Copyright (C) 1995, 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file defines the macro:
+
+ PAGE_COPY_FWD_MAYBE (dstp, srcp, nbytes_left, nbytes)
+
+ which is invoked like WORD_COPY_FWD et al. The pointers should be at
+ least word aligned. This will check if virtual copying by pages can and
+ should be done and do it if so.
+
+ System-specific pagecopy.h files should define these macros and then
+ #include this file:
+
+ PAGE_COPY_THRESHOLD
+ -- Minimum size for which virtual copying by pages is worthwhile.
+
+ PAGE_SIZE
+ -- Size of a page.
+
+ PAGE_COPY_FWD (dstp, srcp, nbytes_left, nbytes)
+ -- Macro to perform the virtual copy operation.
+ The pointers will be aligned to PAGE_SIZE bytes.
+*/
+
+
+#if PAGE_COPY_THRESHOLD
+
+#include <assert.h>
+
+#define PAGE_COPY_FWD_MAYBE(dstp, srcp, nbytes_left, nbytes) \
+ do \
+ { \
+ if ((nbytes) >= PAGE_COPY_THRESHOLD && \
+ PAGE_OFFSET ((dstp) - (srcp)) == 0) \
+ { \
+ /* The amount to copy is past the threshold for copying \
+ pages virtually with kernel VM operations, and the \
+ source and destination addresses have the same alignment. */ \
+ size_t nbytes_before = PAGE_OFFSET (-(dstp)); \
+ if (nbytes_before != 0) \
+ { \
+ /* First copy the words before the first page boundary. */ \
+ WORD_COPY_FWD (dstp, srcp, nbytes_left, nbytes_before); \
+ assert (nbytes_left == 0); \
+ nbytes -= nbytes_before; \
+ } \
+ PAGE_COPY_FWD (dstp, srcp, nbytes_left, nbytes); \
+ } \
+ } while (0)
+
+/* The page size is always a power of two, so we can avoid modulo division. */
+#define PAGE_OFFSET(n) ((n) & (PAGE_SIZE - 1))
+
+#else
+
+#define PAGE_COPY_FWD_MAYBE(dstp, srcp, nbytes_left, nbytes) /* nada */
+
+#endif
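[Editorial note: PAGE_OFFSET is the usual power-of-two modulo trick — because PAGE_SIZE is a power of two, `n & (PAGE_SIZE - 1)' equals `n % PAGE_SIZE' without a division. A tiny self-contained check, assuming a 4096-byte page; the constant is illustrative.]

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096
    #define DEMO_PAGE_OFFSET(n) ((uintptr_t) (n) & (DEMO_PAGE_SIZE - 1))

    int main (void)
    {
      assert (DEMO_PAGE_OFFSET (8192) == 0);  /* exactly two pages in */
      assert (DEMO_PAGE_OFFSET (8195) == 3);  /* same as 8195 % 4096  */
      return 0;
    }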
diff --git a/reference/glibc-c/strchr.c b/reference/glibc-c/strchr.c
new file mode 100644
index 000000000000..3866d1b79d94
--- /dev/null
+++ b/reference/glibc-c/strchr.c
@@ -0,0 +1,184 @@
+/* Copyright (C) 1991,1993-1997,1999,2000,2003,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Based on strlen implementation by Torbjorn Granlund (tege@sics.se),
+ with help from Dan Sahlin (dan@sics.se) and
+ bug fix and commentary by Jim Blandy (jimb@ai.mit.edu);
+ adaptation to strchr suggested by Dick Karpinski (dick@cca.ucsf.edu),
+ and implemented by Roland McGrath (roland@ai.mit.edu).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include "memcopy.h"
+#include <stdlib.h>
+
+#undef strchr
+
+/* Find the first occurrence of C in S. */
+char *
+strchr (s, c_in)
+ const char *s;
+ int c_in;
+{
+ const unsigned char *char_ptr;
+ const unsigned long int *longword_ptr;
+ unsigned long int longword, magic_bits, charmask;
+ unsigned char c;
+
+ c = (unsigned char) c_in;
+
+ /* Handle the first few characters by reading one character at a time.
+ Do this until CHAR_PTR is aligned on a longword boundary. */
+ for (char_ptr = (const unsigned char *) s;
+ ((unsigned long int) char_ptr & (sizeof (longword) - 1)) != 0;
+ ++char_ptr)
+ if (*char_ptr == c)
+ return (void *) char_ptr;
+ else if (*char_ptr == '\0')
+ return NULL;
+
+ /* All these elucidatory comments refer to 4-byte longwords,
+ but the theory applies equally well to 8-byte longwords. */
+
+ longword_ptr = (unsigned long int *) char_ptr;
+
+ /* Bits 31, 24, 16, and 8 of this number are zero. Call these bits
+ the "holes." Note that there is a hole just to the left of
+ each byte, with an extra at the end:
+
+ bits: 01111110 11111110 11111110 11111111
+ bytes: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
+
+ The 1-bits make sure that carries propagate to the next 0-bit.
+ The 0-bits provide holes for carries to fall into. */
+ switch (sizeof (longword))
+ {
+ case 4: magic_bits = 0x7efefeffL; break;
+ case 8: magic_bits = ((0x7efefefeL << 16) << 16) | 0xfefefeffL; break;
+ default:
+ abort ();
+ }
+
+ /* Set up a longword, each of whose bytes is C. */
+ charmask = c | (c << 8);
+ charmask |= charmask << 16;
+ if (sizeof (longword) > 4)
+ /* Do the shift in two steps to avoid a warning if long has 32 bits. */
+ charmask |= (charmask << 16) << 16;
+ if (sizeof (longword) > 8)
+ abort ();
+
+ /* Instead of the traditional loop which tests each character,
+ we will test a longword at a time. The tricky part is testing
+ if *any of the four* bytes in the longword in question are zero. */
+ for (;;)
+ {
+ /* We tentatively exit the loop if adding MAGIC_BITS to
+ LONGWORD fails to change any of the hole bits of LONGWORD.
+
+ 1) Is this safe? Will it catch all the zero bytes?
+ Suppose there is a byte with all zeros. Any carry bits
+ propagating from its left will fall into the hole at its
+ least significant bit and stop. Since there will be no
+ carry from its most significant bit, the LSB of the
+ byte to the left will be unchanged, and the zero will be
+ detected.
+
+ 2) Is this worthwhile? Will it ignore everything except
+ zero bytes? Suppose every byte of LONGWORD has a bit set
+ somewhere. There will be a carry into bit 8. If bit 8
+ is set, this will carry into bit 16. If bit 8 is clear,
+ one of bits 9-15 must be set, so there will be a carry
+ into bit 16. Similarly, there will be a carry into bit
+ 24. If one of bits 24-30 is set, there will be a carry
+ into bit 31, so all of the hole bits will be changed.
+
+ The one misfire occurs when bits 24-30 are clear and bit
+ 31 is set; in this case, the hole at bit 31 is not
+ changed. If we had access to the processor carry flag,
+ we could close this loophole by putting the fourth hole
+ at bit 32!
+
+ So it ignores everything except 128's, when they're aligned
+ properly.
+
+ 3) But wait! Aren't we looking for C as well as zero?
+ Good point. So what we do is XOR LONGWORD with a longword,
+ each of whose bytes is C. This turns each byte that is C
+ into a zero. */
+
+ longword = *longword_ptr++;
+
+ /* Add MAGIC_BITS to LONGWORD. */
+ if ((((longword + magic_bits)
+
+ /* Set those bits that were unchanged by the addition. */
+ ^ ~longword)
+
+ /* Look at only the hole bits. If any of the hole bits
+ are unchanged, most likely one of the bytes was a
+ zero. */
+ & ~magic_bits) != 0 ||
+
+ /* That caught zeroes. Now test for C. */
+ ((((longword ^ charmask) + magic_bits) ^ ~(longword ^ charmask))
+ & ~magic_bits) != 0)
+ {
+ /* Which of the bytes was C or zero?
+ If none of them were, it was a misfire; continue the search. */
+
+ const unsigned char *cp = (const unsigned char *) (longword_ptr - 1);
+
+ if (*cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (sizeof (longword) > 4)
+ {
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ if (*++cp == c)
+ return (char *) cp;
+ else if (*cp == '\0')
+ return NULL;
+ }
+ }
+ }
+
+ return NULL;
+}
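[Editorial note: the long comment above describes the carry-propagation test in prose; the predicate can be isolated into one small function for the 32-bit case. A sketch for demonstration only — it keeps the documented false positive for a 0x80 byte in the most significant position, which is why the bytewise confirmation follows in the real code.]

    #include <stdint.h>

    /* Nonzero if W may contain a zero byte; exact except for the
       documented 0x80-in-the-top-byte misfire.  */
    static int may_have_zero_byte (uint32_t w)
    {
      const uint32_t magic = 0x7efefeffU;
      return (((w + magic) ^ ~w) & ~magic) != 0;
    }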
diff --git a/reference/glibc-c/strcmp.c b/reference/glibc-c/strcmp.c
new file mode 100644
index 000000000000..6ca781575dcb
--- /dev/null
+++ b/reference/glibc-c/strcmp.c
@@ -0,0 +1,45 @@
+/* Copyright (C) 1991, 1996, 1997, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include "memcopy.h"
+
+#undef strcmp
+
+/* Compare S1 and S2, returning less than, equal to or
+ greater than zero if S1 is lexicographically less than,
+ equal to or greater than S2. */
+int
+strcmp (p1, p2)
+ const char *p1;
+ const char *p2;
+{
+ register const unsigned char *s1 = (const unsigned char *) p1;
+ register const unsigned char *s2 = (const unsigned char *) p2;
+ unsigned char c1, c2;
+
+ do
+ {
+ c1 = (unsigned char) *s1++;
+ c2 = (unsigned char) *s2++;
+ if (c1 == '\0')
+ return c1 - c2;
+ }
+ while (c1 == c2);
+
+ return c1 - c2;
+}
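[Editorial note: the casts to unsigned char above are load-bearing — C requires strcmp to compare as unsigned bytes, so with plain (possibly signed) char a byte such as 0xE9 would wrongly sort before 'e'. A quick illustration:]

    #include <stdio.h>
    #include <string.h>

    int main (void)
    {
      /* 0xE9 (233) > 'e' (101) as unsigned bytes, so the result is > 0. */
      printf ("%d\n", strcmp ("\xe9", "e") > 0);   /* prints 1 */
      return 0;
    }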
diff --git a/reference/glibc-c/strcpy.c b/reference/glibc-c/strcpy.c
new file mode 100644
index 000000000000..0b661092867c
--- /dev/null
+++ b/reference/glibc-c/strcpy.c
@@ -0,0 +1,39 @@
+/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stddef.h>
+#include <string.h>
+
+#undef strcpy
+
+/* Copy SRC to DEST. */
+char *
+strcpy (char *dest, const char *src)
+{
+ char c;
+ char *s = (char *) src;
+ const ptrdiff_t off = dest - s - 1;
+
+ do
+ {
+ c = *s++;
+ s[off] = c;
+ }
+ while (c != '\0');
+
+ return dest;
+}
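[Editorial note: the constant offset is the whole trick here — after `c = *s++', S points one past the byte just read, so `s + off == dest + i' for the i-th byte, and a single moving pointer serves both arrays. The same loop in expanded, two-index form; illustrative, not from the patch.]

    #include <stddef.h>

    char *
    strcpy_expanded (char *dest, const char *src)
    {
      size_t i = 0;
      char c;

      do
        {
          c = src[i];
          dest[i] = c;   /* the store the offset form performs via s[off] */
          i++;
        }
      while (c != '\0');

      return dest;
    }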
diff --git a/reference/glibc-c/strlen.c b/reference/glibc-c/strlen.c
new file mode 100644
index 000000000000..d6db374e9b7d
--- /dev/null
+++ b/reference/glibc-c/strlen.c
@@ -0,0 +1,105 @@
+/* Copyright (C) 1991,1993,1997,2000,2003,2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Written by Torbjorn Granlund (tege@sics.se),
+ with help from Dan Sahlin (dan@sics.se);
+ commentary by Jim Blandy (jimb@ai.mit.edu).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include <stdlib.h>
+
+#undef strlen
+
+/* Return the length of the null-terminated string STR. Scan for
+ the null terminator quickly by testing four bytes at a time. */
+size_t
+strlen (str)
+ const char *str;
+{
+ const char *char_ptr;
+ const unsigned long int *longword_ptr;
+ unsigned long int longword, himagic, lomagic;
+
+ /* Handle the first few characters by reading one character at a time.
+ Do this until CHAR_PTR is aligned on a longword boundary. */
+ for (char_ptr = str; ((unsigned long int) char_ptr
+ & (sizeof (longword) - 1)) != 0;
+ ++char_ptr)
+ if (*char_ptr == '\0')
+ return char_ptr - str;
+
+ /* All these elucidatory comments refer to 4-byte longwords,
+ but the theory applies equally well to 8-byte longwords. */
+
+ longword_ptr = (unsigned long int *) char_ptr;
+
+ /* Bits 31, 24, 16, and 8 of this number are zero. Call these bits
+ the "holes." Note that there is a hole just to the left of
+ each byte, with an extra at the end:
+
+ bits: 01111110 11111110 11111110 11111111
+ bytes: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
+
+ The 1-bits make sure that carries propagate to the next 0-bit.
+ The 0-bits provide holes for carries to fall into. */
+ himagic = 0x80808080L;
+ lomagic = 0x01010101L;
+ if (sizeof (longword) > 4)
+ {
+ /* 64-bit version of the magic. */
+ /* Do the shift in two steps to avoid a warning if long has 32 bits. */
+ himagic = ((himagic << 16) << 16) | himagic;
+ lomagic = ((lomagic << 16) << 16) | lomagic;
+ }
+ if (sizeof (longword) > 8)
+ abort ();
+
+ /* Instead of the traditional loop which tests each character,
+ we will test a longword at a time. The tricky part is testing
+ if *any of the four* bytes in the longword in question are zero. */
+ for (;;)
+ {
+ longword = *longword_ptr++;
+
+ if (((longword - lomagic) & ~longword & himagic) != 0)
+ {
+ /* Which of the bytes was the zero? If none of them were, it was
+ a misfire; continue the search. */
+
+ const char *cp = (const char *) (longword_ptr - 1);
+
+ if (cp[0] == 0)
+ return cp - str;
+ if (cp[1] == 0)
+ return cp - str + 1;
+ if (cp[2] == 0)
+ return cp - str + 2;
+ if (cp[3] == 0)
+ return cp - str + 3;
+ if (sizeof (longword) > 4)
+ {
+ if (cp[4] == 0)
+ return cp - str + 4;
+ if (cp[5] == 0)
+ return cp - str + 5;
+ if (cp[6] == 0)
+ return cp - str + 6;
+ if (cp[7] == 0)
+ return cp - str + 7;
+ }
+ }
+ }
+}
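[Editorial note: the test in the inner loop is the classic zero-byte detector — subtracting the low magic borrows into the 0x80 bit of a byte that was zero, and `& ~longword' masks out bytes that already had their high bit set. Isolated for the 32-bit case; the 64-bit form just widens the two constants.]

    #include <stdint.h>

    /* Nonzero iff some byte of W is zero. */
    static int has_zero_byte32 (uint32_t w)
    {
      return ((w - 0x01010101U) & ~w & 0x80808080U) != 0;
    }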
diff --git a/reference/glibc-c/wordcopy.c b/reference/glibc-c/wordcopy.c
new file mode 100644
index 000000000000..b757a62e8415
--- /dev/null
+++ b/reference/glibc-c/wordcopy.c
@@ -0,0 +1,412 @@
+/* _memcopy.c -- subroutines for memory copy functions.
+ Copyright (C) 1991, 1996 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Torbjorn Granlund (tege@sics.se).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* BE VERY CAREFUL IF YOU CHANGE THIS CODE...! */
+
+#include <stddef.h>
+#include "memcopy.h"
+
+/* _wordcopy_fwd_aligned -- Copy block beginning at SRCP to
+ block beginning at DSTP with LEN `op_t' words (not LEN bytes!).
+ Both SRCP and DSTP should be aligned for memory operations on `op_t's. */
+
+void
+_wordcopy_fwd_aligned (dstp, srcp, len)
+ long int dstp;
+ long int srcp;
+ size_t len;
+{
+ op_t a0, a1;
+
+ switch (len % 8)
+ {
+ case 2:
+ a0 = ((op_t *) srcp)[0];
+ srcp -= 6 * OPSIZ;
+ dstp -= 7 * OPSIZ;
+ len += 6;
+ goto do1;
+ case 3:
+ a1 = ((op_t *) srcp)[0];
+ srcp -= 5 * OPSIZ;
+ dstp -= 6 * OPSIZ;
+ len += 5;
+ goto do2;
+ case 4:
+ a0 = ((op_t *) srcp)[0];
+ srcp -= 4 * OPSIZ;
+ dstp -= 5 * OPSIZ;
+ len += 4;
+ goto do3;
+ case 5:
+ a1 = ((op_t *) srcp)[0];
+ srcp -= 3 * OPSIZ;
+ dstp -= 4 * OPSIZ;
+ len += 3;
+ goto do4;
+ case 6:
+ a0 = ((op_t *) srcp)[0];
+ srcp -= 2 * OPSIZ;
+ dstp -= 3 * OPSIZ;
+ len += 2;
+ goto do5;
+ case 7:
+ a1 = ((op_t *) srcp)[0];
+ srcp -= 1 * OPSIZ;
+ dstp -= 2 * OPSIZ;
+ len += 1;
+ goto do6;
+
+ case 0:
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ return;
+ a0 = ((op_t *) srcp)[0];
+ srcp -= 0 * OPSIZ;
+ dstp -= 1 * OPSIZ;
+ goto do7;
+ case 1:
+ a1 = ((op_t *) srcp)[0];
+ srcp -=-1 * OPSIZ;
+ dstp -= 0 * OPSIZ;
+ len -= 1;
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ goto do0;
+ goto do8; /* No-op. */
+ }
+
+ do
+ {
+ do8:
+ a0 = ((op_t *) srcp)[0];
+ ((op_t *) dstp)[0] = a1;
+ do7:
+ a1 = ((op_t *) srcp)[1];
+ ((op_t *) dstp)[1] = a0;
+ do6:
+ a0 = ((op_t *) srcp)[2];
+ ((op_t *) dstp)[2] = a1;
+ do5:
+ a1 = ((op_t *) srcp)[3];
+ ((op_t *) dstp)[3] = a0;
+ do4:
+ a0 = ((op_t *) srcp)[4];
+ ((op_t *) dstp)[4] = a1;
+ do3:
+ a1 = ((op_t *) srcp)[5];
+ ((op_t *) dstp)[5] = a0;
+ do2:
+ a0 = ((op_t *) srcp)[6];
+ ((op_t *) dstp)[6] = a1;
+ do1:
+ a1 = ((op_t *) srcp)[7];
+ ((op_t *) dstp)[7] = a0;
+
+ srcp += 8 * OPSIZ;
+ dstp += 8 * OPSIZ;
+ len -= 8;
+ }
+ while (len != 0);
+
+ /* This is the right position for do0. Please don't move
+ it into the loop. */
+ do0:
+ ((op_t *) dstp)[0] = a1;
+}
+
+/* _wordcopy_fwd_dest_aligned -- Copy block beginning at SRCP to
+ block beginning at DSTP with LEN `op_t' words (not LEN bytes!).
+ DSTP should be aligned for memory operations on `op_t's, but SRCP must
+ *not* be aligned. */
+
+void
+_wordcopy_fwd_dest_aligned (dstp, srcp, len)
+ long int dstp;
+ long int srcp;
+ size_t len;
+{
+ op_t a0, a1, a2, a3;
+ int sh_1, sh_2;
+
+ /* Calculate how to shift a word read at the memory operation
+ aligned srcp to make it aligned for copy. */
+
+ sh_1 = 8 * (srcp % OPSIZ);
+ sh_2 = 8 * OPSIZ - sh_1;
+
+ /* Make SRCP aligned by rounding it down to the beginning of the `op_t'
+ it points in the middle of. */
+ srcp &= -OPSIZ;
+
+ switch (len % 4)
+ {
+ case 2:
+ a1 = ((op_t *) srcp)[0];
+ a2 = ((op_t *) srcp)[1];
+ srcp -= 1 * OPSIZ;
+ dstp -= 3 * OPSIZ;
+ len += 2;
+ goto do1;
+ case 3:
+ a0 = ((op_t *) srcp)[0];
+ a1 = ((op_t *) srcp)[1];
+ srcp -= 0 * OPSIZ;
+ dstp -= 2 * OPSIZ;
+ len += 1;
+ goto do2;
+ case 0:
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ return;
+ a3 = ((op_t *) srcp)[0];
+ a0 = ((op_t *) srcp)[1];
+ srcp -=-1 * OPSIZ;
+ dstp -= 1 * OPSIZ;
+ len += 0;
+ goto do3;
+ case 1:
+ a2 = ((op_t *) srcp)[0];
+ a3 = ((op_t *) srcp)[1];
+ srcp -=-2 * OPSIZ;
+ dstp -= 0 * OPSIZ;
+ len -= 1;
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ goto do0;
+ goto do4; /* No-op. */
+ }
+
+ do
+ {
+ do4:
+ a0 = ((op_t *) srcp)[0];
+ ((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2);
+ do3:
+ a1 = ((op_t *) srcp)[1];
+ ((op_t *) dstp)[1] = MERGE (a3, sh_1, a0, sh_2);
+ do2:
+ a2 = ((op_t *) srcp)[2];
+ ((op_t *) dstp)[2] = MERGE (a0, sh_1, a1, sh_2);
+ do1:
+ a3 = ((op_t *) srcp)[3];
+ ((op_t *) dstp)[3] = MERGE (a1, sh_1, a2, sh_2);
+
+ srcp += 4 * OPSIZ;
+ dstp += 4 * OPSIZ;
+ len -= 4;
+ }
+ while (len != 0);
+
+ /* This is the right position for do0. Please don't move
+ it into the loop. */
+ do0:
+ ((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2);
+}
+
+/* _wordcopy_bwd_aligned -- Copy block finishing right before
+ SRCP to block finishing right before DSTP with LEN `op_t' words
+ (not LEN bytes!). Both SRCP and DSTP should be aligned for memory
+ operations on `op_t's. */
+
+void
+_wordcopy_bwd_aligned (dstp, srcp, len)
+ long int dstp;
+ long int srcp;
+ size_t len;
+{
+ op_t a0, a1;
+
+ switch (len % 8)
+ {
+ case 2:
+ srcp -= 2 * OPSIZ;
+ dstp -= 1 * OPSIZ;
+ a0 = ((op_t *) srcp)[1];
+ len += 6;
+ goto do1;
+ case 3:
+ srcp -= 3 * OPSIZ;
+ dstp -= 2 * OPSIZ;
+ a1 = ((op_t *) srcp)[2];
+ len += 5;
+ goto do2;
+ case 4:
+ srcp -= 4 * OPSIZ;
+ dstp -= 3 * OPSIZ;
+ a0 = ((op_t *) srcp)[3];
+ len += 4;
+ goto do3;
+ case 5:
+ srcp -= 5 * OPSIZ;
+ dstp -= 4 * OPSIZ;
+ a1 = ((op_t *) srcp)[4];
+ len += 3;
+ goto do4;
+ case 6:
+ srcp -= 6 * OPSIZ;
+ dstp -= 5 * OPSIZ;
+ a0 = ((op_t *) srcp)[5];
+ len += 2;
+ goto do5;
+ case 7:
+ srcp -= 7 * OPSIZ;
+ dstp -= 6 * OPSIZ;
+ a1 = ((op_t *) srcp)[6];
+ len += 1;
+ goto do6;
+
+ case 0:
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ return;
+ srcp -= 8 * OPSIZ;
+ dstp -= 7 * OPSIZ;
+ a0 = ((op_t *) srcp)[7];
+ goto do7;
+ case 1:
+ srcp -= 9 * OPSIZ;
+ dstp -= 8 * OPSIZ;
+ a1 = ((op_t *) srcp)[8];
+ len -= 1;
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ goto do0;
+ goto do8; /* No-op. */
+ }
+
+ do
+ {
+ do8:
+ a0 = ((op_t *) srcp)[7];
+ ((op_t *) dstp)[7] = a1;
+ do7:
+ a1 = ((op_t *) srcp)[6];
+ ((op_t *) dstp)[6] = a0;
+ do6:
+ a0 = ((op_t *) srcp)[5];
+ ((op_t *) dstp)[5] = a1;
+ do5:
+ a1 = ((op_t *) srcp)[4];
+ ((op_t *) dstp)[4] = a0;
+ do4:
+ a0 = ((op_t *) srcp)[3];
+ ((op_t *) dstp)[3] = a1;
+ do3:
+ a1 = ((op_t *) srcp)[2];
+ ((op_t *) dstp)[2] = a0;
+ do2:
+ a0 = ((op_t *) srcp)[1];
+ ((op_t *) dstp)[1] = a1;
+ do1:
+ a1 = ((op_t *) srcp)[0];
+ ((op_t *) dstp)[0] = a0;
+
+ srcp -= 8 * OPSIZ;
+ dstp -= 8 * OPSIZ;
+ len -= 8;
+ }
+ while (len != 0);
+
+ /* This is the right position for do0. Please don't move
+ it into the loop. */
+ do0:
+ ((op_t *) dstp)[7] = a1;
+}
+
+/* _wordcopy_bwd_dest_aligned -- Copy block finishing right
+ before SRCP to block finishing right before DSTP with LEN `op_t'
+ words (not LEN bytes!). DSTP should be aligned for memory
+ operations on `op_t', but SRCP must *not* be aligned. */
+
+void
+_wordcopy_bwd_dest_aligned (dstp, srcp, len)
+ long int dstp;
+ long int srcp;
+ size_t len;
+{
+ op_t a0, a1, a2, a3;
+ int sh_1, sh_2;
+
+ /* Calculate how to shift a word read at the memory operation
+ aligned srcp to make it aligned for copy. */
+
+ sh_1 = 8 * (srcp % OPSIZ);
+ sh_2 = 8 * OPSIZ - sh_1;
+
+ /* Make srcp aligned by rounding it down to the beginning of the op_t
+ it points in the middle of. */
+ srcp &= -OPSIZ;
+ srcp += OPSIZ;
+
+ switch (len % 4)
+ {
+ case 2:
+ srcp -= 3 * OPSIZ;
+ dstp -= 1 * OPSIZ;
+ a2 = ((op_t *) srcp)[2];
+ a1 = ((op_t *) srcp)[1];
+ len += 2;
+ goto do1;
+ case 3:
+ srcp -= 4 * OPSIZ;
+ dstp -= 2 * OPSIZ;
+ a3 = ((op_t *) srcp)[3];
+ a2 = ((op_t *) srcp)[2];
+ len += 1;
+ goto do2;
+ case 0:
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ return;
+ srcp -= 5 * OPSIZ;
+ dstp -= 3 * OPSIZ;
+ a0 = ((op_t *) srcp)[4];
+ a3 = ((op_t *) srcp)[3];
+ goto do3;
+ case 1:
+ srcp -= 6 * OPSIZ;
+ dstp -= 4 * OPSIZ;
+ a1 = ((op_t *) srcp)[5];
+ a0 = ((op_t *) srcp)[4];
+ len -= 1;
+ if (OP_T_THRES <= 3 * OPSIZ && len == 0)
+ goto do0;
+ goto do4; /* No-op. */
+ }
+
+ do
+ {
+ do4:
+ a3 = ((op_t *) srcp)[3];
+ ((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2);
+ do3:
+ a2 = ((op_t *) srcp)[2];
+ ((op_t *) dstp)[2] = MERGE (a3, sh_1, a0, sh_2);
+ do2:
+ a1 = ((op_t *) srcp)[1];
+ ((op_t *) dstp)[1] = MERGE (a2, sh_1, a3, sh_2);
+ do1:
+ a0 = ((op_t *) srcp)[0];
+ ((op_t *) dstp)[0] = MERGE (a1, sh_1, a2, sh_2);
+
+ srcp -= 4 * OPSIZ;
+ dstp -= 4 * OPSIZ;
+ len -= 4;
+ }
+ while (len != 0);
+
+ /* This is the right position for do0. Please don't move
+ it into the loop. */
+ do0:
+ ((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2);
+}
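[Editorial note: MERGE is defined in memcopy.h, which is not reproduced in this hunk; on a little-endian machine it presumably reduces to taking the high part of the earlier word and the low part of the later one. A sketch of that shape, under that assumption.]

    #include <stdint.h>

    /* Rebuild one aligned destination word from two adjacent unaligned
       source words (little-endian); sh_1 + sh_2 == 8 * sizeof (word).
       Assumed to match memcopy.h's MERGE, which is not shown here.  */
    static uint32_t
    merge32 (uint32_t w0, int sh_1, uint32_t w1, int sh_2)
    {
      return (w0 >> sh_1) | (w1 << sh_2);
    }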
diff --git a/reference/glibc/.deps/memcpy.Po b/reference/glibc/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc/.deps/memset.Po b/reference/glibc/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc/.deps/strchr.Po b/reference/glibc/.deps/strchr.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc/.deps/strchr.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc/.deps/strlen.Po b/reference/glibc/.deps/strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/glibc/.deps/strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/glibc/memcpy.S b/reference/glibc/memcpy.S
new file mode 100644
index 000000000000..357a89aea4d3
--- /dev/null
+++ b/reference/glibc/memcpy.S
@@ -0,0 +1,229 @@
+/* Copyright (C) 2006, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ Contributed by MontaVista Software, Inc. (written by Nicolas Pitre)
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/*
+ * Data preload for architectures that support it (ARM V5TE and above)
+ */
+#if (!defined (__ARM_ARCH_2__) && !defined (__ARM_ARCH_3__) \
+ && !defined (__ARM_ARCH_3M__) && !defined (__ARM_ARCH_4__) \
+ && !defined (__ARM_ARCH_4T__) && !defined (__ARM_ARCH_5__) \
+ && !defined (__ARM_ARCH_5T__))
+#define PLD(code...) code
+#else
+#define PLD(code...)
+#endif
+
+/*
+ * This can be used to enable code to cacheline align the source pointer.
+ * Experiments on tested architectures (StrongARM and XScale) didn't show
+ * this to be a worthwhile thing to do. That might be different in the future.
+ */
+//#define CALGN(code...) code
+#define CALGN(code...)
+
+/*
+ * Endian independent macros for shifting bytes within registers.
+ */
+#ifndef __ARMEB__
+#define pull lsr
+#define push lsl
+#else
+#define pull lsl
+#define push lsr
+#endif
+
+ .text
+ .global memcpy
+ .type memcpy, %function
+
+/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+
+memcpy:
+
+ stmfd sp!, {r0, r4, lr}
+
+ subs r2, r2, #4
+ blt 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #0] )
+ bne 9f
+ ands ip, r1, #3
+ bne 10f
+
+1: subs r2, r2, #(28)
+ stmfd sp!, {r5 - r8}
+ blt 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb r3, ip, #32 )
+ CALGN( sbcnes r4, r3, r2 ) @ C is always set here
+ CALGN( bcs 2f )
+ CALGN( adr r4, 6f )
+ CALGN( subs r2, r2, r3 ) @ C gets set
+ CALGN( add pc, r4, ip )
+
+ PLD( pld [r1, #0] )
+2: PLD( subs r2, r2, #96 )
+ PLD( pld [r1, #28] )
+ PLD( blt 4f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+3: PLD( pld [r1, #124] )
+4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ bge 3b
+ PLD( cmn r2, #96 )
+ PLD( bge 4b )
+
+5: ands ip, r2, #28
+ rsb ip, ip, #32
+ addne pc, pc, ip @ C is always clear here
+ b 7f
+6: nop
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
+ ldr r6, [r1], #4
+ ldr r7, [r1], #4
+ ldr r8, [r1], #4
+ ldr lr, [r1], #4
+
+ add pc, pc, ip
+ nop
+ nop
+ str r3, [r0], #4
+ str r4, [r0], #4
+ str r5, [r0], #4
+ str r6, [r0], #4
+ str r7, [r0], #4
+ str r8, [r0], #4
+ str lr, [r0], #4
+
+ CALGN( bcs 2b )
+
+7: ldmfd sp!, {r5 - r8}
+
+8: movs r2, r2, lsl #31
+ ldrneb r3, [r1], #1
+ ldrcsb r4, [r1], #1
+ ldrcsb ip, [r1]
+ strneb r3, [r0], #1
+ strcsb r4, [r0], #1
+ strcsb ip, [r0]
+
+#if defined (__ARM_ARCH_4T__) && defined(__THUMB_INTERWORK__)
+ ldmfd sp!, {r0, r4, lr}
+ bx lr
+#else
+ ldmfd sp!, {r0, r4, pc}
+#endif
+
+9: rsb ip, ip, #4
+ cmp ip, #2
+ ldrgtb r3, [r1], #1
+ ldrgeb r4, [r1], #1
+ ldrb lr, [r1], #1
+ strgtb r3, [r0], #1
+ strgeb r4, [r0], #1
+ subs r2, r2, ip
+ strb lr, [r0], #1
+ blt 8b
+ ands ip, r1, #3
+ beq 1b
+
+10: bic r1, r1, #3
+ cmp ip, #2
+ ldr lr, [r1], #4
+ beq 17f
+ bgt 18f
+
+
+ .macro forward_copy_shift pull push
+
+ subs r2, r2, #28
+ blt 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+ CALGN( sbcnes r4, ip, r2 ) @ C is always set here
+ CALGN( subcc r2, r2, ip )
+ CALGN( bcc 15f )
+
+11: stmfd sp!, {r5 - r9}
+
+ PLD( pld [r1, #0] )
+ PLD( subs r2, r2, #96 )
+ PLD( pld [r1, #28] )
+ PLD( blt 13f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+12: PLD( pld [r1, #124] )
+13: ldmia r1!, {r4, r5, r6, r7}
+ mov r3, lr, pull #\pull
+ subs r2, r2, #32
+ ldmia r1!, {r8, r9, ip, lr}
+ orr r3, r3, r4, push #\push
+ mov r4, r4, pull #\pull
+ orr r4, r4, r5, push #\push
+ mov r5, r5, pull #\pull
+ orr r5, r5, r6, push #\push
+ mov r6, r6, pull #\pull
+ orr r6, r6, r7, push #\push
+ mov r7, r7, pull #\pull
+ orr r7, r7, r8, push #\push
+ mov r8, r8, pull #\pull
+ orr r8, r8, r9, push #\push
+ mov r9, r9, pull #\pull
+ orr r9, r9, ip, push #\push
+ mov ip, ip, pull #\pull
+ orr ip, ip, lr, push #\push
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r9, ip}
+ bge 12b
+ PLD( cmn r2, #96 )
+ PLD( bge 13b )
+
+ ldmfd sp!, {r5 - r9}
+
+14: ands ip, r2, #28
+ beq 16f
+
+15: mov r3, lr, pull #\pull
+ ldr lr, [r1], #4
+ subs ip, ip, #4
+ orr r3, r3, lr, push #\push
+ str r3, [r0], #4
+ bgt 15b
+ CALGN( cmp r2, #0 )
+ CALGN( bge 11b )
+
+16: sub r1, r1, #(\push / 8)
+ b 8b
+
+ .endm
+
+
+ forward_copy_shift pull=8 push=24
+
+17: forward_copy_shift pull=16 push=16
+
+18: forward_copy_shift pull=24 push=8
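[Editorial note: the residual copy at labels 5 and 6 above uses a computed jump (`add pc, pc, ip') to enter an unrolled load/store ladder partway through, so exactly the leftover words get moved with no loop. In C the same dispatch is a fall-through switch; a sketch of the idea, not the library routine.]

    /* Copy the 0..7 words left over after the 32-byte main loop. */
    static void
    copy_residual_words (long *dst, const long *src, int nwords)
    {
      switch (nwords)
        {
        case 7: *dst++ = *src++; /* fall through */
        case 6: *dst++ = *src++; /* fall through */
        case 5: *dst++ = *src++; /* fall through */
        case 4: *dst++ = *src++; /* fall through */
        case 3: *dst++ = *src++; /* fall through */
        case 2: *dst++ = *src++; /* fall through */
        case 1: *dst++ = *src++; /* fall through */
        case 0: break;
        }
    }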
diff --git a/reference/glibc/memset.S b/reference/glibc/memset.S
new file mode 100644
index 000000000000..51585f4b7395
--- /dev/null
+++ b/reference/glibc/memset.S
@@ -0,0 +1,64 @@
+/* Copyright (C) 1998, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* void *memset (dstpp, c, len) */
+ .text
+ .global memset
+ .type memset, %function
+
+memset:
+ mov r3, r0
+ cmp r2, #8
+ bcc 2f @ less than 8 bytes to move
+
+1:
+ tst r3, #3 @ aligned yet?
+ strneb r1, [r3], #1
+ subne r2, r2, #1
+ bne 1b
+
+ and r1, r1, #255 @ clear any sign bits
+ orr r1, r1, r1, lsl $8
+ orr r1, r1, r1, lsl $16
+ mov ip, r1
+
+1:
+ subs r2, r2, #8
+ stmcsia r3!, {r1, ip} @ store up to 32 bytes per loop iteration
+ subcss r2, r2, #8
+ stmcsia r3!, {r1, ip}
+ subcss r2, r2, #8
+ stmcsia r3!, {r1, ip}
+ subcss r2, r2, #8
+ stmcsia r3!, {r1, ip}
+ bcs 1b
+
+ and r2, r2, #7
+2:
+ subs r2, r2, #1 @ store up to 4 bytes per loop iteration
+ strcsb r1, [r3], #1
+ subcss r2, r2, #1
+ strcsb r1, [r3], #1
+ subcss r2, r2, #1
+ strcsb r1, [r3], #1
+ subcss r2, r2, #1
+ strcsb r1, [r3], #1
+ bcs 2b
+
+ bx lr
diff --git a/reference/glibc/strchr.S b/reference/glibc/strchr.S
new file mode 100644
index 000000000000..a09602714deb
--- /dev/null
+++ b/reference/glibc/strchr.S
@@ -0,0 +1,132 @@
+/* strchr -- find the first instance of C in a nul-terminated string.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define ARCH_HAS_T2
+
+ .syntax unified
+ .text
+ .global strchr
+ .type strchr,%function
+ .align 4
+
+strchr:
+ @ r0 = start of string
+ @ r1 = character to match
+ @ returns NULL for no match, or a pointer to the match
+ ldrb r2, [r0] @ load the first byte asap
+ uxtb r1, r1
+
+ @ To cater to long strings, we want to search through a few
+ @ characters until we reach an aligned pointer. To cater to
+ @ small strings, we don't want to start doing word operations
+ @ immediately. The compromise is a maximum of 16 bytes less
+ @ whatever is required to end with an aligned pointer.
+ @ r3 = number of characters to search in alignment loop
+ and r3, r0, #7
+ rsb r3, r3, #15 @ 16 - 1 peeled loop iteration
+ cmp r2, r1 @ Found C?
+ it ne
+ cmpne r2, #0 @ Found EOS?
+ beq 99f
+
+ @ Loop until we find ...
+1: ldrb r2, [r0, #1]!
+ subs r3, r3, #1 @ ... the alignment point
+ it ne
+ cmpne r2, r1 @ ... or the character
+ it ne
+ cmpne r2, #0 @ ... or EOS
+ bne 1b
+
+ @ Disambiguate the exit possibilities above
+ cmp r2, r1 @ Found the character
+ it ne
+ cmpne r2, #0 @ Found EOS
+ beq 99f
+ add r0, r0, #1
+
+ @ So now we're aligned. Now we actually need a stack frame.
+ push { r4, r5, r6, r7 }
+
+ ldrd r2, r3, [r0], #8
+ orr r1, r1, r1, lsl #8 @ Replicate C to all bytes
+#ifdef ARCH_HAS_T2
+ movw ip, #0x0101
+ pld [r0, #64]
+ movt ip, #0x0101
+#else
+ ldr ip, =0x01010101
+ pld [r0, #64]
+#endif
+ orr r1, r1, r1, lsl #16
+
+ @ Loop searching for EOS or C, 8 bytes at a time.
+2:
+ @ Subtracting each byte (unsigned, saturating) from 1 yields 1 for
+ @ any byte that was originally zero and 0 otherwise. Therefore
+ @ we consider the lsb of each byte the "found" bit.
+ uqsub8 r4, ip, r2 @ Find EOS
+ eor r6, r2, r1 @ Convert C bytes to 0
+ uqsub8 r5, ip, r3
+ eor r7, r3, r1
+ uqsub8 r6, ip, r6 @ Find C
+ pld [r0, #128] @ Prefetch 2 lines ahead
+ uqsub8 r7, ip, r7
+ orr r4, r4, r6 @ Combine found for EOS and C
+ orr r5, r5, r7
+ orrs r6, r4, r5 @ Combine the two words
+ it eq
+ ldrdeq r2, r3, [r0], #8
+ beq 2b
+
+ @ Found something. Disambiguate between first and second words.
+ @ Adjust r0 to point to the word containing the match.
+ @ Adjust r2 to the contents of the word containing the match.
+ @ Adjust r4 to the found bits for the word containing the match.
+ cmp r4, #0
+ sub r0, r0, #4
+ itte eq
+ moveq r4, r5
+ moveq r2, r3
+ subne r0, r0, #4
+
+ @ Find the bit-offset of the match within the word.
+#if defined(__ARMEL__)
+ @ For LE, swap the found word so clz searches from the little end.
+ rev r4, r4
+#else
+ @ For BE, byte swap the word to make it easier to extract the byte.
+ rev r2, r2
+#endif
+ @ We're counting 0x01 (not 0x80), so the bit offset is 7 too high.
+ clz r3, r4
+ sub r3, r3, #7
+ lsr r2, r2, r3 @ Shift down found byte
+ uxtb r1, r1 @ Undo replication of C
+ uxtb r2, r2 @ Extract found byte
+ add r0, r0, r3, lsr #3 @ Adjust the pointer to the found byte
+
+ pop { r4, r5, r6, r7 }
+
+ @ Disambiguate between EOS and C.
+99:
+ cmp r2, r1
+ it ne
+ movne r0, #0 @ Found EOS, return NULL
+ bx lr
+ .size strchr,.-strchr
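[Editorial note: uqsub8 does four independent unsigned saturating byte subtractions; with 0x01010101 as the minuend, a result byte is 1 exactly where the source byte was 0. A C model of that step — a demonstration, not the library code.]

    #include <stdint.h>

    /* Per-byte unsigned saturating subtract, as uqsub8 computes A - B. */
    static uint32_t
    uqsub8_model (uint32_t a, uint32_t b)
    {
      uint32_t r = 0;
      int i;

      for (i = 0; i < 4; i++)
        {
          int d = (int) ((a >> (8 * i)) & 0xff) - (int) ((b >> (8 * i)) & 0xff);
          r |= (uint32_t) (d < 0 ? 0 : d) << (8 * i);
        }
      return r;
    }

    /* Lsb of each byte of the result is set iff that byte of W is zero. */
    static uint32_t
    find_zero_bytes (uint32_t w)
    {
      return uqsub8_model (0x01010101U, w);
    }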
diff --git a/reference/glibc/strlen.S b/reference/glibc/strlen.S
new file mode 100644
index 000000000000..6b3ce0a008df
--- /dev/null
+++ b/reference/glibc/strlen.S
@@ -0,0 +1,99 @@
+/* strlen -- find the length of a nul-terminated string.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define ARCH_HAS_T2
+
+ .syntax unified
+ .text
+ .global strlen
+ .type strlen,%function
+ .align 4
+strlen:
+ @ r0 = start of string
+ ldrb r2, [r0] @ load the first byte asap
+
+ @ To cater to long strings, we want to search through a few
+ @ characters until we reach an aligned pointer. To cater to
+ @ small strings, we don't want to start doing word operations
+ @ immediately. The compromise is a maximum of 16 bytes less
+ @ whatever is required to end with an aligned pointer.
+ @ r3 = number of characters to search in alignment loop
+ and r3, r0, #7
+ mov r1, r0 @ Save the input pointer
+ rsb r3, r3, #15 @ 16 - 1 peeled loop iteration
+ cmp r2, #0
+ beq 99f
+
+ @ Loop until we find ...
+1:
+ ldrb r2, [r0, #1]!
+ subs r3, r3, #1 @ ... the alignment point
+ it ne
+ cmpne r2, #0 @ ... or EOS
+ bne 1b
+
+ @ Disambiguate the exit possibilities above
+ cmp r2, #0 @ Found EOS
+ beq 99f
+ add r0, r0, #1
+
+ @ So now we're aligned.
+ ldrd r2, r3, [r0], #8
+#ifdef ARCH_HAS_T2
+ movw ip, #0x0101
+ pld [r0, #64]
+ movt ip, #0x0101
+#else
+ ldr ip, =0x01010101
+ pld [r0, #64]
+#endif
+
+ @ Loop searching for EOS, 8 bytes at a time.
+ @ Subtracting each byte (unsigned, saturating) from 1 means that
+ @ we get 1 for any byte that was originally zero and 0 otherwise.
+ @ Therefore we consider the lsb of each byte the "found" bit.
+ .balign 16
+2: uqsub8 r2, ip, r2 @ Find EOS
+ uqsub8 r3, ip, r3
+ pld [r0, #128] @ Prefetch 2 lines ahead
+ orrs r3, r3, r2 @ Combine the two words
+ it eq
+ ldrdeq r2, r3, [r0], #8
+ beq 2b
+
+ @ Found something. Disambiguate between first and second words.
+ @ Adjust r0 to point to the word containing the match.
+ @ Adjust r2 to the found bits for the word containing the match.
+ cmp r2, #0
+ sub r0, r0, #4
+ ite eq
+ moveq r2, r3
+ subne r0, r0, #4
+
+ @ Find the bit-offset of the match within the word. Note that the
+ @ bit result from clz will be 7 higher than "true", but we'll
+ @ immediately discard those bits converting to a byte offset.
+#ifdef __ARMEL__
+ rev r2, r2 @ For LE, count from the little end
+#endif
+ clz r2, r2
+ add r0, r0, r2, lsr #3 @ Adjust the pointer to the found byte
+99:
+ sub r0, r0, r1 @ Subtract input to compute length
+ bx lr
+ .size strlen,.-strlen
diff --git a/reference/helpers/bounce.c b/reference/helpers/bounce.c
new file mode 100644
index 000000000000..f2d44b34859e
--- /dev/null
+++ b/reference/helpers/bounce.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+/** Just returns. Used to calibrate the loop overhead */
+void *bounce(void *dst0, const void *src0, size_t len0)
+{
+ return dst0;
+}
diff --git a/reference/helpers/spawn.c b/reference/helpers/spawn.c
new file mode 100644
index 000000000000..54b5e51fb555
--- /dev/null
+++ b/reference/helpers/spawn.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+/** Helper called from Python that calls a C function a certain number
+ of times.
+*/
+int spawniis(int (*fun)(int, int, size_t), int runs, int a, int b, size_t c)
+{
+ int result = 0; /* Initialized so the return is defined when runs == 0. */
+ int i;
+
+ for (i = 0; i != runs; i++)
+ {
+ result = fun(a, b, c);
+ }
+
+ return result;
+}
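[Editorial note: the harness is normally driven from Python through a foreign-function interface, but it can be exercised directly from C. A hypothetical callee and driver; `bench_stub' is illustrative, not part of the patch.]

    #include <stddef.h>

    static int
    bench_stub (int a, int b, size_t c)
    {
      return a + b + (int) c;
    }

    int
    main (void)
    {
      /* Run the stub 1000 times; spawniis returns the last result. */
      return spawniis (bench_stub, 1000, 1, 2, 64) == 67 ? 0 : 1;
    }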
diff --git a/reference/newlib-c/.deps/memchr.Po b/reference/newlib-c/.deps/memchr.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/memchr.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/memcmp.Po b/reference/newlib-c/.deps/memcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/memcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/memcpy.Po b/reference/newlib-c/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/memset.Po b/reference/newlib-c/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/strchr.Po b/reference/newlib-c/.deps/strchr.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/strchr.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/strcmp.Po b/reference/newlib-c/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/strcpy.Po b/reference/newlib-c/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/.deps/strlen.Po b/reference/newlib-c/.deps/strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-c/.deps/strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-c/memchr.c b/reference/newlib-c/memchr.c
new file mode 100644
index 000000000000..688f795adb40
--- /dev/null
+++ b/reference/newlib-c/memchr.c
@@ -0,0 +1,134 @@
+/*
+FUNCTION
+ <<memchr>>---find character in memory
+
+INDEX
+ memchr
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ void *memchr(const void *<[src]>, int <[c]>, size_t <[length]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ void *memchr(<[src]>, <[c]>, <[length]>)
+ void *<[src]>;
+ void *<[c]>;
+ size_t <[length]>;
+
+DESCRIPTION
+ This function searches memory starting at <<*<[src]>>> for the
+ character <[c]>. The search only ends with the first
+ occurrence of <[c]>, or after <[length]> characters; in
+ particular, <<NUL>> does not terminate the search.
+
+RETURNS
+ If the character <[c]> is found within <[length]> characters
+ of <<*<[src]>>>, a pointer to the character is returned. If
+ <[c]> is not found, then <<NULL>> is returned.
+
+PORTABILITY
+<<memchr>> is ANSI C.
+
+<<memchr>> requires no supporting OS subroutines.
+
+QUICKREF
+ memchr ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+#include <limits.h>
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X) ((long)X & (sizeof (long) - 1))
+
+/* How many bytes are loaded each iteration of the word copy loop. */
+#define LBLOCKSIZE (sizeof (long))
+
+/* Threshold for punting to the bytewise iterator. */
+#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE)
+
+#if LONG_MAX == 2147483647L
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
+#else
+#if LONG_MAX == 9223372036854775807L
+/* Nonzero if X (a long int) contains a NULL byte. */
+#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080)
+#else
+#error long int is not a 32bit or 64bit type.
+#endif
+#endif
+
+#ifndef DETECTNULL
+#error long int is not a 32bit or 64bit type.
+#endif
+
+/* DETECTCHAR returns nonzero if (long)X contains the byte used
+ to fill (long)MASK. */
+#define DETECTCHAR(X,MASK) (DETECTNULL(X ^ MASK))
+
+_PTR
+_DEFUN (memchr, (src_void, c, length),
+ _CONST _PTR src_void _AND
+ int c _AND
+ size_t length)
+{
+ _CONST unsigned char *src = (_CONST unsigned char *) src_void;
+ unsigned char d = c;
+
+#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__)
+ unsigned long *asrc;
+ unsigned long mask;
+ int i;
+
+ while (UNALIGNED (src))
+ {
+ if (!length--)
+ return NULL;
+ if (*src == d)
+ return (void *) src;
+ src++;
+ }
+
+ if (!TOO_SMALL (length))
+ {
+ /* If we get this far, we know that length is large and src is
+ word-aligned. */
+ /* The fast code reads the source one word at a time and only
+ performs the bytewise search on word-sized segments if they
+ contain the search character, which is detected by XORing
+ the word-sized segment with a word-sized block of the search
+ character and then detecting for the presence of NUL in the
+ result. */
+ asrc = (unsigned long *) src;
+ mask = d << 8 | d;
+ mask = mask << 16 | mask;
+ for (i = 32; i < LBLOCKSIZE * 8; i <<= 1)
+ mask = (mask << i) | mask;
+
+ while (length >= LBLOCKSIZE)
+ {
+ if (DETECTCHAR (*asrc, mask))
+ break;
+ length -= LBLOCKSIZE;
+ asrc++;
+ }
+
+ /* If there are fewer than LBLOCKSIZE characters left,
+ then we resort to the bytewise loop. */
+
+ src = (unsigned char *) asrc;
+ }
+
+#endif /* not PREFER_SIZE_OVER_SPEED */
+
+ while (length--)
+ {
+ if (*src == d)
+ return (void *) src;
+ src++;
+ }
+
+ return NULL;
+}
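[Editorial note: DETECTCHAR is just DETECTNULL applied to `X ^ MASK' — XORing a word with a mask of the target byte turns matching bytes into zero, so the zero detector doubles as a byte finder. The 32-bit case standalone, illustrative only.]

    #include <stdint.h>

    static int
    word_has_byte (uint32_t w, unsigned char d)
    {
      uint32_t mask = d * 0x01010101U;   /* replicate D into all four bytes */
      uint32_t x = w ^ mask;             /* bytes equal to D become zero    */
      return ((x - 0x01010101U) & ~x & 0x80808080U) != 0;
    }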
diff --git a/reference/newlib-c/memcmp.c b/reference/newlib-c/memcmp.c
new file mode 100644
index 000000000000..4f7ef1afd1b7
--- /dev/null
+++ b/reference/newlib-c/memcmp.c
@@ -0,0 +1,114 @@
+/*
+FUNCTION
+ <<memcmp>>---compare two memory areas
+
+INDEX
+ memcmp
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ int memcmp(const void *<[s1]>, const void *<[s2]>, size_t <[n]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ int memcmp(<[s1]>, <[s2]>, <[n]>)
+ void *<[s1]>;
+ void *<[s2]>;
+ size_t <[n]>;
+
+DESCRIPTION
+ This function compares not more than <[n]> characters of the
+ object pointed to by <[s1]> with the object pointed to by <[s2]>.
+
+
+RETURNS
+ The function returns an integer greater than, equal to or
+ less than zero according to whether the object pointed to by
+ <[s1]> is greater than, equal to or less than the object
+ pointed to by <[s2]>.
+
+PORTABILITY
+<<memcmp>> is ANSI C.
+
+<<memcmp>> requires no supporting OS subroutines.
+
+QUICKREF
+ memcmp ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X, Y) \
+ (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1)))
+
+/* How many bytes are copied each iteration of the word copy loop. */
+#define LBLOCKSIZE (sizeof (long))
+
+/* Threshold for punting to the byte copier. */
+#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE)
+
+int
+_DEFUN (memcmp, (m1, m2, n),
+ _CONST _PTR m1 _AND
+ _CONST _PTR m2 _AND
+ size_t n)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ unsigned char *s1 = (unsigned char *) m1;
+ unsigned char *s2 = (unsigned char *) m2;
+
+ while (n--)
+ {
+ if (*s1 != *s2)
+ {
+ return *s1 - *s2;
+ }
+ s1++;
+ s2++;
+ }
+ return 0;
+#else
+ unsigned char *s1 = (unsigned char *) m1;
+ unsigned char *s2 = (unsigned char *) m2;
+ unsigned long *a1;
+ unsigned long *a2;
+
+ /* If the size is too small, or either pointer is unaligned,
+ then we punt to the byte compare loop. Hopefully this will
+ not turn up in inner loops. */
+ if (!TOO_SMALL(n) && !UNALIGNED(s1,s2))
+ {
+ /* Otherwise, load and compare the blocks of memory one
+ word at a time. */
+ a1 = (unsigned long*) s1;
+ a2 = (unsigned long*) s2;
+ while (n >= LBLOCKSIZE)
+ {
+ if (*a1 != *a2)
+ break;
+ a1++;
+ a2++;
+ n -= LBLOCKSIZE;
+ }
+
+ /* Check n mod LBLOCKSIZE remaining characters. */
+
+ s1 = (unsigned char*)a1;
+ s2 = (unsigned char*)a2;
+ }
+
+ while (n--)
+ {
+ if (*s1 != *s2)
+ return *s1 - *s2;
+ s1++;
+ s2++;
+ }
+
+ return 0;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
+
diff --git a/reference/newlib-c/memcpy.c b/reference/newlib-c/memcpy.c
new file mode 100644
index 000000000000..d7556ce8ec78
--- /dev/null
+++ b/reference/newlib-c/memcpy.c
@@ -0,0 +1,110 @@
+/*
+FUNCTION
+ <<memcpy>>---copy memory regions
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ void* memcpy(void *<[out]>, const void *<[in]>, size_t <[n]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ void *memcpy(<[out]>, <[in]>, <[n]>)
+ void *<[out]>;
+ void *<[in]>;
+ size_t <[n]>;
+
+DESCRIPTION
+ This function copies <[n]> bytes from the memory region
+ pointed to by <[in]> to the memory region pointed to by
+ <[out]>.
+
+ If the regions overlap, the behavior is undefined.
+
+RETURNS
+ <<memcpy>> returns a pointer to the first byte of the <[out]>
+ region.
+
+PORTABILITY
+<<memcpy>> is ANSI C.
+
+<<memcpy>> requires no supporting OS subroutines.
+
+QUICKREF
+ memcpy ansi pure
+ */
+
+#include "shim.h"
+#include <string.h>
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X, Y) \
+ (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1)))
+
+/* How many bytes are copied each iteration of the 4X unrolled loop. */
+#define BIGBLOCKSIZE (sizeof (long) << 2)
+
+/* How many bytes are copied each iteration of the word copy loop. */
+#define LITTLEBLOCKSIZE (sizeof (long))
+
+/* Threshold for punting to the byte copier. */
+#define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE)
+
+_PTR
+_DEFUN (memcpy, (dst0, src0, len0),
+ _PTR dst0 _AND
+ _CONST _PTR src0 _AND
+ size_t len0)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ char *dst = (char *) dst0;
+ char *src = (char *) src0;
+
+ _PTR save = dst0;
+
+ while (len0--)
+ {
+ *dst++ = *src++;
+ }
+
+ return save;
+#else
+ char *dst = dst0;
+ _CONST char *src = src0;
+ long *aligned_dst;
+ _CONST long *aligned_src;
+
+ /* If the size is small, or either SRC or DST is unaligned,
+ then punt into the byte copy loop. This should be rare. */
+ if (!TOO_SMALL(len0) && !UNALIGNED (src, dst))
+ {
+ aligned_dst = (long*)dst;
+ aligned_src = (long*)src;
+
+ /* Copy 4X long words at a time if possible. */
+ while (len0 >= BIGBLOCKSIZE)
+ {
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ len0 -= BIGBLOCKSIZE;
+ }
+
+ /* Copy one long word at a time if possible. */
+ while (len0 >= LITTLEBLOCKSIZE)
+ {
+ *aligned_dst++ = *aligned_src++;
+ len0 -= LITTLEBLOCKSIZE;
+ }
+
+ /* Pick up any residual with a byte copier. */
+ dst = (char*)aligned_dst;
+ src = (char*)aligned_src;
+ }
+
+ while (len0--)
+ *dst++ = *src++;
+
+ return dst0;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/reference/newlib-c/memset.c b/reference/newlib-c/memset.c
new file mode 100644
index 000000000000..a47b166437fc
--- /dev/null
+++ b/reference/newlib-c/memset.c
@@ -0,0 +1,103 @@
+/*
+FUNCTION
+ <<memset>>---set an area of memory
+
+INDEX
+ memset
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ void *memset(void *<[dst]>, int <[c]>, size_t <[length]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ void *memset(<[dst]>, <[c]>, <[length]>)
+ void *<[dst]>;
+ int <[c]>;
+ size_t <[length]>;
+
+DESCRIPTION
+ This function converts the argument <[c]> into an unsigned
+ char and fills the first <[length]> characters of the array
+ pointed to by <[dst]> with that value.
+
+RETURNS
+ <<memset>> returns the value of <[dst]>.
+
+PORTABILITY
+<<memset>> is ANSI C.
+
+ <<memset>> requires no supporting OS subroutines.
+
+QUICKREF
+ memset ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+
+#define LBLOCKSIZE (sizeof(long))
+#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1))
+#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE)
+
+_PTR
+_DEFUN (memset, (m, c, n),
+ _PTR m _AND
+ int c _AND
+ size_t n)
+{
+ char *s = (char *) m;
+
+#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__)
+ int i;
+ unsigned long buffer;
+ unsigned long *aligned_addr;
+ unsigned int d = c & 0xff; /* To avoid sign extension, copy C to an
+ unsigned variable. */
+
+ while (UNALIGNED (s))
+ {
+ if (n--)
+ *s++ = (char) c;
+ else
+ return m;
+ }
+
+ if (!TOO_SMALL (n))
+ {
+ /* If we get this far, we know that n is large and s is word-aligned. */
+ aligned_addr = (unsigned long *) s;
+
+ /* Store D into each char sized location in BUFFER so that
+ we can set large blocks quickly. */
+ buffer = (d << 8) | d;
+ buffer |= (buffer << 16);
+ for (i = 32; i < LBLOCKSIZE * 8; i <<= 1)
+ buffer = (buffer << i) | buffer;
+
+ /* Unroll the loop. */
+ while (n >= LBLOCKSIZE*4)
+ {
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ n -= 4*LBLOCKSIZE;
+ }
+
+ while (n >= LBLOCKSIZE)
+ {
+ *aligned_addr++ = buffer;
+ n -= LBLOCKSIZE;
+ }
+ /* Pick up the remainder with a bytewise loop. */
+ s = (char*)aligned_addr;
+ }
+
+#endif /* not PREFER_SIZE_OVER_SPEED */
+
+ while (n--)
+ *s++ = (char) c;
+
+ return m;
+}
diff --git a/reference/newlib-c/shim.h b/reference/newlib-c/shim.h
new file mode 100644
index 000000000000..e265e9737f85
--- /dev/null
+++ b/reference/newlib-c/shim.h
@@ -0,0 +1,5 @@
+/* Basic macros that newlib uses */
+#define _PTR void *
+#define _DEFUN(_name, _args, _def) _name (_def)
+#define _CONST const
+#define _AND ,
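[Editorial note: for reference, here is what a newlib-style definition looks like once these macros expand; `demo' is an illustrative function, not part of the patch.]

    #include <stddef.h>

    /* Written in newlib's markup:
         _PTR _DEFUN (demo, (p, n), _CONST _PTR p _AND size_t n)
       the shim expands it to the plain ANSI definition:
         void * demo (const void * p , size_t n)                  */
    void *
    demo (const void *p, size_t n)
    {
      (void) n;
      return (void *) p;
    }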
diff --git a/reference/newlib-c/strchr.c b/reference/newlib-c/strchr.c
new file mode 100644
index 000000000000..a639e3dd6cdd
--- /dev/null
+++ b/reference/newlib-c/strchr.c
@@ -0,0 +1,126 @@
+/*
+FUNCTION
+ <<strchr>>---search for character in string
+
+INDEX
+ strchr
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ char * strchr(const char *<[string]>, int <[c]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ char * strchr(<[string]>, <[c]>);
+ const char *<[string]>;
+ int <[c]>;
+
+DESCRIPTION
+ This function finds the first occurrence of <[c]> (converted to
+ a char) in the string pointed to by <[string]> (including the
+ terminating null character).
+
+RETURNS
+ Returns a pointer to the located character, or a null pointer
+ if <[c]> does not occur in <[string]>.
+
+PORTABILITY
+<<strchr>> is ANSI C.
+
+<<strchr>> requires no supporting OS subroutines.
+
+QUICKREF
+ strchr ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+#include <limits.h>
+
+#undef strchr
+
+/* Nonzero if X is not aligned on a "long" boundary. */
+#define UNALIGNED(X) ((long)X & (sizeof (long) - 1))
+
+/* How many bytes are loaded each iteration of the word copy loop. */
+#define LBLOCKSIZE (sizeof (long))
+
+#if LONG_MAX == 2147483647L
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
+#else
+#if LONG_MAX == 9223372036854775807L
+/* Nonzero if X (a long int) contains a NULL byte. */
+#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080)
+#else
+#error long int is not a 32bit or 64bit type.
+#endif
+#endif
+
+/* DETECTCHAR returns nonzero if (long)X contains the byte used
+ to fill (long)MASK. */
+#define DETECTCHAR(X,MASK) (DETECTNULL(X ^ MASK))
+
+char *
+_DEFUN (strchr, (s1, i),
+ _CONST char *s1 _AND
+ int i)
+{
+ _CONST unsigned char *s = (_CONST unsigned char *)s1;
+ unsigned char c = i;
+
+#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__)
+ unsigned long mask,j;
+ unsigned long *aligned_addr;
+
+ /* Special case for finding 0. */
+ if (!c)
+ {
+ while (UNALIGNED (s))
+ {
+ if (!*s)
+ return (char *) s;
+ s++;
+ }
+ /* Operate a word at a time. */
+ aligned_addr = (unsigned long *) s;
+ while (!DETECTNULL (*aligned_addr))
+ aligned_addr++;
+ /* Found the end of string. */
+ s = (const unsigned char *) aligned_addr;
+ while (*s)
+ s++;
+ return (char *) s;
+ }
+
+ /* All other bytes. Align the pointer, then search a long at a time. */
+ while (UNALIGNED (s))
+ {
+ if (!*s)
+ return NULL;
+ if (*s == c)
+ return (char *) s;
+ s++;
+ }
+
+ mask = c;
+ for (j = 8; j < LBLOCKSIZE * 8; j <<= 1)
+ mask = (mask << j) | mask;
+
+ aligned_addr = (unsigned long *) s;
+ while (!DETECTNULL (*aligned_addr) && !DETECTCHAR (*aligned_addr, mask))
+ aligned_addr++;
+
+ /* The block of bytes currently pointed to by aligned_addr
+ contains either a null or the target char, or both. We
+ catch it using the bytewise search. */
+
+ s = (unsigned char *) aligned_addr;
+
+#endif /* not PREFER_SIZE_OVER_SPEED */
+
+ while (*s && *s != c)
+ s++;
+ if (*s == c)
+ return (char *)s;
+ return NULL;
+}
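
The DETECTNULL/DETECTCHAR arithmetic above also drives the strcmp.c, strcpy.c and strlen.c files that follow, so a worked check is useful here. A small self-contained C demonstration (a sketch for the 32-bit case only; detect_zero is an illustrative name, not part of the sources):

    #include <assert.h>
    #include <stdint.h>

    /* (X - 0x01010101) borrows out of a byte lane only where that lane is
       0x00 (or a borrow propagates into it), and "& ~X" clears the false
       positives where the lane's own high bit was already set. */
    static uint32_t detect_zero (uint32_t x)
    {
      return (x - 0x01010101u) & ~x & 0x80808080u;
    }

    int main (void)
    {
      assert (detect_zero (0x41424344u) == 0);   /* "DCBA": no zero byte  */
      assert (detect_zero (0x41004344u) != 0);   /* a zero byte in lane 2 */

      /* DETECTCHAR: XOR with the replicated target byte turns "contains
         'C' (0x43)" into "contains 0x00", then reuse the zero detector. */
      uint32_t mask = 0x43434343u;
      assert (detect_zero (0x41424344u ^ mask) != 0);  /* contains 'C' */
      assert (detect_zero (0x41414141u ^ mask) == 0);  /* does not     */
      return 0;
    }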
diff --git a/reference/newlib-c/strcmp.c b/reference/newlib-c/strcmp.c
new file mode 100644
index 000000000000..459841d341b6
--- /dev/null
+++ b/reference/newlib-c/strcmp.c
@@ -0,0 +1,109 @@
+/*
+FUNCTION
+ <<strcmp>>---character string compare
+
+INDEX
+ strcmp
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ int strcmp(const char *<[a]>, const char *<[b]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ int strcmp(<[a]>, <[b]>)
+ char *<[a]>;
+ char *<[b]>;
+
+DESCRIPTION
+ <<strcmp>> compares the string at <[a]> to
+ the string at <[b]>.
+
+RETURNS
+ If <<*<[a]>>> sorts lexicographically after <<*<[b]>>>,
+ <<strcmp>> returns a number greater than zero. If the two
+ strings match, <<strcmp>> returns zero. If <<*<[a]>>>
+ sorts lexicographically before <<*<[b]>>>, <<strcmp>> returns a
+ number less than zero.
+
+PORTABILITY
+<<strcmp>> is ANSI C.
+
+<<strcmp>> requires no supporting OS subroutines.
+
+QUICKREF
+ strcmp ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+#include <limits.h>
+
+#undef strcmp
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X, Y) \
+ (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1)))
+
+/* DETECTNULL returns nonzero if (long)X contains a NULL byte. */
+#if LONG_MAX == 2147483647L
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
+#else
+#if LONG_MAX == 9223372036854775807L
+#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080)
+#else
+#error long int is not a 32bit or 64bit type.
+#endif
+#endif
+
+#ifndef DETECTNULL
+#error long int is not a 32bit or 64bit type
+#endif
+
+int
+_DEFUN (strcmp, (s1, s2),
+ _CONST char *s1 _AND
+ _CONST char *s2)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ while (*s1 != '\0' && *s1 == *s2)
+ {
+ s1++;
+ s2++;
+ }
+
+ return (*(unsigned char *) s1) - (*(unsigned char *) s2);
+#else
+ unsigned long *a1;
+ unsigned long *a2;
+
+ /* If s1 or s2 are unaligned, then compare bytes. */
+ if (!UNALIGNED (s1, s2))
+ {
+ /* If s1 and s2 are word-aligned, compare them a word at a time. */
+ a1 = (unsigned long*)s1;
+ a2 = (unsigned long*)s2;
+ while (*a1 == *a2)
+ {
+ /* To get here, *a1 == *a2, thus if we find a null in *a1,
+ then the strings must be equal, so return zero. */
+ if (DETECTNULL (*a1))
+ return 0;
+
+ a1++;
+ a2++;
+ }
+
+      /* A difference was detected in the last few bytes of s1, so search bytewise. */
+ s1 = (char*)a1;
+ s2 = (char*)a2;
+ }
+
+ while (*s1 != '\0' && *s1 == *s2)
+ {
+ s1++;
+ s2++;
+ }
+ return (*(unsigned char *) s1) - (*(unsigned char *) s2);
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/reference/newlib-c/strcpy.c b/reference/newlib-c/strcpy.c
new file mode 100644
index 000000000000..ec69937b9c28
--- /dev/null
+++ b/reference/newlib-c/strcpy.c
@@ -0,0 +1,100 @@
+/*
+FUNCTION
+ <<strcpy>>---copy string
+
+INDEX
+ strcpy
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ char *strcpy(char *<[dst]>, const char *<[src]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ char *strcpy(<[dst]>, <[src]>)
+ char *<[dst]>;
+ char *<[src]>;
+
+DESCRIPTION
+ <<strcpy>> copies the string pointed to by <[src]>
+ (including the terminating null character) to the array
+ pointed to by <[dst]>.
+
+RETURNS
+ This function returns the initial value of <[dst]>.
+
+PORTABILITY
+<<strcpy>> is ANSI C.
+
+<<strcpy>> requires no supporting OS subroutines.
+
+QUICKREF
+ strcpy ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+#include <limits.h>
+
+/*SUPPRESS 560*/
+/*SUPPRESS 530*/
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X, Y) \
+ (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1)))
+
+#if LONG_MAX == 2147483647L
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
+#else
+#if LONG_MAX == 9223372036854775807L
+/* Nonzero if X (a long int) contains a NULL byte. */
+#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080)
+#else
+#error long int is not a 32bit or 64bit type.
+#endif
+#endif
+
+#ifndef DETECTNULL
+#error long int is not a 32bit or 64bit type
+#endif
+
+char*
+_DEFUN (strcpy, (dst0, src0),
+ char *dst0 _AND
+ _CONST char *src0)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ char *s = dst0;
+
+ while (*dst0++ = *src0++)
+ ;
+
+ return s;
+#else
+ char *dst = dst0;
+ _CONST char *src = src0;
+ long *aligned_dst;
+ _CONST long *aligned_src;
+
+ /* If SRC or DEST is unaligned, then copy bytes. */
+ if (!UNALIGNED (src, dst))
+ {
+ aligned_dst = (long*)dst;
+ aligned_src = (long*)src;
+
+      /* SRC and DEST are both "long int" aligned; try to do "long int"
+ sized copies. */
+ while (!DETECTNULL(*aligned_src))
+ {
+ *aligned_dst++ = *aligned_src++;
+ }
+
+ dst = (char*)aligned_dst;
+ src = (char*)aligned_src;
+ }
+
+ while ((*dst++ = *src++))
+ ;
+ return dst0;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/reference/newlib-c/strlen.c b/reference/newlib-c/strlen.c
new file mode 100644
index 000000000000..64efa282998b
--- /dev/null
+++ b/reference/newlib-c/strlen.c
@@ -0,0 +1,88 @@
+/*
+FUNCTION
+ <<strlen>>---character string length
+
+INDEX
+ strlen
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ size_t strlen(const char *<[str]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ size_t strlen(<[str]>)
+	char *<[str]>;
+
+DESCRIPTION
+ The <<strlen>> function works out the length of the string
+	starting at <<*<[str]>>> by counting characters until it
+ reaches a <<NULL>> character.
+
+RETURNS
+ <<strlen>> returns the character count.
+
+PORTABILITY
+<<strlen>> is ANSI C.
+
+<<strlen>> requires no supporting OS subroutines.
+
+QUICKREF
+ strlen ansi pure
+*/
+
+#include "shim.h"
+#include <string.h>
+#include <limits.h>
+
+#define LBLOCKSIZE (sizeof (long))
+#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1))
+
+#if LONG_MAX == 2147483647L
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
+#else
+#if LONG_MAX == 9223372036854775807L
+/* Nonzero if X (a long int) contains a NULL byte. */
+#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080)
+#else
+#error long int is not a 32bit or 64bit type.
+#endif
+#endif
+
+#ifndef DETECTNULL
+#error long int is not a 32bit or 64bit type
+#endif
+
+size_t
+_DEFUN (strlen, (str),
+ _CONST char *str)
+{
+ _CONST char *start = str;
+
+#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__)
+ unsigned long *aligned_addr;
+
+ /* Align the pointer, so we can search a word at a time. */
+ while (UNALIGNED (str))
+ {
+ if (!*str)
+ return str - start;
+ str++;
+ }
+
+ /* If the string is word-aligned, we can check for the presence of
+ a null in each word-sized block. */
+ aligned_addr = (unsigned long *)str;
+ while (!DETECTNULL (*aligned_addr))
+ aligned_addr++;
+
+ /* Once a null is detected, we check each byte in that block for a
+ precise position of the null. */
+ str = (char *) aligned_addr;
+
+#endif /* not PREFER_SIZE_OVER_SPEED */
+
+ while (*str)
+ str++;
+ return str - start;
+}
diff --git a/reference/newlib-xscale/.deps/memchr.Po b/reference/newlib-xscale/.deps/memchr.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/memchr.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/.deps/memcpy.Po b/reference/newlib-xscale/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/.deps/memset.Po b/reference/newlib-xscale/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/.deps/strchr.Po b/reference/newlib-xscale/.deps/strchr.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/strchr.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/.deps/strcmp.Po b/reference/newlib-xscale/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/.deps/strcpy.Po b/reference/newlib-xscale/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/.deps/strlen.Po b/reference/newlib-xscale/.deps/strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib-xscale/.deps/strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib-xscale/memchr.c b/reference/newlib-xscale/memchr.c
new file mode 100644
index 000000000000..d574efb9cee5
--- /dev/null
+++ b/reference/newlib-xscale/memchr.c
@@ -0,0 +1,95 @@
+#include <string.h>
+#include "xscale.h"
+
+void *
+memchr (const void *start, int c, size_t len)
+{
+ const char *str = start;
+
+ if (len == 0)
+ return 0;
+
+ asm (PRELOADSTR ("%0") : : "r" (start));
+
+ c &= 0xff;
+
+#ifndef __OPTIMIZE_SIZE__
+ /* Skip unaligned part. */
+ if ((long)str & 3)
+ {
+ str--;
+ do
+ {
+ if (*++str == c)
+ return (void *)str;
+ }
+ while (((long)str & 3) != 0 && --len > 0);
+ }
+
+ if (len > 3)
+ {
+ unsigned int c2 = c + (c << 8);
+ c2 += c2 << 16;
+
+ /* Load two constants:
+ R7 = 0xfefefeff [ == ~(0x80808080 << 1) ]
+ R6 = 0x80808080 */
+
+ asm (
+ "mov r6, #0x80\n\
+ add r6, r6, #0x8000\n\
+ add r6, r6, r6, lsl #16\n\
+ mvn r7, r6, lsl #1\n\
+\n\
+0:\n\
+ cmp %1, #0x7\n\
+ bls 1f\n\
+\n\
+ ldmia %0!, { r3, r9 }\n\
+" PRELOADSTR ("%0") "\n\
+ sub %1, %1, #8\n\
+ eor r3, r3, %2\n\
+ eor r9, r9, %2\n\
+ add r2, r3, r7\n\
+ add r8, r9, r7\n\
+ bic r2, r2, r3\n\
+ bic r8, r8, r9\n\
+ and r1, r2, r6\n\
+ and r9, r8, r6\n\
+ orrs r1, r1, r9\n\
+ beq 0b\n\
+\n\
+ add %1, %1, #8\n\
+ sub %0, %0, #8\n\
+1:\n\
+ cmp %1, #0x3\n\
+ bls 2f\n\
+\n\
+ ldr r3, [%0], #4\n\
+" PRELOADSTR ("%0") "\n\
+ sub %1, %1, #4\n\
+ eor r3, r3, %2\n\
+ add r2, r3, r7\n\
+ bic r2, r2, r3\n\
+ ands r1, r2, r6\n\
+ beq 1b\n\
+\n\
+ sub %0, %0, #4\n\
+ add %1, %1, #4\n\
+2:\n\
+"
+ : "=&r" (str), "=&r" (len)
+ : "r" (c2), "0" (str), "1" (len)
+ : "r1", "r2", "r3", "r6", "r7", "r8", "r9", "cc");
+ }
+#endif
+
+ while (len-- > 0)
+ {
+ if (*str == c)
+ return (void *)str;
+ str++;
+ }
+
+ return 0;
+}
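
Two things make the loop above work: the additions c2 = c + (c << 8); c2 += c2 << 16; broadcast the target byte into all four byte lanes, and XOR-ing each loaded word with c2 converts "does this word contain byte c?" into "does this word contain a zero byte?", which the 0xfefefeff/0x80808080 constants then answer exactly like the DETECTNULL macro demonstrated after newlib-c/strchr.c earlier in this diff. A portable C rendering of the inner word loop (a sketch; assumes p is word-aligned and unsigned is 32 bits):

    #include <stddef.h>

    /* Advance p to the word holding the first occurrence of byte c, or to
       within 3 bytes of the end; the caller finishes byte by byte, just
       like the tail loop of the assembly version. */
    static const unsigned char *word_scan (const unsigned char *p,
                                           unsigned c, size_t len)
    {
      unsigned c2 = c + (c << 8);   /* 0x000000cc -> 0x0000cccc */
      c2 += c2 << 16;               /* 0x0000cccc -> 0xcccccccc */

      while (len >= 4)
        {
          unsigned w = *(const unsigned *) p ^ c2;  /* match becomes 0x00 */
          if ((w - 0x01010101u) & ~w & 0x80808080u)
            break;                  /* some lane in this word matched c */
          p += 4;
          len -= 4;
        }
      return p;
    }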
diff --git a/reference/newlib-xscale/memcpy.c b/reference/newlib-xscale/memcpy.c
new file mode 100644
index 000000000000..434c9147012e
--- /dev/null
+++ b/reference/newlib-xscale/memcpy.c
@@ -0,0 +1,94 @@
+#include <string.h>
+#include "xscale.h"
+
+void *
+memcpy (void *dst0, const void *src0, size_t len)
+{
+ int dummy;
+ asm volatile (
+#ifndef __OPTIMIZE_SIZE__
+ "cmp %2, #0x3\n\
+ bls 3f\n\
+ and lr, %1, #0x3\n\
+ and r3, %0, #0x3\n\
+ cmp lr, r3\n\
+ bne 3f\n\
+ cmp lr, #0x0\n\
+ beq 2f\n\
+ b 1f\n\
+0:\n\
+ ldrb r3, [%1], #1\n\
+"
+ PRELOADSTR ("%1")
+"\n\
+ tst %1, #0x3\n\
+ strb r3, [%0], #1\n\
+ beq 3f\n\
+1:\n\
+ sub %2, %2, #1\n\
+ cmn %2, #1\n\
+ bne 0b\n\
+2:\n\
+ cmp %2, #0xf\n\
+ bls 1f\n\
+0:\n\
+ ldmia %1!, { r3, r4, r5, lr }\n\
+"
+ PRELOADSTR ("%1")
+"\n\
+\n\
+ sub %2, %2, #16\n\
+ cmp %2, #0xf\n\
+ stmia %0!, { r3, r4, r5, lr }\n\
+ bhi 0b\n\
+1:\n\
+ cmp %2, #0x7\n\
+ bls 1f\n\
+0:\n\
+ ldmia %1!, { r3, r4 }\n\
+"
+ PRELOADSTR ("%1")
+"\n\
+\n\
+ sub %2, %2, #8\n\
+ cmp %2, #0x7\n\
+ stmia %0!, { r3, r4 }\n\
+ bhi 0b\n\
+1:\n\
+ cmp %2, #0x3\n\
+ bls 3f\n\
+0:\n\
+ sub %2, %2, #4\n\
+ ldr r3, [%1], #4\n\
+"
+ PRELOADSTR ("%1")
+"\n\
+\n\
+ cmp %2, #0x3\n\
+ str r3, [%0], #4\n\
+ bhi 0b\n\
+"
+#endif /* !__OPTIMIZE_SIZE__ */
+"\n\
+3:\n\
+"
+ PRELOADSTR ("%1")
+"\n\
+ sub %2, %2, #1\n\
+ cmn %2, #1\n\
+ beq 1f\n\
+0:\n\
+ sub %2, %2, #1\n\
+ ldrb r3, [%1], #1\n\
+"
+ PRELOADSTR ("%1")
+"\n\
+ cmn %2, #1\n\
+ strb r3, [%0], #1\n\
+ bne 0b\n\
+1:"
+ : "=&r" (dummy), "=&r" (src0), "=&r" (len)
+ : "0" (dst0), "1" (src0), "2" (len)
+ : "memory", "lr", "r3", "r4", "r5", "cc");
+ return dst0;
+}
diff --git a/reference/newlib-xscale/memset.c b/reference/newlib-xscale/memset.c
new file mode 100644
index 000000000000..3ff9b9945b0f
--- /dev/null
+++ b/reference/newlib-xscale/memset.c
@@ -0,0 +1,81 @@
+#include <string.h>
+#include "xscale.h"
+
+void *
+memset (void *dst, int c, size_t len)
+{
+ int dummy;
+
+ asm volatile ("tst %0, #0x3"
+#ifndef __OPTIMIZE_SIZE__
+"\n\
+ beq 1f\n\
+ b 2f\n\
+0:\n\
+ strb %1, [%0], #1\n\
+ tst %0, #0x3\n\
+ beq 1f\n\
+2:\n\
+ movs r3, %2\n\
+ sub %2, %2, #1\n\
+ bne 0b\n\
+# At this point we know that %2 == len == -1 (since the SUB has already taken\n\
+# place). If we fall through to the 1: label (as the code used to do), the\n\
+# CMP will detect this negative value and branch to the 2: label. This will\n\
+# test %2 again, but this time against 0. The test will fail and the loop\n\
+# at 2: will go on for (almost) ever. Hence the explicit branch to the end\n\
+# of the hand written assembly code.\n\
+ b 4f\n\
+1:\n\
+ cmp %2, #0x3\n\
+ bls 2f\n\
+ and %1, %1, #0xff\n\
+ orr lr, %1, %1, asl #8\n\
+ cmp %2, #0xf\n\
+ orr lr, lr, lr, asl #16\n\
+ bls 1f\n\
+ mov r3, lr\n\
+ mov r4, lr\n\
+ mov r5, lr\n\
+0:\n\
+ sub %2, %2, #16\n\
+ stmia %0!, { r3, r4, r5, lr }\n\
+ cmp %2, #0xf\n\
+ bhi 0b\n\
+1:\n\
+ cmp %2, #0x7\n\
+ bls 1f\n\
+ mov r3, lr\n\
+0:\n\
+ sub %2, %2, #8\n\
+ stmia %0!, { r3, lr }\n\
+ cmp %2, #0x7\n\
+ bhi 0b\n\
+1:\n\
+ cmp %2, #0x3\n\
+ bls 2f\n\
+0:\n\
+ sub %2, %2, #4\n\
+ str lr, [%0], #4\n\
+ cmp %2, #0x3\n\
+ bhi 0b\n\
+"
+#endif /* !__OPTIMIZE_SIZE__ */
+"\n\
+2:\n\
+ movs r3, %2\n\
+ sub %2, %2, #1\n\
+ beq 4f\n\
+0:\n\
+ movs r3, %2\n\
+ sub %2, %2, #1\n\
+ strb %1, [%0], #1\n\
+ bne 0b\n\
+4:"
+
+ : "=&r" (dummy), "=&r" (c), "=&r" (len)
+ : "0" (dst), "1" (c), "2" (len)
+ : "memory", "r3", "r4", "r5", "lr");
+
+ return dst;
+}
diff --git a/reference/newlib-xscale/strchr.c b/reference/newlib-xscale/strchr.c
new file mode 100644
index 000000000000..73bfec5d6bcb
--- /dev/null
+++ b/reference/newlib-xscale/strchr.c
@@ -0,0 +1,66 @@
+#include <string.h>
+#include "xscale.h"
+#undef strchr
+
+char *
+strchr (const char *s, int c)
+{
+ unsigned int c2;
+ asm (PRELOADSTR ("%0") : : "r" (s));
+
+ c &= 0xff;
+
+#ifndef __OPTIMIZE_SIZE__
+ /* Skip unaligned part. */
+ if ((long)s & 3)
+ {
+ s--;
+ do
+ {
+ int c2 = *++s;
+ if (c2 == c)
+ return (char *)s;
+ if (c2 == '\0')
+ return 0;
+ }
+ while (((long)s & 3) != 0);
+ }
+
+ c2 = c + (c << 8);
+ c2 += c2 << 16;
+
+ /* Load two constants:
+ R6 = 0xfefefeff [ == ~(0x80808080 << 1) ]
+ R5 = 0x80808080 */
+
+ asm (PRELOADSTR ("%0") "\n\
+ mov r5, #0x80\n\
+ add r5, r5, #0x8000\n\
+ add r5, r5, r5, lsl #16\n\
+ mvn r6, r5, lsl #1\n\
+\n\
+ sub %0, %0, #4\n\
+0:\n\
+ ldr r1, [%0, #4]!\n\
+" PRELOADSTR ("%0") "\n\
+ add r3, r1, r6\n\
+ bic r3, r3, r1\n\
+ ands r2, r3, r5\n\
+ bne 1f\n\
+ eor r2, r1, %1\n\
+ add r3, r2, r6\n\
+ bic r3, r3, r2\n\
+ ands r1, r3, r5\n\
+ beq 0b\n\
+1:"
+ : "=&r" (s)
+ : "r" (c2), "0" (s)
+ : "r1", "r2", "r3", "r5", "r6", "cc");
+#endif
+
+ while (*s && *s != c)
+ s++;
+ if (*s == c)
+ return (char *)s;
+ return NULL;
+}
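
A note on the constants loaded above: 0xfefefeff is ~(0x80808080 << 1), which is also the two's-complement negation of 0x01010101. The ADD/BIC/ANDS sequence therefore computes exactly the same zero-byte syndrome as the (X - 0x01010101) & ~X & 0x80808080 form in the C reference files; the assembly prefers an ADD because the negated constant derives from the already-built 0x80808080 with a single MVN. A one-line sanity check (sketch):

    #include <assert.h>
    #include <stdint.h>

    int main (void)
    {
      /* 0xfefefeff == ~(0x80808080 << 1) == (uint32_t) -0x01010101 */
      assert (0xfefefeffu == ~(0x80808080u << 1));
      assert (0xfefefeffu == (uint32_t) 0u - 0x01010101u);
      return 0;
    }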
diff --git a/reference/newlib-xscale/strcmp.c b/reference/newlib-xscale/strcmp.c
new file mode 100644
index 000000000000..086d7ffd0467
--- /dev/null
+++ b/reference/newlib-xscale/strcmp.c
@@ -0,0 +1,100 @@
+#include <string.h>
+#include "xscale.h"
+#undef strcmp
+
+int
+strcmp (const char *s1, const char *s2)
+{
+ asm (PRELOADSTR ("%0") : : "r" (s1));
+ asm (PRELOADSTR ("%0") : : "r" (s2));
+
+#ifndef __OPTIMIZE_SIZE__
+ if (((long)s1 & 3) == ((long)s2 & 3))
+ {
+ int result;
+
+ /* Skip unaligned part. */
+ while ((long)s1 & 3)
+ {
+ if (*s1 == '\0' || *s1 != *s2)
+ goto out;
+ s1++;
+ s2++;
+ }
+
+ /* Load two constants:
+ lr = 0xfefefeff [ == ~(0x80808080 << 1) ]
+ ip = 0x80808080 */
+
+ asm (
+ "ldr r2, [%1, #0]\n\
+ ldr r3, [%2, #0]\n\
+ cmp r2, r3\n\
+ bne 2f\n\
+\n\
+ mov ip, #0x80\n\
+ add ip, ip, #0x8000\n\
+ add ip, ip, ip, lsl #16\n\
+ mvn lr, ip, lsl #1\n\
+\n\
+0:\n\
+ ldr r2, [%1, #0]\n\
+ add r3, r2, lr\n\
+ bic r3, r3, r2\n\
+ tst r3, ip\n\
+ beq 1f\n\
+ mov %0, #0x0\n\
+ b 3f\n\
+1:\n\
+ ldr r2, [%1, #4]!\n\
+ ldr r3, [%2, #4]!\n\
+" PRELOADSTR("%1") "\n\
+" PRELOADSTR("%2") "\n\
+ cmp r2, r3\n\
+ beq 0b"
+
+ /* The following part could be done in a C loop as well, but it needs
+ to be assembler to save some cycles in the case where the optimized
+ loop above finds the strings to be equal. */
+"\n\
+2:\n\
+ ldrb r2, [%1, #0]\n\
+" PRELOADSTR("%1") "\n\
+" PRELOADSTR("%2") "\n\
+ cmp r2, #0x0\n\
+ beq 1f\n\
+ ldrb r3, [%2, #0]\n\
+ cmp r2, r3\n\
+ bne 1f\n\
+0:\n\
+ ldrb r3, [%1, #1]!\n\
+ add %2, %2, #1\n\
+ ands ip, r3, #0xff\n\
+ beq 1f\n\
+ ldrb r3, [%2]\n\
+ cmp ip, r3\n\
+ beq 0b\n\
+1:\n\
+ ldrb lr, [%1, #0]\n\
+ ldrb ip, [%2, #0]\n\
+ rsb %0, ip, lr\n\
+3:\n\
+"
+
+ : "=r" (result), "=&r" (s1), "=&r" (s2)
+ : "1" (s1), "2" (s2)
+ : "lr", "ip", "r2", "r3", "cc");
+ return result;
+ }
+#endif
+
+ while (*s1 != '\0' && *s1 == *s2)
+ {
+ asm (PRELOADSTR("%0") : : "r" (s1));
+ asm (PRELOADSTR("%0") : : "r" (s2));
+ s1++;
+ s2++;
+ }
+ out:
+ return (*(unsigned char *) s1) - (*(unsigned char *) s2);
+}
diff --git a/reference/newlib-xscale/strcpy.c b/reference/newlib-xscale/strcpy.c
new file mode 100644
index 000000000000..325fa37e6877
--- /dev/null
+++ b/reference/newlib-xscale/strcpy.c
@@ -0,0 +1,55 @@
+#include <string.h>
+#include "xscale.h"
+#undef strcpy
+
+char *
+strcpy (char *dest, const char *src)
+{
+ char *dest0 = dest;
+
+ asm (PRELOADSTR ("%0") : : "r" (src));
+
+#ifndef __OPTIMIZE_SIZE__
+ if (((long)src & 3) == ((long)dest & 3))
+ {
+ /* Skip unaligned part. */
+ while ((long)src & 3)
+ {
+ if (! (*dest++ = *src++))
+ return dest0;
+ }
+
+ /* Load two constants:
+ R4 = 0xfefefeff [ == ~(0x80808080 << 1) ]
+ R5 = 0x80808080 */
+
+ asm ("mov r5, #0x80\n\
+ ldr r1, [%1, #0]\n\
+ add r5, r5, #0x8000\n\
+ add r5, r5, r5, lsl #16\n\
+ mvn r4, r5, lsl #1\n\
+\n\
+ add r3, r1, r5\n\
+ bic r3, r3, r1\n\
+ ands r2, r3, r4\n\
+ bne 1f\n\
+0:\n\
+ ldr r3, [%1, #0]\n\
+ ldr r1, [%1, #4]!\n\
+" PRELOADSTR("%1") "\n\
+ str r3, [%0], #4\n\
+ add r2, r1, r4\n\
+ bic r2, r2, r1\n\
+ ands r3, r2, r5\n\
+ beq 0b\n\
+1:"
+ : "=&r" (dest), "=&r" (src)
+ : "0" (dest), "1" (src)
+ : "r1", "r2", "r3", "r4", "r5", "memory", "cc");
+ }
+#endif
+
+ while (*dest++ = *src++)
+ asm (PRELOADSTR ("%0") : : "r" (src));
+ return dest0;
+}
diff --git a/reference/newlib-xscale/strlen.c b/reference/newlib-xscale/strlen.c
new file mode 100644
index 000000000000..cac958abb375
--- /dev/null
+++ b/reference/newlib-xscale/strlen.c
@@ -0,0 +1,94 @@
+#include <string.h>
+#include "xscale.h"
+#define _CONST const
+
+size_t
+strlen (const char *str)
+{
+ _CONST char *start = str;
+
+ /* Skip unaligned part. */
+ if ((long)str & 3)
+ {
+ str--;
+ do
+ {
+ if (*++str == '\0')
+ goto out;
+ }
+ while ((long)str & 3);
+ }
+
+ /* Load two constants:
+ R4 = 0xfefefeff [ == ~(0x80808080 << 1) ]
+ R5 = 0x80808080 */
+
+ asm ("mov r5, #0x80\n\
+ add r5, r5, #0x8000\n\
+ add r5, r5, r5, lsl #16\n\
+ mvn r4, r5, lsl #1\n\
+"
+
+#if defined __ARM_ARCH_5__ || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5E__ || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_7A__
+
+" tst %0, #0x7\n\
+ itt eq\n\
+ ldreqd r6, [%0]\n\
+ beq 1f\n\
+ ldr r2, [%0]\n\
+ add r3, r2, r4\n\
+ bic r3, r3, r2\n\
+ ands r2, r3, r5\n\
+ bne 2f\n\
+ sub %0, %0, #4\n\
+\n\
+0:\n\
+ ldrd r6, [%0, #8]!\n\
+"
+ PRELOADSTR ("%0")
+"\n\
+1:\n\
+ add r3, r6, r4\n\
+ add r2, r7, r4\n\
+ bic r3, r3, r6\n\
+ bic r2, r2, r7\n\
+ and r3, r3, r5\n\
+ and r2, r2, r5\n\
+ orrs r3, r2, r3\n\
+ beq 0b\n\
+"
+#else
+
+" sub %0, %0, #4\n\
+\n\
+0:\n\
+ ldr r6, [%0, #4]!\n\
+"
+ PRELOADSTR ("%0")
+"\n\
+ add r3, r6, r4\n\
+ bic r3, r3, r6\n\
+ ands r3, r3, r5\n\
+ beq 0b\n\
+"
+#endif /* __ARM_ARCH_5[T][E]__ */
+"\n\
+2:\n\
+ ldrb r3, [%0]\n\
+ cmp r3, #0x0\n\
+ beq 1f\n\
+\n\
+0:\n\
+ ldrb r3, [%0, #1]!\n\
+"
+ PRELOADSTR ("%0")
+"\n\
+ cmp r3, #0x0\n\
+ bne 0b\n\
+1:\n\
+"
+ : "=r" (str) : "0" (str) : "r2", "r3", "r4", "r5", "r6", "r7");
+
+ out:
+ return str - start;
+}
diff --git a/reference/newlib-xscale/xscale.h b/reference/newlib-xscale/xscale.h
new file mode 100644
index 000000000000..90fb211606d1
--- /dev/null
+++ b/reference/newlib-xscale/xscale.h
@@ -0,0 +1,26 @@
+#ifndef __XSCALE_MACH_H__
+#define __XSCALE_MACH_H__
+
+/* These are predefined by new versions of GNU cpp. */
+
+#ifndef __USER_LABEL_PREFIX__
+#define __USER_LABEL_PREFIX__ _
+#endif
+
+#ifndef __REGISTER_PREFIX__
+#define __REGISTER_PREFIX__
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a##b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1(__USER_LABEL_PREFIX__, x)
+
+#define PRELOAD(X) pld [X]
+#define PRELOADSTR(X) " pld [" X "]"
+
+#endif /* !__XSCALE_MACH_H__ */
diff --git a/reference/newlib/.deps/memcpy.Po b/reference/newlib/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib/.deps/strcmp.Po b/reference/newlib/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib/.deps/strcpy.Po b/reference/newlib/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib/.deps/strlen.Po b/reference/newlib/.deps/strlen.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/newlib/.deps/strlen.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/newlib/arm_asm.h b/reference/newlib/arm_asm.h
new file mode 100644
index 000000000000..5a63a8d9e217
--- /dev/null
+++ b/reference/newlib/arm_asm.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2009 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM_ASM__H
+#define ARM_ASM__H
+
+/* First define some macros that keep everything else sane. */
+#if defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__)
+#define _ISA_ARM_7
+#endif
+
+#if defined (_ISA_ARM_7) || defined (__ARM_ARCH_6__) || \
+ defined (__ARM_ARCH_6J__) || defined (__ARM_ARCH_6T2__) || \
+ defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6ZK__) || \
+ defined (__ARM_ARCH_6Z__)
+#define _ISA_ARM_6
+#endif
+
+#if defined (_ISA_ARM_6) || defined (__ARM_ARCH_5__) || \
+ defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5TE__) || \
+ defined (__ARM_ARCH_5TEJ__)
+#define _ISA_ARM_5
+#endif
+
+#if defined (_ISA_ARM_5) || defined (__ARM_ARCH_4T__)
+#define _ISA_ARM_4T
+#endif
+
+#if defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7__) || \
+ defined (__ARM_ARCH_7EM__)
+#define _ISA_THUMB_2
+#endif
+
+#if defined (_ISA_THUMB_2) || defined (__ARM_ARCH_6M__)
+#define _ISA_THUMB_1
+#endif
+
+
+/* Now some macros for common instruction sequences. */
+#ifdef __ASSEMBLER__
+.macro RETURN cond=
+#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1)
+ bx\cond lr
+#else
+ mov\cond pc, lr
+#endif
+.endm
+
+.macro optpld base, offset=#0
+#if defined (_ISA_ARM_7)
+ pld [\base, \offset]
+#endif
+.endm
+
+#else
+asm(".macro RETURN cond=\n\t"
+#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1)
+ "bx\\cond lr\n\t"
+#else
+ "mov\\cond pc, lr\n\t"
+#endif
+ ".endm"
+ );
+
+asm(".macro optpld base, offset=#0\n\t"
+#if defined (_ISA_ARM_7)
+ "pld [\\base, \\offset]\n\t"
+#endif
+ ".endm"
+ );
+#endif
+
+#endif /* ARM_ASM__H */
diff --git a/reference/newlib/memcpy.S b/reference/newlib/memcpy.S
new file mode 100644
index 000000000000..e408ed0e0b1c
--- /dev/null
+++ b/reference/newlib/memcpy.S
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2011 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \
+ (!(defined (__ARM_ARCH_7A__))))
+
+ /* Do nothing here. See memcpy-stub.c in the same directory. */
+
+#else
+ /* Prototype: void *memcpy (void *dst, const void *src, size_t count). */
+
+ /* Use the version of memcpy implemented using LDRD and STRD.
+ This version is tuned for Cortex-A15.
+ This might not be the best for other ARMv7-A CPUs,
+ but there is no predefine to distinguish between
+ different CPUs in the same architecture,
+ and this version is better than the plain memcpy provided in newlib.
+
+	Therefore, we use this version for all ARMv7-A CPUs. */
+
+ /* To make the same code compile for both ARM and Thumb instruction
+ sets, switch to unified syntax at the beginning of this function.
+ However, by using the same code, we may be missing optimization
+ opportunities. For instance, in LDRD/STRD instructions, the first
+ destination register must be even and the second consecutive in
+ ARM state, but not in Thumb state. */
+
+ .syntax unified
+
+#if defined (__thumb__)
+ .thumb
+ .thumb_func
+#endif
+
+ .global memcpy
+ .type memcpy, %function
+memcpy:
+
+ /* Assumes that n >= 0, and dst, src are valid pointers.
+ If there is at least 8 bytes to copy, use LDRD/STRD.
+ If src and dst are misaligned with different offsets,
+ first copy byte by byte until dst is aligned,
+ and then copy using LDRD/STRD and shift if needed.
+	When fewer than 8 bytes are left, copy a word and then copy byte by byte. */
+
+ /* Save registers (r0 holds the return value):
+ optimized push {r0, r4, r5, lr}.
+ To try and improve performance, stack layout changed,
+ i.e., not keeping the stack looking like users expect
+ (highest numbered register at highest address). */
+ push {r0, lr}
+ strd r4, r5, [sp, #-8]!
+
+ /* TODO: Add debug frame directives.
+ We don't need exception unwind directives, because the code below
+ does not throw any exceptions and does not call any other functions.
+ Generally, newlib functions like this lack debug information for
+ assembler source. */
+
+ /* Get copying of tiny blocks out of the way first. */
+ /* Is there at least 4 bytes to copy? */
+ subs r2, r2, #4
+ blt copy_less_than_4 /* If n < 4. */
+
+ /* Check word alignment. */
+ ands ip, r0, #3 /* ip = last 2 bits of dst. */
+ bne dst_not_word_aligned /* If dst is not word-aligned. */
+
+ /* Get here if dst is word-aligned. */
+ ands ip, r1, #3 /* ip = last 2 bits of src. */
+ bne src_not_word_aligned /* If src is not word-aligned. */
+word_aligned:
+	/* Get here if src and dst are both word-aligned.
+ The number of bytes remaining to copy is r2+4. */
+
+	/* Is there at least 64 bytes to copy? */
+ subs r2, r2, #60
+ blt copy_less_than_64 /* If r2 + 4 < 64. */
+
+ /* First, align the destination buffer to 8-bytes,
+ to make sure double loads and stores don't cross cache line boundary,
+ as they are then more expensive even if the data is in the cache
+ (require two load/store issue cycles instead of one).
+ If only one of the buffers is not 8-bytes aligned,
+ then it's more important to align dst than src,
+ because there is more penalty for stores
+ than loads that cross cacheline boundary.
+ This check and realignment are only worth doing
+ if there is a lot to copy. */
+
+ /* Get here if dst is word aligned,
+ i.e., the 2 least significant bits are 0.
+	If dst is not two-word (8-byte) aligned, i.e., bit 2 of dst is set,
+	then copy 1 word (4 bytes) to align it. */
+ ands r3, r0, #4
+ beq 11f /* If dst already two-word aligned. */
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ subs r2, r2, #4
+ blt copy_less_than_64
+
+11:
+ /* TODO: Align to cacheline (useful for PLD optimization). */
+
+ /* Every loop iteration copies 64 bytes. */
+1:
+ .irp offset, #0, #8, #16, #24, #32, #40, #48, #56
+ ldrd r4, r5, [r1, \offset]
+ strd r4, r5, [r0, \offset]
+ .endr
+
+ add r0, r0, #64
+ add r1, r1, #64
+ subs r2, r2, #64
+ bge 1b /* If there is more to copy. */
+
+copy_less_than_64:
+
+ /* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
+ Restore the count if there is more than 7 bytes to copy. */
+ adds r2, r2, #56
+ blt copy_less_than_8
+
+ /* Copy 8 bytes at a time. */
+2:
+ ldrd r4, r5, [r1], #8
+ strd r4, r5, [r0], #8
+ subs r2, r2, #8
+ bge 2b /* If there is more to copy. */
+
+copy_less_than_8:
+
+ /* Get here if less than 8 bytes to copy, -8 <= r2 < 0.
+ Check if there is more to copy. */
+ cmn r2, #8
+ beq return /* If r2 + 8 == 0. */
+
+ /* Restore the count if there is more than 3 bytes to copy. */
+ adds r2, r2, #4
+ blt copy_less_than_4
+
+ /* Copy 4 bytes. */
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+
+copy_less_than_4:
+ /* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */
+
+ /* Restore the count, check if there is more to copy. */
+ adds r2, r2, #4
+ beq return /* If r2 == 0. */
+
+	/* Get here with r2 in {1,2,3} = {01,10,11} in binary. */
+ /* Logical shift left r2, insert 0s, update flags. */
+ lsls r2, r2, #31
+
+ /* Copy byte by byte.
+ Condition ne means the last bit of r2 is 0.
+ Condition cs means the second to last bit of r2 is set,
+ i.e., r2 is 1 or 3. */
+ itt ne
+ ldrbne r3, [r1], #1
+ strbne r3, [r0], #1
+
+ itttt cs
+ ldrbcs r4, [r1], #1
+ ldrbcs r5, [r1]
+ strbcs r4, [r0], #1
+ strbcs r5, [r0]
+
+return:
+ /* Restore registers: optimized pop {r0, r4, r5, pc} */
+ ldrd r4, r5, [sp], #8
+ pop {r0, pc} /* This is the only return point of memcpy. */
+
+#ifndef __ARM_FEATURE_UNALIGNED
+
+ /* The following assembly macro implements misaligned copy in software.
+ Assumes that dst is word aligned, src is at offset "pull" bits from
+ word, push = 32 - pull, and the number of bytes that remain to copy
+ is r2 + 4, r2 >= 0. */
+
+ /* In the code below, r2 is the number of bytes that remain to be
+ written. The number of bytes read is always larger, because we have
+ partial words in the shift queue. */
+
+ .macro miscopy pull push shiftleft shiftright
+
+ /* Align src to the previous word boundary. */
+ bic r1, r1, #3
+
+ /* Initialize the shift queue. */
+ ldr r5, [r1], #4 /* Load a word from source. */
+
+ subs r2, r2, #4
+ blt 6f /* Go to misaligned copy of less than 8 bytes. */
+
+ /* Get here if there is more than 8 bytes to copy.
+ The number of bytes to copy is r2+8, r2 >= 0. */
+
+ /* Save registers: push { r6, r7 }.
+ We need additional registers for LDRD and STRD, because in ARM state
+ the first destination register must be even and the second
+ consecutive. */
+ strd r6, r7, [sp, #-8]!
+
+ subs r2, r2, #56
+ blt 4f /* Go to misaligned copy of less than 64 bytes. */
+
+3:
+ /* Get here if there is more than 64 bytes to copy.
+ The number of bytes to copy is r2+64, r2 >= 0. */
+
+ /* Copy 64 bytes in every iteration.
+ Use a partial word from the shift queue. */
+ .irp offset, #0, #8, #16, #24, #32, #40, #48, #56
+ mov r6, r5, \shiftleft #\pull
+ ldrd r4, r5, [r1, \offset]
+ orr r6, r6, r4, \shiftright #\push
+ mov r7, r4, \shiftleft #\pull
+ orr r7, r7, r5, \shiftright #\push
+ strd r6, r7, [r0, \offset]
+ .endr
+
+ add r1, r1, #64
+ add r0, r0, #64
+ subs r2, r2, #64
+ bge 3b
+
+4:
+ /* Get here if there is less than 64 bytes to copy (-64 <= r2 < 0)
+ and they are misaligned. */
+
+ /* Restore the count if there is more than 7 bytes to copy. */
+ adds r2, r2, #56
+
+ /* If less than 8 bytes to copy,
+ restore registers saved for this loop: optimized poplt { r6, r7 }. */
+ itt lt
+ ldrdlt r6, r7, [sp], #8
+ blt 6f /* Go to misaligned copy of less than 8 bytes. */
+
+5:
+ /* Copy 8 bytes at a time.
+ Use a partial word from the shift queue. */
+ mov r6, r5, \shiftleft #\pull
+ ldrd r4, r5, [r1], #8
+ orr r6, r6, r4, \shiftright #\push
+ mov r7, r4, \shiftleft #\pull
+ orr r7, r7, r5, \shiftright #\push
+ strd r6, r7, [r0], #8
+
+ subs r2, r2, #8
+ bge 5b /* If there is more to copy. */
+
+ /* Restore registers saved for this loop: optimized pop { r6, r7 }. */
+ ldrd r6, r7, [sp], #8
+
+6:
+	/* Get here if there are less than 8 bytes to copy (-8 <= r2 < 0)
+ and they are misaligned. */
+
+ /* Check if there is more to copy. */
+ cmn r2, #8
+ beq return
+
+ /* Check if there is less than 4 bytes to copy. */
+ cmn r2, #4
+
+ itt lt
+ /* Restore src offset from word-align. */
+ sublt r1, r1, #(\push / 8)
+ blt copy_less_than_4
+
+ /* Use a partial word from the shift queue. */
+ mov r3, r5, \shiftleft #\pull
+ /* Load a word from src, but without writeback
+ (this word is not fully written to dst). */
+ ldr r5, [r1]
+
+ /* Restore src offset from word-align. */
+ add r1, r1, #(\pull / 8)
+
+ /* Shift bytes to create one dst word and store it. */
+ orr r3, r3, r5, \shiftright #\push
+ str r3, [r0], #4
+
+ /* Use single byte copying of the remaining bytes. */
+ b copy_less_than_4
+
+ .endm
+
+#endif /* not __ARM_FEATURE_UNALIGNED */
+
+dst_not_word_aligned:
+
+ /* Get here when dst is not aligned and ip has the last 2 bits of dst,
+ i.e., ip is the offset of dst from word.
+ The number of bytes that remains to copy is r2 + 4,
+ i.e., there are at least 4 bytes to copy.
+ Write a partial word (0 to 3 bytes), such that dst becomes
+ word-aligned. */
+
+ /* If dst is at ip bytes offset from a word (with 0 < ip < 4),
+ then there are (4 - ip) bytes to fill up to align dst to the next
+ word. */
+ rsb ip, ip, #4 /* ip = #4 - ip. */
+ cmp ip, #2
+
+ /* Copy byte by byte with conditionals. */
+ itt gt
+ ldrbgt r3, [r1], #1
+ strbgt r3, [r0], #1
+
+ itt ge
+ ldrbge r4, [r1], #1
+ strbge r4, [r0], #1
+
+ ldrb lr, [r1], #1
+ strb lr, [r0], #1
+
+ /* Update the count.
+ ip holds the number of bytes we have just copied. */
+ subs r2, r2, ip /* r2 = r2 - ip. */
+ blt copy_less_than_4 /* If r2 < ip. */
+
+ /* Get here if there are more than 4 bytes to copy.
+ Check if src is aligned. If beforehand src and dst were not word
+ aligned but congruent (same offset), then now they are both
+	aligned but congruent (same offset), they are now both
+ shifting). */
+ ands ip, r1, #3 /* ip = last 2 bits of src. */
+ beq word_aligned /* If r1 is word-aligned. */
+
+src_not_word_aligned:
+ /* Get here when src is not word-aligned, but dst is word-aligned.
+ The number of bytes that remains to copy is r2+4. */
+
+#ifdef __ARM_FEATURE_UNALIGNED
+ /* Copy word by word using LDR when alignment can be done in hardware,
+ i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */
+ subs r2, r2, #60
+ blt 8f
+
+7:
+ /* Copy 64 bytes in every loop iteration. */
+ .irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60
+ ldr r3, [r1, \offset]
+ str r3, [r0, \offset]
+ .endr
+
+ add r0, r0, #64
+ add r1, r1, #64
+ subs r2, r2, #64
+ bge 7b
+
+8:
+ /* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
+ Check if there is more than 3 bytes to copy. */
+ adds r2, r2, #60
+ blt copy_less_than_4
+
+9:
+ /* Get here if there is less than 64 but at least 4 bytes to copy,
+ where the number of bytes to copy is r2+4. */
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 9b
+
+ b copy_less_than_4
+
+#else /* not __ARM_FEATURE_UNALIGNED */
+
+ /* ip has last 2 bits of src,
+ i.e., ip is the offset of src from word, and ip > 0.
+ Compute shifts needed to copy from src to dst. */
+ cmp ip, #2
+ beq miscopy_16_16 /* If ip == 2. */
+ bge miscopy_24_8 /* If ip == 3. */
+
+ /* Get here if ip == 1. */
+
+ /* Endian independent macros for shifting bytes within registers. */
+
+#ifndef __ARMEB__
+miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsr shiftright=lsl
+miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsr shiftright=lsl
+miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsr shiftright=lsl
+#else /* not __ARMEB__ */
+miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsl shiftright=lsr
+miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsl shiftright=lsr
+miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsl shiftright=lsr
+#endif /* not __ARMEB__ */
+
+#endif /* not __ARM_FEATURE_UNALIGNED */
+
+#endif /* memcpy */
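
The miscopy macro above is the core of the software-misalignment path: keep one partially consumed source word in a "shift queue" register, and assemble each aligned destination word from pull bits of the queued word and push = 32 - pull bits of the newly loaded one. A scalar C sketch of the same idea for a little-endian machine (illustrative only; the prologue/epilogue byte handling of the real code is omitted, and pull must be 8, 16 or 24):

    #include <stddef.h>
    #include <stdint.h>

    /* Copy n words to a word-aligned dst from a source that sits pull/8
       bytes past the word boundary src_aligned points at. */
    static void miscopy_words (uint32_t *dst, const uint32_t *src_aligned,
                               size_t n, unsigned pull)
    {
      unsigned push = 32 - pull;
      uint32_t queue = *src_aligned++;     /* initialize the shift queue */

      while (n--)
        {
          uint32_t next = *src_aligned++;  /* one aligned load per word */
          *dst++ = (queue >> pull) | (next << push);
          queue = next;                    /* leftover bytes for next round */
        }
    }

On big-endian targets the shift directions swap, which is exactly what the lsr/lsl versus lsl/lsr selection at the end of the file encodes.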
diff --git a/reference/newlib/shim.h b/reference/newlib/shim.h
new file mode 100644
index 000000000000..e265e9737f85
--- /dev/null
+++ b/reference/newlib/shim.h
@@ -0,0 +1,5 @@
+/* Basic macros that newlib uses */
+#define _PTR void *
+#define _DEFUN(_name, _args, _def) _name (_def)
+#define _CONST const
+#define _AND ,
diff --git a/reference/newlib/strcmp.S b/reference/newlib/strcmp.S
new file mode 100644
index 000000000000..6346f068279f
--- /dev/null
+++ b/reference/newlib/strcmp.S
@@ -0,0 +1,777 @@
+/*
+ * Copyright (c) 2012 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arm_asm.h"
+
+#ifdef __ARMEB__
+#define S2LOMEM lsl
+#define S2LOMEMEQ lsleq
+#define S2HIMEM lsr
+#define MSB 0x000000ff
+#define LSB 0xff000000
+#define BYTE0_OFFSET 24
+#define BYTE1_OFFSET 16
+#define BYTE2_OFFSET 8
+#define BYTE3_OFFSET 0
+#else /* not __ARMEB__ */
+#define S2LOMEM lsr
+#define S2LOMEMEQ lsreq
+#define S2HIMEM lsl
+#define BYTE0_OFFSET 0
+#define BYTE1_OFFSET 8
+#define BYTE2_OFFSET 16
+#define BYTE3_OFFSET 24
+#define MSB 0xff000000
+#define LSB 0x000000ff
+#endif /* not __ARMEB__ */
+
+.syntax unified
+
+#if defined (__thumb__)
+ .thumb
+ .thumb_func
+#endif
+ .global strcmp
+ .type strcmp, %function
+strcmp:
+
+#if (defined (__thumb__) && !defined (__thumb2__))
+1:
+ ldrb r2, [r0]
+ ldrb r3, [r1]
+ adds r0, r0, #1
+ adds r1, r1, #1
+ cmp r2, #0
+ beq 2f
+ cmp r2, r3
+ beq 1b
+2:
+ subs r0, r2, r3
+ bx lr
+#elif (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
+1:
+ ldrb r2, [r0], #1
+ ldrb r3, [r1], #1
+ cmp r2, #1
+ it cs
+ cmpcs r2, r3
+ beq 1b
+ subs r0, r2, r3
+ RETURN
+
+
+#elif (defined (_ISA_THUMB_2) || defined (_ISA_ARM_6))
+ /* Use LDRD whenever possible. */
+
+/* The main thing to look out for when comparing large blocks is that
+ the loads do not cross a page boundary when loading past the index
+ of the byte with the first difference or the first string-terminator.
+
+ For example, if the strings are identical and the string-terminator
+ is at index k, byte by byte comparison will not load beyond address
+ s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
+ k; double word - up to 7 bytes. If the load of these bytes crosses
+ a page boundary, it might cause a memory fault (if the page is not mapped)
+ that would not have happened in byte by byte comparison.
+
+ If an address is (double) word aligned, then a load of a (double) word
+ from that address will not cross a page boundary.
+ Therefore, the algorithm below considers word and double-word alignment
+ of strings separately. */
+
+/* High-level description of the algorithm.
+
+ * The fast path: if both strings are double-word aligned,
+ use LDRD to load two words from each string in every loop iteration.
+ * If the strings have the same offset from a word boundary,
+ use LDRB to load and compare byte by byte until
+ the first string is aligned to a word boundary (at most 3 bytes).
+ This is optimized for quick return on short unaligned strings.
+ * If the strings have the same offset from a double-word boundary,
+ use LDRD to load two words from each string in every loop iteration, as in the fast path.
+ * If the strings do not have the same offset from a double-word boundary,
+ load a word from the second string before the loop to initialize the queue.
+	Use LDRD to load two words from each string in every loop iteration.
+ Inside the loop, load the second word from the second string only after comparing
+ the first word, using the queued value, to guarantee safety across page boundaries.
+ * If the strings do not have the same offset from a word boundary,
+ use LDR and a shift queue. Order of loads and comparisons matters,
+ similarly to the previous case.
+
+ * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
+	* The only difference between ARM and Thumb modes is the use of the CBZ instruction.
+ * The only difference between big and little endian is the use of REV in little endian
+ to compute the return value, instead of MOV.
+ * No preload. [TODO.]
+*/
+
+ .macro m_cbz reg label
+#ifdef __thumb2__
+ cbz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ beq \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbz */
+
+ .macro m_cbnz reg label
+#ifdef __thumb2__
+ cbnz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ bne \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbnz */
+
+ .macro init
+ /* Macro to save temporary registers and prepare magic values. */
+ subs sp, sp, #16
+ strd r4, r5, [sp, #8]
+ strd r6, r7, [sp]
+ mvn r6, #0 /* all F */
+ mov r7, #0 /* all 0 */
+ .endm /* init */
+
+ .macro magic_compare_and_branch w1 w2 label
+ /* Macro to compare registers w1 and w2 and conditionally branch to label. */
+ cmp \w1, \w2 /* Are w1 and w2 the same? */
+ magic_find_zero_bytes \w1
+ it eq
+ cmpeq ip, #0 /* Is there a zero byte in w1? */
+ bne \label
+ .endm /* magic_compare_and_branch */
+
+ .macro magic_find_zero_bytes w1
+ /* Macro to find all-zero bytes in w1, result is in ip. */
+#if (defined (__ARM_FEATURE_DSP))
+ uadd8 ip, \w1, r6
+ sel ip, r7, r6
+#else /* not defined (__ARM_FEATURE_DSP) */
+ /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
+	Coincidentally, these processors only have Thumb-2 mode, where we can use
+	the (large) magic constant available directly as an immediate in instructions.
+ Note that we cannot use the magic constant in ARM mode, where we need
+ to create the constant in a register. */
+ sub ip, \w1, #0x01010101
+ bic ip, ip, \w1
+ and ip, ip, #0x80808080
+#endif /* not defined (__ARM_FEATURE_DSP) */
+ .endm /* magic_find_zero_bytes */
+
+ .macro setup_return w1 w2
+#ifdef __ARMEB__
+ mov r1, \w1
+ mov r2, \w2
+#else /* not __ARMEB__ */
+ rev r1, \w1
+ rev r2, \w2
+#endif /* not __ARMEB__ */
+ .endm /* setup_return */
+
+ /*
+ optpld r0, #0
+ optpld r1, #0
+ */
+
+ /* Are both strings double-word aligned? */
+ orr ip, r0, r1
+ tst ip, #7
+ bne do_align
+
+ /* Fast path. */
+ init
+
+doubleword_aligned:
+
+ /* Get here when the strings to compare are double-word aligned. */
+ /* Compare two words in every iteration. */
+ .p2align 2
+2:
+ /*
+ optpld r0, #16
+ optpld r1, #16
+ */
+
+ /* Load the next double-word from each string. */
+ ldrd r2, r3, [r0], #8
+ ldrd r4, r5, [r1], #8
+
+ magic_compare_and_branch w1=r2, w2=r4, label=return_24
+ magic_compare_and_branch w1=r3, w2=r5, label=return_35
+ b 2b
+
+do_align:
+ /* Is the first string word-aligned? */
+ ands ip, r0, #3
+ beq word_aligned_r0
+
+ /* Fast compare byte by byte until the first string is word-aligned. */
+ /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
+	to read until the next word boundary is 4-ip. */
+ bic r0, r0, #3
+ ldr r2, [r0], #4
+ lsls ip, ip, #31
+ beq byte2
+ bcs byte3
+
+byte1:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE1_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte2:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE2_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte3:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE3_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbnz reg=r3, label=word_aligned_r0
+
+fast_return:
+ mov r0, ip
+ bx lr
+
+word_aligned_r0:
+ init
+ /* The first string is word-aligned. */
+ /* Is the second string word-aligned? */
+ ands ip, r1, #3
+ bne strcmp_unaligned
+
+word_aligned:
+ /* The strings are word-aligned. */
+ /* Is the first string double-word aligned? */
+ tst r0, #4
+ beq doubleword_aligned_r0
+
+ /* If r0 is not double-word aligned yet, align it by loading
+ and comparing the next word from each string. */
+ ldr r2, [r0], #4
+ ldr r4, [r1], #4
+ magic_compare_and_branch w1=r2 w2=r4 label=return_24
+
+doubleword_aligned_r0:
+ /* Get here when r0 is double-word aligned. */
+ /* Is r1 doubleword_aligned? */
+ tst r1, #4
+ beq doubleword_aligned
+
+ /* Get here when the strings to compare are word-aligned,
+ r0 is double-word aligned, but r1 is not double-word aligned. */
+
+ /* Initialize the queue. */
+ ldr r5, [r1], #4
+
+ /* Compare two words in every iteration. */
+ .p2align 2
+3:
+ /*
+ optpld r0, #16
+ optpld r1, #16
+ */
+
+ /* Load the next double-word from each string and compare. */
+ ldrd r2, r3, [r0], #8
+ magic_compare_and_branch w1=r2 w2=r5 label=return_25
+ ldrd r4, r5, [r1], #8
+ magic_compare_and_branch w1=r3 w2=r4 label=return_34
+ b 3b
+
+ .macro miscmp_word offsetlo offsethi
+ /* Macro to compare misaligned strings. */
+ /* r0, r1 are word-aligned, and at least one of the strings
+ is not double-word aligned. */
+ /* Compare one word in every loop iteration. */
+ /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
+ OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
+
+ /* Initialize the shift queue. */
+ ldr r5, [r1], #4
+
+ /* Compare one word from each string in every loop iteration. */
+ .p2align 2
+7:
+ ldr r3, [r0], #4
+ S2LOMEM r5, r5, #\offsetlo
+ magic_find_zero_bytes w1=r3
+ cmp r7, ip, S2HIMEM #\offsetlo
+ and r2, r3, r6, S2LOMEM #\offsetlo
+ it eq
+ cmpeq r2, r5
+ bne return_25
+ ldr r5, [r1], #4
+ cmp ip, #0
+ eor r3, r2, r3
+ S2HIMEM r2, r5, #\offsethi
+ it eq
+ cmpeq r3, r2
+ bne return_32
+ b 7b
+ .endm /* miscmp_word */
+
+strcmp_unaligned:
+ /* r0 is word-aligned, r1 is at offset ip from a word. */
+ /* Align r1 to the (previous) word-boundary. */
+ bic r1, r1, #3
+
+ /* Unaligned comparison word by word using LDRs. */
+ cmp ip, #2
+ beq miscmp_word_16 /* If ip == 2. */
+ bge miscmp_word_24 /* If ip == 3. */
+ miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
+miscmp_word_16: miscmp_word offsetlo=16 offsethi=16
+miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
+
+
+return_32:
+ setup_return w1=r3, w2=r2
+ b do_return
+return_34:
+ setup_return w1=r3, w2=r4
+ b do_return
+return_25:
+ setup_return w1=r2, w2=r5
+ b do_return
+return_35:
+ setup_return w1=r3, w2=r5
+ b do_return
+return_24:
+ setup_return w1=r2, w2=r4
+
+do_return:
+
+#ifdef __ARMEB__
+ mov r0, ip
+#else /* not __ARMEB__ */
+ rev r0, ip
+#endif /* not __ARMEB__ */
+
+ /* Restore temporaries early, before computing the return value. */
+ ldrd r6, r7, [sp]
+ ldrd r4, r5, [sp, #8]
+ adds sp, sp, #16
+
+ /* There is a zero or a different byte between r1 and r2. */
+ /* r0 contains a mask of all-zero bytes in r1. */
+	/* Using r0 and not ip here because cbz requires a low register. */
+ m_cbz reg=r0, label=compute_return_value
+ clz r0, r0
+ /* r0 contains the number of bits on the left of the first all-zero byte in r1. */
+ rsb r0, r0, #24
+ /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
+ lsr r1, r1, r0
+ lsr r2, r2, r0
+
+compute_return_value:
+ subs r0, r1, r2
+ bx lr
+
+
+#else /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6)
+ defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
+ (defined (__thumb__) && !defined (__thumb2__))) */
+
+ /* Use LDR whenever possible. */
+
+#ifdef __thumb2__
+#define magic1(REG) 0x01010101
+#define magic2(REG) 0x80808080
+#else
+#define magic1(REG) REG
+#define magic2(REG) REG, lsl #7
+#endif
+
+ optpld r0
+ optpld r1
+ eor r2, r0, r1
+ tst r2, #3
+ /* Strings not at same byte offset from a word boundary. */
+ bne strcmp_unaligned
+ ands r2, r0, #3
+ bic r0, r0, #3
+ bic r1, r1, #3
+ ldr ip, [r0], #4
+ it eq
+ ldreq r3, [r1], #4
+ beq 1f
+ /* Although s1 and s2 have identical initial alignment, they are
+ not currently word aligned. Rather than comparing bytes,
+ make sure that any bytes fetched from before the addressed
+ bytes are forced to 0xff. Then they will always compare
+ equal. */
+ eor r2, r2, #3
+ lsl r2, r2, #3
+ mvn r3, MSB
+ S2LOMEM r2, r3, r2
+ ldr r3, [r1], #4
+ orr ip, ip, r2
+ orr r3, r3, r2
+1:
+#ifndef __thumb2__
+ /* Load the 'magic' constant 0x01010101. */
+ str r4, [sp, #-4]!
+ mov r4, #1
+ orr r4, r4, r4, lsl #8
+ orr r4, r4, r4, lsl #16
+#endif
+ .p2align 2
+4:
+ optpld r0, #8
+ optpld r1, #8
+ sub r2, ip, magic1(r4)
+ cmp ip, r3
+ itttt eq
+ /* check for any zero bytes in first word */
+ biceq r2, r2, ip
+ tsteq r2, magic2(r4)
+ ldreq ip, [r0], #4
+ ldreq r3, [r1], #4
+ beq 4b
+2:
+ /* There's a zero or a different byte in the word */
+ S2HIMEM r0, ip, #24
+ S2LOMEM ip, ip, #8
+ cmp r0, #1
+ it cs
+ cmpcs r0, r3, S2HIMEM #24
+ it eq
+ S2LOMEMEQ r3, r3, #8
+ beq 2b
+ /* On a big-endian machine, r0 contains the desired byte in bits
+ 0-7; on a little-endian machine they are in bits 24-31. In
+ both cases the other bits in r0 are all zero. For r3 the
+ interesting byte is at the other end of the word, but the
+ other bits are not necessarily zero. We need a signed result
+	representing the difference in the unsigned bytes, so for the
+ little-endian case we can't just shift the interesting bits
+ up. */
+#ifdef __ARMEB__
+ sub r0, r0, r3, lsr #24
+#else
+ and r3, r3, #255
+#ifdef __thumb2__
+ /* No RSB instruction in Thumb2 */
+ lsr r0, r0, #24
+ sub r0, r0, r3
+#else
+ rsb r0, r3, r0, lsr #24
+#endif
+#endif
+#ifndef __thumb2__
+ ldr r4, [sp], #4
+#endif
+ RETURN
+
+
+strcmp_unaligned:
+
+#if 0
+	/* The assembly code below is based on the following algorithm. */
+#ifdef __ARMEB__
+#define RSHIFT <<
+#define LSHIFT >>
+#else
+#define RSHIFT >>
+#define LSHIFT <<
+#endif
+
+#define body(shift) \
+ mask = 0xffffffffU RSHIFT shift; \
+ w1 = *wp1++; \
+ w2 = *wp2++; \
+ do \
+ { \
+ t1 = w1 & mask; \
+ if (__builtin_expect(t1 != w2 RSHIFT shift, 0)) \
+ { \
+ w2 RSHIFT= shift; \
+ break; \
+ } \
+ if (__builtin_expect(((w1 - b1) & ~w1) & (b1 << 7), 0)) \
+ { \
+ /* See comment in assembler below re syndrome on big-endian */\
+ if ((((w1 - b1) & ~w1) & (b1 << 7)) & mask) \
+ w2 RSHIFT= shift; \
+ else \
+ { \
+ w2 = *wp2; \
+ t1 = w1 RSHIFT (32 - shift); \
+ w2 = (w2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
+ } \
+ break; \
+ } \
+ w2 = *wp2++; \
+ t1 ^= w1; \
+ if (__builtin_expect(t1 != w2 LSHIFT (32 - shift), 0)) \
+ { \
+ t1 = w1 >> (32 - shift); \
+ w2 = (w2 << (32 - shift)) RSHIFT (32 - shift); \
+ break; \
+ } \
+ w1 = *wp1++; \
+ } while (1)
+
+ const unsigned* wp1;
+ const unsigned* wp2;
+ unsigned w1, w2;
+ unsigned mask;
+ unsigned shift;
+ unsigned b1 = 0x01010101;
+ char c1, c2;
+ unsigned t1;
+
+ while (((unsigned) s1) & 3)
+ {
+ c1 = *s1++;
+ c2 = *s2++;
+ if (c1 == 0 || c1 != c2)
+ return c1 - (int)c2;
+ }
+ wp1 = (unsigned*) (((unsigned)s1) & ~3);
+ wp2 = (unsigned*) (((unsigned)s2) & ~3);
+ t1 = ((unsigned) s2) & 3;
+ if (t1 == 1)
+ {
+ body(8);
+ }
+ else if (t1 == 2)
+ {
+ body(16);
+ }
+ else
+ {
+ body (24);
+ }
+
+ do
+ {
+#ifdef __ARMEB__
+      c1 = (char) (t1 >> 24);
+      c2 = (char) (w2 >> 24);
+#else /* not __ARMEB__ */
+ c1 = (char) t1;
+ c2 = (char) w2;
+#endif /* not __ARMEB__ */
+ t1 RSHIFT= 8;
+ w2 RSHIFT= 8;
+ } while (c1 != 0 && c1 == c2);
+ return c1 - c2;
+#endif /* 0 */
+
+
+ wp1 .req r0
+ wp2 .req r1
+ b1 .req r2
+ w1 .req r4
+ w2 .req r5
+ t1 .req ip
+ @ r3 is scratch
+
+ /* First of all, compare bytes until wp1(sp1) is word-aligned. */
+1:
+ tst wp1, #3
+ beq 2f
+ ldrb r2, [wp1], #1
+ ldrb r3, [wp2], #1
+ cmp r2, #1
+ it cs
+ cmpcs r2, r3
+ beq 1b
+ sub r0, r2, r3
+ RETURN
+
+2:
+ str r5, [sp, #-4]!
+ str r4, [sp, #-4]!
+ //stmfd sp!, {r4, r5}
+ mov b1, #1
+ orr b1, b1, b1, lsl #8
+ orr b1, b1, b1, lsl #16
+
+ and t1, wp2, #3
+ bic wp2, wp2, #3
+ ldr w1, [wp1], #4
+ ldr w2, [wp2], #4
+ cmp t1, #2
+ beq 2f
+ bhi 3f
+
+ /* Critical inner Loop: Block with 3 bytes initial overlap */
+ .p2align 2
+1:
+ bic t1, w1, MSB
+ cmp t1, w2, S2LOMEM #8
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #24
+ bne 6f
+ ldr w1, [wp1], #4
+ b 1b
+4:
+ S2LOMEM w2, w2, #8
+ b 8f
+
+5:
+#ifdef __ARMEB__
+ /* The syndrome value may contain false ones if the string ends
+ with the bytes 0x01 0x00 */
+ tst w1, #0xff000000
+ itt ne
+ tstne w1, #0x00ff0000
+ tstne w1, #0x0000ff00
+ beq 7f
+#else
+ bics r3, r3, #0xff000000
+ bne 7f
+#endif
+ ldrb w2, [wp2]
+ S2LOMEM t1, w1, #24
+#ifdef __ARMEB__
+ lsl w2, w2, #24
+#endif
+ b 8f
+
+6:
+ S2LOMEM t1, w1, #24
+ and w2, w2, LSB
+ b 8f
+
+ /* Critical inner Loop: Block with 2 bytes initial overlap */
+ .p2align 2
+2:
+ S2HIMEM t1, w1, #16
+ sub r3, w1, b1
+ S2LOMEM t1, t1, #16
+ bic r3, r3, w1
+ cmp t1, w2, S2LOMEM #16
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #16
+ bne 6f
+ ldr w1, [wp1], #4
+ b 2b
+
+5:
+#ifdef __ARMEB__
+ /* The syndrome value may contain false ones if the string ends
+ with the bytes 0x01 0x00 */
+ tst w1, #0xff000000
+ it ne
+ tstne w1, #0x00ff0000
+ beq 7f
+#else
+ lsls r3, r3, #16
+ bne 7f
+#endif
+ ldrh w2, [wp2]
+ S2LOMEM t1, w1, #16
+#ifdef __ARMEB__
+ lsl w2, w2, #16
+#endif
+ b 8f
+
+6:
+ S2HIMEM w2, w2, #16
+ S2LOMEM t1, w1, #16
+4:
+ S2LOMEM w2, w2, #16
+ b 8f
+
+	/* Critical inner loop: block with 1 byte of initial overlap */
+ .p2align 2
+3:
+ and t1, w1, LSB
+ cmp t1, w2, S2LOMEM #24
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #8
+ bne 6f
+ ldr w1, [wp1], #4
+ b 3b
+4:
+ S2LOMEM w2, w2, #24
+ b 8f
+5:
+ /* The syndrome value may contain false ones if the string ends
+ with the bytes 0x01 0x00 */
+ tst w1, LSB
+ beq 7f
+ ldr w2, [wp2], #4
+6:
+ S2LOMEM t1, w1, #8
+ bic w2, w2, MSB
+ b 8f
+7:
+ mov r0, #0
+ //ldmfd sp!, {r4, r5}
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ RETURN
+8:
+ and r2, t1, LSB
+ and r0, w2, LSB
+ cmp r0, #1
+ it cs
+ cmpcs r0, r2
+ itt eq
+ S2LOMEMEQ t1, t1, #8
+ S2LOMEMEQ w2, w2, #8
+ beq 8b
+ sub r0, r2, r0
+ //ldmfd sp!, {r4, r5}
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ RETURN
+
+#endif /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) ||
+	    defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
+	    (defined (__thumb__) && !defined (__thumb2__))) */
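
The nul-byte test at the heart of this file -- ((w1 - b1) & ~w1) & (b1 << 7)
in the disabled C fallback, and the sub r3, w1, b1 / bic r3, r3, w1 /
ands r3, r3, b1, lsl #7 sequence in the assembler, with b1 = 0x01010101 --
is worth isolating. A minimal sketch, assuming 32-bit words (the helper
name is ours, not part of the patch):

#include <stdint.h>

/* Nonzero iff some byte of w is 0x00.
   w - 0x01010101: subtracting 1 from a 0x00 byte borrows, setting that
     byte's sign bit.
   & ~w: clears the sign bit of any byte that was already >= 0x80, which
     would otherwise be a false positive.
   & 0x80808080: keeps only the per-byte sign bits (b1 << 7 above).  */
static int
has_nul_byte (uint32_t w)
{
  return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
}

Presence of a zero byte is detected exactly; only the syndrome's bit
pattern can lie, because bits above the first zero byte may be spurious
-- the 0x01 0x00 ending that the comments at the labels 5 above guard
against.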
diff --git a/reference/newlib/strcpy.c b/reference/newlib/strcpy.c
new file mode 100644
index 000000000000..93426d42ad41
--- /dev/null
+++ b/reference/newlib/strcpy.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2008 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arm_asm.h"
+
+#ifdef __thumb2__
+#define magic1(REG) "#0x01010101"
+#define magic2(REG) "#0x80808080"
+#else
+#define magic1(REG) #REG
+#define magic2(REG) #REG ", lsl #7"
+#endif
+
+char* __attribute__((naked))
+strcpy (char* dst, const char* src)
+{
+ asm (
+#if !(defined(__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \
+ (defined (__thumb__) && !defined (__thumb2__)))
+ "optpld r1\n\t"
+ "eor r2, r0, r1\n\t"
+ "mov ip, r0\n\t"
+ "tst r2, #3\n\t"
+ "bne 4f\n\t"
+ "tst r1, #3\n\t"
+ "bne 3f\n"
+ "5:\n\t"
+#ifndef __thumb2__
+ "str r5, [sp, #-4]!\n\t"
+ "mov r5, #0x01\n\t"
+ "orr r5, r5, r5, lsl #8\n\t"
+ "orr r5, r5, r5, lsl #16\n\t"
+#endif
+
+ "str r4, [sp, #-4]!\n\t"
+ "tst r1, #4\n\t"
+ "ldr r3, [r1], #4\n\t"
+ "beq 2f\n\t"
+ "sub r2, r3, "magic1(r5)"\n\t"
+ "bics r2, r2, r3\n\t"
+ "tst r2, "magic2(r5)"\n\t"
+ "itt eq\n\t"
+ "streq r3, [ip], #4\n\t"
+ "ldreq r3, [r1], #4\n"
+ "bne 1f\n\t"
+ /* Inner loop. We now know that r1 is 64-bit aligned, so we
+ can safely fetch up to two words. This allows us to avoid
+ load stalls. */
+ ".p2align 2\n"
+ "2:\n\t"
+ "optpld r1, #8\n\t"
+ "ldr r4, [r1], #4\n\t"
+ "sub r2, r3, "magic1(r5)"\n\t"
+ "bics r2, r2, r3\n\t"
+ "tst r2, "magic2(r5)"\n\t"
+ "sub r2, r4, "magic1(r5)"\n\t"
+ "bne 1f\n\t"
+ "str r3, [ip], #4\n\t"
+ "bics r2, r2, r4\n\t"
+ "tst r2, "magic2(r5)"\n\t"
+ "itt eq\n\t"
+ "ldreq r3, [r1], #4\n\t"
+ "streq r4, [ip], #4\n\t"
+ "beq 2b\n\t"
+ "mov r3, r4\n"
+ "1:\n\t"
+#ifdef __ARMEB__
+ "rors r3, r3, #24\n\t"
+#endif
+ "strb r3, [ip], #1\n\t"
+ "tst r3, #0xff\n\t"
+#ifdef __ARMEL__
+ "ror r3, r3, #8\n\t"
+#endif
+ "bne 1b\n\t"
+ "ldr r4, [sp], #4\n\t"
+#ifndef __thumb2__
+ "ldr r5, [sp], #4\n\t"
+#endif
+ "RETURN\n"
+
+ /* Strings have the same offset from word alignment, but it's
+ not zero. */
+ "3:\n\t"
+ "tst r1, #1\n\t"
+ "beq 1f\n\t"
+ "ldrb r2, [r1], #1\n\t"
+ "strb r2, [ip], #1\n\t"
+ "cmp r2, #0\n\t"
+ "it eq\n"
+ "RETURN eq\n"
+ "1:\n\t"
+ "tst r1, #2\n\t"
+ "beq 5b\n\t"
+ "ldrh r2, [r1], #2\n\t"
+#ifdef __ARMEB__
+ "tst r2, #0xff00\n\t"
+ "iteet ne\n\t"
+ "strneh r2, [ip], #2\n\t"
+ "lsreq r2, r2, #8\n\t"
+ "streqb r2, [ip]\n\t"
+ "tstne r2, #0xff\n\t"
+#else
+ "tst r2, #0xff\n\t"
+ "itet ne\n\t"
+ "strneh r2, [ip], #2\n\t"
+ "streqb r2, [ip]\n\t"
+ "tstne r2, #0xff00\n\t"
+#endif
+ "bne 5b\n\t"
+ "RETURN\n"
+
+	 /* src and dst do not have a common word-alignment.  Fall back to
+	    byte copying.  */
+ "4:\n\t"
+ "ldrb r2, [r1], #1\n\t"
+ "strb r2, [ip], #1\n\t"
+ "cmp r2, #0\n\t"
+ "bne 4b\n\t"
+ "RETURN"
+
+#elif !defined (__thumb__) || defined (__thumb2__)
+ "mov r3, r0\n\t"
+ "1:\n\t"
+ "ldrb r2, [r1], #1\n\t"
+ "strb r2, [r3], #1\n\t"
+ "cmp r2, #0\n\t"
+ "bne 1b\n\t"
+ "RETURN"
+#else
+ "mov r3, r0\n\t"
+ "1:\n\t"
+ "ldrb r2, [r1]\n\t"
+ "add r1, r1, #1\n\t"
+ "strb r2, [r3]\n\t"
+ "add r3, r3, #1\n\t"
+ "cmp r2, #0\n\t"
+ "bne 1b\n\t"
+ "RETURN"
+#endif
+ );
+}
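
For contrast with the byte-copy fallbacks, the fast path above moves one
word per iteration (two in the unrolled loop at label 2) and only drops
to byte stores once the nul test fires; the magic1/magic2 macros exist
because Thumb-2 can encode 0x01010101 and 0x80808080 as immediates,
while ARM mode must build them in r5. A simplified C rendering of the
loop shape -- 4-byte-aligned pointers, little-endian tail, all names
ours, and the word-pointer casts sidestep strict aliasing for brevity:

#include <stdint.h>

/* Word-at-a-time strcpy core; both pointers assumed 4-byte aligned.
   A real implementation must also handle the misaligned prologues
   (labels 3 and 4 above).  */
static char *
strcpy_word_aligned (char *dst, const char *src)
{
  char *ret = dst;
  const uint32_t *s = (const uint32_t *) src;
  uint32_t *d = (uint32_t *) dst;
  uint32_t w;

  /* Copy whole words while none of their bytes is NUL.  */
  while (w = *s++, ((w - 0x01010101u) & ~w & 0x80808080u) == 0)
    *d++ = w;

  /* Tail: emit bytes up to and including the NUL, lowest byte first
     (the strb/ror sequence at label 1 above).  */
  for (dst = (char *) d; ; w >>= 8)
    {
      *dst++ = (char) w;
      if ((w & 0xff) == 0)
	break;
    }
  return ret;
}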
diff --git a/reference/newlib/strlen.c b/reference/newlib/strlen.c
new file mode 100644
index 000000000000..93ec8bb1bca4
--- /dev/null
+++ b/reference/newlib/strlen.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2008 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arm_asm.h"
+#include <limits.h>
+#include <stddef.h>
+
+#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \
+ (defined (__thumb__) && !defined (__thumb2__))
+
+size_t
+strlen (const char* str)
+{
+ int scratch;
+#if defined (__thumb__) && !defined (__thumb2__)
+ size_t len;
+ asm ("mov %0, #0\n"
+ "1:\n\t"
+ "ldrb %1, [%2, %0]\n\t"
+ "add %0, %0, #1\n\t"
+ "cmp %1, #0\n\t"
+ "bne 1b"
+ : "=&r" (len), "=&r" (scratch) : "r" (str) : "memory", "cc");
+ return len - 1;
+#else
+ const char* end;
+ asm ("1:\n\t"
+ "ldrb %1, [%0], #1\n\t"
+ "cmp %1, #0\n\t"
+ "bne 1b"
+ : "=&r" (end), "=&r" (scratch) : "0" (str) : "memory", "cc");
+ return end - str - 1;
+#endif
+}
+#else
+
+size_t __attribute__((naked))
+strlen (const char* str)
+{
+ asm ("len .req r0\n\t"
+ "data .req r3\n\t"
+ "addr .req r1\n\t"
+
+ "optpld r0\n\t"
+ /* Word-align address */
+ "bic addr, r0, #3\n\t"
+ /* Get adjustment for start ... */
+ "ands len, r0, #3\n\t"
+ "neg len, len\n\t"
+ /* First word of data */
+ "ldr data, [addr], #4\n\t"
+	 /* Ensure bytes preceding start ... */
+ "add ip, len, #4\n\t"
+ "mov ip, ip, asl #3\n\t"
+ "mvn r2, #0\n\t"
+ /* ... are masked out */
+#ifdef __thumb__
+ "itt ne\n\t"
+# ifdef __ARMEB__
+ "lslne r2, ip\n\t"
+# else
+ "lsrne r2, ip\n\t"
+# endif
+ "orrne data, data, r2\n\t"
+#else
+ "it ne\n\t"
+# ifdef __ARMEB__
+ "orrne data, data, r2, lsl ip\n\t"
+# else
+ "orrne data, data, r2, lsr ip\n\t"
+# endif
+#endif
+ /* Magic const 0x01010101 */
+#ifdef _ISA_ARM_7
+ "movw ip, #0x101\n\t"
+#else
+ "mov ip, #0x1\n\t"
+ "orr ip, ip, ip, lsl #8\n\t"
+#endif
+ "orr ip, ip, ip, lsl #16\n"
+
+	 /* This is the main loop.  We subtract one from each byte in
+	    the word: the sign bit changes iff the byte was zero or
+	    0x80 -- we eliminate the latter case by ANDing the result
+	    with the ones' complement of the data.  */
+ "1:\n\t"
+ /* test (data - 0x01010101) */
+ "sub r2, data, ip\n\t"
+ /* ... & ~data */
+ "bic r2, r2, data\n\t"
+ /* ... & 0x80808080 == 0? */
+ "ands r2, r2, ip, lsl #7\n\t"
+#ifdef _ISA_ARM_7
+ /* yes, get more data... */
+ "itt eq\n\t"
+ "ldreq data, [addr], #4\n\t"
+ /* and 4 more bytes */
+ "addeq len, len, #4\n\t"
+ /* If we have PLD, then unroll the loop a bit. */
+ "optpld addr, #8\n\t"
+ /* test (data - 0x01010101) */
+ "ittt eq\n\t"
+ "subeq r2, data, ip\n\t"
+ /* ... & ~data */
+ "biceq r2, r2, data\n\t"
+ /* ... & 0x80808080 == 0? */
+ "andeqs r2, r2, ip, lsl #7\n\t"
+#endif
+ "itt eq\n\t"
+ /* yes, get more data... */
+ "ldreq data, [addr], #4\n\t"
+ /* and 4 more bytes */
+ "addeq len, len, #4\n\t"
+ "beq 1b\n\t"
+#ifdef __ARMEB__
+ "tst data, #0xff000000\n\t"
+ "itttt ne\n\t"
+ "addne len, len, #1\n\t"
+ "tstne data, #0xff0000\n\t"
+ "addne len, len, #1\n\t"
+ "tstne data, #0xff00\n\t"
+ "it ne\n\t"
+ "addne len, len, #1\n\t"
+#else
+# ifdef _ISA_ARM_5
+ /* R2 is the residual sign bits from the above test. All we
+ need to do now is establish the position of the first zero
+ byte... */
+	  /* Little-endian is harder: we need the number of trailing
+	     zeros / 8.  */
+# ifdef _ISA_ARM_7
+ "rbit r2, r2\n\t"
+ "clz r2, r2\n\t"
+# else
+ "rsb r1, r2, #0\n\t"
+ "and r2, r2, r1\n\t"
+ "clz r2, r2\n\t"
+ "rsb r2, r2, #31\n\t"
+# endif
+ "add len, len, r2, lsr #3\n\t"
+# else /* No CLZ instruction */
+ "tst data, #0xff\n\t"
+ "itttt ne\n\t"
+ "addne len, len, #1\n\t"
+ "tstne data, #0xff00\n\t"
+ "addne len, len, #1\n\t"
+ "tstne data, #0xff0000\n\t"
+ "it ne\n\t"
+ "addne len, len, #1\n\t"
+# endif
+#endif
+ "RETURN");
+}
+#endif
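
Once the loop's syndrome in r2 is nonzero, the little-endian epilogue
needs only the bit index of its lowest set sign bit divided by eight:
rbit + clz computes that on ARMv7, and the rsb/and/clz/rsb sequence does
the same on earlier cores that have CLZ. A portable sketch of the whole
function under the same assumptions (little-endian, 32-bit words,
GCC/Clang __builtin_ctz; names ours):

#include <stddef.h>
#include <stdint.h>

size_t
strlen_words (const char *str)
{
  const uint32_t *p = (const uint32_t *) ((uintptr_t) str & ~(uintptr_t) 3);
  size_t len = 0 - (size_t) ((uintptr_t) str & 3);	/* "neg len, len" */
  uint32_t w = *p++;

  /* Force the bytes preceding str to be nonzero, as the lsr/orr
     prologue above does.  */
  if ((uintptr_t) str & 3)
    w |= 0xffffffffu >> (8 * (4 - ((uintptr_t) str & 3)));

  for (;;)
    {
      uint32_t syn = (w - 0x01010101u) & ~w & 0x80808080u;
      if (syn != 0)
	/* First zero byte at index i means the syndrome's lowest set
	   bit is bit 8*i + 7, so trailing zeros / 8 recovers i.  */
	return len + ((unsigned) __builtin_ctz (syn) >> 3);
      w = *p++;
      len += 4;
    }
}

The deliberately wrapped initial len mirrors the assembler's negated
start adjustment, and the aligned word loads never cross a page
boundary, which is why reading a few bytes before the string is safe.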
diff --git a/reference/plain/.deps/memcpy.Po b/reference/plain/.deps/memcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/plain/.deps/memcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/plain/.deps/memset.Po b/reference/plain/.deps/memset.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/plain/.deps/memset.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/plain/.deps/strcmp.Po b/reference/plain/.deps/strcmp.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/plain/.deps/strcmp.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/plain/.deps/strcpy.Po b/reference/plain/.deps/strcpy.Po
new file mode 100644
index 000000000000..9ce06a81ea45
--- /dev/null
+++ b/reference/plain/.deps/strcpy.Po
@@ -0,0 +1 @@
+# dummy
diff --git a/reference/plain/memcpy.c b/reference/plain/memcpy.c
new file mode 100644
index 000000000000..af226c8b159c
--- /dev/null
+++ b/reference/plain/memcpy.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+/** A plain, byte-by-byte memcpy */
+void *memcpy(void *dst0, const void *src0, size_t len0)
+{
+  char *dst = (char *) dst0;
+  const char *src = (const char *) src0;
+
+ void *save = dst0;
+
+ while (len0--)
+ {
+ *dst++ = *src++;
+ }
+
+ return save;
+}
diff --git a/reference/plain/memset.c b/reference/plain/memset.c
new file mode 100644
index 000000000000..f171304aed67
--- /dev/null
+++ b/reference/plain/memset.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+/** A plain, byte-by-byte memset */
+void *memset(void *dst0, int c, size_t len0)
+{
+ char *dst = (char *) dst0;
+ void *save = dst0;
+
+ while (len0--)
+ {
+ *dst++ = c;
+ }
+
+ return save;
+}
diff --git a/reference/plain/strcmp.c b/reference/plain/strcmp.c
new file mode 100644
index 000000000000..5ef534f28349
--- /dev/null
+++ b/reference/plain/strcmp.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** A plain, byte-by-byte strcmp */
+int strcmp(const char *s1, const char *s2)
+{
+ while (*s1 != '\0' && *s1 == *s2)
+ {
+ s1++;
+ s2++;
+ }
+
+ return (*(unsigned char *) s1) - (*(unsigned char *) s2);
+}
diff --git a/reference/plain/strcpy.c b/reference/plain/strcpy.c
new file mode 100644
index 000000000000..66c5820a1869
--- /dev/null
+++ b/reference/plain/strcpy.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** A plain, byte-by-byte strcpy */
+char* strcpy(char *dst0, const char *src0)
+{
+ char *s = dst0;
+
+  while ((*dst0++ = *src0++) != '\0')
+ {
+ }
+
+ return s;
+}