author | Andrew Turner <andrew@FreeBSD.org> | 2016-09-16 15:19:31 +0000
---|---|---
committer | Andrew Turner <andrew@FreeBSD.org> | 2016-09-16 15:19:31 +0000
commit | 162378196a8674c75b4e93addb18ef42cb9b7737 (patch) |
tree | 35a65fc34832b001eb50dd981f9db6866797e750 | /reference/newlib
Diffstat (limited to 'reference/newlib')
-rw-r--r-- | reference/newlib/.deps/memcpy.Po | 1
-rw-r--r-- | reference/newlib/.deps/strcmp.Po | 1
-rw-r--r-- | reference/newlib/.deps/strcpy.Po | 1
-rw-r--r-- | reference/newlib/.deps/strlen.Po | 1
-rw-r--r-- | reference/newlib/arm_asm.h | 98
-rw-r--r-- | reference/newlib/memcpy.S | 423
-rw-r--r-- | reference/newlib/shim.h | 5
-rw-r--r-- | reference/newlib/strcmp.S | 777
-rw-r--r-- | reference/newlib/strcpy.c | 167
-rw-r--r-- | reference/newlib/strlen.c | 179
10 files changed, 1653 insertions, 0 deletions
diff --git a/reference/newlib/.deps/memcpy.Po b/reference/newlib/.deps/memcpy.Po new file mode 100644 index 000000000000..9ce06a81ea45 --- /dev/null +++ b/reference/newlib/.deps/memcpy.Po @@ -0,0 +1 @@ +# dummy diff --git a/reference/newlib/.deps/strcmp.Po b/reference/newlib/.deps/strcmp.Po new file mode 100644 index 000000000000..9ce06a81ea45 --- /dev/null +++ b/reference/newlib/.deps/strcmp.Po @@ -0,0 +1 @@ +# dummy diff --git a/reference/newlib/.deps/strcpy.Po b/reference/newlib/.deps/strcpy.Po new file mode 100644 index 000000000000..9ce06a81ea45 --- /dev/null +++ b/reference/newlib/.deps/strcpy.Po @@ -0,0 +1 @@ +# dummy diff --git a/reference/newlib/.deps/strlen.Po b/reference/newlib/.deps/strlen.Po new file mode 100644 index 000000000000..9ce06a81ea45 --- /dev/null +++ b/reference/newlib/.deps/strlen.Po @@ -0,0 +1 @@ +# dummy diff --git a/reference/newlib/arm_asm.h b/reference/newlib/arm_asm.h new file mode 100644 index 000000000000..5a63a8d9e217 --- /dev/null +++ b/reference/newlib/arm_asm.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2009 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ARM_ASM__H +#define ARM_ASM__H + +/* First define some macros that keep everything else sane. */ +#if defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__) +#define _ISA_ARM_7 +#endif + +#if defined (_ISA_ARM_7) || defined (__ARM_ARCH_6__) || \ + defined (__ARM_ARCH_6J__) || defined (__ARM_ARCH_6T2__) || \ + defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6ZK__) || \ + defined (__ARM_ARCH_6Z__) +#define _ISA_ARM_6 +#endif + +#if defined (_ISA_ARM_6) || defined (__ARM_ARCH_5__) || \ + defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5TE__) || \ + defined (__ARM_ARCH_5TEJ__) +#define _ISA_ARM_5 +#endif + +#if defined (_ISA_ARM_5) || defined (__ARM_ARCH_4T__) +#define _ISA_ARM_4T +#endif + +#if defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7__) || \ + defined (__ARM_ARCH_7EM__) +#define _ISA_THUMB_2 +#endif + +#if defined (_ISA_THUMB_2) || defined (__ARM_ARCH_6M__) +#define _ISA_THUMB_1 +#endif + + +/* Now some macros for common instruction sequences. 
*/ +#ifdef __ASSEMBLER__ +.macro RETURN cond= +#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1) + bx\cond lr +#else + mov\cond pc, lr +#endif +.endm + +.macro optpld base, offset=#0 +#if defined (_ISA_ARM_7) + pld [\base, \offset] +#endif +.endm + +#else +asm(".macro RETURN cond=\n\t" +#if defined (_ISA_ARM_4T) || defined (_ISA_THUMB_1) + "bx\\cond lr\n\t" +#else + "mov\\cond pc, lr\n\t" +#endif + ".endm" + ); + +asm(".macro optpld base, offset=#0\n\t" +#if defined (_ISA_ARM_7) + "pld [\\base, \\offset]\n\t" +#endif + ".endm" + ); +#endif + +#endif /* ARM_ASM__H */ diff --git a/reference/newlib/memcpy.S b/reference/newlib/memcpy.S new file mode 100644 index 000000000000..e408ed0e0b1c --- /dev/null +++ b/reference/newlib/memcpy.S @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2011 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \ + (!(defined (__ARM_ARCH_7A__)))) + + /* Do nothing here. See memcpy-stub.c in the same directory. */ + +#else + /* Prototype: void *memcpy (void *dst, const void *src, size_t count). */ + + /* Use the version of memcpy implemented using LDRD and STRD. + This version is tuned for Cortex-A15. + This might not be the best for other ARMv7-A CPUs, + but there is no predefine to distinguish between + different CPUs in the same architecture, + and this version is better than the plain memcpy provided in newlib. + + Therefore, we use this version for all ARMv7-A CPUS. */ + + /* To make the same code compile for both ARM and Thumb instruction + sets, switch to unified syntax at the beginning of this function. + However, by using the same code, we may be missing optimization + opportunities. For instance, in LDRD/STRD instructions, the first + destination register must be even and the second consecutive in + ARM state, but not in Thumb state. */ + + .syntax unified + +#if defined (__thumb__) + .thumb + .thumb_func +#endif + + .global memcpy + .type memcpy, %function +memcpy: + + /* Assumes that n >= 0, and dst, src are valid pointers. + If there is at least 8 bytes to copy, use LDRD/STRD. 
+ If src and dst are misaligned with different offsets, + first copy byte by byte until dst is aligned, + and then copy using LDRD/STRD and shift if needed. + When less than 8 left, copy a word and then byte by byte. */ + + /* Save registers (r0 holds the return value): + optimized push {r0, r4, r5, lr}. + To try and improve performance, stack layout changed, + i.e., not keeping the stack looking like users expect + (highest numbered register at highest address). */ + push {r0, lr} + strd r4, r5, [sp, #-8]! + + /* TODO: Add debug frame directives. + We don't need exception unwind directives, because the code below + does not throw any exceptions and does not call any other functions. + Generally, newlib functions like this lack debug information for + assembler source. */ + + /* Get copying of tiny blocks out of the way first. */ + /* Is there at least 4 bytes to copy? */ + subs r2, r2, #4 + blt copy_less_than_4 /* If n < 4. */ + + /* Check word alignment. */ + ands ip, r0, #3 /* ip = last 2 bits of dst. */ + bne dst_not_word_aligned /* If dst is not word-aligned. */ + + /* Get here if dst is word-aligned. */ + ands ip, r1, #3 /* ip = last 2 bits of src. */ + bne src_not_word_aligned /* If src is not word-aligned. */ +word_aligned: + /* Get here if source and dst both are word-aligned. + The number of bytes remaining to copy is r2+4. */ + + /* Is there is at least 64 bytes to copy? */ + subs r2, r2, #60 + blt copy_less_than_64 /* If r2 + 4 < 64. */ + + /* First, align the destination buffer to 8-bytes, + to make sure double loads and stores don't cross cache line boundary, + as they are then more expensive even if the data is in the cache + (require two load/store issue cycles instead of one). + If only one of the buffers is not 8-bytes aligned, + then it's more important to align dst than src, + because there is more penalty for stores + than loads that cross cacheline boundary. + This check and realignment are only worth doing + if there is a lot to copy. */ + + /* Get here if dst is word aligned, + i.e., the 2 least significant bits are 0. + If dst is not 2w aligned (i.e., the 3rd bit is not set in dst), + then copy 1 word (4 bytes). */ + ands r3, r0, #4 + beq 11f /* If dst already two-word aligned. */ + ldr r3, [r1], #4 + str r3, [r0], #4 + subs r2, r2, #4 + blt copy_less_than_64 + +11: + /* TODO: Align to cacheline (useful for PLD optimization). */ + + /* Every loop iteration copies 64 bytes. */ +1: + .irp offset, #0, #8, #16, #24, #32, #40, #48, #56 + ldrd r4, r5, [r1, \offset] + strd r4, r5, [r0, \offset] + .endr + + add r0, r0, #64 + add r1, r1, #64 + subs r2, r2, #64 + bge 1b /* If there is more to copy. */ + +copy_less_than_64: + + /* Get here if less than 64 bytes to copy, -64 <= r2 < 0. + Restore the count if there is more than 7 bytes to copy. */ + adds r2, r2, #56 + blt copy_less_than_8 + + /* Copy 8 bytes at a time. */ +2: + ldrd r4, r5, [r1], #8 + strd r4, r5, [r0], #8 + subs r2, r2, #8 + bge 2b /* If there is more to copy. */ + +copy_less_than_8: + + /* Get here if less than 8 bytes to copy, -8 <= r2 < 0. + Check if there is more to copy. */ + cmn r2, #8 + beq return /* If r2 + 8 == 0. */ + + /* Restore the count if there is more than 3 bytes to copy. */ + adds r2, r2, #4 + blt copy_less_than_4 + + /* Copy 4 bytes. */ + ldr r3, [r1], #4 + str r3, [r0], #4 + +copy_less_than_4: + /* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */ + + /* Restore the count, check if there is more to copy. */ + adds r2, r2, #4 + beq return /* If r2 == 0. 
*/ + + /* Get here with r2 is in {1,2,3}={01,10,11}. */ + /* Logical shift left r2, insert 0s, update flags. */ + lsls r2, r2, #31 + + /* Copy byte by byte. + Condition ne means the last bit of r2 is 0. + Condition cs means the second to last bit of r2 is set, + i.e., r2 is 1 or 3. */ + itt ne + ldrbne r3, [r1], #1 + strbne r3, [r0], #1 + + itttt cs + ldrbcs r4, [r1], #1 + ldrbcs r5, [r1] + strbcs r4, [r0], #1 + strbcs r5, [r0] + +return: + /* Restore registers: optimized pop {r0, r4, r5, pc} */ + ldrd r4, r5, [sp], #8 + pop {r0, pc} /* This is the only return point of memcpy. */ + +#ifndef __ARM_FEATURE_UNALIGNED + + /* The following assembly macro implements misaligned copy in software. + Assumes that dst is word aligned, src is at offset "pull" bits from + word, push = 32 - pull, and the number of bytes that remain to copy + is r2 + 4, r2 >= 0. */ + + /* In the code below, r2 is the number of bytes that remain to be + written. The number of bytes read is always larger, because we have + partial words in the shift queue. */ + + .macro miscopy pull push shiftleft shiftright + + /* Align src to the previous word boundary. */ + bic r1, r1, #3 + + /* Initialize the shift queue. */ + ldr r5, [r1], #4 /* Load a word from source. */ + + subs r2, r2, #4 + blt 6f /* Go to misaligned copy of less than 8 bytes. */ + + /* Get here if there is more than 8 bytes to copy. + The number of bytes to copy is r2+8, r2 >= 0. */ + + /* Save registers: push { r6, r7 }. + We need additional registers for LDRD and STRD, because in ARM state + the first destination register must be even and the second + consecutive. */ + strd r6, r7, [sp, #-8]! + + subs r2, r2, #56 + blt 4f /* Go to misaligned copy of less than 64 bytes. */ + +3: + /* Get here if there is more than 64 bytes to copy. + The number of bytes to copy is r2+64, r2 >= 0. */ + + /* Copy 64 bytes in every iteration. + Use a partial word from the shift queue. */ + .irp offset, #0, #8, #16, #24, #32, #40, #48, #56 + mov r6, r5, \shiftleft #\pull + ldrd r4, r5, [r1, \offset] + orr r6, r6, r4, \shiftright #\push + mov r7, r4, \shiftleft #\pull + orr r7, r7, r5, \shiftright #\push + strd r6, r7, [r0, \offset] + .endr + + add r1, r1, #64 + add r0, r0, #64 + subs r2, r2, #64 + bge 3b + +4: + /* Get here if there is less than 64 bytes to copy (-64 <= r2 < 0) + and they are misaligned. */ + + /* Restore the count if there is more than 7 bytes to copy. */ + adds r2, r2, #56 + + /* If less than 8 bytes to copy, + restore registers saved for this loop: optimized poplt { r6, r7 }. */ + itt lt + ldrdlt r6, r7, [sp], #8 + blt 6f /* Go to misaligned copy of less than 8 bytes. */ + +5: + /* Copy 8 bytes at a time. + Use a partial word from the shift queue. */ + mov r6, r5, \shiftleft #\pull + ldrd r4, r5, [r1], #8 + orr r6, r6, r4, \shiftright #\push + mov r7, r4, \shiftleft #\pull + orr r7, r7, r5, \shiftright #\push + strd r6, r7, [r0], #8 + + subs r2, r2, #8 + bge 5b /* If there is more to copy. */ + + /* Restore registers saved for this loop: optimized pop { r6, r7 }. */ + ldrd r6, r7, [sp], #8 + +6: + /* Get here if there less than 8 bytes to copy (-8 <= r2 < 0) + and they are misaligned. */ + + /* Check if there is more to copy. */ + cmn r2, #8 + beq return + + /* Check if there is less than 4 bytes to copy. */ + cmn r2, #4 + + itt lt + /* Restore src offset from word-align. */ + sublt r1, r1, #(\push / 8) + blt copy_less_than_4 + + /* Use a partial word from the shift queue. 
*/ + mov r3, r5, \shiftleft #\pull + /* Load a word from src, but without writeback + (this word is not fully written to dst). */ + ldr r5, [r1] + + /* Restore src offset from word-align. */ + add r1, r1, #(\pull / 8) + + /* Shift bytes to create one dst word and store it. */ + orr r3, r3, r5, \shiftright #\push + str r3, [r0], #4 + + /* Use single byte copying of the remaining bytes. */ + b copy_less_than_4 + + .endm + +#endif /* not __ARM_FEATURE_UNALIGNED */ + +dst_not_word_aligned: + + /* Get here when dst is not aligned and ip has the last 2 bits of dst, + i.e., ip is the offset of dst from word. + The number of bytes that remains to copy is r2 + 4, + i.e., there are at least 4 bytes to copy. + Write a partial word (0 to 3 bytes), such that dst becomes + word-aligned. */ + + /* If dst is at ip bytes offset from a word (with 0 < ip < 4), + then there are (4 - ip) bytes to fill up to align dst to the next + word. */ + rsb ip, ip, #4 /* ip = #4 - ip. */ + cmp ip, #2 + + /* Copy byte by byte with conditionals. */ + itt gt + ldrbgt r3, [r1], #1 + strbgt r3, [r0], #1 + + itt ge + ldrbge r4, [r1], #1 + strbge r4, [r0], #1 + + ldrb lr, [r1], #1 + strb lr, [r0], #1 + + /* Update the count. + ip holds the number of bytes we have just copied. */ + subs r2, r2, ip /* r2 = r2 - ip. */ + blt copy_less_than_4 /* If r2 < ip. */ + + /* Get here if there are more than 4 bytes to copy. + Check if src is aligned. If beforehand src and dst were not word + aligned but congruent (same offset), then now they are both + word-aligned, and we can copy the rest efficiently (without + shifting). */ + ands ip, r1, #3 /* ip = last 2 bits of src. */ + beq word_aligned /* If r1 is word-aligned. */ + +src_not_word_aligned: + /* Get here when src is not word-aligned, but dst is word-aligned. + The number of bytes that remains to copy is r2+4. */ + +#ifdef __ARM_FEATURE_UNALIGNED + /* Copy word by word using LDR when alignment can be done in hardware, + i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */ + subs r2, r2, #60 + blt 8f + +7: + /* Copy 64 bytes in every loop iteration. */ + .irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60 + ldr r3, [r1, \offset] + str r3, [r0, \offset] + .endr + + add r0, r0, #64 + add r1, r1, #64 + subs r2, r2, #64 + bge 7b + +8: + /* Get here if less than 64 bytes to copy, -64 <= r2 < 0. + Check if there is more than 3 bytes to copy. */ + adds r2, r2, #60 + blt copy_less_than_4 + +9: + /* Get here if there is less than 64 but at least 4 bytes to copy, + where the number of bytes to copy is r2+4. */ + ldr r3, [r1], #4 + str r3, [r0], #4 + subs r2, r2, #4 + bge 9b + + b copy_less_than_4 + +#else /* not __ARM_FEATURE_UNALIGNED */ + + /* ip has last 2 bits of src, + i.e., ip is the offset of src from word, and ip > 0. + Compute shifts needed to copy from src to dst. */ + cmp ip, #2 + beq miscopy_16_16 /* If ip == 2. */ + bge miscopy_24_8 /* If ip == 3. */ + + /* Get here if ip == 1. */ + + /* Endian independent macros for shifting bytes within registers. 
*/ + +#ifndef __ARMEB__ +miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsr shiftright=lsl +miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsr shiftright=lsl +miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsr shiftright=lsl +#else /* not __ARMEB__ */ +miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsl shiftright=lsr +miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsl shiftright=lsr +miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsl shiftright=lsr +#endif /* not __ARMEB__ */ + +#endif /* not __ARM_FEATURE_UNALIGNED */ + +#endif /* memcpy */ diff --git a/reference/newlib/shim.h b/reference/newlib/shim.h new file mode 100644 index 000000000000..e265e9737f85 --- /dev/null +++ b/reference/newlib/shim.h @@ -0,0 +1,5 @@ +/* Basic macros that newlib uses */ +#define _PTR void * +#define _DEFUN(_name, _args, _def) _name (_def) +#define _CONST const +#define _AND , diff --git a/reference/newlib/strcmp.S b/reference/newlib/strcmp.S new file mode 100644 index 000000000000..6346f068279f --- /dev/null +++ b/reference/newlib/strcmp.S @@ -0,0 +1,777 @@ +/* + * Copyright (c) 2012 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "arm_asm.h" + +#ifdef __ARMEB__ +#define S2LOMEM lsl +#define S2LOMEMEQ lsleq +#define S2HIMEM lsr +#define MSB 0x000000ff +#define LSB 0xff000000 +#define BYTE0_OFFSET 24 +#define BYTE1_OFFSET 16 +#define BYTE2_OFFSET 8 +#define BYTE3_OFFSET 0 +#else /* not __ARMEB__ */ +#define S2LOMEM lsr +#define S2LOMEMEQ lsreq +#define S2HIMEM lsl +#define BYTE0_OFFSET 0 +#define BYTE1_OFFSET 8 +#define BYTE2_OFFSET 16 +#define BYTE3_OFFSET 24 +#define MSB 0xff000000 +#define LSB 0x000000ff +#endif /* not __ARMEB__ */ + +.syntax unified + +#if defined (__thumb__) + .thumb + .thumb_func +#endif + .global strcmp + .type strcmp, %function +strcmp: + +#if (defined (__thumb__) && !defined (__thumb2__)) +1: + ldrb r2, [r0] + ldrb r3, [r1] + adds r0, r0, #1 + adds r1, r1, #1 + cmp r2, #0 + beq 2f + cmp r2, r3 + beq 1b +2: + subs r0, r2, r3 + bx lr +#elif (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)) +1: + ldrb r2, [r0], #1 + ldrb r3, [r1], #1 + cmp r2, #1 + it cs + cmpcs r2, r3 + beq 1b + subs r0, r2, r3 + RETURN + + +#elif (defined (_ISA_THUMB_2) || defined (_ISA_ARM_6)) + /* Use LDRD whenever possible. */ + +/* The main thing to look out for when comparing large blocks is that + the loads do not cross a page boundary when loading past the index + of the byte with the first difference or the first string-terminator. + + For example, if the strings are identical and the string-terminator + is at index k, byte by byte comparison will not load beyond address + s1+k and s2+k; word by word comparison may load up to 3 bytes beyond + k; double word - up to 7 bytes. If the load of these bytes crosses + a page boundary, it might cause a memory fault (if the page is not mapped) + that would not have happened in byte by byte comparison. + + If an address is (double) word aligned, then a load of a (double) word + from that address will not cross a page boundary. + Therefore, the algorithm below considers word and double-word alignment + of strings separately. */ + +/* High-level description of the algorithm. + + * The fast path: if both strings are double-word aligned, + use LDRD to load two words from each string in every loop iteration. + * If the strings have the same offset from a word boundary, + use LDRB to load and compare byte by byte until + the first string is aligned to a word boundary (at most 3 bytes). + This is optimized for quick return on short unaligned strings. + * If the strings have the same offset from a double-word boundary, + use LDRD to load two words from each string in every loop iteration, as in the fast path. + * If the strings do not have the same offset from a double-word boundary, + load a word from the second string before the loop to initialize the queue. + Use LDRD to load two words from every string in every loop iteration. + Inside the loop, load the second word from the second string only after comparing + the first word, using the queued value, to guarantee safety across page boundaries. + * If the strings do not have the same offset from a word boundary, + use LDR and a shift queue. Order of loads and comparisons matters, + similarly to the previous case. + + * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value. + * The only difference between ARM and Thumb modes is the use of CBZ instruction. + * The only difference between big and little endian is the use of REV in little endian + to compute the return value, instead of MOV. + * No preload. [TODO.] 
+*/ + + .macro m_cbz reg label +#ifdef __thumb2__ + cbz \reg, \label +#else /* not defined __thumb2__ */ + cmp \reg, #0 + beq \label +#endif /* not defined __thumb2__ */ + .endm /* m_cbz */ + + .macro m_cbnz reg label +#ifdef __thumb2__ + cbnz \reg, \label +#else /* not defined __thumb2__ */ + cmp \reg, #0 + bne \label +#endif /* not defined __thumb2__ */ + .endm /* m_cbnz */ + + .macro init + /* Macro to save temporary registers and prepare magic values. */ + subs sp, sp, #16 + strd r4, r5, [sp, #8] + strd r6, r7, [sp] + mvn r6, #0 /* all F */ + mov r7, #0 /* all 0 */ + .endm /* init */ + + .macro magic_compare_and_branch w1 w2 label + /* Macro to compare registers w1 and w2 and conditionally branch to label. */ + cmp \w1, \w2 /* Are w1 and w2 the same? */ + magic_find_zero_bytes \w1 + it eq + cmpeq ip, #0 /* Is there a zero byte in w1? */ + bne \label + .endm /* magic_compare_and_branch */ + + .macro magic_find_zero_bytes w1 + /* Macro to find all-zero bytes in w1, result is in ip. */ +#if (defined (__ARM_FEATURE_DSP)) + uadd8 ip, \w1, r6 + sel ip, r7, r6 +#else /* not defined (__ARM_FEATURE_DSP) */ + /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors. + Coincidently, these processors only have Thumb-2 mode, where we can use the + the (large) magic constant available directly as an immediate in instructions. + Note that we cannot use the magic constant in ARM mode, where we need + to create the constant in a register. */ + sub ip, \w1, #0x01010101 + bic ip, ip, \w1 + and ip, ip, #0x80808080 +#endif /* not defined (__ARM_FEATURE_DSP) */ + .endm /* magic_find_zero_bytes */ + + .macro setup_return w1 w2 +#ifdef __ARMEB__ + mov r1, \w1 + mov r2, \w2 +#else /* not __ARMEB__ */ + rev r1, \w1 + rev r2, \w2 +#endif /* not __ARMEB__ */ + .endm /* setup_return */ + + /* + optpld r0, #0 + optpld r1, #0 + */ + + /* Are both strings double-word aligned? */ + orr ip, r0, r1 + tst ip, #7 + bne do_align + + /* Fast path. */ + init + +doubleword_aligned: + + /* Get here when the strings to compare are double-word aligned. */ + /* Compare two words in every iteration. */ + .p2align 2 +2: + /* + optpld r0, #16 + optpld r1, #16 + */ + + /* Load the next double-word from each string. */ + ldrd r2, r3, [r0], #8 + ldrd r4, r5, [r1], #8 + + magic_compare_and_branch w1=r2, w2=r4, label=return_24 + magic_compare_and_branch w1=r3, w2=r5, label=return_35 + b 2b + +do_align: + /* Is the first string word-aligned? */ + ands ip, r0, #3 + beq word_aligned_r0 + + /* Fast compare byte by byte until the first string is word-aligned. */ + /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes + to read until the next word boudnary is 4-ip. */ + bic r0, r0, #3 + ldr r2, [r0], #4 + lsls ip, ip, #31 + beq byte2 + bcs byte3 + +byte1: + ldrb ip, [r1], #1 + uxtb r3, r2, ror #BYTE1_OFFSET + subs ip, r3, ip + bne fast_return + m_cbz reg=r3, label=fast_return + +byte2: + ldrb ip, [r1], #1 + uxtb r3, r2, ror #BYTE2_OFFSET + subs ip, r3, ip + bne fast_return + m_cbz reg=r3, label=fast_return + +byte3: + ldrb ip, [r1], #1 + uxtb r3, r2, ror #BYTE3_OFFSET + subs ip, r3, ip + bne fast_return + m_cbnz reg=r3, label=word_aligned_r0 + +fast_return: + mov r0, ip + bx lr + +word_aligned_r0: + init + /* The first string is word-aligned. */ + /* Is the second string word-aligned? */ + ands ip, r1, #3 + bne strcmp_unaligned + +word_aligned: + /* The strings are word-aligned. */ + /* Is the first string double-word aligned? 
*/ + tst r0, #4 + beq doubleword_aligned_r0 + + /* If r0 is not double-word aligned yet, align it by loading + and comparing the next word from each string. */ + ldr r2, [r0], #4 + ldr r4, [r1], #4 + magic_compare_and_branch w1=r2 w2=r4 label=return_24 + +doubleword_aligned_r0: + /* Get here when r0 is double-word aligned. */ + /* Is r1 doubleword_aligned? */ + tst r1, #4 + beq doubleword_aligned + + /* Get here when the strings to compare are word-aligned, + r0 is double-word aligned, but r1 is not double-word aligned. */ + + /* Initialize the queue. */ + ldr r5, [r1], #4 + + /* Compare two words in every iteration. */ + .p2align 2 +3: + /* + optpld r0, #16 + optpld r1, #16 + */ + + /* Load the next double-word from each string and compare. */ + ldrd r2, r3, [r0], #8 + magic_compare_and_branch w1=r2 w2=r5 label=return_25 + ldrd r4, r5, [r1], #8 + magic_compare_and_branch w1=r3 w2=r4 label=return_34 + b 3b + + .macro miscmp_word offsetlo offsethi + /* Macro to compare misaligned strings. */ + /* r0, r1 are word-aligned, and at least one of the strings + is not double-word aligned. */ + /* Compare one word in every loop iteration. */ + /* OFFSETLO is the original bit-offset of r1 from a word-boundary, + OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */ + + /* Initialize the shift queue. */ + ldr r5, [r1], #4 + + /* Compare one word from each string in every loop iteration. */ + .p2align 2 +7: + ldr r3, [r0], #4 + S2LOMEM r5, r5, #\offsetlo + magic_find_zero_bytes w1=r3 + cmp r7, ip, S2HIMEM #\offsetlo + and r2, r3, r6, S2LOMEM #\offsetlo + it eq + cmpeq r2, r5 + bne return_25 + ldr r5, [r1], #4 + cmp ip, #0 + eor r3, r2, r3 + S2HIMEM r2, r5, #\offsethi + it eq + cmpeq r3, r2 + bne return_32 + b 7b + .endm /* miscmp_word */ + +strcmp_unaligned: + /* r0 is word-aligned, r1 is at offset ip from a word. */ + /* Align r1 to the (previous) word-boundary. */ + bic r1, r1, #3 + + /* Unaligned comparison word by word using LDRs. */ + cmp ip, #2 + beq miscmp_word_16 /* If ip == 2. */ + bge miscmp_word_24 /* If ip == 3. */ + miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */ +miscmp_word_16: miscmp_word offsetlo=16 offsethi=16 +miscmp_word_24: miscmp_word offsetlo=24 offsethi=8 + + +return_32: + setup_return w1=r3, w2=r2 + b do_return +return_34: + setup_return w1=r3, w2=r4 + b do_return +return_25: + setup_return w1=r2, w2=r5 + b do_return +return_35: + setup_return w1=r3, w2=r5 + b do_return +return_24: + setup_return w1=r2, w2=r4 + +do_return: + +#ifdef __ARMEB__ + mov r0, ip +#else /* not __ARMEB__ */ + rev r0, ip +#endif /* not __ARMEB__ */ + + /* Restore temporaries early, before computing the return value. */ + ldrd r6, r7, [sp] + ldrd r4, r5, [sp, #8] + adds sp, sp, #16 + + /* There is a zero or a different byte between r1 and r2. */ + /* r0 contains a mask of all-zero bytes in r1. */ + /* Using r0 and not ip here because cbz requires low register. */ + m_cbz reg=r0, label=compute_return_value + clz r0, r0 + /* r0 contains the number of bits on the left of the first all-zero byte in r1. */ + rsb r0, r0, #24 + /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */ + lsr r1, r1, r0 + lsr r2, r2, r0 + +compute_return_value: + subs r0, r1, r2 + bx lr + + +#else /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) + defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || + (defined (__thumb__) && !defined (__thumb2__))) */ + + /* Use LDR whenever possible. 
*/ + +#ifdef __thumb2__ +#define magic1(REG) 0x01010101 +#define magic2(REG) 0x80808080 +#else +#define magic1(REG) REG +#define magic2(REG) REG, lsl #7 +#endif + + optpld r0 + optpld r1 + eor r2, r0, r1 + tst r2, #3 + /* Strings not at same byte offset from a word boundary. */ + bne strcmp_unaligned + ands r2, r0, #3 + bic r0, r0, #3 + bic r1, r1, #3 + ldr ip, [r0], #4 + it eq + ldreq r3, [r1], #4 + beq 1f + /* Although s1 and s2 have identical initial alignment, they are + not currently word aligned. Rather than comparing bytes, + make sure that any bytes fetched from before the addressed + bytes are forced to 0xff. Then they will always compare + equal. */ + eor r2, r2, #3 + lsl r2, r2, #3 + mvn r3, MSB + S2LOMEM r2, r3, r2 + ldr r3, [r1], #4 + orr ip, ip, r2 + orr r3, r3, r2 +1: +#ifndef __thumb2__ + /* Load the 'magic' constant 0x01010101. */ + str r4, [sp, #-4]! + mov r4, #1 + orr r4, r4, r4, lsl #8 + orr r4, r4, r4, lsl #16 +#endif + .p2align 2 +4: + optpld r0, #8 + optpld r1, #8 + sub r2, ip, magic1(r4) + cmp ip, r3 + itttt eq + /* check for any zero bytes in first word */ + biceq r2, r2, ip + tsteq r2, magic2(r4) + ldreq ip, [r0], #4 + ldreq r3, [r1], #4 + beq 4b +2: + /* There's a zero or a different byte in the word */ + S2HIMEM r0, ip, #24 + S2LOMEM ip, ip, #8 + cmp r0, #1 + it cs + cmpcs r0, r3, S2HIMEM #24 + it eq + S2LOMEMEQ r3, r3, #8 + beq 2b + /* On a big-endian machine, r0 contains the desired byte in bits + 0-7; on a little-endian machine they are in bits 24-31. In + both cases the other bits in r0 are all zero. For r3 the + interesting byte is at the other end of the word, but the + other bits are not necessarily zero. We need a signed result + representing the differnece in the unsigned bytes, so for the + little-endian case we can't just shift the interesting bits + up. */ +#ifdef __ARMEB__ + sub r0, r0, r3, lsr #24 +#else + and r3, r3, #255 +#ifdef __thumb2__ + /* No RSB instruction in Thumb2 */ + lsr r0, r0, #24 + sub r0, r0, r3 +#else + rsb r0, r3, r0, lsr #24 +#endif +#endif +#ifndef __thumb2__ + ldr r4, [sp], #4 +#endif + RETURN + + +strcmp_unaligned: + +#if 0 + /* The assembly code below is based on the following alogrithm. 
*/ +#ifdef __ARMEB__ +#define RSHIFT << +#define LSHIFT >> +#else +#define RSHIFT >> +#define LSHIFT << +#endif + +#define body(shift) \ + mask = 0xffffffffU RSHIFT shift; \ + w1 = *wp1++; \ + w2 = *wp2++; \ + do \ + { \ + t1 = w1 & mask; \ + if (__builtin_expect(t1 != w2 RSHIFT shift, 0)) \ + { \ + w2 RSHIFT= shift; \ + break; \ + } \ + if (__builtin_expect(((w1 - b1) & ~w1) & (b1 << 7), 0)) \ + { \ + /* See comment in assembler below re syndrome on big-endian */\ + if ((((w1 - b1) & ~w1) & (b1 << 7)) & mask) \ + w2 RSHIFT= shift; \ + else \ + { \ + w2 = *wp2; \ + t1 = w1 RSHIFT (32 - shift); \ + w2 = (w2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \ + } \ + break; \ + } \ + w2 = *wp2++; \ + t1 ^= w1; \ + if (__builtin_expect(t1 != w2 LSHIFT (32 - shift), 0)) \ + { \ + t1 = w1 >> (32 - shift); \ + w2 = (w2 << (32 - shift)) RSHIFT (32 - shift); \ + break; \ + } \ + w1 = *wp1++; \ + } while (1) + + const unsigned* wp1; + const unsigned* wp2; + unsigned w1, w2; + unsigned mask; + unsigned shift; + unsigned b1 = 0x01010101; + char c1, c2; + unsigned t1; + + while (((unsigned) s1) & 3) + { + c1 = *s1++; + c2 = *s2++; + if (c1 == 0 || c1 != c2) + return c1 - (int)c2; + } + wp1 = (unsigned*) (((unsigned)s1) & ~3); + wp2 = (unsigned*) (((unsigned)s2) & ~3); + t1 = ((unsigned) s2) & 3; + if (t1 == 1) + { + body(8); + } + else if (t1 == 2) + { + body(16); + } + else + { + body (24); + } + + do + { +#ifdef __ARMEB__ + c1 = (char) t1 >> 24; + c2 = (char) w2 >> 24; +#else /* not __ARMEB__ */ + c1 = (char) t1; + c2 = (char) w2; +#endif /* not __ARMEB__ */ + t1 RSHIFT= 8; + w2 RSHIFT= 8; + } while (c1 != 0 && c1 == c2); + return c1 - c2; +#endif /* 0 */ + + + wp1 .req r0 + wp2 .req r1 + b1 .req r2 + w1 .req r4 + w2 .req r5 + t1 .req ip + @ r3 is scratch + + /* First of all, compare bytes until wp1(sp1) is word-aligned. */ +1: + tst wp1, #3 + beq 2f + ldrb r2, [wp1], #1 + ldrb r3, [wp2], #1 + cmp r2, #1 + it cs + cmpcs r2, r3 + beq 1b + sub r0, r2, r3 + RETURN + +2: + str r5, [sp, #-4]! + str r4, [sp, #-4]! 
+ //stmfd sp!, {r4, r5} + mov b1, #1 + orr b1, b1, b1, lsl #8 + orr b1, b1, b1, lsl #16 + + and t1, wp2, #3 + bic wp2, wp2, #3 + ldr w1, [wp1], #4 + ldr w2, [wp2], #4 + cmp t1, #2 + beq 2f + bhi 3f + + /* Critical inner Loop: Block with 3 bytes initial overlap */ + .p2align 2 +1: + bic t1, w1, MSB + cmp t1, w2, S2LOMEM #8 + sub r3, w1, b1 + bic r3, r3, w1 + bne 4f + ands r3, r3, b1, lsl #7 + it eq + ldreq w2, [wp2], #4 + bne 5f + eor t1, t1, w1 + cmp t1, w2, S2HIMEM #24 + bne 6f + ldr w1, [wp1], #4 + b 1b +4: + S2LOMEM w2, w2, #8 + b 8f + +5: +#ifdef __ARMEB__ + /* The syndrome value may contain false ones if the string ends + with the bytes 0x01 0x00 */ + tst w1, #0xff000000 + itt ne + tstne w1, #0x00ff0000 + tstne w1, #0x0000ff00 + beq 7f +#else + bics r3, r3, #0xff000000 + bne 7f +#endif + ldrb w2, [wp2] + S2LOMEM t1, w1, #24 +#ifdef __ARMEB__ + lsl w2, w2, #24 +#endif + b 8f + +6: + S2LOMEM t1, w1, #24 + and w2, w2, LSB + b 8f + + /* Critical inner Loop: Block with 2 bytes initial overlap */ + .p2align 2 +2: + S2HIMEM t1, w1, #16 + sub r3, w1, b1 + S2LOMEM t1, t1, #16 + bic r3, r3, w1 + cmp t1, w2, S2LOMEM #16 + bne 4f + ands r3, r3, b1, lsl #7 + it eq + ldreq w2, [wp2], #4 + bne 5f + eor t1, t1, w1 + cmp t1, w2, S2HIMEM #16 + bne 6f + ldr w1, [wp1], #4 + b 2b + +5: +#ifdef __ARMEB__ + /* The syndrome value may contain false ones if the string ends + with the bytes 0x01 0x00 */ + tst w1, #0xff000000 + it ne + tstne w1, #0x00ff0000 + beq 7f +#else + lsls r3, r3, #16 + bne 7f +#endif + ldrh w2, [wp2] + S2LOMEM t1, w1, #16 +#ifdef __ARMEB__ + lsl w2, w2, #16 +#endif + b 8f + +6: + S2HIMEM w2, w2, #16 + S2LOMEM t1, w1, #16 +4: + S2LOMEM w2, w2, #16 + b 8f + + /* Critical inner Loop: Block with 1 byte initial overlap */ + .p2align 2 +3: + and t1, w1, LSB + cmp t1, w2, S2LOMEM #24 + sub r3, w1, b1 + bic r3, r3, w1 + bne 4f + ands r3, r3, b1, lsl #7 + it eq + ldreq w2, [wp2], #4 + bne 5f + eor t1, t1, w1 + cmp t1, w2, S2HIMEM #8 + bne 6f + ldr w1, [wp1], #4 + b 3b +4: + S2LOMEM w2, w2, #24 + b 8f +5: + /* The syndrome value may contain false ones if the string ends + with the bytes 0x01 0x00 */ + tst w1, LSB + beq 7f + ldr w2, [wp2], #4 +6: + S2LOMEM t1, w1, #8 + bic w2, w2, MSB + b 8f +7: + mov r0, #0 + //ldmfd sp!, {r4, r5} + ldr r4, [sp], #4 + ldr r5, [sp], #4 + RETURN +8: + and r2, t1, LSB + and r0, w2, LSB + cmp r0, #1 + it cs + cmpcs r0, r2 + itt eq + S2LOMEMEQ t1, t1, #8 + S2LOMEMEQ w2, w2, #8 + beq 8b + sub r0, r2, r0 + //ldmfd sp!, {r4, r5} + ldr r4, [sp], #4 + ldr r5, [sp], #4 + RETURN + +#endif /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) + defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || + (defined (__thumb__) && !defined (__thumb2__))) */ diff --git a/reference/newlib/strcpy.c b/reference/newlib/strcpy.c new file mode 100644 index 000000000000..93426d42ad41 --- /dev/null +++ b/reference/newlib/strcpy.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2008 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "arm_asm.h" + +#ifdef __thumb2__ +#define magic1(REG) "#0x01010101" +#define magic2(REG) "#0x80808080" +#else +#define magic1(REG) #REG +#define magic2(REG) #REG ", lsl #7" +#endif + +char* __attribute__((naked)) +strcpy (char* dst, const char* src) +{ + asm ( +#if !(defined(__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \ + (defined (__thumb__) && !defined (__thumb2__))) + "optpld r1\n\t" + "eor r2, r0, r1\n\t" + "mov ip, r0\n\t" + "tst r2, #3\n\t" + "bne 4f\n\t" + "tst r1, #3\n\t" + "bne 3f\n" + "5:\n\t" +#ifndef __thumb2__ + "str r5, [sp, #-4]!\n\t" + "mov r5, #0x01\n\t" + "orr r5, r5, r5, lsl #8\n\t" + "orr r5, r5, r5, lsl #16\n\t" +#endif + + "str r4, [sp, #-4]!\n\t" + "tst r1, #4\n\t" + "ldr r3, [r1], #4\n\t" + "beq 2f\n\t" + "sub r2, r3, "magic1(r5)"\n\t" + "bics r2, r2, r3\n\t" + "tst r2, "magic2(r5)"\n\t" + "itt eq\n\t" + "streq r3, [ip], #4\n\t" + "ldreq r3, [r1], #4\n" + "bne 1f\n\t" + /* Inner loop. We now know that r1 is 64-bit aligned, so we + can safely fetch up to two words. This allows us to avoid + load stalls. */ + ".p2align 2\n" + "2:\n\t" + "optpld r1, #8\n\t" + "ldr r4, [r1], #4\n\t" + "sub r2, r3, "magic1(r5)"\n\t" + "bics r2, r2, r3\n\t" + "tst r2, "magic2(r5)"\n\t" + "sub r2, r4, "magic1(r5)"\n\t" + "bne 1f\n\t" + "str r3, [ip], #4\n\t" + "bics r2, r2, r4\n\t" + "tst r2, "magic2(r5)"\n\t" + "itt eq\n\t" + "ldreq r3, [r1], #4\n\t" + "streq r4, [ip], #4\n\t" + "beq 2b\n\t" + "mov r3, r4\n" + "1:\n\t" +#ifdef __ARMEB__ + "rors r3, r3, #24\n\t" +#endif + "strb r3, [ip], #1\n\t" + "tst r3, #0xff\n\t" +#ifdef __ARMEL__ + "ror r3, r3, #8\n\t" +#endif + "bne 1b\n\t" + "ldr r4, [sp], #4\n\t" +#ifndef __thumb2__ + "ldr r5, [sp], #4\n\t" +#endif + "RETURN\n" + + /* Strings have the same offset from word alignment, but it's + not zero. */ + "3:\n\t" + "tst r1, #1\n\t" + "beq 1f\n\t" + "ldrb r2, [r1], #1\n\t" + "strb r2, [ip], #1\n\t" + "cmp r2, #0\n\t" + "it eq\n" + "RETURN eq\n" + "1:\n\t" + "tst r1, #2\n\t" + "beq 5b\n\t" + "ldrh r2, [r1], #2\n\t" +#ifdef __ARMEB__ + "tst r2, #0xff00\n\t" + "iteet ne\n\t" + "strneh r2, [ip], #2\n\t" + "lsreq r2, r2, #8\n\t" + "streqb r2, [ip]\n\t" + "tstne r2, #0xff\n\t" +#else + "tst r2, #0xff\n\t" + "itet ne\n\t" + "strneh r2, [ip], #2\n\t" + "streqb r2, [ip]\n\t" + "tstne r2, #0xff00\n\t" +#endif + "bne 5b\n\t" + "RETURN\n" + + /* src and dst do not have a common word-alignement. Fall back to + byte copying. 
*/ + "4:\n\t" + "ldrb r2, [r1], #1\n\t" + "strb r2, [ip], #1\n\t" + "cmp r2, #0\n\t" + "bne 4b\n\t" + "RETURN" + +#elif !defined (__thumb__) || defined (__thumb2__) + "mov r3, r0\n\t" + "1:\n\t" + "ldrb r2, [r1], #1\n\t" + "strb r2, [r3], #1\n\t" + "cmp r2, #0\n\t" + "bne 1b\n\t" + "RETURN" +#else + "mov r3, r0\n\t" + "1:\n\t" + "ldrb r2, [r1]\n\t" + "add r1, r1, #1\n\t" + "strb r2, [r3]\n\t" + "add r3, r3, #1\n\t" + "cmp r2, #0\n\t" + "bne 1b\n\t" + "RETURN" +#endif + ); +} diff --git a/reference/newlib/strlen.c b/reference/newlib/strlen.c new file mode 100644 index 000000000000..93ec8bb1bca4 --- /dev/null +++ b/reference/newlib/strlen.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2008 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "arm_asm.h" +#include <limits.h> +#include <stddef.h> + +#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) || \ + (defined (__thumb__) && !defined (__thumb2__)) + +size_t +strlen (const char* str) +{ + int scratch; +#if defined (__thumb__) && !defined (__thumb2__) + size_t len; + asm ("mov %0, #0\n" + "1:\n\t" + "ldrb %1, [%2, %0]\n\t" + "add %0, %0, #1\n\t" + "cmp %1, #0\n\t" + "bne 1b" + : "=&r" (len), "=&r" (scratch) : "r" (str) : "memory", "cc"); + return len - 1; +#else + const char* end; + asm ("1:\n\t" + "ldrb %1, [%0], #1\n\t" + "cmp %1, #0\n\t" + "bne 1b" + : "=&r" (end), "=&r" (scratch) : "0" (str) : "memory", "cc"); + return end - str - 1; +#endif +} +#else + +size_t __attribute__((naked)) +strlen (const char* str) +{ + asm ("len .req r0\n\t" + "data .req r3\n\t" + "addr .req r1\n\t" + + "optpld r0\n\t" + /* Word-align address */ + "bic addr, r0, #3\n\t" + /* Get adjustment for start ... */ + "ands len, r0, #3\n\t" + "neg len, len\n\t" + /* First word of data */ + "ldr data, [addr], #4\n\t" + /* Ensure bytes preceeding start ... */ + "add ip, len, #4\n\t" + "mov ip, ip, asl #3\n\t" + "mvn r2, #0\n\t" + /* ... 
are masked out */ +#ifdef __thumb__ + "itt ne\n\t" +# ifdef __ARMEB__ + "lslne r2, ip\n\t" +# else + "lsrne r2, ip\n\t" +# endif + "orrne data, data, r2\n\t" +#else + "it ne\n\t" +# ifdef __ARMEB__ + "orrne data, data, r2, lsl ip\n\t" +# else + "orrne data, data, r2, lsr ip\n\t" +# endif +#endif + /* Magic const 0x01010101 */ +#ifdef _ISA_ARM_7 + "movw ip, #0x101\n\t" +#else + "mov ip, #0x1\n\t" + "orr ip, ip, ip, lsl #8\n\t" +#endif + "orr ip, ip, ip, lsl #16\n" + + /* This is the main loop. We subtract one from each byte in + the word: the sign bit changes iff the byte was zero or + 0x80 -- we eliminate the latter case by anding the result + with the 1-s complement of the data. */ + "1:\n\t" + /* test (data - 0x01010101) */ + "sub r2, data, ip\n\t" + /* ... & ~data */ + "bic r2, r2, data\n\t" + /* ... & 0x80808080 == 0? */ + "ands r2, r2, ip, lsl #7\n\t" +#ifdef _ISA_ARM_7 + /* yes, get more data... */ + "itt eq\n\t" + "ldreq data, [addr], #4\n\t" + /* and 4 more bytes */ + "addeq len, len, #4\n\t" + /* If we have PLD, then unroll the loop a bit. */ + "optpld addr, #8\n\t" + /* test (data - 0x01010101) */ + "ittt eq\n\t" + "subeq r2, data, ip\n\t" + /* ... & ~data */ + "biceq r2, r2, data\n\t" + /* ... & 0x80808080 == 0? */ + "andeqs r2, r2, ip, lsl #7\n\t" +#endif + "itt eq\n\t" + /* yes, get more data... */ + "ldreq data, [addr], #4\n\t" + /* and 4 more bytes */ + "addeq len, len, #4\n\t" + "beq 1b\n\t" +#ifdef __ARMEB__ + "tst data, #0xff000000\n\t" + "itttt ne\n\t" + "addne len, len, #1\n\t" + "tstne data, #0xff0000\n\t" + "addne len, len, #1\n\t" + "tstne data, #0xff00\n\t" + "it ne\n\t" + "addne len, len, #1\n\t" +#else +# ifdef _ISA_ARM_5 + /* R2 is the residual sign bits from the above test. All we + need to do now is establish the position of the first zero + byte... */ + /* Little-endian is harder, we need the number of trailing + zeros / 8 */ +# ifdef _ISA_ARM_7 + "rbit r2, r2\n\t" + "clz r2, r2\n\t" +# else + "rsb r1, r2, #0\n\t" + "and r2, r2, r1\n\t" + "clz r2, r2\n\t" + "rsb r2, r2, #31\n\t" +# endif + "add len, len, r2, lsr #3\n\t" +# else /* No CLZ instruction */ + "tst data, #0xff\n\t" + "itttt ne\n\t" + "addne len, len, #1\n\t" + "tstne data, #0xff00\n\t" + "addne len, len, #1\n\t" + "tstne data, #0xff0000\n\t" + "it ne\n\t" + "addne len, len, #1\n\t" +# endif +#endif + "RETURN"); +} +#endif |
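
The strcmp.S, strcpy.c and strlen.c routines added in this commit all lean on the same word-at-a-time NUL detection: subtract 0x01010101 from a loaded word, clear the bits of the original word, and mask with 0x80808080 (in the sources this is the `sub` / `bic` / `ands ..., ip, lsl #7` sequence, or UADD8/SEL where `__ARM_FEATURE_DSP` is available). The following C sketch only illustrates that arithmetic; the helper name and the test values are illustrative and are not part of the commit.

```c
#include <assert.h>
#include <stdint.h>

/* Hypothetical helper (not in the commit): nonzero iff the 32-bit word w
 * contains at least one zero byte.  It mirrors the
 *   sub r2, data, ip ; bic r2, r2, data ; ands r2, r2, ip, lsl #7
 * sequence in strlen.c and the non-DSP branch of magic_find_zero_bytes
 * in strcmp.S, where ip holds 0x01010101 and "ip, lsl #7" is 0x80808080. */
static uint32_t has_zero_byte(uint32_t w)
{
    return (w - 0x01010101u) & ~w & 0x80808080u;
}

int main(void)
{
    assert(!has_zero_byte(0x41424344u)); /* "ABCD": no NUL byte            */
    assert( has_zero_byte(0x41420044u)); /* a NUL in one byte is detected  */
    assert(!has_zero_byte(0x80818283u)); /* bytes >= 0x80 alone never
                                            produce a false positive      */
    /* Once a zero byte is present, higher syndrome bytes may also be set
       (the "false ones" the strcmp.S comments warn about), so only the
       lowest set syndrome byte pinpoints the terminator.                 */
    return 0;
}
```

In the word-aligned strcmp path this syndrome word is essentially what do_return feeds through REV and CLZ to recover the position of the first differing or zero byte.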