Diffstat (limited to 'sys/lib/libkern')
-rw-r--r--	sys/lib/libkern/arch/arm/Makefile.inc	 27
-rw-r--r--	sys/lib/libkern/arch/arm/bcopy.S	 48
-rw-r--r--	sys/lib/libkern/arch/arm/byte_swap_2.S	 52
-rw-r--r--	sys/lib/libkern/arch/arm/byte_swap_4.S	 53
-rw-r--r--	sys/lib/libkern/arch/arm/bzero.S	 44
-rw-r--r--	sys/lib/libkern/arch/arm/divsi3.S	410
-rw-r--r--	sys/lib/libkern/arch/arm/memcpy.S	493
-rw-r--r--	sys/lib/libkern/arch/arm/memmove.S	  5
-rw-r--r--	sys/lib/libkern/arch/arm/memset.S	134
9 files changed, 1266 insertions, 0 deletions
diff --git a/sys/lib/libkern/arch/arm/Makefile.inc b/sys/lib/libkern/arch/arm/Makefile.inc
new file mode 100644
index 00000000000..0494703a325
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/Makefile.inc
@@ -0,0 +1,27 @@
+#	$NetBSD: Makefile.inc,v 1.3 2001/11/20 00:29:19 chris Exp $
+
+SRCS+=	__main.c byte_swap_2.S byte_swap_4.S \
+	bcmp.c ffs.c imax.c imin.c lmax.c lmin.c max.c min.c random.c scanc.c \
+	skpc.c strcmp.c strlen.c strncmp.c strncasecmp.c \
+	strlcat.c strlcpy.c \
+	strncpy.c ulmax.c ulmin.c
+
+
+SRCS+=	bcopy.S divsi3.S
+SRCS+=	memchr.c memcmp.c memcpy.S memset.S
+
+OBJS+=	bzero.o
+CLEANFILES+=	bzero.o
+
+bzero.o: ${KERNDIR}/memset.c
+	@echo "${COMPILE.c} -DBZERO ${KERNDIR}/memset.c -o ${.TARGET}"
+	@${COMPILE.c} -DBZERO ${KERNDIR}/memset.c -o ${.TARGET}.o
+	@${LD} -x -r ${.TARGET}.o -o ${.TARGET}
+	@rm -f ${.TARGET}.o
+
+bzero.po: ${KERNDIR}/memset.c
+	@echo "${COMPILE.c} -DBZERO ${KERNDIR}/memset.c -o ${.TARGET}"
+	@${COMPILE.c} -DBZERO ${KERNDIR}/memset.c -o ${.TARGET}.o
+	@${LD} -x -r ${.TARGET}.o -o ${.TARGET}
+	@rm -f ${.TARGET}.o
+
diff --git a/sys/lib/libkern/arch/arm/bcopy.S b/sys/lib/libkern/arch/arm/bcopy.S
new file mode 100644
index 00000000000..7bb7844ecb8
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/bcopy.S
@@ -0,0 +1,48 @@
+/*	$NetBSD: bcopy.S,v 1.2 2001/07/16 05:50:06 matt Exp $	*/
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Neil A. Carson and Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/* bcopy = memcpy/memmove with arguments reversed. */
+
+ENTRY(bcopy)
+	/* switch the source and destination registers */
+	eor	r0, r1, r0
+	eor	r1, r0, r1
+	eor	r0, r1, r0
+	b	PIC_SYM(_C_LABEL(memcpy), PLT)
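The three eor instructions in bcopy above are the classic XOR swap: they exchange r0 and r1 without needing a scratch register, so that bcopy(src, dst, len) can fall straight into memcpy(dst, src, len). A rough C equivalent (a sketch only; the my_bcopy name is illustrative, not a kernel symbol):

	#include <stddef.h>
	#include <string.h>

	/* bcopy takes (src, dst, len); memcpy takes (dst, src, len).
	 * The assembly swaps the first two argument registers and
	 * then tail-branches into memcpy. */
	void my_bcopy(const void *src, void *dst, size_t len)
	{
		memcpy(dst, src, len);
	}

The register swap itself, applied to two unsigned words a and b, is a ^= b; b ^= a; a ^= b; after which the two values are exchanged.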
diff --git a/sys/lib/libkern/arch/arm/byte_swap_2.S b/sys/lib/libkern/arch/arm/byte_swap_2.S
new file mode 100644
index 00000000000..ff97af0d932
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/byte_swap_2.S
@@ -0,0 +1,52 @@
+/*	$NetBSD: byte_swap_2.S,v 1.1.20.1 2002/07/02 06:50:59 lukem Exp $	*/
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+_ENTRY(_C_LABEL(bswap16))
+_ENTRY(_C_LABEL(ntohs))
+_ENTRY(_C_LABEL(htons))
+_PROF_PROLOGUE
+	and	r1, r0, #0xff
+	mov	r0, r0, lsr #8
+	orr	r0, r0, r1, lsl #8
+#ifdef __APCS_26__
+	movs	pc, lr
+#else
+	mov	pc, lr
+#endif
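The halfword swap needs only three data-processing instructions because ARM gets the shifted operand for free. The same sequence in C (my_bswap16 is an illustrative name, not the libkern entry point):

	#include <stdint.h>

	uint16_t my_bswap16(uint16_t x)
	{
		uint16_t lo = x & 0xff;        /* and r1, r0, #0xff       */
		x >>= 8;                       /* mov r0, r0, lsr #8      */
		return x | (uint16_t)(lo << 8); /* orr r0, r0, r1, lsl #8 */
	}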
diff --git a/sys/lib/libkern/arch/arm/byte_swap_4.S b/sys/lib/libkern/arch/arm/byte_swap_4.S
new file mode 100644
index 00000000000..8ef3359f898
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/byte_swap_4.S
@@ -0,0 +1,53 @@
+/*	$NetBSD: byte_swap_4.S,v 1.1 2000/12/29 20:51:57 bjh21 Exp $	*/
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Neil A. Carson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+_ENTRY(_C_LABEL(bswap32))
+_ENTRY(_C_LABEL(ntohl))
+_ENTRY(_C_LABEL(htonl))
+_PROF_PROLOGUE
+	eor	r1, r0, r0, ror #16
+	bic	r1, r1, #0x00FF0000
+	mov	r0, r0, ror #8
+	eor	r0, r0, r1, lsr #8
+#ifdef __APCS_26__
+	movs	pc, lr
+#else
+	mov	pc, lr
+#endif
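The word swap uses the two-EOR/rotate trick: x ^ ror(x, 16) has identical top and bottom halves, and clearing byte 2 of that value leaves exactly the correction terms needed after an 8-bit rotate. A C sketch of the same four steps (my_bswap32 and rotr32 are illustrative helpers; the assembly gets the rotates for free as operand shifts):

	#include <stdint.h>

	static uint32_t rotr32(uint32_t x, unsigned n)
	{
		return (x >> n) | (x << (32 - n));
	}

	uint32_t my_bswap32(uint32_t x)
	{
		uint32_t t = x ^ rotr32(x, 16);	/* eor r1, r0, r0, ror #16 */
		t &= ~0x00FF0000u;		/* bic r1, r1, #0x00FF0000 */
		x = rotr32(x, 8);		/* mov r0, r0, ror #8      */
		return x ^ (t >> 8);		/* eor r0, r0, r1, lsr #8  */
	}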
diff --git a/sys/lib/libkern/arch/arm/bzero.S b/sys/lib/libkern/arch/arm/bzero.S
new file mode 100644
index 00000000000..c0d4304f954
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/bzero.S
@@ -0,0 +1,44 @@
+/*	$NetBSD: bzero.S,v 1.1 2000/12/29 20:51:57 bjh21 Exp $	*/
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Neil A. Carson and Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+ENTRY(bzero)
+	mov	r2, r1
+	mov	r1, #0
+	b	_C_LABEL(memset)
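bzero.S is pure argument marshalling: the byte count moves from r1 to r2, the fill value becomes zero, and control branches straight into memset. In C terms (my_bzero is an illustrative name):

	#include <stddef.h>
	#include <string.h>

	void my_bzero(void *dst, size_t len)
	{
		/* mov r2, r1; mov r1, #0; b memset */
		memset(dst, 0, len);
	}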
diff --git a/sys/lib/libkern/arch/arm/divsi3.S b/sys/lib/libkern/arch/arm/divsi3.S
new file mode 100644
index 00000000000..a5228f59dee
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/divsi3.S
@@ -0,0 +1,410 @@
+/*	$NetBSD: divsi3.S,v 1.2 2001/11/13 20:06:40 chris Exp $	*/
+
+/*
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * stack is aligned as there's a possibility of branching to L_overflow
+ * which makes a C call
+ */
+
+ENTRY(__umodsi3)
+	stmfd	sp!, {lr}
+	sub	sp, sp, #4	/* align stack */
+	bl	L_udivide
+	add	sp, sp, #4	/* unalign stack */
+	mov	r0, r1
+#ifdef __APCS_26__
+	ldmfd	sp!, {pc}^
+#else /* APCS-32 */
+	ldmfd	sp!, {pc}
+#endif
+
+ENTRY(__modsi3)
+	stmfd	sp!, {lr}
+	sub	sp, sp, #4	/* align stack */
+	bl	L_divide
+	add	sp, sp, #4	/* unalign stack */
+	mov	r0, r1
+#ifdef __APCS_26__
+	ldmfd	sp!, {pc}^
+#else
+	ldmfd	sp!, {pc}
+#endif
+
+L_overflow:
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+	mov	r0, #8			/* SIGFPE */
+	bl	PIC_SYM(_C_LABEL(raise), PLT)	/* raise it */
+	mov	r0, #0
+#else
+	/* XXX should cause a fatal error */
+	mvn	r0, #0
+#endif
+#ifdef __APCS_26__
+	movs	pc, lr
+#else
+	mov	pc, lr
+#endif
+
+ENTRY(__udivsi3)
+L_udivide:	/* r0 = r0 / r1; r1 = r0 % r1 */
+	eor	r0, r1, r0
+	eor	r1, r0, r1
+	eor	r0, r1, r0
+	/* r0 = r1 / r0; r1 = r1 % r0 */
+	cmp	r0, #1
+	bcc	L_overflow
+	beq	L_divide_l0
+	mov	ip, #0
+	movs	r1, r1
+	bpl	L_divide_l1
+	orr	ip, ip, #0x20000000	/* ip bit 0x20000000 = -ve r1 */
+	movs	r1, r1, lsr #1
+	orrcs	ip, ip, #0x10000000	/* ip bit 0x10000000 = bit 0 of r1 */
+	b	L_divide_l1
+
+L_divide_l0:	/* r0 == 1 */
+	mov	r0, r1
+	mov	r1, #0
+#ifdef __APCS_26__
+	movs	pc, lr
+#else
+	mov	pc, lr
+#endif
+
+ENTRY(__divsi3)
+L_divide:	/* r0 = r0 / r1; r1 = r0 % r1 */
+	eor	r0, r1, r0
+	eor	r1, r0, r1
+	eor	r0, r1, r0
+	/* r0 = r1 / r0; r1 = r1 % r0 */
+	cmp	r0, #1
+	bcc	L_overflow
+	beq	L_divide_l0
+	ands	ip, r0, #0x80000000
+	rsbmi	r0, r0, #0
+	ands	r2, r1, #0x80000000
+	eor	ip, ip, r2
+	rsbmi	r1, r1, #0
+	orr	ip, r2, ip, lsr #1	/* ip bit 0x40000000 = -ve division */
+					/* ip bit 0x80000000 = -ve remainder */
+
+L_divide_l1:
+	mov	r2, #1
+	mov	r3, #0
+
+	/*
+	 * If the highest bit of the dividend is set, we have to be
+	 * careful when shifting the divisor. Test this.
+	 */
+	movs	r1, r1
+	bpl	L_old_code
+
+	/*
+	 * At this point, the highest bit of r1 is known to be set.
+	 * We abuse this below in the tst instructions.
+	 */
+	tst	r1, r0 /*, lsl #0 */
+	bmi	L_divide_b1
+	tst	r1, r0, lsl #1
+	bmi	L_divide_b2
+	tst	r1, r0, lsl #2
+	bmi	L_divide_b3
+	tst	r1, r0, lsl #3
+	bmi	L_divide_b4
+	tst	r1, r0, lsl #4
+	bmi	L_divide_b5
+	tst	r1, r0, lsl #5
+	bmi	L_divide_b6
+	tst	r1, r0, lsl #6
+	bmi	L_divide_b7
+	tst	r1, r0, lsl #7
+	bmi	L_divide_b8
+	tst	r1, r0, lsl #8
+	bmi	L_divide_b9
+	tst	r1, r0, lsl #9
+	bmi	L_divide_b10
+	tst	r1, r0, lsl #10
+	bmi	L_divide_b11
+	tst	r1, r0, lsl #11
+	bmi	L_divide_b12
+	tst	r1, r0, lsl #12
+	bmi	L_divide_b13
+	tst	r1, r0, lsl #13
+	bmi	L_divide_b14
+	tst	r1, r0, lsl #14
+	bmi	L_divide_b15
+	tst	r1, r0, lsl #15
+	bmi	L_divide_b16
+	tst	r1, r0, lsl #16
+	bmi	L_divide_b17
+	tst	r1, r0, lsl #17
+	bmi	L_divide_b18
+	tst	r1, r0, lsl #18
+	bmi	L_divide_b19
+	tst	r1, r0, lsl #19
+	bmi	L_divide_b20
+	tst	r1, r0, lsl #20
+	bmi	L_divide_b21
+	tst	r1, r0, lsl #21
+	bmi	L_divide_b22
+	tst	r1, r0, lsl #22
+	bmi	L_divide_b23
+	tst	r1, r0, lsl #23
+	bmi	L_divide_b24
+	tst	r1, r0, lsl #24
+	bmi	L_divide_b25
+	tst	r1, r0, lsl #25
+	bmi	L_divide_b26
+	tst	r1, r0, lsl #26
+	bmi	L_divide_b27
+	tst	r1, r0, lsl #27
+	bmi	L_divide_b28
+	tst	r1, r0, lsl #28
+	bmi	L_divide_b29
+	tst	r1, r0, lsl #29
+	bmi	L_divide_b30
+	tst	r1, r0, lsl #30
+	bmi	L_divide_b31
+/*
+ * instead of:
+ *	tst	r1, r0, lsl #31
+ *	bmi	L_divide_b32
+ */
+	b	L_divide_b32
+
+L_old_code:
+	cmp	r1, r0
+	bcc	L_divide_b0
+	cmp	r1, r0, lsl #1
+	bcc	L_divide_b1
+	cmp	r1, r0, lsl #2
+	bcc	L_divide_b2
+	cmp	r1, r0, lsl #3
+	bcc	L_divide_b3
+	cmp	r1, r0, lsl #4
+	bcc	L_divide_b4
+	cmp	r1, r0, lsl #5
+	bcc	L_divide_b5
+	cmp	r1, r0, lsl #6
+	bcc	L_divide_b6
+	cmp	r1, r0, lsl #7
+	bcc	L_divide_b7
+	cmp	r1, r0, lsl #8
+	bcc	L_divide_b8
+	cmp	r1, r0, lsl #9
+	bcc	L_divide_b9
+	cmp	r1, r0, lsl #10
+	bcc	L_divide_b10
+	cmp	r1, r0, lsl #11
+	bcc	L_divide_b11
+	cmp	r1, r0, lsl #12
+	bcc	L_divide_b12
+	cmp	r1, r0, lsl #13
+	bcc	L_divide_b13
+	cmp	r1, r0, lsl #14
+	bcc	L_divide_b14
+	cmp	r1, r0, lsl #15
+	bcc	L_divide_b15
+	cmp	r1, r0, lsl #16
+	bcc	L_divide_b16
+	cmp	r1, r0, lsl #17
+	bcc	L_divide_b17
+	cmp	r1, r0, lsl #18
+	bcc	L_divide_b18
+	cmp	r1, r0, lsl #19
+	bcc	L_divide_b19
+	cmp	r1, r0, lsl #20
+	bcc	L_divide_b20
+	cmp	r1, r0, lsl #21
+	bcc	L_divide_b21
+	cmp	r1, r0, lsl #22
+	bcc	L_divide_b22
+	cmp	r1, r0, lsl #23
+	bcc	L_divide_b23
+	cmp	r1, r0, lsl #24
+	bcc	L_divide_b24
+	cmp	r1, r0, lsl #25
+	bcc	L_divide_b25
+	cmp	r1, r0, lsl #26
+	bcc	L_divide_b26
+	cmp	r1, r0, lsl #27
+	bcc	L_divide_b27
+	cmp	r1, r0, lsl #28
+	bcc	L_divide_b28
+	cmp	r1, r0, lsl #29
+	bcc	L_divide_b29
+	cmp	r1, r0, lsl #30
+	bcc	L_divide_b30
+L_divide_b32:
+	cmp	r1, r0, lsl #31
+	subhs	r1, r1, r0, lsl #31
+	addhs	r3, r3, r2, lsl #31
+L_divide_b31:
+	cmp	r1, r0, lsl #30
+	subhs	r1, r1, r0, lsl #30
+	addhs	r3, r3, r2, lsl #30
+L_divide_b30:
+	cmp	r1, r0, lsl #29
+	subhs	r1, r1, r0, lsl #29
+	addhs	r3, r3, r2, lsl #29
+L_divide_b29:
+	cmp	r1, r0, lsl #28
+	subhs	r1, r1, r0, lsl #28
+	addhs	r3, r3, r2, lsl #28
+L_divide_b28:
+	cmp	r1, r0, lsl #27
+	subhs	r1, r1, r0, lsl #27
+	addhs	r3, r3, r2, lsl #27
+L_divide_b27:
+	cmp	r1, r0, lsl #26
+	subhs	r1, r1, r0, lsl #26
+	addhs	r3, r3, r2, lsl #26
+L_divide_b26:
+	cmp	r1, r0, lsl #25
+	subhs	r1, r1, r0, lsl #25
+	addhs	r3, r3, r2, lsl #25
+L_divide_b25:
+	cmp	r1, r0, lsl #24
+	subhs	r1, r1, r0, lsl #24
+	addhs	r3, r3, r2, lsl #24
+L_divide_b24:
+	cmp	r1, r0, lsl #23
+	subhs	r1, r1, r0, lsl #23
+	addhs	r3, r3, r2, lsl #23
+L_divide_b23:
+	cmp	r1, r0, lsl #22
+	subhs	r1, r1, r0, lsl #22
+	addhs	r3, r3, r2, lsl #22
+L_divide_b22:
+	cmp	r1, r0, lsl #21
+	subhs	r1, r1, r0, lsl #21
+	addhs	r3, r3, r2, lsl #21
+L_divide_b21:
+	cmp	r1, r0, lsl #20
+	subhs	r1, r1, r0, lsl #20
+	addhs	r3, r3, r2, lsl #20
+L_divide_b20:
+	cmp	r1, r0, lsl #19
+	subhs	r1, r1, r0, lsl #19
+	addhs	r3, r3, r2, lsl #19
+L_divide_b19:
+	cmp	r1, r0, lsl #18
+	subhs	r1, r1, r0, lsl #18
+	addhs	r3, r3, r2, lsl #18
+L_divide_b18:
+	cmp	r1, r0, lsl #17
+	subhs	r1, r1, r0, lsl #17
+	addhs	r3, r3, r2, lsl #17
+L_divide_b17:
+	cmp	r1, r0, lsl #16
+	subhs	r1, r1, r0, lsl #16
+	addhs	r3, r3, r2, lsl #16
+L_divide_b16:
+	cmp	r1, r0, lsl #15
+	subhs	r1, r1, r0, lsl #15
+	addhs	r3, r3, r2, lsl #15
+L_divide_b15:
+	cmp	r1, r0, lsl #14
+	subhs	r1, r1, r0, lsl #14
+	addhs	r3, r3, r2, lsl #14
+L_divide_b14:
+	cmp	r1, r0, lsl #13
+	subhs	r1, r1, r0, lsl #13
+	addhs	r3, r3, r2, lsl #13
+L_divide_b13:
+	cmp	r1, r0, lsl #12
+	subhs	r1, r1, r0, lsl #12
+	addhs	r3, r3, r2, lsl #12
+L_divide_b12:
+	cmp	r1, r0, lsl #11
+	subhs	r1, r1, r0, lsl #11
+	addhs	r3, r3, r2, lsl #11
+L_divide_b11:
+	cmp	r1, r0, lsl #10
+	subhs	r1, r1, r0, lsl #10
+	addhs	r3, r3, r2, lsl #10
+L_divide_b10:
+	cmp	r1, r0, lsl #9
+	subhs	r1, r1, r0, lsl #9
+	addhs	r3, r3, r2, lsl #9
+L_divide_b9:
+	cmp	r1, r0, lsl #8
+	subhs	r1, r1, r0, lsl #8
+	addhs	r3, r3, r2, lsl #8
+L_divide_b8:
+	cmp	r1, r0, lsl #7
+	subhs	r1, r1, r0, lsl #7
+	addhs	r3, r3, r2, lsl #7
+L_divide_b7:
+	cmp	r1, r0, lsl #6
+	subhs	r1, r1, r0, lsl #6
+	addhs	r3, r3, r2, lsl #6
+L_divide_b6:
+	cmp	r1, r0, lsl #5
+	subhs	r1, r1, r0, lsl #5
+	addhs	r3, r3, r2, lsl #5
+L_divide_b5:
+	cmp	r1, r0, lsl #4
+	subhs	r1, r1, r0, lsl #4
+	addhs	r3, r3, r2, lsl #4
+L_divide_b4:
+	cmp	r1, r0, lsl #3
+	subhs	r1, r1, r0, lsl #3
+	addhs	r3, r3, r2, lsl #3
+L_divide_b3:
+	cmp	r1, r0, lsl #2
+	subhs	r1, r1, r0, lsl #2
+	addhs	r3, r3, r2, lsl #2
+L_divide_b2:
+	cmp	r1, r0, lsl #1
+	subhs	r1, r1, r0, lsl #1
+	addhs	r3, r3, r2, lsl #1
+L_divide_b1:
+	cmp	r1, r0
+	subhs	r1, r1, r0
+	addhs	r3, r3, r2
+L_divide_b0:
+
+	tst	ip, #0x20000000
+	bne	L_udivide_l1
+	mov	r0, r3
+	cmp	ip, #0
+	rsbmi	r1, r1, #0
+	movs	ip, ip, lsl #1
+	bicmi	r0, r0, #0x80000000	/* Fix in case we divided 0x80000000 */
+	rsbmi	r0, r0, #0
+#ifdef __APCS_26__
+	movs	pc, lr
+#else
+	mov	pc, lr
+#endif
+
+L_udivide_l1:
+	tst	ip, #0x10000000
+	mov	r1, r1, lsl #1
+	orrne	r1, r1, #1
+	mov	r3, r3, lsl #1
+	cmp	r1, r0
+	subhs	r1, r1, r0
+	addhs	r3, r3, r2
+	mov	r0, r3
+#ifdef __APCS_26__
+	movs	pc, lr
+#else
+	mov	pc, lr
+#endif
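divsi3.S implements restoring shift-and-subtract division, fully unrolled: the ladder from L_divide_b32 down to L_divide_b1 tries the divisor at every shift position, subtracting it and setting the matching quotient bit whenever it fits, and the tst-based preamble only exists to pick a safe entry point when the dividend's top bit is set. A compact looping sketch of the same algorithm (my_udiv32 is an illustrative name; it assumes den != 0, the case the assembly diverts to L_overflow):

	#include <stdint.h>

	static uint32_t my_udiv32(uint32_t num, uint32_t den, uint32_t *rem)
	{
		uint32_t quot = 0;
		int shift = 0;

		/* Raise the divisor as far as it goes without losing its
		 * top bit or exceeding the dividend (the overflow hazard
		 * the tst chain guards against). */
		while (shift < 31 && !((den << shift) & 0x80000000u) &&
		    (den << (shift + 1)) <= num)
			shift++;

		/* Walk back down, one quotient bit per position. */
		for (; shift >= 0; shift--) {
			if (num >= (den << shift)) {	/* cmp/subhs/addhs */
				num -= den << shift;
				quot |= 1u << shift;
			}
		}
		*rem = num;
		return quot;
	}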
diff --git a/sys/lib/libkern/arch/arm/memcpy.S b/sys/lib/libkern/arch/arm/memcpy.S
new file mode 100644
index 00000000000..1e9552612de
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/memcpy.S
@@ -0,0 +1,493 @@
+/*	$NetBSD: memcpy.S,v 1.2 2001/11/20 00:29:20 chris Exp $	*/
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Neil A. Carson and Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * This is one fun bit of code ...
+ * Some easy listening music is suggested while trying to understand this
+ * code e.g. Iron Maiden
+ *
+ * For anyone attempting to understand it :
+ *
+ * The core code is implemented here with simple stubs for memcpy(),
+ * memmove() and bcopy().
+ *
+ * All local labels are prefixed with Lmemcpy_
+ * Following the prefix, a label starting with f is used in the forward
+ * copy code, while a label starting with b is used in the backwards
+ * copy code.
+ * The source and destination addresses determine whether a forward or
+ * backward copy is performed.
+ * Separate bits of code are used to deal with the following situations
+ * for both the forward and backwards copy:
+ *	unaligned source address
+ *	unaligned destination address
+ * Separate copy routines are used to produce an optimised result for each
+ * of these cases.
+ * The copy code will use LDM/STM instructions to copy up to 32 bytes at
+ * a time where possible.
+ *
+ * Note: r12 (aka ip) can be trashed during the function along with
+ * r0-r3, although r0-r2 have defined uses, i.e. src, dest, len throughout.
+ * Additional registers are preserved prior to use, i.e. r4, r5 & lr
+ *
+ * Apologies for the state of the comments ;-)
+ */
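The cmp r1, r0 at the entry point (just below) is what lets one body serve both memcpy() and memmove(): when the destination lies above the source, the regions could overlap destructively in a forward pass, so the code runs the whole copy backwards instead. The same decision in C, with a simple byte loop standing in for the LDM/STM block transfers the assembly actually issues (my_memmove is an illustrative name):

	#include <stddef.h>

	void *my_memmove(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (s >= d) {			/* forward copy is safe */
			while (len--)
				*d++ = *s++;
		} else {			/* Lmemcpy_backwards path */
			d += len;
			s += len;
			while (len--)
				*--d = *--s;
		}
		return dst;			/* memcpy() returns dest addr */
	}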
+ENTRY(memcpy)
+ENTRY_NP(memmove)
+	/* Determine copy direction */
+	cmp	r1, r0
+
+	moveq	r0, #0			/* Quick abort for src == dst */
+#ifdef __APCS_26__
+	moveqs	pc, lr
+#else
+	moveq	pc, lr
+#endif
+
+	/* save leaf functions having to store this away */
+	stmdb	sp!, {r0, lr}		/* memcpy() returns dest addr */
+
+	bcc	Lmemcpy_backwards
+
+	/* start of forwards copy */
+	subs	r2, r2, #4
+	blt	Lmemcpy_fl4		/* less than 4 bytes */
+	ands	r12, r0, #3
+	bne	Lmemcpy_fdestul		/* oh unaligned destination addr */
+	ands	r12, r1, #3
+	bne	Lmemcpy_fsrcul		/* oh unaligned source addr */
+
+Lmemcpy_ft8:
+	/* We have aligned source and destination */
+	subs	r2, r2, #8
+	blt	Lmemcpy_fl12		/* less than 12 bytes (4 from above) */
+	subs	r2, r2, #0x14
+	blt	Lmemcpy_fl32		/* less than 32 bytes (12 from above) */
+	stmdb	sp!, {r4}		/* borrow r4 */
+
+	/* blat 32 bytes at a time */
+	/* XXX for really big copies perhaps we should use more registers */
+Lmemcpy_floop32:
+	ldmia	r1!, {r3, r4, r12, lr}
+	stmia	r0!, {r3, r4, r12, lr}
+	ldmia	r1!, {r3, r4, r12, lr}
+	stmia	r0!, {r3, r4, r12, lr}
+	subs	r2, r2, #0x20
+	bge	Lmemcpy_floop32
+
+	cmn	r2, #0x10
+	ldmgeia	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
+	stmgeia	r0!, {r3, r4, r12, lr}
+	subge	r2, r2, #0x10
+	ldmia	sp!, {r4}		/* return r4 */
+
+Lmemcpy_fl32:
+	adds	r2, r2, #0x14
+
+	/* blat 12 bytes at a time */
+Lmemcpy_floop12:
+	ldmgeia	r1!, {r3, r12, lr}
+	stmgeia	r0!, {r3, r12, lr}
+	subges	r2, r2, #0x0c
+	bge	Lmemcpy_floop12
+
+Lmemcpy_fl12:
+	adds	r2, r2, #8
+	blt	Lmemcpy_fl4
+
+	subs	r2, r2, #4
+	ldrlt	r3, [r1], #4
+	strlt	r3, [r0], #4
+	ldmgeia	r1!, {r3, r12}
+	stmgeia	r0!, {r3, r12}
+	subge	r2, r2, #4
+
+Lmemcpy_fl4:
+	/* less than 4 bytes to go */
+	adds	r2, r2, #4
+#ifdef __APCS_26__
+	ldmeqia	sp!, {r0, pc}^		/* done */
+#else
+	ldmeqia	sp!, {r0, pc}		/* done */
+#endif
+	/* copy the crud byte at a time */
+	cmp	r2, #2
+	ldrb	r3, [r1], #1
+	strb	r3, [r0], #1
+	ldrgeb	r3, [r1], #1
+	strgeb	r3, [r0], #1
+	ldrgtb	r3, [r1], #1
+	strgtb	r3, [r0], #1
+#ifdef __APCS_26__
+	ldmia	sp!, {r0, pc}^
+#else
+	ldmia	sp!, {r0, pc}
+#endif
+
+	/* erg - unaligned destination */
+Lmemcpy_fdestul:
+	rsb	r12, r12, #4
+	cmp	r12, #2
+
+	/* align destination with byte copies */
+	ldrb	r3, [r1], #1
+	strb	r3, [r0], #1
+	ldrgeb	r3, [r1], #1
+	strgeb	r3, [r0], #1
+	ldrgtb	r3, [r1], #1
+	strgtb	r3, [r0], #1
+	subs	r2, r2, r12
+	blt	Lmemcpy_fl4		/* less than 4 bytes */
+
+	ands	r12, r1, #3
+	beq	Lmemcpy_ft8		/* we have an aligned source */
+
+	/* erg - unaligned source */
+	/* This is where it gets nasty ... */
+Lmemcpy_fsrcul:
+	bic	r1, r1, #3
+	ldr	lr, [r1], #4
+	cmp	r12, #2
+	bgt	Lmemcpy_fsrcul3
+	beq	Lmemcpy_fsrcul2
+	cmp	r2, #0x0c
+	blt	Lmemcpy_fsrcul1loop4
+	sub	r2, r2, #0x0c
+	stmdb	sp!, {r4, r5}
+
+Lmemcpy_fsrcul1loop16:
+	mov	r3, lr, lsr #8
+	ldmia	r1!, {r4, r5, r12, lr}
+	orr	r3, r3, r4, lsl #24
+	mov	r4, r4, lsr #8
+	orr	r4, r4, r5, lsl #24
+	mov	r5, r5, lsr #8
+	orr	r5, r5, r12, lsl #24
+	mov	r12, r12, lsr #8
+	orr	r12, r12, lr, lsl #24
+	stmia	r0!, {r3-r5, r12}
+	subs	r2, r2, #0x10
+	bge	Lmemcpy_fsrcul1loop16
+	ldmia	sp!, {r4, r5}
+	adds	r2, r2, #0x0c
+	blt	Lmemcpy_fsrcul1l4
+
+Lmemcpy_fsrcul1loop4:
+	mov	r12, lr, lsr #8
+	ldr	lr, [r1], #4
+	orr	r12, r12, lr, lsl #24
+	str	r12, [r0], #4
+	subs	r2, r2, #4
+	bge	Lmemcpy_fsrcul1loop4
+
+Lmemcpy_fsrcul1l4:
+	sub	r1, r1, #3
+	b	Lmemcpy_fl4
+
+Lmemcpy_fsrcul2:
+	cmp	r2, #0x0c
+	blt	Lmemcpy_fsrcul2loop4
+	sub	r2, r2, #0x0c
+	stmdb	sp!, {r4, r5}
+
+Lmemcpy_fsrcul2loop16:
+	mov	r3, lr, lsr #16
+	ldmia	r1!, {r4, r5, r12, lr}
+	orr	r3, r3, r4, lsl #16
+	mov	r4, r4, lsr #16
+	orr	r4, r4, r5, lsl #16
+	mov	r5, r5, lsr #16
+	orr	r5, r5, r12, lsl #16
+	mov	r12, r12, lsr #16
+	orr	r12, r12, lr, lsl #16
+	stmia	r0!, {r3-r5, r12}
+	subs	r2, r2, #0x10
+	bge	Lmemcpy_fsrcul2loop16
+	ldmia	sp!, {r4, r5}
+	adds	r2, r2, #0x0c
+	blt	Lmemcpy_fsrcul2l4
+
+Lmemcpy_fsrcul2loop4:
+	mov	r12, lr, lsr #16
+	ldr	lr, [r1], #4
+	orr	r12, r12, lr, lsl #16
+	str	r12, [r0], #4
+	subs	r2, r2, #4
+	bge	Lmemcpy_fsrcul2loop4
+
+Lmemcpy_fsrcul2l4:
+	sub	r1, r1, #2
+	b	Lmemcpy_fl4
+
+Lmemcpy_fsrcul3:
+	cmp	r2, #0x0c
+	blt	Lmemcpy_fsrcul3loop4
+	sub	r2, r2, #0x0c
+	stmdb	sp!, {r4, r5}
+
+Lmemcpy_fsrcul3loop16:
+	mov	r3, lr, lsr #24
+	ldmia	r1!, {r4, r5, r12, lr}
+	orr	r3, r3, r4, lsl #8
+	mov	r4, r4, lsr #24
+	orr	r4, r4, r5, lsl #8
+	mov	r5, r5, lsr #24
+	orr	r5, r5, r12, lsl #8
+	mov	r12, r12, lsr #24
+	orr	r12, r12, lr, lsl #8
+	stmia	r0!, {r3-r5, r12}
+	subs	r2, r2, #0x10
+	bge	Lmemcpy_fsrcul3loop16
+	ldmia	sp!, {r4, r5}
+	adds	r2, r2, #0x0c
+	blt	Lmemcpy_fsrcul3l4
+
+Lmemcpy_fsrcul3loop4:
+	mov	r12, lr, lsr #24
+	ldr	lr, [r1], #4
+	orr	r12, r12, lr, lsl #8
+	str	r12, [r0], #4
+	subs	r2, r2, #4
+	bge	Lmemcpy_fsrcul3loop4
+
+Lmemcpy_fsrcul3l4:
+	sub	r1, r1, #1
+	b	Lmemcpy_fl4
+
+Lmemcpy_backwards:
+	add	r1, r1, r2
+	add	r0, r0, r2
+	subs	r2, r2, #4
+	blt	Lmemcpy_bl4		/* less than 4 bytes */
+	ands	r12, r0, #3
+	bne	Lmemcpy_bdestul		/* oh unaligned destination addr */
+	ands	r12, r1, #3
+	bne	Lmemcpy_bsrcul		/* oh unaligned source addr */
+
+Lmemcpy_bt8:
+	/* We have aligned source and destination */
+	subs	r2, r2, #8
+	blt	Lmemcpy_bl12		/* less than 12 bytes (4 from above) */
+	stmdb	sp!, {r4}
+	subs	r2, r2, #0x14		/* less than 32 bytes (12 from above) */
+	blt	Lmemcpy_bl32
+
+	/* blat 32 bytes at a time */
+	/* XXX for really big copies perhaps we should use more registers */
+Lmemcpy_bloop32:
+	ldmdb	r1!, {r3, r4, r12, lr}
+	stmdb	r0!, {r3, r4, r12, lr}
+	ldmdb	r1!, {r3, r4, r12, lr}
+	stmdb	r0!, {r3, r4, r12, lr}
+	subs	r2, r2, #0x20
+	bge	Lmemcpy_bloop32
+
+Lmemcpy_bl32:
+	cmn	r2, #0x10
+	ldmgedb	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
+	stmgedb	r0!, {r3, r4, r12, lr}
+	subge	r2, r2, #0x10
+	adds	r2, r2, #0x14
+	ldmgedb	r1!, {r3, r12, lr}	/* blat a remaining 12 bytes */
+	stmgedb	r0!, {r3, r12, lr}
+	subge	r2, r2, #0x0c
+	ldmia	sp!, {r4}
+
+Lmemcpy_bl12:
+	adds	r2, r2, #8
+	blt	Lmemcpy_bl4
+	subs	r2, r2, #4
+	ldrlt	r3, [r1, #-4]!
+	strlt	r3, [r0, #-4]!
+	ldmgedb	r1!, {r3, r12}
+	stmgedb	r0!, {r3, r12}
+	subge	r2, r2, #4
+
+Lmemcpy_bl4:
+	/* less than 4 bytes to go */
+	adds	r2, r2, #4
+#ifdef __APCS_26__
+	ldmeqia	sp!, {r0, pc}^
+#else
+	ldmeqia	sp!, {r0, pc}
+#endif
+
+	/* copy the crud byte at a time */
+	cmp	r2, #2
+	ldrb	r3, [r1, #-1]!
+	strb	r3, [r0, #-1]!
+	ldrgeb	r3, [r1, #-1]!
+	strgeb	r3, [r0, #-1]!
+	ldrgtb	r3, [r1, #-1]!
+	strgtb	r3, [r0, #-1]!
+#ifdef __APCS_26__
+	ldmia	sp!, {r0, pc}^
+#else
+	ldmia	sp!, {r0, pc}
+#endif
+
+	/* erg - unaligned destination */
+Lmemcpy_bdestul:
+	cmp	r12, #2
+
+	/* align destination with byte copies */
+	ldrb	r3, [r1, #-1]!
+	strb	r3, [r0, #-1]!
+	ldrgeb	r3, [r1, #-1]!
+	strgeb	r3, [r0, #-1]!
+	ldrgtb	r3, [r1, #-1]!
+	strgtb	r3, [r0, #-1]!
+	subs	r2, r2, r12
+	blt	Lmemcpy_bl4		/* less than 4 bytes to go */
+	ands	r12, r1, #3
+	beq	Lmemcpy_bt8		/* we have an aligned source */
+
+	/* erg - unaligned source */
+	/* This is where it gets nasty ... */
+Lmemcpy_bsrcul:
+	bic	r1, r1, #3
+	ldr	r3, [r1, #0]
+	cmp	r12, #2
+	blt	Lmemcpy_bsrcul1
+	beq	Lmemcpy_bsrcul2
+	cmp	r2, #0x0c
+	blt	Lmemcpy_bsrcul3loop4
+	sub	r2, r2, #0x0c
+	stmdb	sp!, {r4, r5}
+
+Lmemcpy_bsrcul3loop16:
+	mov	lr, r3, lsl #8
+	ldmdb	r1!, {r3-r5, r12}
+	orr	lr, lr, r12, lsr #24
+	mov	r12, r12, lsl #8
+	orr	r12, r12, r5, lsr #24
+	mov	r5, r5, lsl #8
+	orr	r5, r5, r4, lsr #24
+	mov	r4, r4, lsl #8
+	orr	r4, r4, r3, lsr #24
+	stmdb	r0!, {r4, r5, r12, lr}
+	subs	r2, r2, #0x10
+	bge	Lmemcpy_bsrcul3loop16
+	ldmia	sp!, {r4, r5}
+	adds	r2, r2, #0x0c
+	blt	Lmemcpy_bsrcul3l4
+
+Lmemcpy_bsrcul3loop4:
+	mov	r12, r3, lsl #8
+	ldr	r3, [r1, #-4]!
+	orr	r12, r12, r3, lsr #24
+	str	r12, [r0, #-4]!
+	subs	r2, r2, #4
+	bge	Lmemcpy_bsrcul3loop4
+
+Lmemcpy_bsrcul3l4:
+	add	r1, r1, #3
+	b	Lmemcpy_bl4
+
+Lmemcpy_bsrcul2:
+	cmp	r2, #0x0c
+	blt	Lmemcpy_bsrcul2loop4
+	sub	r2, r2, #0x0c
+	stmdb	sp!, {r4, r5}
+
+Lmemcpy_bsrcul2loop16:
+	mov	lr, r3, lsl #16
+	ldmdb	r1!, {r3-r5, r12}
+	orr	lr, lr, r12, lsr #16
+	mov	r12, r12, lsl #16
+	orr	r12, r12, r5, lsr #16
+	mov	r5, r5, lsl #16
+	orr	r5, r5, r4, lsr #16
+	mov	r4, r4, lsl #16
+	orr	r4, r4, r3, lsr #16
+	stmdb	r0!, {r4, r5, r12, lr}
+	subs	r2, r2, #0x10
+	bge	Lmemcpy_bsrcul2loop16
+	ldmia	sp!, {r4, r5}
+	adds	r2, r2, #0x0c
+	blt	Lmemcpy_bsrcul2l4
+
+Lmemcpy_bsrcul2loop4:
+	mov	r12, r3, lsl #16
+	ldr	r3, [r1, #-4]!
+	orr	r12, r12, r3, lsr #16
+	str	r12, [r0, #-4]!
+	subs	r2, r2, #4
+	bge	Lmemcpy_bsrcul2loop4
+
+Lmemcpy_bsrcul2l4:
+	add	r1, r1, #2
+	b	Lmemcpy_bl4
+
+Lmemcpy_bsrcul1:
+	cmp	r2, #0x0c
+	blt	Lmemcpy_bsrcul1loop4
+	sub	r2, r2, #0x0c
+	stmdb	sp!, {r4, r5}
+
+Lmemcpy_bsrcul1loop32:
+	mov	lr, r3, lsl #24
+	ldmdb	r1!, {r3-r5, r12}
+	orr	lr, lr, r12, lsr #8
+	mov	r12, r12, lsl #24
+	orr	r12, r12, r5, lsr #8
+	mov	r5, r5, lsl #24
+	orr	r5, r5, r4, lsr #8
+	mov	r4, r4, lsl #24
+	orr	r4, r4, r3, lsr #8
+	stmdb	r0!, {r4, r5, r12, lr}
+	subs	r2, r2, #0x10
+	bge	Lmemcpy_bsrcul1loop32
+	ldmia	sp!, {r4, r5}
+	adds	r2, r2, #0x0c
+	blt	Lmemcpy_bsrcul1l4
+
+Lmemcpy_bsrcul1loop4:
+	mov	r12, r3, lsl #24
+	ldr	r3, [r1, #-4]!
+	orr	r12, r12, r3, lsr #8
+	str	r12, [r0, #-4]!
+	subs	r2, r2, #4
+	bge	Lmemcpy_bsrcul1loop4
+
+Lmemcpy_bsrcul1l4:
+	add	r1, r1, #1
+	b	Lmemcpy_bl4
+
diff --git a/sys/lib/libkern/arch/arm/memmove.S b/sys/lib/libkern/arch/arm/memmove.S
new file mode 100644
index 00000000000..ceb29afe511
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/memmove.S
@@ -0,0 +1,5 @@
+/*	$NetBSD: memmove.S,v 1.2 2001/11/20 00:29:20 chris Exp $	*/
+
+/*
+ * placeholder to keep the make system happy, memmove is actually in memcpy.S
+ */
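The Lmemcpy_fsrcul* and Lmemcpy_bsrcul* sections of memcpy.S above are the unaligned-source machinery: the source pointer is rounded down to a word boundary, whole aligned words are loaded, and each output word is spliced from two adjacent input words using a shift pair chosen by the misalignment (1, 2 or 3 bytes). A little-endian C sketch of the 1-byte case, matching the lsr #8 / lsl #24 pairing of Lmemcpy_fsrcul1loop4 (copy_words_off1 is an illustrative helper; src_word must point at the aligned word containing the first source byte):

	#include <stdint.h>

	static void copy_words_off1(uint32_t *dst, const uint32_t *src_word,
	    int nwords)
	{
		uint32_t prev = *src_word++;	/* ldr lr, [r1], #4 */

		while (nwords--) {
			uint32_t next = *src_word++;	      /* ldr lr, [r1], #4 */
			*dst++ = (prev >> 8) | (next << 24);  /* mov/orr, str     */
			prev = next;
		}
	}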
diff --git a/sys/lib/libkern/arch/arm/memset.S b/sys/lib/libkern/arch/arm/memset.S
new file mode 100644
index 00000000000..fe41292691e
--- /dev/null
+++ b/sys/lib/libkern/arch/arm/memset.S
@@ -0,0 +1,134 @@
+/*	$NetBSD: memset.S,v 1.1 2000/12/29 20:51:57 bjh21 Exp $	*/
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ *    endorse or promote products derived from this software without specific
+ *    prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * Sets a block of memory to the specified value
+ *
+ * On entry:
+ *   r0 - dest address
+ *   r1 - byte to write
+ *   r2 - number of bytes to write
+ *
+ * On exit:
+ *   r0 - dest address
+ */
+
+ENTRY(memset)
+	stmfd	sp!, {r0}		/* Remember address for return value */
+	and	r1, r1, #0x000000ff	/* We write bytes */
+
+	cmp	r2, #0x00000004		/* Do we have less than 4 bytes */
+	blt	Lmemset_lessthanfour
+
+	/* Ok first we will word align the address */
+
+	ands	r3, r0, #0x00000003	/* Get the bottom two bits */
+	beq	Lmemset_addraligned	/* The address is word aligned */
+
+	rsb	r3, r3, #0x00000004
+	sub	r2, r2, r3
+	cmp	r3, #0x00000002
+	strb	r1, [r0], #0x0001	/* Set 1 byte */
+	strgeb	r1, [r0], #0x0001	/* Set another byte */
+	strgtb	r1, [r0], #0x0001	/* and a third */
+
+	cmp	r2, #0x00000004
+	blt	Lmemset_lessthanfour
+
+	/* Now we must be word aligned */
+
+Lmemset_addraligned:
+
+	orr	r3, r1, r1, lsl #8	/* Repeat the byte into a word */
+	orr	r3, r3, r3, lsl #16
+
+	/* We know we have at least 4 bytes ... */
+
+	cmp	r2, #0x00000020		/* If less than 32 then use words */
+	blt	Lmemset_lessthan32
+
+	/* We have at least 32 so lets use quad words */
+
+	stmfd	sp!, {r4-r6}		/* Store registers */
+	mov	r4, r3			/* Duplicate data */
+	mov	r5, r3
+	mov	r6, r3
+
+Lmemset_loop16:
+	stmia	r0!, {r3-r6}		/* Store 16 bytes */
+	sub	r2, r2, #0x00000010	/* Adjust count */
+	cmp	r2, #0x00000010		/* Still got at least 16 bytes ? */
+	bgt	Lmemset_loop16
+
+	ldmfd	sp!, {r4-r6}		/* Restore registers */
+
+	/* Do we need to set some words as well ? */
+
+	cmp	r2, #0x00000004
+	blt	Lmemset_lessthanfour
+
+	/* Have either less than 16 or less than 32 depending on route taken */
+
+Lmemset_lessthan32:
+
+	/* We have at least 4 bytes so copy as words */
+
+Lmemset_loop4:
+	str	r3, [r0], #0x0004
+	sub	r2, r2, #0x0004
+	cmp	r2, #0x00000004
+	bge	Lmemset_loop4
+
+Lmemset_lessthanfour:
+	cmp	r2, #0x00000000
+	ldmeqfd	sp!, {r0}
+#ifdef __APCS_26__
+	moveqs	pc, lr			/* Zero length so exit */
+#else
+	moveq	pc, lr			/* Zero length so exit */
+#endif
+
+	cmp	r2, #0x00000002
+	strb	r1, [r0], #0x0001	/* Set 1 byte */
+	strgeb	r1, [r0], #0x0001	/* Set another byte */
+	strgtb	r1, [r0], #0x0001	/* and a third */
+
+	ldmfd	sp!, {r0}
+#ifdef __APCS_26__
+	movs	pc, lr			/* Exit */
+#else
+	mov	pc, lr			/* Exit */
+#endif
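memset.S applies the standard widening strategy: replicate the fill byte across a 32-bit word, byte-fill up to a word boundary, then issue word stores (and, for 32 bytes or more, 16-byte stmia bursts), finishing the tail with byte stores. A simplified C sketch (my_memset is an illustrative name; the four-register block-store loop is collapsed into plain word stores here):

	#include <stddef.h>
	#include <stdint.h>

	void *my_memset(void *dst, int c, size_t len)
	{
		unsigned char *d = dst;
		uint32_t fill = (unsigned char)c;

		fill |= fill << 8;	/* orr r3, r1, r1, lsl #8  */
		fill |= fill << 16;	/* orr r3, r3, r3, lsl #16 */

		while (len > 0 && ((uintptr_t)d & 3)) {	/* align dest */
			*d++ = (unsigned char)c;
			len--;
		}
		while (len >= 4) {	/* word stores (Lmemset_loop4) */
			*(uint32_t *)(void *)d = fill;
			d += 4;
			len -= 4;
		}
		while (len--)		/* sub-word tail */
			*d++ = (unsigned char)c;

		return dst;		/* memset returns the dest address */
	}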