summaryrefslogtreecommitdiffstats
path: root/firmware/asm
diff options
context:
space:
mode:
authorThomas Martitz <kugel@rockbox.org>2012-01-07 19:56:09 +0100
committerThomas Martitz <kugel@rockbox.org>2012-01-22 18:46:45 +0100
commita035261089403de259e74ce4dd196e2715138ed2 (patch)
tree440d75109a7ebfefef2a58076b38b1bfe9db8f63 /firmware/asm
parent8e8e978de6b6283b66a6829fa8c5e3b94674ce7d (diff)
downloadrockbox-a035261089403de259e74ce4dd196e2715138ed2.tar.gz
rockbox-a035261089403de259e74ce4dd196e2715138ed2.tar.bz2
rockbox-a035261089403de259e74ce4dd196e2715138ed2.zip
Move optimized memcpy and friends and strlen to firmware/asm,
using the new automatic-asm-picking infrastructure.
Diffstat (limited to 'firmware/asm')
-rw-r--r--firmware/asm/SOURCES8
-rw-r--r--firmware/asm/arm/memcpy.S176
-rw-r--r--firmware/asm/arm/memmove.S190
-rw-r--r--firmware/asm/arm/memset.S98
-rw-r--r--firmware/asm/arm/memset16.S82
-rw-r--r--firmware/asm/m68k/memcpy.S682
-rw-r--r--firmware/asm/m68k/memmove.S670
-rw-r--r--firmware/asm/m68k/memset.S152
-rw-r--r--firmware/asm/m68k/memset16.S146
-rw-r--r--firmware/asm/m68k/strlen.S71
-rw-r--r--firmware/asm/memcpy.c117
-rw-r--r--firmware/asm/memmove.c147
-rw-r--r--firmware/asm/memset.c110
-rw-r--r--firmware/asm/memset16.c78
-rw-r--r--firmware/asm/mips/memcpy.S143
-rw-r--r--firmware/asm/mips/memset.S239
-rw-r--r--firmware/asm/sh/memcpy.S219
-rw-r--r--firmware/asm/sh/memmove.S222
-rw-r--r--firmware/asm/sh/memset.S109
-rw-r--r--firmware/asm/sh/strlen.S96
-rw-r--r--firmware/asm/strlen.c93
21 files changed, 3847 insertions, 1 deletions
diff --git a/firmware/asm/SOURCES b/firmware/asm/SOURCES
index 8b13789179..805727ea93 100644
--- a/firmware/asm/SOURCES
+++ b/firmware/asm/SOURCES
@@ -1 +1,7 @@
-
+memset16.c
+#if (CONFIG_PLATFORM & PLATFORM_NATIVE) || defined(HAVE_ROCKBOX_C_LIBRARY)
+memcpy.c
+memmove.c
+memset.c
+strlen.c
+#endif
diff --git a/firmware/asm/arm/memcpy.S b/firmware/asm/arm/memcpy.S
new file mode 100644
index 0000000000..2a55fb5656
--- /dev/null
+++ b/firmware/asm/arm/memcpy.S
@@ -0,0 +1,176 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * This file was originally part of the GNU C Library
+ * Contributed to glibc by MontaVista Software, Inc. (written by Nicolas Pitre)
+ * Adapted for Rockbox by Daniel Ankers
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include "config.h"
+
+/*
+ * Endian independent macros for shifting bytes within registers.
+ */
+#ifndef __ARMEB__
+#define pull lsr
+#define push lsl
+#else
+#define pull lsl
+#define push lsr
+#endif
+
+/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+
+ .section .icode,"ax",%progbits
+
+ .align 2
+ .global memcpy
+ .type memcpy,%function
+
+memcpy:
+ stmfd sp!, {r0, r4, lr} @ save dest (returned in r0), r4, return addr
+
+ subs r2, r2, #4 @ fewer than 4 bytes in total?
+ blt 8f @ yes: trailing-byte copy only
+ ands ip, r0, #3 @ dest word-aligned?
+ bne 9f @ no: copy 1..3 bytes to align dest first
+ ands ip, r1, #3 @ source word-aligned as well?
+ bne 10f @ no: word copy with byte shifting
+
+1: subs r2, r2, #(28) @ 32+ bytes left (4 already subtracted)?
+ stmfd sp!, {r5 - r8} @ grab extra scratch regs for burst copy
+ blt 5f @ no full 32-byte block: do leftovers
+
+2:
+3:
+4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr} @ burst-copy 32 bytes/pass
+ subs r2, r2, #32
+ stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ bge 3b
+
+5: ands ip, r2, #28 @ bytes left in whole words (0..28)
+ rsb ip, ip, #32 @ byte offset into load table below
+ addne pc, pc, ip @ C is always clear here
+ b 7f @ ip was 0: no whole words left
+6: nop @ computed-jump table: loads 1..7 words
+ ldr r3, [r1], #4
+ ldr r4, [r1], #4
+ ldr r5, [r1], #4
+ ldr r6, [r1], #4
+ ldr r7, [r1], #4
+ ldr r8, [r1], #4
+ ldr lr, [r1], #4
+
+ add pc, pc, ip @ same trick for the matching stores
+ nop
+ nop
+ str r3, [r0], #4
+ str r4, [r0], #4
+ str r5, [r0], #4
+ str r6, [r0], #4
+ str r7, [r0], #4
+ str r8, [r0], #4
+ str lr, [r0], #4
+
+7: ldmfd sp!, {r5 - r8} @ restore burst scratch regs
+
+8: movs r2, r2, lsl #31 @ Z clear = bit 0 (1 byte), C = bit 1 (2 bytes)
+ ldrneb r3, [r1], #1
+ ldrcsb r4, [r1], #1
+ ldrcsb ip, [r1]
+ strneb r3, [r0], #1
+ strcsb r4, [r0], #1
+ strcsb ip, [r0]
+
+ ldmpc regs="r0, r4" @ restore r0 (dest), r4 and return
+
+9: rsb ip, ip, #4 @ 1..3 leading bytes to word-align dest
+ cmp ip, #2
+ ldrgtb r3, [r1], #1
+ ldrgeb r4, [r1], #1
+ ldrb lr, [r1], #1
+ strgtb r3, [r0], #1
+ strgeb r4, [r0], #1
+ subs r2, r2, ip @ account for the alignment bytes
+ strb lr, [r0], #1
+ blt 8b @ fewer than 4 bytes left: finish bytewise
+ ands ip, r1, #3 @ source aligned too, now?
+ beq 1b @ yes: take the fast aligned path
+
+10: bic r1, r1, #3 @ round source down to word bound
+ cmp ip, #2
+ ldr lr, [r1], #4 @ preload first partial word
+ beq 17f @ source offset 2: merge with 16-bit shifts
+ bgt 18f @ source offset 3: merge with 24/8-bit shifts
+
+
+ .macro forward_copy_shift pull push @ word copy merging misaligned source words
+
+ subs r2, r2, #28
+ blt 14f
+
+11: stmfd sp!, {r5 - r9}
+
+12:
+13: ldmia r1!, {r4, r5, r6, r7} @ 32 bytes/pass, splicing adjacent words
+ mov r3, lr, pull #\pull
+ subs r2, r2, #32
+ ldmia r1!, {r8, r9, ip, lr}
+ orr r3, r3, r4, push #\push
+ mov r4, r4, pull #\pull
+ orr r4, r4, r5, push #\push
+ mov r5, r5, pull #\pull
+ orr r5, r5, r6, push #\push
+ mov r6, r6, pull #\pull
+ orr r6, r6, r7, push #\push
+ mov r7, r7, pull #\pull
+ orr r7, r7, r8, push #\push
+ mov r8, r8, pull #\pull
+ orr r8, r8, r9, push #\push
+ mov r9, r9, pull #\pull
+ orr r9, r9, ip, push #\push
+ mov ip, ip, pull #\pull
+ orr ip, ip, lr, push #\push
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r9, ip}
+ bge 12b
+
+ ldmfd sp!, {r5 - r9}
+
+14: ands ip, r2, #28 @ whole words still to go
+ beq 16f
+
+15: mov r3, lr, pull #\pull @ one spliced word per pass
+ ldr lr, [r1], #4
+ subs ip, ip, #4
+ orr r3, r3, lr, push #\push
+ str r3, [r0], #4
+ bgt 15b
+
+16: sub r1, r1, #(\push / 8) @ undo the partial-word read-ahead
+ b 8b @ tail bytes handled by common code above
+
+ .endm
+
+
+ forward_copy_shift pull=8 push=24 @ source offset 1 (fallthrough from 10:)
+
+17: forward_copy_shift pull=16 push=16 @ source offset 2
+
+18: forward_copy_shift pull=24 push=8 @ source offset 3
+
diff --git a/firmware/asm/arm/memmove.S b/firmware/asm/arm/memmove.S
new file mode 100644
index 0000000000..d8cab048be
--- /dev/null
+++ b/firmware/asm/arm/memmove.S
@@ -0,0 +1,190 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * This file was originally part of the GNU C Library
+ * Contributed to glibc by MontaVista Software, Inc. (written by Nicolas Pitre)
+ * Adapted for Rockbox by Daniel Ankers
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include "config.h"
+
+/*
+ * Endian independent macros for shifting bytes within registers.
+ */
+#ifndef __ARMEB__
+#define pull lsr
+#define push lsl
+#else
+#define pull lsl
+#define push lsr
+#endif
+
+ .text
+
+/*
+ * Prototype: void *memmove(void *dest, const void *src, size_t n);
+ *
+ * Note:
+ *
+ * If the memory regions don't overlap, we simply branch to memcpy which is
+ * normally a bit faster. Otherwise the copy is done going downwards.
+ */
+
+ .section .icode,"ax",%progbits
+
+ .align 2
+ .global memmove
+ .type memmove,%function
+
+memmove:
+
+ subs ip, r0, r1 @ ip = dest - src
+ cmphi r2, ip @ dest above src and regions overlapping?
+ bls memcpy @ no: forward copy is safe (and faster)
+
+ stmfd sp!, {r0, r4, lr} @ save dest (returned in r0), r4, return addr
+ add r1, r1, r2 @ point both pointers at the end;
+ add r0, r0, r2 @ copy proceeds downwards
+ subs r2, r2, #4 @ fewer than 4 bytes in total?
+ blt 8f @ yes: trailing-byte copy only
+ ands ip, r0, #3 @ dest word-aligned?
+ bne 9f @ no: copy 1..3 bytes down to align
+ ands ip, r1, #3 @ source word-aligned as well?
+ bne 10f @ no: word copy with byte shifting
+
+1: subs r2, r2, #(28) @ 32+ bytes left (4 already subtracted)?
+ stmfd sp!, {r5 - r8} @ grab extra scratch regs for burst copy
+ blt 5f @ no full 32-byte block: do leftovers
+
+2:
+3:
+4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr} @ 32 bytes/pass, descending
+ subs r2, r2, #32
+ stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ bge 3b
+
+5: ands ip, r2, #28 @ bytes left in whole words (0..28)
+ rsb ip, ip, #32 @ byte offset into load table below
+ addne pc, pc, ip @ C is always clear here
+ b 7f @ ip was 0: no whole words left
+6: nop @ computed-jump table: loads 1..7 words
+ ldr r3, [r1, #-4]!
+ ldr r4, [r1, #-4]!
+ ldr r5, [r1, #-4]!
+ ldr r6, [r1, #-4]!
+ ldr r7, [r1, #-4]!
+ ldr r8, [r1, #-4]!
+ ldr lr, [r1, #-4]!
+
+ add pc, pc, ip @ same trick for the matching stores
+ nop
+ nop
+ str r3, [r0, #-4]!
+ str r4, [r0, #-4]!
+ str r5, [r0, #-4]!
+ str r6, [r0, #-4]!
+ str r7, [r0, #-4]!
+ str r8, [r0, #-4]!
+ str lr, [r0, #-4]!
+
+7: ldmfd sp!, {r5 - r8} @ restore burst scratch regs
+
+8: movs r2, r2, lsl #31 @ Z clear = bit 0 (1 byte), C = bit 1 (2 bytes)
+ ldrneb r3, [r1, #-1]!
+ ldrcsb r4, [r1, #-1]!
+ ldrcsb ip, [r1, #-1]
+ strneb r3, [r0, #-1]!
+ strcsb r4, [r0, #-1]!
+ strcsb ip, [r0, #-1]
+ ldmpc regs="r0, r4" @ restore r0 (dest), r4 and return
+
+9: cmp ip, #2 @ ip = r0 & 3: bytes to copy down to align
+ ldrgtb r3, [r1, #-1]!
+ ldrgeb r4, [r1, #-1]!
+ ldrb lr, [r1, #-1]!
+ strgtb r3, [r0, #-1]!
+ strgeb r4, [r0, #-1]!
+ subs r2, r2, ip @ account for the alignment bytes
+ strb lr, [r0, #-1]!
+ blt 8b @ fewer than 4 bytes left: finish bytewise
+ ands ip, r1, #3 @ source aligned too, now?
+ beq 1b @ yes: take the fast aligned path
+
+10: bic r1, r1, #3 @ round source down to word bound
+ cmp ip, #2
+ ldr r3, [r1, #0] @ preload first partial word
+ beq 17f @ source offset 2: merge with 16-bit shifts
+ blt 18f @ source offset 1: merge with 24/8-bit shifts
+
+
+ .macro backward_copy_shift push pull @ descending copy merging misaligned words
+
+ subs r2, r2, #28
+ blt 14f
+
+11: stmfd sp!, {r5 - r9}
+
+12:
+13: ldmdb r1!, {r7, r8, r9, ip} @ 32 bytes/pass, splicing adjacent words
+ mov lr, r3, push #\push
+ subs r2, r2, #32
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr lr, lr, ip, pull #\pull
+ mov ip, ip, push #\push
+ orr ip, ip, r9, pull #\pull
+ mov r9, r9, push #\push
+ orr r9, r9, r8, pull #\pull
+ mov r8, r8, push #\push
+ orr r8, r8, r7, pull #\pull
+ mov r7, r7, push #\push
+ orr r7, r7, r6, pull #\pull
+ mov r6, r6, push #\push
+ orr r6, r6, r5, pull #\pull
+ mov r5, r5, push #\push
+ orr r5, r5, r4, pull #\pull
+ mov r4, r4, push #\push
+ orr r4, r4, r3, pull #\pull
+ stmdb r0!, {r4 - r9, ip, lr}
+ bge 12b
+
+ ldmfd sp!, {r5 - r9}
+
+14: ands ip, r2, #28 @ whole words still to go
+ beq 16f
+
+15: mov lr, r3, push #\push @ one spliced word per pass
+ ldr r3, [r1, #-4]!
+ subs ip, ip, #4
+ orr lr, lr, r3, pull #\pull
+ str lr, [r0, #-4]!
+ bgt 15b
+
+16: add r1, r1, #(\pull / 8) @ undo the partial-word read-ahead
+ b 8b @ tail bytes handled by common code above
+
+ .endm
+
+
+ backward_copy_shift push=8 pull=24 @ source offset 3 (fallthrough from 10:)
+
+17: backward_copy_shift push=16 pull=16 @ source offset 2
+
+18: backward_copy_shift push=24 pull=8 @ source offset 1
+
+
diff --git a/firmware/asm/arm/memset.S b/firmware/asm/arm/memset.S
new file mode 100644
index 0000000000..682da874ce
--- /dev/null
+++ b/firmware/asm/arm/memset.S
@@ -0,0 +1,98 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 by Thom Johansen
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",%progbits
+
+ .align 2
+
+/* The following code is based on code found in Linux kernel version 2.6.15.3
+ * linux/arch/arm/lib/memset.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ */
+
+/* This code will align a pointer for memset, if needed */
+1: cmp r2, #4 @ 1 do we have enough
+ blt 5f @ 1 bytes to align with?
+ cmp r3, #2 @ 1 r3 = r0 & 3: store 1..3 bytes downwards
+ strgtb r1, [r0, #-1]! @ 1
+ strgeb r1, [r0, #-1]! @ 1
+ strb r1, [r0, #-1]! @ 1
+ sub r2, r2, r3 @ 1 r2 = r2 - r3
+ b 2f @ rejoin main path, now word-aligned
+
+ .global memset
+ .type memset,%function
+memset:
+ add r0, r0, r2 @ we'll write backwards in memory
+ ands r3, r0, #3 @ 1 unaligned?
+ bne 1b @ 1
+2:
+/*
+ * we know that the pointer in r0 is aligned to a word boundary.
+ */
+ orr r1, r1, r1, lsl #8 @ replicate fill byte into all 4 bytes
+ orr r1, r1, r1, lsl #16
+ mov r3, r1
+ cmp r2, #16 @ short fill: skip the burst loop
+ blt 5f
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+ str lr, [sp, #-4]!
+ mov ip, r1
+ mov lr, r1
+
+3: subs r2, r2, #64
+ stmgedb r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
+ stmgedb r0!, {r1, r3, ip, lr}
+ stmgedb r0!, {r1, r3, ip, lr}
+ stmgedb r0!, {r1, r3, ip, lr}
+ bgt 3b
+ ldrpc cond=eq @ Now <64 bytes to go.
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+ tst r2, #32 @ 32-byte chunk
+ stmnedb r0!, {r1, r3, ip, lr}
+ stmnedb r0!, {r1, r3, ip, lr}
+ tst r2, #16 @ 16-byte chunk
+ stmnedb r0!, {r1, r3, ip, lr}
+ ldr lr, [sp], #4 @ restore return address
+
+5: tst r2, #8 @ 8-byte chunk
+ stmnedb r0!, {r1, r3}
+ tst r2, #4 @ one word
+ strne r1, [r0, #-4]!
+/*
+ * When we get here, we've got less than 4 bytes to zero. We
+ * may have an unaligned pointer as well.
+ */
+6: tst r2, #2 @ two bytes
+ strneb r1, [r0, #-1]!
+ strneb r1, [r0, #-1]!
+ tst r2, #1 @ final byte
+ strneb r1, [r0, #-1]!
+ bx lr
+.end:
+ .size memset,.end-memset
diff --git a/firmware/asm/arm/memset16.S b/firmware/asm/arm/memset16.S
new file mode 100644
index 0000000000..5c787b1bed
--- /dev/null
+++ b/firmware/asm/arm/memset16.S
@@ -0,0 +1,82 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 by Thom Johansen
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",%progbits
+
+ .align 2
+
+/* The following code is based on code from the Linux kernel version 2.6.15.3,
+ * linux/arch/arm/lib/memset.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ */
+
+ .global memset16
+ .type memset16,%function
+memset16:
+ tst r0, #2 @ unaligned?
+ cmpne r2, #0 @ ...and at least one halfword to store?
+ strneh r1, [r0], #2 @ store one halfword to align
+ subne r2, r2, #1 @ count is in halfwords
+
+/*
+ * we know that the pointer in r0 is aligned to a word boundary.
+ */
+ orr r1, r1, r1, lsl #16 @ replicate fill halfword into both halves
+ mov r3, r1
+ cmp r2, #8 @ short fill (<16 bytes): skip burst loop
+ blt 4f
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+ str lr, [sp, #-4]!
+ mov ip, r1
+ mov lr, r1
+
+2: subs r2, r2, #32 @ 32 halfwords per pass
+ stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
+ stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia r0!, {r1, r3, ip, lr}
+ bgt 2b
+ ldrpc cond=eq @ Now <64 bytes to go.
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+ tst r2, #16 @ 16 halfwords (32 bytes)
+ stmneia r0!, {r1, r3, ip, lr}
+ stmneia r0!, {r1, r3, ip, lr}
+ tst r2, #8 @ 8 halfwords (16 bytes)
+ stmneia r0!, {r1, r3, ip, lr}
+ ldr lr, [sp], #4 @ restore return address
+
+4: tst r2, #4 @ 4 halfwords (8 bytes)
+ stmneia r0!, {r1, r3}
+ tst r2, #2 @ 2 halfwords (one word)
+ strne r1, [r0], #4
+
+ tst r2, #1 @ final halfword
+ strneh r1, [r0], #2
+ bx lr
+.end:
+ .size memset16,.end-memset16
diff --git a/firmware/asm/m68k/memcpy.S b/firmware/asm/m68k/memcpy.S
new file mode 100644
index 0000000000..9762e31e02
--- /dev/null
+++ b/firmware/asm/m68k/memcpy.S
@@ -0,0 +1,682 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2004-2005 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+#define FULLSPEED /* use burst writing for word aligned destinations */
+ .align 2
+ .global memcpy
+ .global __memcpy_fwd_entry
+ .type memcpy,@function
+
+/* Copies <length> bytes of data in memory from <source> to <dest>
+ * This version is optimized for speed
+ *
+ * arguments:
+ * (4,%sp) - destination address
+ * (8,%sp) - source address
+ * (12,%sp) - length
+ *
+ * return value:
+ * %d0 - destination address (like ANSI version)
+ *
+ * register usage:
+ * %a0 - current source address
+ * %a1 - current dest address
+ * %a2 - source end address (in line-copy loops)
+ * %d0 - data / scratch
+ * %d1 - source end address (byte and longword copy) / data / scratch
+ * %d2 - data / scratch
+ * %d3..%d7 - data
+ *
+ * For maximum speed this routine reads and writes whole lines using burst
+ * move (movem.l) where possible. For byte aligned destinations (long+1 and
+ * long+3) it writes longwords only. Same goes for word aligned destinations
+ * if FULLSPEED is undefined.
+ */
+memcpy:
+ move.l (4,%sp),%a1 /* Destination */
+ move.l (8,%sp),%a0 /* Source */
+ move.l (12,%sp),%d1 /* Length */
+
+__memcpy_fwd_entry:
+ add.l %a0,%d1 /* %d1 = source end */
+
+ move.l %a0,%d0
+ addq.l #7,%d0
+ and.l #0xFFFFFFFC,%d0 /* %d0 = first source long bound + 4 */
+ cmp.l %d0,%d1 /* at least one aligned longword to copy? */
+ blo.w .bytes2_start /* no, jump directly to trailing byte loop */
+
+ subq.l #4,%d0 /* %d0 = first source long bound */
+ cmp.l %a0,%d0 /* any bytes to copy? */
+ jls .bytes1_end /* no: skip byte loop */
+
+ /* leading byte loop: copies 0..3 bytes */
+.bytes1_loop:
+ move.b (%a0)+,(%a1)+ /* copy byte */
+ cmp.l %a0,%d0 /* runs %a0 up to first long bound */
+ jhi .bytes1_loop
+
+.bytes1_end:
+ moveq.l #31,%d0
+ add.l %a0,%d0
+ and.l #0xFFFFFFF0,%d0 /* %d0 = first source line bound + 16 */
+ cmp.l %d0,%d1 /* at least one aligned line to copy? */
+ blo.w .long_start /* no: jump to longword copy loop */
+
+ lea.l (-28,%sp),%sp /* free up some registers */
+ movem.l %d2-%d7/%a2,(%sp)
+
+ moveq.l #16,%d2
+ sub.l %d2,%d0 /* %d0 = first source line bound */
+ move.l %d1,%a2 /* %a2 = end address */
+ lea.l (-15,%a2),%a2 /* adjust end address for loops doing 16 bytes/ pass */
+ move.l %a1,%d1
+ moveq.l #3,%d2 /* mask */
+ and.l %d2,%d1
+ jmp.l (2,%pc,%d1.l*4) /* switch (dest_addr & 3) */
+ bra.w .lines_do0_start
+ bra.w .lines_do1_start
+ bra.w .lines_do2_start
+ /* bra.w .lines_do3_start implicit */
+
+ /* byte aligned destination (long + 3): use line burst reads in main loop */
+.lines_do3_start:
+ moveq.l #24,%d1 /* shift count for shifting by 3 bytes */
+ cmp.l %a0,%d0 /* any leading longwords? */
+ jhi .lines_do3_head_start /* yes: leading longword copy */
+
+ movem.l (%a0),%d4-%d7 /* load first line */
+ lea.l (16,%a0),%a0
+ move.l %d4,%d2
+ lsr.l %d1,%d2 /* get high byte of first longword */
+ move.b %d2,(%a1)+ /* store byte */
+ jra .lines_do3_entry /* jump into main loop */
+
+.lines_do3_head_start:
+ move.l (%a0)+,%d7 /* load first longword */
+ move.l %d7,%d2
+ lsr.l %d1,%d2 /* get high byte */
+ move.b %d2,(%a1)+ /* store byte */
+ jra .lines_do3_head_entry /* jump into leading longword loop */
+
+.lines_do3_head_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ move.l %d7,%d2
+ lsr.l %d1,%d2 /* get high byte */
+ or.l %d2,%d6 /* combine with old lower 3 bytes */
+ move.l %d6,(%a1)+ /* store longword */
+.lines_do3_head_entry:
+ lsl.l #8,%d7 /* shift up lower 3 bytes */
+ cmp.l %a0,%d0 /* runs %a0 up to first line bound */
+ jhi .lines_do3_head_loop
+
+.lines_do3_loop:
+ move.l %d7,%d3 /* move last longword of old line away */
+ movem.l (%a0),%d4-%d7 /* load new line */
+ lea.l (16,%a0),%a0
+ move.l %d4,%d2
+ lsr.l %d1,%d2 /* get high byte of 1st longword */
+ or.l %d2,%d3 /* combine with old lower 3 bytes */
+ move.l %d3,(%a1)+ /* store longword */
+.lines_do3_entry:
+ lsl.l #8,%d4 /* shift up lower 3 bytes */
+ move.l %d5,%d2
+ lsr.l %d1,%d2 /* get high byte of 2nd longword */
+ or.l %d2,%d4 /* combine with 1st lower 3 bytes */
+ move.l %d4,(%a1)+ /* store longword */
+ lsl.l #8,%d5 /* shift up lower 3 bytes */
+ move.l %d6,%d2
+ lsr.l %d1,%d2 /* get high byte of 3rd longword */
+ or.l %d2,%d5 /* combine with 2nd lower 3 bytes */
+ move.l %d5,(%a1)+ /* store longword */
+ lsl.l #8,%d6 /* shift up lower 3 bytes */
+ move.l %d7,%d2
+ lsr.l %d1,%d2 /* get high byte of 4th longword */
+ or.l %d2,%d6 /* combine with 3rd lower 3 bytes */
+ move.l %d6,(%a1)+ /* store longword */
+ lsl.l #8,%d7 /* shift up lower 3 bytes */
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_do3_loop
+
+ lea.l (12,%a2),%a2 /* readjust end address for doing longwords */
+ cmp.l %a0,%a2 /* any trailing longwords? */
+ jls .lines_do3_tail_end /* no: just store last lower 3 bytes */
+
+.lines_do3_tail_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ move.l %d7,%d2
+ lsr.l %d1,%d2 /* get high byte */
+ or.l %d2,%d6 /* combine with old lower 3 bytes */
+ move.l %d6,(%a1)+ /* store longword */
+ lsl.l #8,%d7 /* shift up lower 3 bytes */
+ cmp.l %a0,%a2 /* runs %a0 up to last long bound */
+ jhi .lines_do3_tail_loop
+
+.lines_do3_tail_end:
+ swap %d7 /* get high word */
+ move.w %d7,(%a1)+ /* store word */
+ lsr.l %d1,%d7 /* get moved-up low byte */
+ move.b %d7,(%a1)+ /* store byte */
+ jra .lines_end
+
+ /* byte aligned destination (long + 1): use line burst reads in main loop */
+.lines_do1_start:
+ moveq.l #24,%d1 /* shift count for shifting by 3 bytes */
+ cmp.l %a0,%d0 /* any leading longwords? */
+ jhi .lines_do1_head_start /* yes: leading longword copy */
+
+ movem.l (%a0),%d4-%d7 /* load first line */
+ lea.l (16,%a0),%a0
+ move.l %d4,%d2 /* first longword, bytes 3210 */
+ lsr.l #8,%d2 /* first longword, bytes .321 */
+ swap %d2 /* first longword, bytes 21.3 */
+ move.b %d2,(%a1)+ /* store byte */
+ swap %d2 /* first longword, bytes .321 */
+ move.w %d2,(%a1)+ /* store word */
+ jra .lines_do1_entry
+
+.lines_do1_head_start:
+ move.l (%a0)+,%d7 /* load first longword */
+ move.l %d7,%d2 /* first longword, bytes 3210 */
+ lsr.l #8,%d2 /* first longword, bytes .321 */
+ swap %d2 /* first longword, bytes 21.3 */
+ move.b %d2,(%a1)+ /* store byte */
+ swap %d2 /* first longword, bytes .321 */
+ move.w %d2,(%a1)+ /* store word */
+ jra .lines_do1_head_entry
+
+.lines_do1_head_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ move.l %d7,%d2
+ lsr.l #8,%d2 /* get upper 3 bytes */
+ or.l %d2,%d6 /* combine with old low byte */
+ move.l %d6,(%a1)+ /* store longword */
+.lines_do1_head_entry:
+ lsl.l %d1,%d7 /* shift up low byte */
+ cmp.l %a0,%d0 /* runs %a0 up to first line bound */
+ jhi .lines_do1_head_loop
+
+.lines_do1_loop:
+ move.l %d7,%d3 /* move last longword of old line away */
+ movem.l (%a0),%d4-%d7 /* load new line */
+ lea.l (16,%a0),%a0
+ move.l %d4,%d2
+ lsr.l #8,%d2 /* get upper 3 bytes of 1st longword */
+ or.l %d2,%d3 /* combine with low byte of old longword */
+ move.l %d3,(%a1)+ /* store longword */
+.lines_do1_entry:
+ lsl.l %d1,%d4 /* shift up low byte */
+ move.l %d5,%d2
+ lsr.l #8,%d2 /* get upper 3 bytes of 2nd longword */
+ or.l %d2,%d4 /* combine with low byte of 1st longword */
+ move.l %d4,(%a1)+ /* store longword */
+ lsl.l %d1,%d5 /* shift up low byte */
+ move.l %d6,%d2
+ lsr.l #8,%d2 /* get upper 3 bytes of 3rd longword */
+ or.l %d2,%d5 /* combine with low byte of 2nd longword */
+ move.l %d5,(%a1)+ /* store longword */
+ lsl.l %d1,%d6 /* shift up low byte */
+ move.l %d7,%d2
+ lsr.l #8,%d2 /* get upper 3 bytes of 4th longword */
+ or.l %d2,%d6 /* combine with low byte of 4th longword */
+ move.l %d6,(%a1)+ /* store longword */
+ lsl.l %d1,%d7 /* shift up low byte */
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_do1_loop
+
+ lea.l (12,%a2),%a2 /* readjust end address for doing longwords */
+ cmp.l %a0,%a2 /* any trailing longwords? */
+ jls .lines_do1_tail_end /* no: just store last low byte */
+
+.lines_do1_tail_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ move.l %d7,%d2
+ lsr.l #8,%d2 /* get upper 3 bytes */
+ or.l %d2,%d6 /* combine with old low byte */
+ move.l %d6,(%a1)+ /* store longword */
+ lsl.l %d1,%d7 /* shift up low byte */
+ cmp.l %a0,%a2 /* runs %a0 up to last long bound */
+ jhi .lines_do1_tail_loop
+
+.lines_do1_tail_end:
+ lsr.l %d1,%d7 /* get shifted-up low byte */
+ move.b %d7,(%a1)+ /* store byte */
+ jra .lines_end
+
+ /* long aligned destination (line + 0/4/8/12): head */
+.lines_do0_head_loop:
+ move.l (%a0)+,(%a1)+ /* copy longword */
+.lines_do0_start:
+ cmp.l %a0,%d0 /* runs %a0 up to first line bound */
+ jhi .lines_do0_head_loop
+
+.lines_do0_head_end:
+ move.l %a1,%d1
+ lsr.l #2,%d1
+ moveq.l #3,%d0 /* mask */
+ and.l %d0,%d1
+ moveq.l #16,%d0 /* address increment for one main loop pass */
+ jmp.l (2,%pc,%d1.l*2) /* switch ((dest_addr >> 2) & 3) */
+ bra.b .lines_lo0_start
+ bra.b .lines_lo4_start
+ bra.b .lines_lo8_start
+ /* bra.b .lines_lo12_start implicit */
+
+ /* long aligned destination (line + 12): use line bursts in the loop */
+.lines_lo12_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ move.l %d4,(%a1)+ /* store 1st longword */
+ cmp.l %a0,%a2 /* any full lines? */
+ jls .lines_lo12_end /* no: skip main loop */
+
+.lines_lo12_loop:
+ move.l %d5,%d1 /* move last 3 longwords of old line away */
+ move.l %d6,%d2
+ move.l %d7,%d3
+ movem.l (%a0),%d4-%d7 /* load new line */
+ add.l %d0,%a0
+ movem.l %d1-%d4,(%a1) /* store line (3 old + 1 new longwords) */
+ add.l %d0,%a1
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo12_loop
+
+ /* long aligned destination (line + 0/4/8/12): tail */
+.lines_lo12_end:
+ move.l %d5,(%a1)+ /* store 3rd last longword */
+.lines_lo8_end:
+ move.l %d6,(%a1)+ /* store 2nd last longword */
+.lines_lo4_end:
+ move.l %d7,(%a1)+ /* store last longword */
+.lines_lo0_end:
+ lea.l (12,%a2),%a2 /* readjust end address for doing longwords */
+ cmp.l %a0,%a2 /* any trailing longwords? */
+ jls .lines_end /* no: get outta here */
+
+.lines_do0_tail_loop:
+ move.l (%a0)+,(%a1)+ /* copy longword */
+ cmp.l %a0,%a2 /* runs %a0 up to last long bound */
+ jhi .lines_do0_tail_loop
+
+ jra .lines_end
+
+ /* line aligned destination: use line bursts in the loop */
+.lines_lo0_start:
+.lines_lo0_loop:
+ movem.l (%a0),%d4-%d7 /* load line */
+ add.l %d0,%a0
+ movem.l %d4-%d7,(%a1) /* store line */
+ add.l %d0,%a1
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo0_loop
+
+ jra .lines_lo0_end /* handle trailing longwords */
+
+ /* long aligned destination (line + 4): use line bursts in the loop */
+.lines_lo4_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ move.l %d4,(%a1)+ /* store 1st longword */
+ move.l %d5,(%a1)+ /* store 2nd longword */
+ move.l %d6,(%a1)+ /* store 3rd longword */
+ cmp.l %a0,%a2 /* any full lines? */
+ jls .lines_lo4_end /* no: skip main loop */
+
+.lines_lo4_loop:
+ move.l %d7,%d3 /* move last longword of old line away */
+ movem.l (%a0),%d4-%d7 /* load new line */
+ add.l %d0,%a0
+ movem.l %d3-%d6,(%a1) /* store line (1 old + 3 new longwords) */
+ add.l %d0,%a1
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo4_loop
+
+ jra .lines_lo4_end /* handle trailing longwords */
+
+ /* long aligned destination (line + 8): use line bursts in the loop */
+.lines_lo8_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ move.l %d4,(%a1)+ /* store 1st longword */
+ move.l %d5,(%a1)+ /* store 2nd longword */
+ cmp.l %a0,%a2
+ jls .lines_lo8_end
+
+.lines_lo8_loop:
+ move.l %d6,%d2 /* move last 2 longwords of old line away */
+ move.l %d7,%d3
+ movem.l (%a0),%d4-%d7 /* load new line */
+ add.l %d0,%a0
+ movem.l %d2-%d5,(%a1) /* store line (2 old + 2 new longwords) */
+ add.l %d0,%a1
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo8_loop
+
+ jra .lines_lo8_end /* handle trailing longwords */
+
+#ifdef FULLSPEED
+
+ /* word aligned destination (line + 2/6/10/14): head */
+.lines_do2_start:
+ cmp.l %a0,%d0 /* any leading longwords? */
+ jls .lines_do2_selector /* no: jump to mainloop selector */
+
+ move.l (%a0)+,%d7 /* load first longword */
+ swap %d7 /* swap words */
+ move.w %d7,(%a1)+ /* store high word */
+ cmp.l %a0,%d0 /* any more longword? */
+ jls .lines_do2_head_end /* no: skip head loop */
+
+.lines_do2_head_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ swap %d7 /* swap words */
+ move.w %d7,%d6 /* combine high word with old low word */
+ move.l %d6,(%a1)+ /* store longword */
+ cmp.l %a0,%d0 /* runs %a0 up to first line bound */
+ jhi .lines_do2_head_loop
+
+.lines_do2_head_end:
+ swap %d7 /* undo swap */
+ move.w %d7,(%a1)+ /* store word */
+
+.lines_do2_selector:
+ move.l %a1,%d1
+ lsr.l #2,%d1
+ moveq.l #3,%d0 /* mask */
+ and.l %d0,%d1
+ moveq.l #16,%d0 /* address increment for one main loop pass */
+ jmp.l (2,%pc,%d1.l*4) /* switch ((dest_addr >> 2) & 3) */
+ bra.w .lines_lo2_start
+ bra.w .lines_lo6_start
+ bra.w .lines_lo10_start
+ /* bra.w .lines_lo14_start implicit */
+
+ /* word aligned destination (line + 14): use line bursts in the loop */
+.lines_lo14_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,(%a1)+ /* store word */
+ jra .lines_lo14_entry /* jump into main loop */
+
+.lines_lo14_loop:
+ move.l %d4,%d0 /* move old line away */
+ move.l %d5,%d1
+ move.l %d6,%d2
+ move.l %d7,%d3
+ movem.l (%a0),%d4-%d7 /* load new line */
+ lea.l (16,%a0),%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,%d3 /* combine 1st high word with old low word */
+ movem.l %d0-%d3,(%a1) /* store line */
+ lea.l (16,%a1),%a1
+.lines_lo14_entry:
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3rd high word with 2nd low word */
+ swap %d7 /* swap words of 4th long */
+ move.w %d7,%d6 /* combine 4th high word with 3rd low word */
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo14_loop
+
+ /* word aligned destination (line + 2/6/10/14): tail */
+.lines_lo14_end:
+ move.l %d4,(%a1)+ /* store third last longword */
+.lines_lo10_end:
+ move.l %d5,(%a1)+ /* store second last longword */
+.lines_lo6_end:
+ move.l %d6,(%a1)+ /* store last longword */
+.lines_lo2_end:
+ lea.l (12,%a2),%a2 /* readjust end address for doing longwords */
+ cmp.l %a0,%a2 /* any trailing longwords? */
+ jls .lines_do2_tail_end /* no: skip tail loop */
+
+.lines_do2_tail_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ swap %d7 /* swap words */
+ move.w %d7,%d6 /* combine high word with old low word */
+ move.l %d6,(%a1)+ /* store longword */
+ cmp.l %a0,%a2 /* runs %a0 up to last long bound */
+ jhi .lines_do2_tail_loop
+
+.lines_do2_tail_end:
+ swap %d7 /* undo swap */
+ move.w %d7,(%a1)+ /* store last word */
+ jra .lines_end
+
+ /* word aligned destination (line + 2): use line bursts in the loop */
+.lines_lo2_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,(%a1)+ /* store high word */
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3nd high word with 2nd low word */
+ swap %d7 /* swap words of 4th long */
+ move.w %d7,%d6 /* combine 4th high word with 3rd low word */
+ move.l %d4,(%a1)+ /* store 1st longword */
+ move.l %d5,(%a1)+ /* store 2nd longword */
+ move.l %d6,(%a1)+ /* store 3rd longword */
+ cmp.l %a0,%a2 /* any full lines? */
+ jls .lines_lo2_end /* no: skip main loop */
+
+.lines_lo2_loop:
+ move.l %d7,%d3 /* move last longword of old line away */
+ movem.l (%a0),%d4-%d7 /* load line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,%d3 /* combine 1st high word with old low word */
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3rd high word with 2nd low word */
+ swap %d7 /* swap words of 4th long */
+ move.w %d7,%d6 /* combine 4th high word with 3rd low word */
+ movem.l %d3-%d6,(%a1) /* store line */
+ add.l %d0,%a1
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo2_loop
+
+ jra .lines_lo2_end /* handle trailing longwords */
+
+ /* word aligned destination (line + 6): use line bursts in the loop */
+.lines_lo6_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,(%a1)+ /* store high word */
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3rd high word with 2nd low word */
+ move.l %d4,(%a1)+ /* store 1st longword */
+ move.l %d5,(%a1)+ /* store 2nd longword */
+ jra .lines_lo6_entry /* jump into main loop */
+
+.lines_lo6_loop:
+ move.l %d6,%d2 /* move last 2 longwords of old line away */
+ move.l %d7,%d3
+ movem.l (%a0),%d4-%d7 /* load line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,%d3 /* combine 1st high word with old low word */
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3rd high word with 2nd low word */
+ movem.l %d2-%d5,(%a1) /* store line */
+ add.l %d0,%a1
+.lines_lo6_entry:
+ swap %d7 /* swap words of 4th long */
+ move.w %d7,%d6 /* combine 4th high word with 3rd low word */
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo6_loop
+
+ jra .lines_lo6_end /* handle trailing longwords */
+
+ /* word aligned destination (line + 10): use line bursts in the loop */
+.lines_lo10_start:
+ movem.l (%a0),%d4-%d7 /* load first line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,(%a1)+ /* store high word */
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ move.l %d4,(%a1)+ /* store 1st longword */
+ jra .lines_lo10_entry /* jump into main loop */
+
+.lines_lo10_loop:
+ move.l %d5,%d1 /* move last 3 longwords of old line away */
+ move.l %d6,%d2
+ move.l %d7,%d3
+ movem.l (%a0),%d4-%d7 /* load line */
+ add.l %d0,%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,%d3 /* combine 1st high word with old low word */
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ movem.l %d1-%d4,(%a1) /* store line */
+ add.l %d0,%a1
+.lines_lo10_entry:
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3rd high word with 2nd low word */
+ swap %d7 /* swap words of 4th long */
+ move.w %d7,%d6 /* combine 4th high word with 3rd low word */
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_lo10_loop
+
+ jra .lines_lo10_end /* handle trailing longwords */
+
+#else /* !FULLSPEED */
+
+ /* word aligned destination (long + 2): use line burst reads in the loop */
+.lines_do2_start:
+ cmp.l %a0,%d0 /* any leading longwords? */
+ jhi .lines_do2_head_start /* yes: leading longword copy */
+
+ movem.l (%a0),%d4-%d7 /* load first line */
+ lea.l (16,%a0),%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,(%a1)+ /* store high word */
+ jra .lines_do2_entry /* jump into main loop */
+
+.lines_do2_head_start:
+ move.l (%a0)+,%d7 /* load first longword */
+ swap %d7 /* swap words */
+ move.w %d7,(%a1)+ /* store high word */
+ cmp.l %a0,%d0 /* any full longword? */
+ jls .lines_do2_loop /* no: skip head loop */
+
+.lines_do2_head_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ swap %d7 /* swap words */
+ move.w %d7,%d6 /* combine high word with old low word */
+ move.l %d6,(%a1)+ /* store longword */
+ cmp.l %a0,%d0 /* runs %a0 up to first line bound */
+ jhi .lines_do2_head_loop
+
+.lines_do2_loop:
+ move.l %d7,%d3 /* move last longword of old line away */
+ movem.l (%a0),%d4-%d7 /* load line */
+ lea.l (16,%a0),%a0
+ swap %d4 /* swap words of 1st long */
+ move.w %d4,%d3 /* combine 1st high word with old low word */
+ move.l %d3,(%a1)+ /* store 1st longword */
+.lines_do2_entry:
+ swap %d5 /* swap words of 2nd long */
+ move.w %d5,%d4 /* combine 2nd high word with 1st low word */
+ move.l %d4,(%a1)+ /* store 2nd longword */
+ swap %d6 /* swap words of 3rd long */
+ move.w %d6,%d5 /* combine 3rd high word with 2nd low word */
+ move.l %d5,(%a1)+ /* store 3rd longword */
+ swap %d7 /* swap words of 4th long */
+ move.w %d7,%d6 /* combine 4th high word with 3rd low word */
+ move.l %d6,(%a1)+ /* store 4th longword */
+ cmp.l %a0,%a2 /* runs %a0 up to last line bound */
+ jhi .lines_do2_loop
+
+.lines_do2_end:
+ lea.l (12,%a2),%a2 /* readjust end address for doing longwords */
+ cmp.l %a0,%a2 /* any trailing longwords? */
+ jls .lines_do2_tail_end /* no: skip tail loop */
+
+.lines_do2_tail_loop:
+ move.l %d7,%d6 /* move old longword away */
+ move.l (%a0)+,%d7 /* load new longword */
+ swap %d7 /* swap words */
+ move.w %d7,%d6 /* combine high word with old low word */
+ move.l %d6,(%a1)+ /* store longword */
+ cmp.l %a0,%a2 /* runs %a0 up to last long bound */
+ jhi .lines_do2_tail_loop
+
+.lines_do2_tail_end:
+ swap %d7 /* undo swap */
+ move.w %d7,(%a1)+ /* store last word */
+ /* jra .lines_end implicit */
+
+#endif /* !FULLSPEED */
+
+.lines_end:
+ addq.l #3,%a2 /* readjust end address */
+ move.l %a2,%d1 /* end address in %d1 again */
+ movem.l (%sp),%d2-%d7/%a2 /* restore registers */
+ lea.l (28,%sp),%sp
+ jra .bytes2_start /* jump to trailing byte loop */
+
+.long_start:
+ subq.l #3,%d1 /* adjust end address for doing 4 bytes/ pass */
+
+ /* longword copy loop - no lines */
+.long_loop:
+ move.l (%a0)+,(%a1)+ /* copy longword (write can be unaligned) */
+ cmp.l %a0,%d1 /* runs %a0 up to last long bound */
+ jhi .long_loop
+
+ addq.l #3,%d1 /* readjust end address */
+ cmp.l %a0,%d1 /* any bytes left? */
+ jls .bytes2_end /* no: skip trailing byte loop */
+
+ /* trailing byte loop */
+.bytes2_loop:
+ move.b (%a0)+,(%a1)+ /* copy byte */
+.bytes2_start:
+ cmp.l %a0,%d1 /* runs %a0 up to end address */
+ jhi .bytes2_loop
+
+.bytes2_end:
+ move.l (4,%sp),%d0 /* return destination */
+ rts
+
+.end:
+ .size memcpy,.end-memcpy
diff --git a/firmware/asm/m68k/memmove.S b/firmware/asm/m68k/memmove.S
new file mode 100644
index 0000000000..736cd619e1
--- /dev/null
+++ b/firmware/asm/m68k/memmove.S
@@ -0,0 +1,670 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+#define FULLSPEED /* use burst writing for word aligned destinations */
+ .align 2
+ .global memmove
+ .type memmove,@function
+
+/* Moves <length> bytes of data in memory from <source> to <dest>
+ * Regions may overlap.
+ * This version is optimized for speed, and needs the corresponding memcpy
+ * implementation for the forward copy branch.
+ *
+ * arguments:
+ * (4,%sp) - destination address
+ * (8,%sp) - source address
+ * (12,%sp) - length
+ *
+ * return value:
+ * %d0 - destination address (like ANSI version)
+ *
+ * register usage:
+ * %a0 - current source address
+ * %a1 - current dest address
+ * %a2 - source start address (in line-copy loops)
+ * %d0 - source start address (byte and longword copy) / data / scratch
+ * %d1 - data / scratch
+ * %d2 - data / scratch
+ * %d3..%d7 - data (%d7 doubles as the 0x10 line decrement in FULLSPEED loops)
+ *
+ * For maximum speed this routine reads and writes whole lines using burst
+ * move (movem.l) where possible. For byte aligned destinations (long-1 and
+ * long-3) it writes longwords only. Same goes for word aligned destinations
+ * if FULLSPEED is undefined.
+ */
+memmove:
+ move.l (4,%sp),%a1 /* Destination */
+ move.l (8,%sp),%a0 /* Source */
+ move.l (12,%sp),%d1 /* Length */
+
+ cmp.l %a0,%a1
+ bhi.b .backward /* dest > src -> backward copy */
+ jmp __memcpy_fwd_entry
+
+.backward:
+ move.l %a0,%d0 /* %d0 = source start */
+ add.l %d1,%a0 /* %a0 = source end */
+ add.l %d1,%a1 /* %a1 = destination end */
+
+ move.l %a0,%d1
+ and.l #0xFFFFFFFC,%d1 /* %d1 = last source long bound */
+ subq.l #4,%d1
+ cmp.l %d0,%d1 /* at least one aligned longword to copy? */
+ blo.w .bytes2r_start
+
+ addq.l #4,%d1 /* %d1 = last source long bound */
+ cmp.l %d1,%a0 /* any bytes to copy? */
+ jls .bytes1r_end /* no: skip byte loop */
+
+ /* leading byte loop: copies 0..3 bytes */
+.bytes1r_loop:
+ move.b -(%a0),-(%a1) /* copy byte */
+ cmp.l %d1,%a0 /* runs %a0 down to last long bound */
+ jhi .bytes1r_loop
+
+.bytes1r_end:
+ moveq.l #-16,%d1
+ add.l %a0,%d1
+ and.l #0xFFFFFFF0,%d1 /* %d1 = last source line bound - 16 */
+ cmp.l %d0,%d1 /* at least one aligned line to copy? */
+ blo.w .longr_start /* no: jump to longword copy loop */
+
+ lea.l (-28,%sp),%sp /* free up some registers */
+ movem.l %d2-%d7/%a2,(%sp)
+
+ moveq.l #16,%d2
+ add.l %d2,%d1 /* %d1 = last source line bound */
+ move.l %d0,%a2 /* %a2 = start address */
+ lea.l (15,%a2),%a2 /* adjust start address for loops doing 16 bytes/pass */
+ move.l %a1,%d0
+ moveq.l #3,%d2 /* mask */
+ and.l %d2,%d0
+ jmp.l (2,%pc,%d0.l*4) /* switch (dest_addr & 3) */
+ bra.w .lines_do0r_start
+ bra.w .lines_do1r_start
+ bra.w .lines_do2r_start
+ /* bra.w .lines_do3r_start implicit */
+
+ /* byte aligned destination (long - 1): use line burst reads in main loop */
+.lines_do3r_start:
+ moveq.l #24,%d0 /* shift count for shifting by 3 bytes */
+ cmp.l %d1,%a0 /* any leading longwords? */
+ jhi .lines_do3r_head_start /* yes: leading longword copy */
+
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d3-%d6 /* load initial line */
+ move.l %d6,%d2 /* last longword, bytes 3210 */
+ move.b %d2,-(%a1) /* store byte */
+ lsr.l #8,%d2 /* last longword, bytes .321 */
+ move.w %d2,-(%a1) /* store word */
+ jra .lines_do3r_entry
+
+.lines_do3r_head_start:
+ move.l -(%a0),%d3 /* load initial longword */
+ move.l %d3,%d2 /* bytes 3210 */
+ move.b %d2,-(%a1) /* store byte */
+ lsr.l #8,%d2 /* bytes .321 */
+ move.w %d2,-(%a1) /* store word */
+ jra .lines_do3r_head_entry
+
+.lines_do3r_head_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.l %d3,%d2
+ lsl.l #8,%d2 /* get bytes 210. */
+ or.l %d2,%d4 /* combine with old high byte */
+ move.l %d4,-(%a1) /* store longword */
+.lines_do3r_head_entry:
+ lsr.l %d0,%d3 /* shift down high byte */
+ cmp.l %d1,%a0 /* run %a0 down to last line bound */
+ jhi .lines_do3r_head_loop
+
+.lines_do3r_loop:
+ move.l %d3,%d7 /* move first longword of last line away */
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d3-%d6 /* load new line */
+ move.l %d6,%d2
+ lsl.l #8,%d2 /* get bytes 210. of 4th longword */
+ or.l %d2,%d7 /* combine with high byte of old longword */
+ move.l %d7,-(%a1) /* store longword */
+.lines_do3r_entry:
+ lsr.l %d0,%d6 /* shift down high byte */
+ move.l %d5,%d2
+ lsl.l #8,%d2 /* get bytes 210. of 3rd longword */
+ or.l %d2,%d6 /* combine with high byte of 4th longword */
+ move.l %d6,-(%a1) /* store longword */
+ lsr.l %d0,%d5 /* shift down high byte */
+ move.l %d4,%d2
+ lsl.l #8,%d2 /* get bytes 210. of 2nd longword */
+ or.l %d2,%d5 /* combine with high byte of 3rd longword */
+ move.l %d5,-(%a1) /* store longword */
+ lsr.l %d0,%d4 /* shift down high byte */
+ move.l %d3,%d2
+ lsl.l #8,%d2 /* get bytes 210. of 1st longword */
+ or.l %d2,%d4 /* combine with high byte of 2nd longword */
+ move.l %d4,-(%a1) /* store longword */
+ lsr.l %d0,%d3 /* shift down high byte */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_do3r_loop
+
+ lea.l (-12,%a2),%a2 /* readjust start address for doing longwords */
+ cmp.l %a2,%a0 /* any trailing longwords? */
+ jls .lines_do3r_tail_end /* no: just store last high byte */
+
+.lines_do3r_tail_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.l %d3,%d2
+ lsl.l #8,%d2 /* get bytes 210. */
+ or.l %d2,%d4 /* combine with old high byte */
+ move.l %d4,-(%a1) /* store longword */
+ lsr.l %d0,%d3 /* shift down high byte */
+ cmp.l %a2,%a0 /* run %a0 down to first long bound */
+ jhi .lines_do3r_tail_loop
+
+.lines_do3r_tail_end:
+ move.b %d3,-(%a1) /* store shifted-down high byte */
+ jra .linesr_end
+
+ /* byte aligned destination (long - 3): use line burst reads in main loop */
+.lines_do1r_start:
+ moveq.l #24,%d0 /* shift count for shifting by 3 bytes */
+ cmp.l %d1,%a0 /* any leading longwords? */
+ jhi .lines_do1r_head_start /* yes: leading longword copy */
+
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d3-%d6 /* load initial line */
+ move.b %d6,-(%a1) /* store low byte of last longword */
+ jra .lines_do1r_entry
+
+.lines_do1r_head_start:
+ move.l -(%a0),%d3 /* load initial longword */
+ move.b %d3,-(%a1) /* store low byte */
+ jra .lines_do1r_head_entry
+
+.lines_do1r_head_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.l %d3,%d2
+ lsl.l %d0,%d2 /* get low byte */
+ or.l %d2,%d4 /* combine with old bytes .321 */
+ move.l %d4,-(%a1) /* store longword */
+.lines_do1r_head_entry:
+ lsr.l #8,%d3 /* get bytes .321 */
+ cmp.l %d1,%a0 /* run %a0 down to last line bound */
+ jhi .lines_do1r_head_loop
+
+.lines_do1r_loop:
+ move.l %d3,%d7 /* move first longword of old line away */
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d3-%d6 /* load new line */
+ move.l %d6,%d2
+ lsl.l %d0,%d2 /* get low byte of 4th longword */
+ or.l %d2,%d7 /* combine with bytes .321 of old longword */
+ move.l %d7,-(%a1) /* store longword */
+.lines_do1r_entry:
+ lsr.l #8,%d6 /* get bytes .321 */
+ move.l %d5,%d2
+ lsl.l %d0,%d2 /* get low byte of 3rd longword */
+ or.l %d2,%d6 /* combine with bytes .321 of 4th longword */
+ move.l %d6,-(%a1) /* store longword */
+ lsr.l #8,%d5 /* get bytes .321 */
+ move.l %d4,%d2
+ lsl.l %d0,%d2 /* get low byte of 2nd longword */
+ or.l %d2,%d5 /* combine with bytes .321 of 3rd longword */
+ move.l %d5,-(%a1) /* store longword */
+ lsr.l #8,%d4 /* get bytes .321 */
+ move.l %d3,%d2
+ lsl.l %d0,%d2 /* get low byte of 1st longword */
+ or.l %d2,%d4 /* combine with bytes .321 of 2nd longword */
+ move.l %d4,-(%a1) /* store longword */
+ lsr.l #8,%d3 /* get bytes .321 */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_do1r_loop
+
+ lea.l (-12,%a2),%a2 /* readjust start address for doing longwords */
+ cmp.l %a2,%a0 /* any trailing longwords? */
+ jls .lines_do1r_tail_end /* no: just store remaining 3 bytes */
+
+.lines_do1r_tail_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.l %d3,%d2
+ lsl.l %d0,%d2 /* get low byte */
+ or.l %d2,%d4 /* combine with old bytes .321 */
+ move.l %d4,-(%a1) /* store longword */
+ lsr.l #8,%d3 /* get bytes .321 */
+ cmp.l %a2,%a0 /* run %a0 down to first long bound */
+ jhi .lines_do1r_tail_loop
+
+.lines_do1r_tail_end:
+ move.w %d3,-(%a1) /* store word 21 */
+ swap %d3
+ move.b %d3,-(%a1) /* store byte 3 */
+ jra .linesr_end
+
+ /* long aligned destination (line - 0/4/8/12): head */
+.lines_do0r_head_loop:
+ move.l -(%a0),-(%a1) /* copy longword */
+.lines_do0r_start:
+ cmp.l %d1,%a0 /* run %a0 down to last line bound */
+ jhi .lines_do0r_head_loop
+
+.lines_do0r_head_end:
+ move.l %a1,%d1
+ lsr.l #2,%d1
+ moveq.l #3,%d0 /* mask */
+ and.l %d0,%d1
+ moveq.l #16,%d0 /* address decrement for one main loop pass */
+ jmp.l (2,%pc,%d1.l*2) /* switch ((dest_addr >> 2) & 3) */
+ bra.b .lines_lo0r_start
+ bra.b .lines_lo4r_start
+ bra.b .lines_lo8r_start
+ /* bra.b .lines_lo12r_start implicit */
+
+ /* long aligned destination (line - 4): use line bursts in the loop */
+.lines_lo12r_start:
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load initial line */
+ move.l %d4,-(%a1) /* store 4th longword */
+ move.l %d3,-(%a1) /* store 3rd longword */
+ move.l %d2,-(%a1) /* store 2nd longword */
+ cmp.l %a2,%a0 /* any full lines? */
+ jls .lines_lo12r_end /* no: skip main loop */
+
+.lines_lo12r_loop:
+ move.l %d1,%d5 /* move first longword of old line away */
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load new line */
+ sub.l %d0,%a1
+ movem.l %d2-%d5,(%a1) /* store line (1 old + 3 new longwords) */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo12r_loop
+
+ jra .lines_lo12r_end /* handle trailing longwords */
+
+ /* line aligned destination: use line bursts in the loop */
+.lines_lo0r_start:
+.lines_lo0r_loop:
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load line */
+ sub.l %d0,%a1
+ movem.l %d1-%d4,(%a1) /* store line */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo0r_loop
+
+ jra .lines_lo0r_end /* handle trailing longwords */
+
+ /* long aligned destination (line - 8): use line bursts in the loop */
+.lines_lo8r_start:
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load initial line */
+ move.l %d4,-(%a1) /* store 4th longword */
+ move.l %d3,-(%a1) /* store 3rd longword */
+ cmp.l %a2,%a0 /* any full lines? */
+ jls .lines_lo8r_end /* no: skip main loop */
+
+.lines_lo8r_loop:
+ move.l %d2,%d6 /* move first 2 longwords of old line away */
+ move.l %d1,%d5
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load new line */
+ sub.l %d0,%a1
+ movem.l %d3-%d6,(%a1) /* store line (2 old + 2 new longwords) */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo8r_loop
+
+ jra .lines_lo8r_end /* handle trailing longwords */
+
+ /* long aligned destination (line - 12): use line bursts in the loop */
+.lines_lo4r_start:
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load initial line */
+ move.l %d4,-(%a1) /* store 4th longword */
+ cmp.l %a2,%a0 /* any full lines? */
+ jls .lines_lo4r_end /* no: skip main loop */
+
+.lines_lo4r_loop:
+ move.l %d3,%d7 /* move first 3 longwords of old line away */
+ move.l %d2,%d6
+ move.l %d1,%d5
+ sub.l %d0,%a0
+ movem.l (%a0),%d1-%d4 /* load new line */
+ sub.l %d0,%a1
+ movem.l %d4-%d7,(%a1) /* store line (3 old + 1 new longwords) */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo4r_loop
+
+ /* long aligned destination (line - 0/4/8/12): tail */
+.lines_lo4r_end:
+ move.l %d3,-(%a1) /* store 3rd last longword */
+.lines_lo8r_end:
+ move.l %d2,-(%a1) /* store 2nd last longword */
+.lines_lo12r_end:
+ move.l %d1,-(%a1) /* store last longword */
+.lines_lo0r_end:
+ lea.l (-12,%a2),%a2 /* readjust start address for doing longwords */
+ cmp.l %a2,%a0 /* any trailing longwords? */
+ jls .linesr_end /* no: get outta here */
+
+.lines_do0r_tail_loop:
+ move.l -(%a0),-(%a1) /* copy longword */
+ cmp.l %a2,%a0 /* run %a0 down to first long bound */
+ jhi .lines_do0r_tail_loop
+
+ jra .linesr_end
+
+#ifdef FULLSPEED
+ /* word aligned destination (line - 2/6/10/14): head */
+.lines_do2r_start:
+ cmp.l %d1,%a0 /* any leading longwords? */
+ jls .lines_do2r_selector /* no: jump to main loop selector */
+
+ move.l -(%a0),%d3 /* load initial longword */
+ move.w %d3,-(%a1) /* store low word */
+ cmp.l %d1,%a0 /* any more longwords? */
+ jls .lines_do2r_head_end /* no: skip head loop */
+
+.lines_do2r_head_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.w %d3,%d4 /* combine low word with old high word */
+ swap %d4 /* swap words */
+ move.l %d4,-(%a1) /* store longword */
+ cmp.l %d1,%a0 /* run %a0 down to last line bound */
+ jhi .lines_do2r_head_loop
+
+.lines_do2r_head_end:
+ swap %d3 /* get high word */
+ move.w %d3,-(%a1) /* and store it */
+
+.lines_do2r_selector:
+ move.l %a1,%d1
+ lsr.l #2,%d1
+ moveq.l #3,%d0 /* mask */
+ and.l %d0,%d1
+ moveq.l #16,%d7 /* address decrement for one main loop pass */
+ jmp.l (2,%pc,%d1.l*4) /* switch ((dest_addr >> 2) & 3) */
+ bra.w .lines_lo2r_start
+ bra.w .lines_lo6r_start
+ bra.w .lines_lo10r_start
+ /* bra.w .lines_lo14r_start implicit */
+
+ /* word aligned destination (line - 2): use line bursts in the loop */
+.lines_lo14r_start:
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load initial line */
+ move.w %d3,-(%a1) /* store last low word */
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ move.w %d1,%d2 /* combine 2nd low word with 3rd high word */
+ swap %d2 /* swap words of 2nd long */
+ move.w %d0,%d1 /* combine 1st low word with 2nd high word */
+ swap %d1 /* swap words of 1st long */
+ move.l %d3,-(%a1) /* store 3rd longword */
+ move.l %d2,-(%a1) /* store 2nd longword */
+ move.l %d1,-(%a1) /* store 1st longword */
+ cmp.l %a2,%a0 /* any full lines? */
+ jls .lines_lo14r_end /* no: skip main loop */
+
+.lines_lo14r_loop:
+ move.l %d0,%d4 /* move first longword of old line away */
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load line */
+ move.w %d3,%d4 /* combine 4th low word with old high word */
+ swap %d4 /* swap words of 4th long */
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ move.w %d1,%d2 /* combine 2nd low word with 3rd high word */
+ swap %d2 /* swap words of 2nd long */
+ move.w %d0,%d1 /* combine 1st low word with 2nd high word */
+ swap %d1 /* swap words of 1st long */
+ sub.l %d7,%a1
+ movem.l %d1-%d4,(%a1) /* store line */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo14r_loop
+
+ jra .lines_lo14r_end /* handle trailing longwords */
+
+ /* word aligned destination (line - 6): use line bursts in the loop */
+.lines_lo10r_start:
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load initial line */
+ move.w %d3,-(%a1) /* store last low word */
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ move.w %d1,%d2 /* combine 2nd low word with 3rd high word */
+ swap %d2 /* swap words of 2nd long */
+ move.l %d3,-(%a1) /* store 3rd longword */
+ move.l %d2,-(%a1) /* store 2nd longword */
+ jra .lines_lo10r_entry /* jump into main loop */
+
+.lines_lo10r_loop:
+ move.l %d0,%d4 /* move first 2 longwords of old line away */
+ move.l %d1,%d5
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load line */
+ move.w %d3,%d4 /* combine 4th low word with old high word */
+ swap %d4 /* swap words of 4th long */
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ move.w %d1,%d2 /* combine 2nd low word with 3rd high word */
+ swap %d2 /* swap words of 2nd long */
+ sub.l %d7,%a1
+ movem.l %d2-%d5,(%a1) /* store line */
+.lines_lo10r_entry:
+ move.w %d0,%d1 /* combine 1st low word with 2nd high word */
+ swap %d1 /* swap words of 1st long */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo10r_loop
+
+ jra .lines_lo10r_end /* handle trailing longwords */
+
+ /* word aligned destination (line - 10): use line bursts in the loop */
+.lines_lo6r_start:
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load initial line */
+ move.w %d3,-(%a1) /* store last low word */
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ move.l %d3,-(%a1) /* store 3rd longword */
+ jra .lines_lo6r_entry /* jump into main loop */
+
+.lines_lo6r_loop:
+ move.l %d0,%d4 /* move first 3 longwords of old line away */
+ move.l %d1,%d5
+ move.l %d2,%d6
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load line */
+ move.w %d3,%d4 /* combine 4th low word with old high word */
+ swap %d4 /* swap words of 4th long */
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ sub.l %d7,%a1
+ movem.l %d3-%d6,(%a1) /* store line */
+.lines_lo6r_entry:
+ move.w %d1,%d2 /* combine 2nd low word with 3rd high word */
+ swap %d2 /* swap words of 2nd long */
+ move.w %d0,%d1 /* combine 1st low word with 2nd high word */
+ swap %d1 /* swap words of 1st long */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo6r_loop
+
+ jra .lines_lo6r_end /* handle trailing longwords */
+
+ /* word aligned destination (line - 14): use line bursts in the loop */
+.lines_lo2r_start:
+ sub.l %d7,%a0
+ movem.l (%a0),%d0-%d3 /* load initial line */
+ move.w %d3,-(%a1) /* store last low word */
+ jra .lines_lo2r_entry /* jump into main loop */
+
+.lines_lo2r_loop:
+ move.l %d0,%d4 /* move old line away */
+ move.l %d1,%d5
+ move.l %d2,%d6
+ move.l %d3,%d7
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d0-%d3 /* load line */
+ move.w %d3,%d4 /* combine 4th low word with old high word */
+ swap %d4 /* swap words of 4th long */
+ lea.l (-16,%a1),%a1
+ movem.l %d4-%d7,(%a1) /* store line */
+.lines_lo2r_entry:
+ move.w %d2,%d3 /* combine 3rd low word with 4th high word */
+ swap %d3 /* swap words of 3rd long */
+ move.w %d1,%d2 /* combine 2nd low word with 3rd high word */
+ swap %d2 /* swap words of 2nd long */
+ move.w %d0,%d1 /* combine 1st low word with 2nd high word */
+ swap %d1 /* swap words of 1st long */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_lo2r_loop
+
+ /* word aligned destination (line - 2/6/10/14): tail */
+.lines_lo2r_end:
+ move.l %d3,-(%a1) /* store third last longword */
+.lines_lo6r_end:
+ move.l %d2,-(%a1) /* store second last longword */
+.lines_lo10r_end:
+ move.l %d1,-(%a1) /* store last longword */
+.lines_lo14r_end:
+ lea.l (-12,%a2),%a2 /* readjust start address for doing longwords */
+ cmp.l %a2,%a0 /* any trailing longwords? */
+ jls .lines_do2r_tail_end /* no: skip tail loop */
+
+.lines_do2r_tail_loop:
+ move.l %d0,%d1 /* move old longword away */
+ move.l -(%a0),%d0 /* load new longword */
+ move.w %d0,%d1 /* combine low word with old high word */
+ swap %d1 /* swap words */
+ move.l %d1,-(%a1) /* store longword */
+ cmp.l %a2,%a0 /* run %a0 down to first long bound */
+ jhi .lines_do2r_tail_loop
+
+.lines_do2r_tail_end:
+ swap %d0 /* get final high word */
+ move.w %d0,-(%a1) /* store it */
+ /* jra .linesr_end implicit */
+
+#else /* !FULLSPEED */
+
+ /* word aligned destination (long - 2): use line burst reads in the loop */
+.lines_do2r_start:
+ cmp.l %d1,%a0 /* any leading longwords? */
+ jhi .lines_do2r_head_start /* yes: leading longword copy */
+
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d3-%d6 /* load initial line */
+ move.w %d6,-(%a1) /* store last low word */
+ jra .lines_do2r_entry /* jump into main loop */
+
+.lines_do2r_head_start:
+ move.l -(%a0),%d3 /* load initial longword */
+ move.w %d3,-(%a1) /* store low word */
+ cmp.l %d1,%a0 /* any full longword? */
+ jls .lines_do2r_loop /* no: skip head loop */
+
+.lines_do2r_head_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.w %d3,%d4 /* combine low word with old high word */
+ swap %d4 /* swap words */
+ move.l %d4,-(%a1) /* store longword */
+ cmp.l %d1,%a0 /* run %a0 down to last line bound */
+ jhi .lines_do2r_head_loop
+
+.lines_do2r_loop:
+ move.l %d3,%d7 /* move first longword of old line away */
+ lea.l (-16,%a0),%a0
+ movem.l (%a0),%d3-%d6 /* load line */
+ move.w %d6,%d7 /* combine 4th low word with old high word */
+ swap %d7 /* swap words of 4th long */
+ move.l %d7,-(%a1) /* store 4th longword */
+.lines_do2r_entry:
+ move.w %d5,%d6 /* combine 3rd low word with 4th high word */
+ swap %d6 /* swap words of 3rd long */
+ move.l %d6,-(%a1) /* store 3rd longword */
+ move.w %d4,%d5 /* combine 2nd low word with 3rd high word */
+ swap %d5 /* swap words of 2nd long */
+ move.l %d5,-(%a1) /* store 2nd longword */
+ move.w %d3,%d4 /* combine 1st low word with 2nd high word */
+ swap %d4 /* swap words of 1st long */
+ move.l %d4,-(%a1) /* store 1st longword */
+ cmp.l %a2,%a0 /* run %a0 down to first line bound */
+ jhi .lines_do2r_loop
+
+.lines_do2r_end:
+ lea.l (-12,%a2),%a2 /* readjust start address for doing longwords */
+ cmp.l %a2,%a0 /* any trailing longwords? */
+ jls .lines_do2r_tail_end /* no: skip tail loop */
+
+.lines_do2r_tail_loop:
+ move.l %d3,%d4 /* move old longword away */
+ move.l -(%a0),%d3 /* load new longword */
+ move.w %d3,%d4 /* combine low word with old high word */
+ swap %d4 /* swap words */
+ move.l %d4,-(%a1) /* store longword */
+ cmp.l %a2,%a0 /* run %a0 down to first long bound */
+ jhi .lines_do2r_tail_loop
+
+.lines_do2r_tail_end:
+ swap %d3 /* get final high word */
+ move.w %d3,-(%a1) /* store it */
+ /* jra .linesr_end implicit */
+
+#endif /* !FULLSPEED */
+
+.linesr_end:
+ subq.l #3,%a2 /* readjust start address */
+ move.l %a2,%d0 /* start address in %d0 again */
+ movem.l (%sp),%d2-%d7/%a2 /* restore registers */
+ lea.l (28,%sp),%sp
+ jra .bytes2r_start /* jump to trailing byte loop */
+
+.longr_start:
+ addq.l #3,%d0 /* adjust start address for doing 4 bytes/ pass */
+
+ /* longword copy loop - no lines */
+.longr_loop:
+ move.l -(%a0),-(%a1) /* copy longword (write can be unaligned) */
+ cmp.l %d0,%a0 /* runs %a0 down to first long bound */
+ jhi .longr_loop
+
+ subq.l #3,%d0 /* readjust start address */
+ cmp.l %d0,%a0 /* any bytes left? */
+ jls .bytes2r_end /* no: skip trailing byte loop */
+
+ /* trailing byte loop */
+.bytes2r_loop:
+ move.b -(%a0),-(%a1) /* copy byte */
+.bytes2r_start:
+ cmp.l %d0,%a0 /* runs %a0 down to start address */
+ jhi .bytes2r_loop
+
+.bytes2r_end:
+ rts /* returns start address */
+
+.end:
+ .size memmove,.end-memmove
diff --git a/firmware/asm/m68k/memset.S b/firmware/asm/m68k/memset.S
new file mode 100644
index 0000000000..839b305a05
--- /dev/null
+++ b/firmware/asm/m68k/memset.S
@@ -0,0 +1,152 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2004 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+ .align 2
+ .global memset
+ .type memset,@function
+
+/* Fills a memory region with specified byte value
+ * This version is optimized for speed
+ *
+ * arguments:
+ * (4,%sp) - start address
+ * (8,%sp) - data
+ * (12,%sp) - length
+ *
+ * return value:
+ * %d0 - start address (like ANSI version)
+ *
+ * register usage:
+ * %d0 - data (spread to all 4 bytes when using long stores)
+ * %d1 - temporary / data (for burst transfer)
+ * %d2 - data (for burst transfer)
+ * %d3 - data (for burst transfer)
+ * %a0 - start address
+ * %a1 - current address (runs down from end to start)
+ *
+ * For maximum speed this routine uses both long stores and burst mode,
+ * storing whole lines with movem.l. The routine fills memory from end
+ * to start in order to ease returning the start address.
+ */
+memset:
+ move.l (4,%sp),%a0 /* start address */
+ move.l (8,%sp),%d0 /* data */
+ move.l (12,%sp),%a1 /* length */
+ add.l %a0,%a1 /* %a1 = end address */
+
+ move.l %a0,%d1
+ addq.l #7,%d1
+ and.l #0xFFFFFFFC,%d1 /* %d1 = first long bound + 4 */
+ cmp.l %d1,%a1 /* at least one aligned longword to fill? */
+ blo.b .no_longs /* no, jump directly to byte loop */
+
+ /* from here on at least one aligned longword will be stored */
+ and.l #0xFF,%d0 /* start: spread data to all 4 bytes */
+ move.l %d0,%d1
+ lsl.l #8,%d1
+ or.l %d1,%d0 /* data now in 2 lower bytes of %d0 */
+ move.l %d0,%d1
+ swap %d0
+ or.l %d1,%d0 /* data now in all 4 bytes of %d0 */
+
+ move.l %a1,%d1
+ and.l #0xFFFFFFFC,%d1 /* %d1 = last long bound */
+ cmp.l %d1,%a1 /* any bytes to set? */
+ bls.b .end_b1 /* no: skip byte loop */
+
+ /* leading byte loop: sets 0..3 bytes */
+.loop_b1:
+ move.b %d0,-(%a1) /* store byte */
+ cmp.l %d1,%a1 /* runs %a1 down to last long bound */
+ bhi.b .loop_b1
+
+.end_b1:
+ moveq.l #31,%d1
+ add.l %a0,%d1
+ and.l #0xFFFFFFF0,%d1 /* %d1 = first line bound + 16 */
+ cmp.l %d1,%a1 /* at least one full line to fill? */
+ blo.b .no_lines /* no, jump to longword loop */
+
+ /* NOTE(review): 'mov.l' is inconsistent with the 'move.l' used
+ everywhere else in this file — confirm the assembler accepts it */
+ mov.l %a1,%d1
+ and.l #0xFFFFFFF0,%d1 /* %d1 = last line bound */
+ cmp.l %d1,%a1 /* any longwords to set? */
+ bls.b .end_l1 /* no: skip longword loop */
+
+ /* leading longword loop: sets 0..3 longwords */
+.loop_l1:
+ move.l %d0,-(%a1) /* store longword */
+ cmp.l %d1,%a1 /* runs %a1 down to last line bound */
+ bhi.b .loop_l1
+
+.end_l1:
+ move.l %d2,-(%sp) /* free some registers */
+ move.l %d3,-(%sp)
+
+ move.l %d0,%d1 /* spread data to 4 data registers */
+ move.l %d0,%d2
+ move.l %d0,%d3
+ /* +15 bias: the burst loop's 16-byte steps below then stop at the
+ first line bound above the region start instead of underrunning it */
+ lea.l (15,%a0),%a0 /* start address += 15, acct. for trl. data */
+
+ /* main loop: set whole lines utilising burst mode */
+.loop_line:
+ lea.l (-16,%a1),%a1 /* pre-decrement */
+ movem.l %d0-%d3,(%a1) /* store line */
+ cmp.l %a0,%a1 /* runs %a1 down to first line bound */
+ bhi.b .loop_line
+
+ lea.l (-15,%a0),%a0 /* correct start address */
+ move.l (%sp)+,%d3 /* restore registers */
+ move.l (%sp)+,%d2
+
+ move.l %a0,%d1 /* %d1 = start address ... */
+ addq.l #3,%d1 /* ... +3, account for possible trailing bytes */
+ cmp.l %d1,%a1 /* any longwords left */
+ bhi.b .loop_l2 /* yes: jump to longword loop */
+ bra.b .no_longs /* no: skip loop */
+
+.no_lines:
+ move.l %a0,%d1 /* %d1 = start address ... */
+ addq.l #3,%d1 /* ... +3, account for possible trailing bytes */
+
+ /* trailing longword loop */
+.loop_l2:
+ move.l %d0,-(%a1) /* store longword */
+ cmp.l %d1,%a1 /* runs %a1 down to first long bound */
+ bhi.b .loop_l2
+
+.no_longs: /* also reached directly from the entry check */
+ cmp.l %a0,%a1 /* any bytes left? */
+ bls.b .end_b2 /* no: skip loop */
+
+ /* trailing byte loop */
+.loop_b2:
+ move.b %d0,-(%a1) /* store byte */
+ cmp.l %a0,%a1 /* runs %a1 down to start address */
+ bhi.b .loop_b2
+
+.end_b2:
+ move.l %a0,%d0 /* return start address */
+ rts
+
+.end:
+ .size memset,.end-memset
diff --git a/firmware/asm/m68k/memset16.S b/firmware/asm/m68k/memset16.S
new file mode 100644
index 0000000000..1673038d03
--- /dev/null
+++ b/firmware/asm/m68k/memset16.S
@@ -0,0 +1,146 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+ .global memset16
+ .type memset16,@function
+
+/* Fills a memory region with specified word value
+ * Start address must be word aligned, length is in words
+ * This version is optimized for speed
+ *
+ * arguments:
+ * (4,%sp) - start address
+ * (8,%sp) - data
+ * (12,%sp) - length
+ *
+ * return value:
+ * %d0 - start address
+ *
+ * register usage:
+ * %d0 - data (spread to both words when using long stores)
+ * %d1 - temporary / data (for burst transfer)
+ * %d2 - data (for burst transfer)
+ * %d3 - data (for burst transfer)
+ * %a0 - start address
+ * %a1 - current address (runs down from end to start)
+ *
+ * For maximum speed this routine uses both long stores and burst mode,
+ * storing whole lines with movem.l. The routine fills memory from end
+ * to start in order to ease returning the start address.
+ */
+memset16:
+ /* caller contract (see header above): start address is word aligned,
+ length is in 16-bit words */
+ move.l (4,%sp),%a0 /* start address */
+ move.l (8,%sp),%d0 /* data */
+ move.l (12,%sp),%a1 /* length */
+ add.l %a1,%a1 /* length is in words: convert to bytes */
+ add.l %a0,%a1 /* %a1 = end address */
+
+ move.l %a0,%d1
+ addq.l #6,%d1
+ and.l #0xFFFFFFFC,%d1 /* %d1 = first long bound + 4 */
+ cmp.l %d1,%a1 /* at least one aligned longword to fill? */
+ blo.b .no_longs /* no, jump directly to word loop */
+
+ and.l #0xFFFF,%d0 /* start: spread data to both words */
+ move.l %d0,%d1
+ swap %d1
+ or.l %d1,%d0 /* data now in both words */
+
+ move.l %a1,%d1
+ and.l #0xFFFFFFFC,%d1 /* %d1 = last long bound */
+ cmp.l %d1,%a1 /* one extra word? */
+ bls.b .end_w1 /* no: skip */
+
+ move.w %d0,-(%a1) /* set leading word */
+
+.end_w1:
+ moveq.l #30,%d1
+ add.l %a0,%d1
+ and.l #0xFFFFFFF0,%d1 /* %d1 = first line bound + 16 */
+ cmp.l %d1,%a1 /* at least one full line to fill? */
+ blo.b .no_lines /* no, jump to longword loop */
+
+ /* NOTE(review): 'mov.l' is inconsistent with the 'move.l' used
+ everywhere else in this file — confirm the assembler accepts it */
+ mov.l %a1,%d1
+ and.l #0xFFFFFFF0,%d1 /* %d1 = last line bound */
+ cmp.l %d1,%a1 /* any longwords to set? */
+ bls.b .end_l1 /* no: skip longword loop */
+
+ /* leading longword loop: sets 0..3 longwords */
+.loop_l1:
+ move.l %d0,-(%a1) /* store longword */
+ cmp.l %d1,%a1 /* runs %a1 down to last line bound */
+ bhi.b .loop_l1
+
+.end_l1:
+ move.l %d2,-(%sp) /* free some registers */
+ move.l %d3,-(%sp)
+
+ move.l %d0,%d1 /* spread data to 4 data registers */
+ move.l %d0,%d2
+ move.l %d0,%d3
+ /* +14 bias: the burst loop's 16-byte steps below then stop at the
+ first line bound above the region start instead of underrunning it */
+ lea.l (14,%a0),%a0 /* start address += 14, acct. for trl. data */
+
+ /* main loop: set whole lines utilising burst mode */
+.loop_line:
+ lea.l (-16,%a1),%a1 /* pre-decrement */
+ movem.l %d0-%d3,(%a1) /* store line */
+ cmp.l %a0,%a1 /* runs %a1 down to first line bound */
+ bhi.b .loop_line
+
+ lea.l (-14,%a0),%a0 /* correct start address */
+ move.l (%sp)+,%d3 /* restore registers */
+ move.l (%sp)+,%d2
+
+ move.l %a0,%d1 /* %d1 = start address ... */
+ addq.l #2,%d1 /* ... +2, account for possible trailing word */
+ cmp.l %d1,%a1 /* any longwords left */
+ bhi.b .loop_l2 /* yes: jump to longword loop */
+ bra.b .no_longs /* no: skip loop */
+
+.no_lines:
+ move.l %a0,%d1 /* %d1 = start address ... */
+ addq.l #2,%d1 /* ... +2, account for possible trailing word */
+
+ /* trailing longword loop */
+.loop_l2:
+ move.l %d0,-(%a1) /* store longword */
+ cmp.l %d1,%a1 /* runs %a1 down to first long bound */
+ bhi.b .loop_l2
+
+.no_longs: /* also reached directly from the entry check */
+ cmp.l %a0,%a1 /* any words left? */
+ bls.b .end_w2 /* no: skip loop */
+
+ /* trailing word loop */
+.loop_w2:
+ move.w %d0,-(%a1) /* store word */
+ cmp.l %a0,%a1 /* runs %a1 down to start address */
+ bhi.b .loop_w2
+
+.end_w2:
+ move.l %a0,%d0 /* return start address */
+ rts
+
+.end:
+ .size memset16,.end-memset16
diff --git a/firmware/asm/m68k/strlen.S b/firmware/asm/m68k/strlen.S
new file mode 100644
index 0000000000..765969da04
--- /dev/null
+++ b/firmware/asm/m68k/strlen.S
@@ -0,0 +1,71 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2010 Nils Wallménius
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/* size_t strlen(const char *str) */
+
+ .section .text,"ax",@progbits
+ .align 2
+ .globl strlen
+ .type strlen, @function
+
+strlen:
+ move.l 4(%sp), %a0 /* %a0 = *str */
+ move.l %a0, %a1 /* %a1 = start address */
+ move.l %a0, %d0
+ andi.l #3, %d0 /* %d0 = %a0 & 3 */
+ beq.b 1f /* already aligned */
+ /* computed jump: dispatch into the tst.b/beq ladder below according
+ to the alignment remainder in %d0 (each tst.b+beq pair is 4 bytes),
+ so only the bytes up to the next long bound are tested here */
+ jmp.l (-2,%pc,%d0.l*4)
+ tst.b (%a0)+
+ beq.b .done
+ tst.b (%a0)+
+ beq.b .done
+ tst.b (%a0)+
+ beq.b .done
+
+ 1:
+ move.l (%a0)+, %d0 /* load %d0 increment %a0 */
+ /* use trick to test the whole word for null bytes:
+ (x - 0x01010101) & ~x & 0x80808080 is nonzero iff some byte of x
+ is zero */
+ move.l %d0, %d1
+ subi.l #0x01010101, %d1
+ not.l %d0
+ and.l %d1, %d0
+ andi.l #0x80808080, %d0
+ beq.b 1b /* if the test was false repeat */
+
+ /* ok, so the last word contained a 0 byte, test individual bytes */
+ subq.l #4, %a0
+ tst.b (%a0)+
+ beq.b .done
+ tst.b (%a0)+
+ beq.b .done
+ tst.b (%a0)+
+ beq.b .done
+ /* last byte must be 0 so we don't need to load it, so we don't increment a0
+ so we jump past the subq instr */
+ .word 0x51fa /* trapf.w, shadow next instr: the 2-byte subq below
+ becomes trapf's extension word and is not executed */
+
+.done:
+ subq.l #1, %a0 /* %a0 is 1 too large due to the last increment */
+ sub.l %a1, %a0 /* how many times did we repeat? */
+ move.l %a0, %d0 /* return value in %d0 */
+ rts
+ .size strlen, .-strlen
+
diff --git a/firmware/asm/memcpy.c b/firmware/asm/memcpy.c
new file mode 100644
index 0000000000..c5456ab41f
--- /dev/null
+++ b/firmware/asm/memcpy.c
@@ -0,0 +1,117 @@
+/*
+FUNCTION
+ <<memcpy>>---copy memory regions
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ void* memcpy(void *<[out]>, const void *<[in]>, size_t <[n]>);
+
+TRAD_SYNOPSIS
+	void *memcpy(<[out]>, <[in]>, <[n]>)
+ void *<[out]>;
+ void *<[in]>;
+ size_t <[n]>;
+
+DESCRIPTION
+ This function copies <[n]> bytes from the memory region
+ pointed to by <[in]> to the memory region pointed to by
+ <[out]>.
+
+ If the regions overlap, the behavior is undefined.
+
+RETURNS
+ <<memcpy>> returns a pointer to the first byte of the <[out]>
+ region.
+
+PORTABILITY
+<<memcpy>> is ANSI C.
+
+<<memcpy>> requires no supporting OS subroutines.
+
+QUICKREF
+ memcpy ansi pure
+ */
+
+#include "config.h"
+#include "_ansi.h" /* for _DEFUN */
+#include <string.h>
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X, Y) \
+ (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1)))
+
+/* How many bytes are copied each iteration of the 4X unrolled loop. */
+#define BIGBLOCKSIZE (sizeof (long) << 2)
+
+/* How many bytes are copied each iteration of the word copy loop. */
+#define LITTLEBLOCKSIZE (sizeof (long))
+
+/* Threshold for punting to the byte copier. */
+#define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE)
+
+_PTR
+_DEFUN (memcpy, (dst0, src0, len0),
+ _PTR dst0 _AND
+ _CONST _PTR src0 _AND
+ size_t len0) ICODE_ATTR;
+
+_PTR
+_DEFUN (memcpy, (dst0, src0, len0),
+ _PTR dst0 _AND
+ _CONST _PTR src0 _AND
+ size_t len0)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ /* size-optimized variant: plain byte copy */
+ char *dst = (char *) dst0;
+ char *src = (char *) src0;
+
+ _PTR save = dst0;
+
+ while (len0--)
+ {
+ *dst++ = *src++;
+ }
+
+ return save;
+#else
+ char *dst = dst0;
+ _CONST char *src = src0;
+ long *aligned_dst;
+ _CONST long *aligned_src;
+ /* NOTE(review): 'unsigned int' narrows len0 — on targets where
+ size_t is wider (LP64) a length > UINT_MAX would be truncated;
+ harmless for this firmware's address space, confirm for others. */
+ unsigned int len = len0;
+
+ /* If the size is small, or either SRC or DST is unaligned,
+ then punt into the byte copy loop. This should be rare. */
+ if (!TOO_SMALL(len) && !UNALIGNED (src, dst))
+ {
+ aligned_dst = (long*)dst;
+ aligned_src = (long*)src; /* casts away const; only read through */
+
+ /* Copy 4X long words at a time if possible. */
+ while (len >= BIGBLOCKSIZE)
+ {
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ len -= (unsigned int)BIGBLOCKSIZE;
+ }
+
+ /* Copy one long word at a time if possible. */
+ while (len >= LITTLEBLOCKSIZE)
+ {
+ *aligned_dst++ = *aligned_src++;
+ len -= LITTLEBLOCKSIZE;
+ }
+
+ /* Pick up any residual with a byte copier. */
+ dst = (char*)aligned_dst;
+ src = (char*)aligned_src;
+ }
+
+ while (len--)
+ *dst++ = *src++;
+
+ return dst0;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/firmware/asm/memmove.c b/firmware/asm/memmove.c
new file mode 100644
index 0000000000..5f423964bb
--- /dev/null
+++ b/firmware/asm/memmove.c
@@ -0,0 +1,147 @@
+/*
+FUNCTION
+ <<memmove>>---move possibly overlapping memory
+
+INDEX
+ memmove
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ void *memmove(void *<[dst]>, const void *<[src]>, size_t <[length]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ void *memmove(<[dst]>, <[src]>, <[length]>)
+ void *<[dst]>;
+ void *<[src]>;
+ size_t <[length]>;
+
+DESCRIPTION
+ This function moves <[length]> characters from the block of
+ memory starting at <<*<[src]>>> to the memory starting at
+ <<*<[dst]>>>. <<memmove>> reproduces the characters correctly
+ at <<*<[dst]>>> even if the two areas overlap.
+
+
+RETURNS
+ The function returns <[dst]> as passed.
+
+PORTABILITY
+<<memmove>> is ANSI C.
+
+<<memmove>> requires no supporting OS subroutines.
+
+QUICKREF
+ memmove ansi pure
+*/
+
+#include "config.h"
+#include <_ansi.h>
+#include <string.h>
+
+/* Nonzero if either X or Y is not aligned on a "long" boundary. */
+#define UNALIGNED(X, Y) \
+ (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1)))
+
+/* How many bytes are copied each iteration of the 4X unrolled loop. */
+#define BIGBLOCKSIZE (sizeof (long) << 2)
+
+/* How many bytes are copied each iteration of the word copy loop. */
+#define LITTLEBLOCKSIZE (sizeof (long))
+
+/* Threshold for punting to the byte copier. */
+#define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE)
+
+_PTR
+_DEFUN (memmove, (dst_void, src_void, length),
+ _PTR dst_void _AND
+ _CONST _PTR src_void _AND
+ size_t length) ICODE_ATTR;
+
+_PTR
+_DEFUN (memmove, (dst_void, src_void, length),
+ _PTR dst_void _AND
+ _CONST _PTR src_void _AND
+ size_t length)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ /* size-optimized variant: plain byte copy, direction chosen by the
+ overlap test below */
+ char *dst = dst_void;
+ _CONST char *src = src_void;
+
+ if (src < dst && dst < src + length)
+ {
+ /* Have to copy backwards */
+ src += length;
+ dst += length;
+ while (length--)
+ {
+ *--dst = *--src;
+ }
+ }
+ else
+ {
+ while (length--)
+ {
+ *dst++ = *src++;
+ }
+ }
+
+ return dst_void;
+#else
+ char *dst = dst_void;
+ _CONST char *src = src_void;
+ long *aligned_dst;
+ _CONST long *aligned_src;
+ /* NOTE(review): 'unsigned int' narrows size_t — lengths > UINT_MAX
+ would be truncated on LP64 targets; confirm acceptable. */
+ unsigned int len = length;
+
+ /* NOTE(review): relational comparison of pointers into (potentially)
+ unrelated objects is formally undefined in ISO C, but this is the
+ conventional newlib overlap test and works on flat-memory targets. */
+ if (src < dst && dst < src + len)
+ {
+ /* Destructive overlap...have to copy backwards */
+ src += len;
+ dst += len;
+ while (len--)
+ {
+ *--dst = *--src;
+ }
+ }
+ else
+ {
+ /* Use optimizing algorithm for a non-destructive copy to closely
+ match memcpy. If the size is small or either SRC or DST is unaligned,
+ then punt into the byte copy loop. This should be rare. */
+ if (!TOO_SMALL(len) && !UNALIGNED (src, dst))
+ {
+ aligned_dst = (long*)dst;
+ aligned_src = (long*)src;
+
+ /* Copy 4X long words at a time if possible. */
+ while (len >= BIGBLOCKSIZE)
+ {
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ *aligned_dst++ = *aligned_src++;
+ len -= BIGBLOCKSIZE;
+ }
+
+ /* Copy one long word at a time if possible. */
+ while (len >= LITTLEBLOCKSIZE)
+ {
+ *aligned_dst++ = *aligned_src++;
+ len -= LITTLEBLOCKSIZE;
+ }
+
+ /* Pick up any residual with a byte copier. */
+ dst = (char*)aligned_dst;
+ src = (char*)aligned_src;
+ }
+
+ while (len--)
+ {
+ *dst++ = *src++;
+ }
+ }
+
+ return dst_void;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/firmware/asm/memset.c b/firmware/asm/memset.c
new file mode 100644
index 0000000000..7b8d2137e8
--- /dev/null
+++ b/firmware/asm/memset.c
@@ -0,0 +1,110 @@
+/*
+FUNCTION
+ <<memset>>---set an area of memory
+
+INDEX
+ memset
+
+ANSI_SYNOPSIS
+ #include <string.h>
+	void *memset(void *<[dst]>, int <[c]>, size_t <[length]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ void *memset(<[dst]>, <[c]>, <[length]>)
+ void *<[dst]>;
+ int <[c]>;
+ size_t <[length]>;
+
+DESCRIPTION
+ This function converts the argument <[c]> into an unsigned
+ char and fills the first <[length]> characters of the array
+ pointed to by <[dst]> to the value.
+
+RETURNS
+	<<memset>> returns the value of <[dst]>.
+
+PORTABILITY
+<<memset>> is ANSI C.
+
+ <<memset>> requires no supporting OS subroutines.
+
+QUICKREF
+ memset ansi pure
+*/
+
+#include <string.h>
+#include "_ansi.h"
+
+#define LBLOCKSIZE (sizeof(long))
+#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1))
+#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE)
+
+/* NOTE(review): unlike memcpy/memmove in this directory there is no
+ ICODE_ATTR prototype before this definition — confirm whether memset
+ should also be placed in IRAM. */
+_PTR
+_DEFUN (memset, (m, c, n),
+ _PTR m _AND
+ int c _AND
+ size_t n)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ /* size-optimized variant: plain byte fill */
+ char *s = (char *) m;
+
+ while (n-- != 0)
+ {
+ *s++ = (char) c;
+ }
+
+ return m;
+#else
+ char *s = (char *) m;
+ unsigned int i;
+ unsigned long buffer;
+ unsigned long *aligned_addr;
+
+ if (!TOO_SMALL (n) && !UNALIGNED (m))
+ {
+ /* If we get this far, we know that n is large and m is word-aligned. */
+
+ aligned_addr = (unsigned long*)m;
+
+ /* Store C into each char sized location in BUFFER so that
+ we can set large blocks quickly. */
+ c &= 0xff;
+ /* LBLOCKSIZE is a compile-time constant: the compiler keeps only
+ one arm of this if/else */
+ if (LBLOCKSIZE == 4)
+ {
+ buffer = (c << 8) | c;
+ buffer |= (buffer << 16);
+ }
+ else
+ {
+ buffer = 0;
+ for (i = 0; i < LBLOCKSIZE; i++)
+ buffer = (buffer << 8) | c;
+ }
+
+ while (n >= LBLOCKSIZE*4)
+ {
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ n -= 4*LBLOCKSIZE;
+ }
+
+ while (n >= LBLOCKSIZE)
+ {
+ *aligned_addr++ = buffer;
+ n -= LBLOCKSIZE;
+ }
+ /* Pick up the remainder with a bytewise loop. */
+ s = (char*)aligned_addr;
+ }
+
+ while (n--)
+ {
+ *s++ = (char)c;
+ }
+
+ return m;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/firmware/asm/memset16.c b/firmware/asm/memset16.c
new file mode 100644
index 0000000000..7e31df0cdd
--- /dev/null
+++ b/firmware/asm/memset16.c
@@ -0,0 +1,78 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include "string-extra.h" /* memset16() */
+
+#define LBLOCKSIZE (sizeof(long)/2)
+#define UNALIGNED(X) ((long)X & (sizeof(long) - 1))
+#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE)
+
+/* Fill 'len' 16-bit words at 'dst' with 'val'.
+ NOTE(review): dst is accessed as unsigned short* — assumes at least
+ 2-byte alignment, matching the asm implementations' stated contract. */
+void memset16(void *dst, int val, size_t len)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ /* size-optimized variant: plain word fill */
+ unsigned short *p = (unsigned short *)dst;
+
+ while (len--)
+ *p++ = val;
+#else
+ unsigned short *p = (unsigned short *)dst;
+ unsigned int i;
+ unsigned long buffer;
+ unsigned long *aligned_addr;
+
+ /* long-sized fills only when dst is long-aligned; a merely
+ word-aligned dst is handled entirely by the short loop below */
+ if (!TOO_SMALL(len) && !UNALIGNED(dst))
+ {
+ aligned_addr = (unsigned long *)dst;
+
+ val &= 0xffff;
+ /* LBLOCKSIZE is a compile-time constant: only one arm survives */
+ if (LBLOCKSIZE == 2)
+ {
+ buffer = (val << 16) | val;
+ }
+ else
+ {
+ buffer = 0;
+ for (i = 0; i < LBLOCKSIZE; i++)
+ buffer = (buffer << 16) | val;
+ }
+
+ while (len >= LBLOCKSIZE*4)
+ {
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ *aligned_addr++ = buffer;
+ len -= 4*LBLOCKSIZE;
+ }
+
+ while (len >= LBLOCKSIZE)
+ {
+ *aligned_addr++ = buffer;
+ len -= LBLOCKSIZE;
+ }
+
+ p = (unsigned short *)aligned_addr;
+ }
+
+ while (len--)
+ *p++ = val;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}
diff --git a/firmware/asm/mips/memcpy.S b/firmware/asm/mips/memcpy.S
new file mode 100644
index 0000000000..2e7f245c69
--- /dev/null
+++ b/firmware/asm/mips/memcpy.S
@@ -0,0 +1,143 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ * This file was originally part of the GNU C Library
+ * Contributed to glibc by Hartvig Ekner <hartvige@mips.com>, 2002
+ * Adapted for Rockbox by Maurus Cuelenaere, 2009
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include "config.h"
+#include "mips.h"
+
+/* void *memcpy(void *s1, const void *s2, size_t n); */
+
+#ifdef ROCKBOX_BIG_ENDIAN
+# define LWHI lwl /* high part is left in big-endian */
+# define SWHI swl /* high part is left in big-endian */
+# define LWLO lwr /* low part is right in big-endian */
+# define SWLO swr /* low part is right in big-endian */
+#else
+# define LWHI lwr /* high part is right in little-endian */
+# define SWHI swr /* high part is right in little-endian */
+# define LWLO lwl /* low part is left in little-endian */
+# define SWLO swl /* low part is left in little-endian */
+#endif
+
+ .section .icode, "ax", %progbits
+
+ .global memcpy
+ .type memcpy, %function
+
+ .set noreorder
+
+memcpy:
+ # NOTE: assembled with .set noreorder (above) — the instruction
+ # following each branch executes in the branch delay slot
+ slti t0, a2, 8 # Less than 8?
+ bne t0, zero, last8
+ move v0, a0 # Setup exit value before too late
+
+ xor t0, a1, a0 # Find a0/a1 displacement
+ andi t0, 0x3
+ bne t0, zero, shift # Go handle the unaligned case
+ subu t1, zero, a1
+ andi t1, 0x3 # a0/a1 are aligned, but are we
+ beq t1, zero, chk8w # starting in the middle of a word?
+ subu a2, t1
+ LWHI t0, 0(a1) # Yes we are... take care of that
+ addu a1, t1
+ SWHI t0, 0(a0)
+ addu a0, t1
+
+chk8w:
+ andi t0, a2, 0x1f # 32 or more bytes left?
+ beq t0, a2, chk1w
+ subu a3, a2, t0 # Yes
+ addu a3, a1 # a3 = end address of loop
+ move a2, t0 # a2 = what will be left after loop
+lop8w:
+ lw t0, 0(a1) # Loop taking 8 words at a time
+ lw t1, 4(a1)
+ lw t2, 8(a1)
+ lw t3, 12(a1)
+ lw t4, 16(a1)
+ lw t5, 20(a1)
+ lw t6, 24(a1)
+ lw t7, 28(a1)
+ addiu a0, 32
+ addiu a1, 32
+ sw t0, -32(a0)
+ sw t1, -28(a0)
+ sw t2, -24(a0)
+ sw t3, -20(a0)
+ sw t4, -16(a0)
+ sw t5, -12(a0)
+ sw t6, -8(a0)
+ bne a1, a3, lop8w
+ sw t7, -4(a0)
+
+chk1w:
+ andi t0, a2, 0x3 # 4 or more bytes left?
+ beq t0, a2, last8
+ subu a3, a2, t0 # Yes, handle them one word at a time
+ addu a3, a1 # a3 again end address
+ move a2, t0
+lop1w:
+ lw t0, 0(a1)
+ addiu a0, 4
+ addiu a1, 4
+ bne a1, a3, lop1w
+ sw t0, -4(a0)
+
+last8:
+ blez a2, lst8e # Handle last 8 bytes, one at a time
+ addu a3, a2, a1
+lst8l:
+ lb t0, 0(a1)
+ addiu a0, 1
+ addiu a1, 1
+ bne a1, a3, lst8l
+ sb t0, -1(a0)
+lst8e:
+ jr ra # Bye, bye
+ nop
+
+shift:
+ # src/dst relative misalignment differs: assemble each output word
+ # from two partial loads (LWHI/LWLO, defined above per endianness)
+ subu a3, zero, a0 # Src and Dest unaligned
+ andi a3, 0x3 # (unoptimized case...)
+ beq a3, zero, shft1
+ subu a2, a3 # a2 = bytes left
+ LWHI t0, 0(a1) # Take care of first odd part
+ LWLO t0, 3(a1)
+ addu a1, a3
+ SWHI t0, 0(a0)
+ addu a0, a3
+shft1:
+ andi t0, a2, 0x3
+ subu a3, a2, t0
+ addu a3, a1
+shfth:
+ LWHI t1, 0(a1) # Limp through, word by word
+ LWLO t1, 3(a1)
+ addiu a0, 4
+ addiu a1, 4
+ bne a1, a3, shfth
+ sw t1, -4(a0)
+ b last8 # Handle anything which may be left
+ move a2, t0
+
+ .set reorder
diff --git a/firmware/asm/mips/memset.S b/firmware/asm/mips/memset.S
new file mode 100644
index 0000000000..8db76d9123
--- /dev/null
+++ b/firmware/asm/mips/memset.S
@@ -0,0 +1,239 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * This file was originally part of the Linux/MIPS GNU C Library
+ * Copyright (C) 1998 by Ralf Baechle
+ * Adapted for Rockbox by Maurus Cuelenaere, 2009
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include "config.h"
+#include "mips.h"
+
+#define FILL256(dst, offset, val) \
+ sw val, (offset + 0x00)(dst); \
+ sw val, (offset + 0x04)(dst); \
+ sw val, (offset + 0x08)(dst); \
+ sw val, (offset + 0x0c)(dst); \
+ sw val, (offset + 0x10)(dst); \
+ sw val, (offset + 0x14)(dst); \
+ sw val, (offset + 0x18)(dst); \
+ sw val, (offset + 0x1c)(dst); \
+ sw val, (offset + 0x20)(dst); \
+ sw val, (offset + 0x24)(dst); \
+ sw val, (offset + 0x28)(dst); \
+ sw val, (offset + 0x2c)(dst); \
+ sw val, (offset + 0x30)(dst); \
+ sw val, (offset + 0x34)(dst); \
+ sw val, (offset + 0x38)(dst); \
+ sw val, (offset + 0x3c)(dst); \
+ sw val, (offset + 0x40)(dst); \
+ sw val, (offset + 0x44)(dst); \
+ sw val, (offset + 0x48)(dst); \
+ sw val, (offset + 0x4c)(dst); \
+ sw val, (offset + 0x50)(dst); \
+ sw val, (offset + 0x54)(dst); \
+ sw val, (offset + 0x58)(dst); \
+ sw val, (offset + 0x5c)(dst); \
+ sw val, (offset + 0x60)(dst); \
+ sw val, (offset + 0x64)(dst); \
+ sw val, (offset + 0x68)(dst); \
+ sw val, (offset + 0x6c)(dst); \
+ sw val, (offset + 0x70)(dst); \
+ sw val, (offset + 0x74)(dst); \
+ sw val, (offset + 0x78)(dst); \
+ sw val, (offset + 0x7c)(dst); \
+ sw val, (offset + 0x80)(dst); \
+ sw val, (offset + 0x84)(dst); \
+ sw val, (offset + 0x88)(dst); \
+ sw val, (offset + 0x8c)(dst); \
+ sw val, (offset + 0x90)(dst); \
+ sw val, (offset + 0x94)(dst); \
+ sw val, (offset + 0x98)(dst); \
+ sw val, (offset + 0x9c)(dst); \
+ sw val, (offset + 0xa0)(dst); \
+ sw val, (offset + 0xa4)(dst); \
+ sw val, (offset + 0xa8)(dst); \
+ sw val, (offset + 0xac)(dst); \
+ sw val, (offset + 0xb0)(dst); \
+ sw val, (offset + 0xb4)(dst); \
+ sw val, (offset + 0xb8)(dst); \
+ sw val, (offset + 0xbc)(dst); \
+ sw val, (offset + 0xc0)(dst); \
+ sw val, (offset + 0xc4)(dst); \
+ sw val, (offset + 0xc8)(dst); \
+ sw val, (offset + 0xcc)(dst); \
+ sw val, (offset + 0xd0)(dst); \
+ sw val, (offset + 0xd4)(dst); \
+ sw val, (offset + 0xd8)(dst); \
+ sw val, (offset + 0xdc)(dst); \
+ sw val, (offset + 0xe0)(dst); \
+ sw val, (offset + 0xe4)(dst); \
+ sw val, (offset + 0xe8)(dst); \
+ sw val, (offset + 0xec)(dst); \
+ sw val, (offset + 0xf0)(dst); \
+ sw val, (offset + 0xf4)(dst); \
+ sw val, (offset + 0xf8)(dst); \
+ sw val, (offset + 0xfc)(dst);
+
+#define FILL128(dst, offset, val) \
+ sw val, (offset + 0x00)(dst); \
+ sw val, (offset + 0x04)(dst); \
+ sw val, (offset + 0x08)(dst); \
+ sw val, (offset + 0x0c)(dst); \
+ sw val, (offset + 0x10)(dst); \
+ sw val, (offset + 0x14)(dst); \
+ sw val, (offset + 0x18)(dst); \
+ sw val, (offset + 0x1c)(dst); \
+ sw val, (offset + 0x20)(dst); \
+ sw val, (offset + 0x24)(dst); \
+ sw val, (offset + 0x28)(dst); \
+ sw val, (offset + 0x2c)(dst); \
+ sw val, (offset + 0x30)(dst); \
+ sw val, (offset + 0x34)(dst); \
+ sw val, (offset + 0x38)(dst); \
+ sw val, (offset + 0x3c)(dst); \
+ sw val, (offset + 0x40)(dst); \
+ sw val, (offset + 0x44)(dst); \
+ sw val, (offset + 0x48)(dst); \
+ sw val, (offset + 0x4c)(dst); \
+ sw val, (offset + 0x50)(dst); \
+ sw val, (offset + 0x54)(dst); \
+ sw val, (offset + 0x58)(dst); \
+ sw val, (offset + 0x5c)(dst); \
+ sw val, (offset + 0x60)(dst); \
+ sw val, (offset + 0x64)(dst); \
+ sw val, (offset + 0x68)(dst); \
+ sw val, (offset + 0x6c)(dst); \
+ sw val, (offset + 0x70)(dst); \
+ sw val, (offset + 0x74)(dst); \
+ sw val, (offset + 0x78)(dst); \
+ sw val, (offset + 0x7c)(dst);
+
+#define FILL64(dst, offset, val) \
+ sw val, (offset + 0x00)(dst); \
+ sw val, (offset + 0x04)(dst); \
+ sw val, (offset + 0x08)(dst); \
+ sw val, (offset + 0x0c)(dst); \
+ sw val, (offset + 0x10)(dst); \
+ sw val, (offset + 0x14)(dst); \
+ sw val, (offset + 0x18)(dst); \
+ sw val, (offset + 0x1c)(dst); \
+ sw val, (offset + 0x20)(dst); \
+ sw val, (offset + 0x24)(dst); \
+ sw val, (offset + 0x28)(dst); \
+ sw val, (offset + 0x2c)(dst); \
+ sw val, (offset + 0x30)(dst); \
+ sw val, (offset + 0x34)(dst); \
+ sw val, (offset + 0x38)(dst); \
+ sw val, (offset + 0x3c)(dst);
+
+#define FILL32(dst, offset, val) \
+ sw val, (offset + 0x00)(dst); \
+ sw val, (offset + 0x04)(dst); \
+ sw val, (offset + 0x08)(dst); \
+ sw val, (offset + 0x0c)(dst); \
+ sw val, (offset + 0x10)(dst); \
+ sw val, (offset + 0x14)(dst); \
+ sw val, (offset + 0x18)(dst); \
+ sw val, (offset + 0x1c)(dst);
+
+#define FILL 64 /* bytes per unrolled store block in the main loop */
+#define F_FILL FILL64 /* FILL256/FILL128/FILL32 above appear unused here */
+
+
+#ifdef ROCKBOX_BIG_ENDIAN
+# define SWHI swl /* high part is left in big-endian */
+#else
+# define SWHI swr /* high part is right in little-endian */
+#endif
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+ .section .icode, "ax", %progbits
+
+ .global memset
+ .type memset, %function
+
+ .set noreorder
+ .align 5
+memset:
+ /* .set noreorder is in effect: the instruction after each branch
+ executes in the branch delay slot */
+ beqz a1, 1f /* fill value 0 is already "spread": skip */
+ move v0, a0 /* result */
+
+ andi a1, 0xff /* spread fillword */
+ sll t1, a1, 8
+ or a1, t1
+ sll t1, a1, 16
+ or a1, t1
+1:
+
+ sltiu t0, a2, 4 /* very small region? */
+ bnez t0, small_memset
+ andi t0, a0, 3 /* aligned? */
+
+ beqz t0, 1f
+ subu t0, 4 /* alignment in bytes */
+
+ SWHI a1, (a0) /* make word aligned */
+ subu a0, t0 /* word align ptr */
+ addu a2, t0 /* correct size */
+
+1: ori t1, a2, (FILL-1) /* # of full blocks */
+ xori t1, (FILL-1)
+ beqz t1, memset_partial /* no block to fill */
+ andi t0, a2, (FILL-4)
+
+ addu t1, a0 /* end address */
+ .set reorder
+1: addiu a0, FILL
+ F_FILL( a0, -FILL, a1 )
+ bne t1, a0, 1b
+ .set noreorder
+
+memset_partial:
+ /* computed backward jump into the store sequence: each 'sw' in
+ F_FILL is a 4-byte instruction storing 4 bytes, so the remaining
+ word-byte count t0 doubles as the instruction offset to skip */
+ la t1, 2f /* where to start */
+ subu t1, t0
+ jr t1
+ addu a0, t0 /* dest ptr */
+
+ F_FILL( a0, -FILL, a1 ) /* ... but first do words ... */
+2: andi a2, 3 /* 0 <= n <= 3 to go */
+
+ beqz a2, 1f
+ addu a0, a2 /* What's left */
+ SWHI a1, -1(a0)
+1: jr ra
+ move a2, zero
+
+small_memset:
+ beqz a2, 2f
+ addu t1, a0, a2
+
+1: addiu a0, 1 /* fill bytewise */
+ bne t1, a0, 1b
+ sb a1, -1(a0)
+
+2: jr ra /* done */
+ move a2, zero
+
+ .set reorder
diff --git a/firmware/asm/sh/memcpy.S b/firmware/asm/sh/memcpy.S
new file mode 100644
index 0000000000..e23a579b05
--- /dev/null
+++ b/firmware/asm/sh/memcpy.S
@@ -0,0 +1,219 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2004-2005 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+ .align 2
+ .global _memcpy
+ .global ___memcpy_fwd_entry
+ .type _memcpy,@function
+
+/* Copies <length> bytes of data in memory from <source> to <dest>
+ * This version is optimized for speed
+ *
+ * arguments:
+ * r4 - destination address
+ * r5 - source address
+ * r6 - length
+ *
+ * return value:
+ * r0 - destination address (like ANSI version)
+ *
+ * register usage:
+ * r0 - data / scratch
+ * r1 - 2nd data / scratch
+ * r2 - scratch
+ * r3 - first long bound / adjusted end address (only if >= 11 bytes)
+ * r4 - current dest address
+ * r5 - current source address
+ * r6 - source end address
+ * r7 - stored dest start address
+ *
+ * The instruction order is devised in a way to utilize the pipelining
+ * of the SH1 to the max. The routine also tries to utilize fast page mode.
+ */
+
+_memcpy:
+ mov r4,r7 /* store dest for returning */
+___memcpy_fwd_entry:
+ add #-8,r4 /* offset for early increment (max. 2 longs) */
+ mov #11,r0
+ cmp/hs r0,r6 /* at least 11 bytes to copy? (ensures 2 aligned longs) */
+ add r5,r6 /* r6 = source_end */
+ bf .start_b2 /* no: jump directly to byte loop */
+
+ mov #3,r0
+ neg r5,r3
+ and r0,r3 /* r3 = (4 - align_offset) % 4 */
+ tst r3,r3 /* already aligned? */
+ bt .end_b1 /* yes: skip leading byte loop */
+
+ add r5,r3 /* r3 = first source long bound */
+
+ /* leading byte loop: copies 0..3 bytes */
+.loop_b1:
+ mov.b @r5+,r0 /* load byte & increment source addr */
+ add #1,r4 /* increment dest addr */
+ mov.b r0,@(7,r4) /* store byte */
+ cmp/hi r5,r3 /* runs r5 up to first long bound */
+ bt .loop_b1
+ /* now r5 is always at a long boundary */
+ /* -> memory reading is done in longs for all dest alignments */
+
+ /* selector for main copy loop */
+.end_b1:
+ mov #3,r1
+ and r4,r1 /* r1 = dest alignment offset */
+ mova .jmptab,r0
+ mov.b @(r0,r1),r1 /* select appropriate main loop */
+ add r0,r1
+ mov r6,r3 /* move end address to r3 */
+ jmp @r1 /* and jump to it */
+ add #-7,r3 /* adjust end addr for main loops doing 2 longs/pass */
+
+ /** main loops, copying 2 longs per pass to profit from fast page mode **/
+
+ /* long aligned destination (fastest) */
+ .align 2
+.loop_do0:
+ mov.l @r5+,r1 /* load first long & increment source addr */
+ add #16,r4 /* increment dest addr & account for decrementing stores */
+ mov.l @r5+,r0 /* load second long & increment source addr */
+ cmp/hi r5,r3 /* runs r5 up to last or second last long bound */
+ mov.l r0,@-r4 /* store second long */
+ mov.l r1,@-r4 /* store first long; NOT ALIGNED - no speed loss here! */
+ bt .loop_do0
+
+ add #4,r3 /* readjust end address */
+ cmp/hi r5,r3 /* one long left? */
+ bf .start_b2 /* no, jump to trailing byte loop */
+
+ mov.l @r5+,r0 /* load last long & increment source addr */
+ add #4,r4 /* increment dest addr */
+ bra .start_b2 /* jump to trailing byte loop */
+ mov.l r0,@(4,r4) /* store last long */
+
+ /* word aligned destination (long + 2) */
+ .align 2
+.loop_do2:
+ mov.l @r5+,r1 /* load first long & increment source addr */
+ add #16,r4 /* increment dest addr */
+ mov.l @r5+,r0 /* load second long & increment source addr */
+ cmp/hi r5,r3 /* runs r5 up to last or second last long bound */
+ mov.w r0,@-r4 /* store low word of second long */
+ xtrct r1,r0 /* extract low word of first long & high word of second long */
+ mov.l r0,@-r4 /* and store as long */
+ swap.w r1,r0 /* get high word of first long */
+ mov.w r0,@-r4 /* and store it */
+ bt .loop_do2
+
+ add #4,r3 /* readjust end address */
+ cmp/hi r5,r3 /* one long left? */
+ bf .start_b2 /* no, jump to trailing byte loop */
+
+ mov.l @r5+,r0 /* load last long & increment source addr */
+ add #4,r4 /* increment dest addr */
+ mov.w r0,@(6,r4) /* store low word */
+ shlr16 r0 /* get high word */
+ bra .start_b2 /* jump to trailing byte loop */
+ mov.w r0,@(4,r4) /* and store it */
+
+ /* jumptable for loop selector */
+ .align 2
+.jmptab:
+ .byte .loop_do0 - .jmptab /* placed in the middle because the SH1 */
+ .byte .loop_do1 - .jmptab /* loads bytes sign-extended. Otherwise */
+ .byte .loop_do2 - .jmptab /* the last loop would be out of reach */
+ .byte .loop_do3 - .jmptab /* of the offset range. */
+
+ /* byte aligned destination (long + 1) */
+ .align 2
+.loop_do1:
+ mov.l @r5+,r1 /* load first long & increment source addr */
+ add #16,r4 /* increment dest addr */
+ mov.l @r5+,r0 /* load second long & increment source addr */
+ cmp/hi r5,r3 /* runs r5 up to last or second last long bound */
+ mov.b r0,@-r4 /* store low byte of second long */
+ shlr8 r0 /* get upper 3 bytes */
+ mov r1,r2 /* copy first long */
+ shll16 r2 /* move low byte of first long all the way up, .. */
+ shll8 r2
+ or r2,r0 /* ..combine with the 3 bytes of second long.. */
+ mov.l r0,@-r4 /* ..and store as long */
+ shlr8 r1 /* get middle 2 bytes */
+ mov.w r1,@-r4 /* store as word */
+ shlr16 r1 /* get upper byte */
+ mov.b r1,@-r4 /* and store */
+ bt .loop_do1
+
+ add #4,r3 /* readjust end address */
+.last_do13:
+ cmp/hi r5,r3 /* one long left? */
+ bf .start_b2 /* no, jump to trailing byte loop */
+
+ mov.l @r5+,r0 /* load last long & increment source addr */
+ add #12,r4 /* increment dest addr */
+ mov.b r0,@-r4 /* store low byte */
+ shlr8 r0 /* get middle 2 bytes */
+ mov.w r0,@-r4 /* store as word */
+ shlr16 r0 /* get upper byte */
+ mov.b r0,@-r4 /* and store */
+ bra .start_b2 /* jump to trailing byte loop */
+ add #-4,r4 /* readjust destination */
+
+ /* byte aligned destination (long + 3) */
+ .align 2
+.loop_do3:
+ mov.l @r5+,r1 /* load first long & increment source addr */
+ add #16,r4 /* increment dest addr */
+ mov.l @r5+,r0 /* load second long & increment source addr */
+ mov r1,r2 /* copy first long */
+ mov.b r0,@-r4 /* store low byte of second long */
+ shlr8 r0 /* get middle 2 bytes */
+ mov.w r0,@-r4 /* store as word */
+ shlr16 r0 /* get upper byte */
+ shll8 r2 /* move lower 3 bytes of first long one up.. */
+ or r2,r0 /* ..combine with the 1 byte of second long.. */
+ mov.l r0,@-r4 /* ..and store as long */
+ shlr16 r1 /* get upper byte of first long.. */
+ shlr8 r1
+ cmp/hi r5,r3 /* runs r5 up to last or second last long bound */
+ mov.b r1,@-r4 /* ..and store */
+ bt .loop_do3
+
+ bra .last_do13 /* handle last longword: reuse routine for (long + 1) */
+ add #4,r3 /* readjust end address */
+
+ /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
+ .align 2
+.loop_b2:
+ mov.b @r5+,r0 /* load byte & increment source addr */
+ add #1,r4 /* increment dest addr */
+ mov.b r0,@(7,r4) /* store byte */
+.start_b2:
+ cmp/hi r5,r6 /* runs r5 up to end address */
+ bt .loop_b2
+
+ rts
+ mov r7,r0 /* return dest start address */
+.end:
+ .size _memcpy,.end-_memcpy
diff --git a/firmware/asm/sh/memmove.S b/firmware/asm/sh/memmove.S
new file mode 100644
index 0000000000..d5a7160043
--- /dev/null
+++ b/firmware/asm/sh/memmove.S
@@ -0,0 +1,222 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2006 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+ .align 2
+ .global _memmove
+ .type _memmove,@function
+
+/* Moves <length> bytes of data in memory from <source> to <dest>
+ * Regions may overlap.
+ * This version is optimized for speed, and needs the corresponding memcpy
+ * implementation for the forward copy branch.
+ *
+ * arguments:
+ * r4 - destination address
+ * r5 - source address
+ * r6 - length
+ *
+ * return value:
+ * r0 - destination address (like ANSI version)
+ *
+ * register usage:
+ * r0 - data / scratch
+ * r1 - 2nd data / scratch
+ * r2 - scratch
+ * r3 - last long bound / adjusted start address (only if >= 11 bytes)
+ * r4 - current dest address
+ * r5 - source start address
+ * r6 - current source address
+ *
+ * The instruction order is devised in a way to utilize the pipelining
+ * of the SH1 to the max. The routine also tries to utilize fast page mode.
+ */
+
+_memmove:
+ cmp/hi r4,r5 /* source > destination */
+ bf .backward /* no: backward copy */
+ mov.l .memcpy_fwd,r0
+ jmp @r0
+ mov r4,r7 /* store dest for returning */
+
+ .align 2
+.memcpy_fwd:
+ .long ___memcpy_fwd_entry
+
+.backward:
+ add r6,r4 /* r4 = destination end */
+ mov #11,r0
+ cmp/hs r0,r6 /* at least 11 bytes to copy? (ensures 2 aligned longs) */
+ add #-8,r5 /* adjust for late decrement (max. 2 longs) */
+ add r5,r6 /* r6 = source end - 8 */
+ bf .start_b2r /* no: jump directly to byte loop */
+
+ mov #-4,r3 /* r3 = 0xfffffffc */
+ and r6,r3 /* r3 = last source long bound */
+ cmp/hi r3,r6 /* already aligned? */
+ bf .end_b1r /* yes: skip leading byte loop */
+
+.loop_b1r:
+ mov.b @(7,r6),r0 /* load byte */
+ add #-1,r6 /* decrement source addr */
+ mov.b r0,@-r4 /* store byte */
+ cmp/hi r3,r6 /* runs r6 down to last long bound */
+ bt .loop_b1r
+
+.end_b1r:
+ mov #3,r1
+ and r4,r1 /* r1 = dest alignment offset */
+ mova .jmptab_r,r0
+ mov.b @(r0,r1),r1 /* select appropriate main loop.. */
+ add r0,r1
+        mov     r5,r3           /* copy start address to r3 */
+ jmp @r1 /* ..and jump to it */
+ add #7,r3 /* adjust end addr for main loops doing 2 longs/pass */
+
+ /** main loops, copying 2 longs per pass to profit from fast page mode **/
+
+ /* long aligned destination (fastest) */
+ .align 2
+.loop_do0r:
+ mov.l @r6,r1 /* load first long */
+ add #-8,r6 /* decrement source addr */
+ mov.l @(12,r6),r0 /* load second long */
+ cmp/hi r3,r6 /* runs r6 down to first or second long bound */
+ mov.l r0,@-r4 /* store second long */
+ mov.l r1,@-r4 /* store first long; NOT ALIGNED - no speed loss here! */
+ bt .loop_do0r
+
+ add #-4,r3 /* readjust end address */
+ cmp/hi r3,r6 /* first long left? */
+ bf .start_b2r /* no, jump to trailing byte loop */
+
+ mov.l @(4,r6),r0 /* load first long */
+ add #-4,r6 /* decrement source addr */
+ bra .start_b2r /* jump to trailing byte loop */
+ mov.l r0,@-r4 /* store first long */
+
+ /* word aligned destination (long + 2) */
+ .align 2
+.loop_do2r:
+ mov.l @r6,r1 /* load first long */
+ add #-8,r6 /* decrement source addr */
+ mov.l @(12,r6),r0 /* load second long */
+ cmp/hi r3,r6 /* runs r6 down to first or second long bound */
+ mov.w r0,@-r4 /* store low word of second long */
+ xtrct r1,r0 /* extract low word of first long & high word of second long */
+ mov.l r0,@-r4 /* and store as long */
+ shlr16 r1 /* get high word of first long */
+ mov.w r1,@-r4 /* and store it */
+ bt .loop_do2r
+
+ add #-4,r3 /* readjust end address */
+ cmp/hi r3,r6 /* first long left? */
+ bf .start_b2r /* no, jump to trailing byte loop */
+
+ mov.l @(4,r6),r0 /* load first long & decrement source addr */
+ add #-4,r6 /* decrement source addr */
+ mov.w r0,@-r4 /* store low word */
+ shlr16 r0 /* get high word */
+ bra .start_b2r /* jump to trailing byte loop */
+ mov.w r0,@-r4 /* and store it */
+
+ /* jumptable for loop selector */
+ .align 2
+.jmptab_r:
+ .byte .loop_do0r - .jmptab_r /* placed in the middle because the SH1 */
+ .byte .loop_do1r - .jmptab_r /* loads bytes sign-extended. Otherwise */
+ .byte .loop_do2r - .jmptab_r /* the last loop would be out of reach */
+ .byte .loop_do3r - .jmptab_r /* of the offset range. */
+
+ /* byte aligned destination (long + 1) */
+ .align 2
+.loop_do1r:
+ mov.l @r6,r1 /* load first long */
+ add #-8,r6 /* decrement source addr */
+ mov.l @(12,r6),r0 /* load second long */
+ cmp/hi r3,r6 /* runs r6 down to first or second long bound */
+ mov.b r0,@-r4 /* store low byte of second long */
+ shlr8 r0 /* get upper 3 bytes */
+ mov r1,r2 /* copy first long */
+ shll16 r2 /* move low byte of first long all the way up, .. */
+ shll8 r2
+ or r2,r0 /* ..combine with the 3 bytes of second long.. */
+ mov.l r0,@-r4 /* ..and store as long */
+ shlr8 r1 /* get middle 2 bytes */
+ mov.w r1,@-r4 /* store as word */
+ shlr16 r1 /* get upper byte */
+ mov.b r1,@-r4 /* and store */
+ bt .loop_do1r
+
+ add #-4,r3 /* readjust end address */
+.last_do13r:
+ cmp/hi r3,r6 /* first long left? */
+ bf .start_b2r /* no, jump to trailing byte loop */
+
+ nop /* alignment */
+ mov.l @(4,r6),r0 /* load first long */
+ add #-4,r6 /* decrement source addr */
+ mov.b r0,@-r4 /* store low byte */
+ shlr8 r0 /* get middle 2 bytes */
+ mov.w r0,@-r4 /* store as word */
+ shlr16 r0 /* get upper byte */
+ bra .start_b2r /* jump to trailing byte loop */
+ mov.b r0,@-r4 /* and store */
+
+ /* byte aligned destination (long + 3) */
+ .align 2
+.loop_do3r:
+ mov.l @r6,r1 /* load first long */
+ add #-8,r6 /* decrement source addr */
+ mov.l @(12,r6),r0 /* load second long */
+ mov r1,r2 /* copy first long */
+ mov.b r0,@-r4 /* store low byte of second long */
+ shlr8 r0 /* get middle 2 bytes */
+ mov.w r0,@-r4 /* store as word */
+ shlr16 r0 /* get upper byte */
+ shll8 r2 /* move lower 3 bytes of first long one up.. */
+ or r2,r0 /* ..combine with the 1 byte of second long.. */
+ mov.l r0,@-r4 /* ..and store as long */
+ shlr16 r1 /* get upper byte of first long */
+ shlr8 r1
+ cmp/hi r3,r6 /* runs r6 down to first or second long bound */
+ mov.b r1,@-r4 /* ..and store */
+ bt .loop_do3r
+
+ bra .last_do13r /* handle first longword: reuse routine for (long + 1) */
+ add #-4,r3 /* readjust end address */
+
+ /* trailing byte loop: copies 0..3 bytes (or all for < 11 in total) */
+ .align 2
+.loop_b2r:
+ mov.b @(7,r6),r0 /* load byte */
+ add #-1,r6 /* decrement source addr */
+ mov.b r0,@-r4 /* store byte */
+.start_b2r:
+ cmp/hi r5,r6 /* runs r6 down to start address */
+ bt .loop_b2r
+
+ rts
+ mov r4,r0 /* return dest start address */
+.end:
+ .size _memmove,.end-_memmove
diff --git a/firmware/asm/sh/memset.S b/firmware/asm/sh/memset.S
new file mode 100644
index 0000000000..8cae1ea112
--- /dev/null
+++ b/firmware/asm/sh/memset.S
@@ -0,0 +1,109 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2004 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+ .align 2
+ .global _memset
+ .type _memset,@function
+
+/* Fills a memory region with specified byte value
+ * This version is optimized for speed
+ *
+ * arguments:
+ * r4 - start address
+ * r5 - data
+ * r6 - length
+ *
+ * return value:
+ * r0 - start address (like ANSI version)
+ *
+ * register usage:
+ * r0 - temporary
+ * r1 - start address +11 for main loop
+ * r4 - start address
+ * r5 - data (spread to all 4 bytes when using long stores)
+ * r6 - current address (runs down from end to start)
+ *
+ * The instruction order below is devised in a way to utilize the pipelining
+ * of the SH1 to the max. The routine fills memory from end to start in
+ * order to utilize the auto-decrementing store instructions.
+ */
+
+_memset:
+ neg r4,r0
+ and #3,r0 /* r0 = (4 - align_offset) % 4 */
+ add #4,r0
+ cmp/hs r0,r6 /* at least one aligned longword to fill? */
+ add r4,r6 /* r6 = end_address */
+ bf .no_longs /* no, jump directly to byte loop */
+
+ extu.b r5,r5 /* start: spread data to all 4 bytes */
+ swap.b r5,r0
+ or r0,r5 /* data now in 2 lower bytes of r5 */
+ swap.w r5,r0
+ or r0,r5 /* data now in all 4 bytes of r5 */
+
+ mov r6,r0
+ tst #3,r0 /* r0 already long aligned? */
+ bt .end_b1 /* yes: skip loop */
+
+ /* leading byte loop: sets 0..3 bytes */
+.loop_b1:
+ mov.b r5,@-r0 /* store byte */
+ tst #3,r0 /* r0 long aligned? */
+ bf .loop_b1 /* runs r0 down until long aligned */
+
+ mov r0,r6 /* r6 = last long bound */
+ nop /* keep alignment */
+
+.end_b1:
+ mov r4,r1 /* r1 = start_address... */
+ add #11,r1 /* ... + 11, combined for rounding and offset */
+ xor r1,r0
+ tst #4,r0 /* bit 2 tells whether an even or odd number of */
+ bf .loop_odd /* longwords to set */
+
+ /* main loop: set 2 longs per pass */
+.loop_2l:
+ mov.l r5,@-r6 /* store first long */
+.loop_odd:
+ cmp/hi r1,r6 /* runs r6 down to first long bound */
+ mov.l r5,@-r6 /* store second long */
+ bt .loop_2l
+
+.no_longs:
+ cmp/hi r4,r6 /* any bytes left? */
+ bf .end_b2 /* no: skip loop */
+
+ /* trailing byte loop */
+.loop_b2:
+ mov.b r5,@-r6 /* store byte */
+ cmp/hi r4,r6 /* runs r6 down to the start address */
+ bt .loop_b2
+
+.end_b2:
+ rts
+ mov r4,r0 /* return start address */
+
+.end:
+ .size _memset,.end-_memset
diff --git a/firmware/asm/sh/strlen.S b/firmware/asm/sh/strlen.S
new file mode 100644
index 0000000000..e7169e25db
--- /dev/null
+++ b/firmware/asm/sh/strlen.S
@@ -0,0 +1,96 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2005 by Jens Arnold
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+ .section .icode,"ax",@progbits
+
+ .align 2
+ .global _strlen
+ .type _strlen,@function
+
+/* Works out the length of a string
+ * This version is optimized for speed
+ *
+ * arguments:
+ * r4 - start address
+ *
+ * return value:
+ * r0 - string length
+ *
+ * register usage:
+ * r0 - current address
+ * r1 - current value (byte/long)
+ * r2 - mask for alignment / zero (for cmp/str)
+ * r4 - start address
+ *
+ */
+
+_strlen:
+ mov r4,r0 /* r0 = start address */
+ tst #3,r0 /* long aligned? */
+ bt .start_l /* yes, jump directly to the longword loop */
+
+ /* not long aligned: check the first 3 bytes */
+ mov.b @r0+,r1 /* fetch first byte */
+ tst r1,r1 /* byte == 0 ? */
+ bt .hitzero /* yes, string end found */
+ mov.b @r0+,r1 /* fetch second byte */
+ mov #3,r2 /* prepare mask: r2 = 0..00000011b */
+ tst r1,r1 /* byte == 0 ? */
+ bt .hitzero /* yes, string end found */
+ mov.b @r0+,r1 /* fetch third byte */
+ not r2,r2 /* prepare mask: r2 = 1..11111100b */
+ tst r1,r1 /* byte == 0 ? */
+ bt .hitzero /* yes, string end found */
+
+ /* not yet found, fall through into longword loop */
+ and r2,r0 /* align down to long bound */
+
+ /* main loop: check longwords */
+.start_l:
+ mov #0,r2 /* zero longword for cmp/str */
+.loop_l:
+ mov.l @r0+,r1 /* fetch long word */
+ cmp/str r1,r2 /* any zero byte within? */
+ bf .loop_l /* no, loop */
+ add #-4,r0 /* set address back to start of this longword */
+
+ /* the last longword contains the string end: figure out the byte */
+ mov.b @r0+,r1 /* fetch first byte */
+ tst r1,r1 /* byte == 0 ? */
+ bt .hitzero /* yes, string end found */
+ mov.b @r0+,r1 /* fetch second byte */
+ tst r1,r1 /* byte == 0 ? */
+ bt .hitzero /* yes, string end found */
+ mov.b @r0+,r1 /* fetch third byte */
+ tst r1,r1 /* byte == 0 ? */
+ bt .hitzero /* yes, string end found */
+ rts /* must be the fourth byte */
+ sub r4,r0 /* len = string_end - string_start */
+
+.hitzero:
+ add #-1,r0 /* undo address increment */
+ rts
+ sub r4,r0 /* len = string_end - string_start */
+
+.end:
+ .size _strlen,.end-_strlen
+
diff --git a/firmware/asm/strlen.c b/firmware/asm/strlen.c
new file mode 100644
index 0000000000..649df6764b
--- /dev/null
+++ b/firmware/asm/strlen.c
@@ -0,0 +1,93 @@
+/*
+FUNCTION
+ <<strlen>>---character string length
+
+INDEX
+ strlen
+
+ANSI_SYNOPSIS
+ #include <string.h>
+ size_t strlen(const char *<[str]>);
+
+TRAD_SYNOPSIS
+ #include <string.h>
+ size_t strlen(<[str]>)
+ char *<[src]>;
+
+DESCRIPTION
+ The <<strlen>> function works out the length of the string
+        starting at <<*<[str]>>> by counting characters until it
+ reaches a <<NULL>> character.
+
+RETURNS
+ <<strlen>> returns the character count.
+
+PORTABILITY
+<<strlen>> is ANSI C.
+
+<<strlen>> requires no supporting OS subroutines.
+
+QUICKREF
+ strlen ansi pure
+*/
+
+#include "config.h"
+#include "_ansi.h"
+#include <string.h>
+#include <limits.h>
+
+#define LBLOCKSIZE (sizeof (long))
+#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1))
+
+#if LONG_MAX == 2147483647L
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
+#else
+#if LONG_MAX == 9223372036854775807L
+/* Nonzero if X (a long int) contains a NULL byte. */
+#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080)
+#else
+#error long int is not a 32bit or 64bit type.
+#endif
+#endif
+
+#ifndef DETECTNULL
+#error long int is not a 32bit or 64bit type.
+#endif
+
+size_t
+_DEFUN (strlen, (str),
+ _CONST char *str) ICODE_ATTR;
+
+size_t
+_DEFUN (strlen, (str),
+ _CONST char *str)
+{
+#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
+ _CONST char *start = str;
+
+ while (*str)
+ str++;
+
+ return str - start;
+#else
+ _CONST char *start = str;
+ unsigned long *aligned_addr;
+
+ if (!UNALIGNED (str))
+ {
+ /* If the string is word-aligned, we can check for the presence of
+ a null in each word-sized block. */
+ aligned_addr = (unsigned long*)str;
+ while (!DETECTNULL (*aligned_addr))
+ aligned_addr++;
+
+ /* Once a null is detected, we check each byte in that block for a
+ precise position of the null. */
+ str = (char*)aligned_addr;
+ }
+
+ while (*str)
+ str++;
+ return str - start;
+#endif /* not PREFER_SIZE_OVER_SPEED */
+}