-rw-r--r--  firmware/target/arm/thread-arm.c             112
-rw-r--r--  firmware/target/arm/thread-pp.c              540
-rw-r--r--  firmware/target/coldfire/thread-coldfire.c    97
-rw-r--r--  firmware/target/mips/thread-mips32.c         133
-rw-r--r--  firmware/target/sh/thread-sh.c               109
-rw-r--r--  firmware/thread.c                            891
6 files changed, 1004 insertions(+), 878 deletions(-)
diff --git a/firmware/target/arm/thread-arm.c b/firmware/target/arm/thread-arm.c
new file mode 100644
index 0000000000..c2d91cec25
--- /dev/null
+++ b/firmware/target/arm/thread-arm.c
@@ -0,0 +1,112 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2005 by Thom Johansen
+ *
+ * Generic ARM threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+static void __attribute__((naked,used)) start_thread(void)
+{
+ /* r0 = context */
+ asm volatile (
+ "ldr sp, [r0, #32] \n" /* Load initial sp */
+ "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
+ "mov r1, #0 \n" /* Mark thread as running */
+ "str r1, [r0, #40] \n"
+#if NUM_CORES > 1
+ "ldr r0, =cpucache_invalidate \n" /* Invalidate this core's cache. */
+ "mov lr, pc \n" /* This could be the first entry into */
+ "bx r0 \n" /* plugin or codec code for this core. */
+#endif
+ "mov lr, pc \n" /* Call thread function */
+ "bx r4 \n"
+ ); /* No clobber list - new thread doesn't care */
+ thread_exit();
+#if 0
+ asm volatile (".ltorg"); /* Dump constant pool */
+#endif
+}
+
+/* For startup, place context pointer in r4 slot, start_thread pointer in r5
+ * slot, and thread function pointer in context.start. See load_context for
+ * what happens when thread is initially going to run. */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+ ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.r[1] = (uint32_t)start_thread, \
+ (thread)->context.start = (uint32_t)function; })
+
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+ asm volatile(
+ "stmia %0, { r4-r11, sp, lr } \n"
+ : : "r" (addr)
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+ asm volatile(
+ "ldr r0, [%0, #40] \n" /* Load start pointer */
+ "cmp r0, #0 \n" /* Check for NULL */
+ "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
+ "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
+ : : "r" (addr) : "r0" /* only! */
+ );
+}
+
+#if defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
+|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
+|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
+/* Use the generic ARMv4/v5/v6 wait for IRQ */
+static inline void core_sleep(void)
+{
+ asm volatile (
+ "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
+#if CONFIG_CPU == IMX31L
+ "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
+#endif
+ : : "r"(0)
+ );
+ enable_irq();
+}
+#else
+/* Skip this if special code is required and implemented */
+#ifndef CPU_PP
+static inline void core_sleep(void)
+{
+ #warning core_sleep not implemented, battery life will be decreased
+ enable_irq();
+}
+#endif /* CPU_PP */
+#endif
+
+
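For orientation, here is a minimal sketch of how the generic scheduler in firmware/thread.c is expected to drive the primitives defined above; the helper name and arguments are illustrative only, not the actual switch_thread code:

/* Sketch only: save the outgoing thread's registers, then resume the
 * incoming one.  If the incoming thread's context.start is still set,
 * load_context() falls into start_thread() via the r4/r5 slots that
 * THREAD_STARTUP_INIT prepared, which loads sp and calls the function. */
static void switch_context_sketch(struct thread_entry *outgoing,
                                  struct thread_entry *incoming)
{
    store_context(&outgoing->context);   /* stmia of r4-r11, sp, lr */
    load_context(&incoming->context);    /* ldmia, or jump to start_thread */
}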
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
new file mode 100644
index 0000000000..20105ccb59
--- /dev/null
+++ b/firmware/target/arm/thread-pp.c
@@ -0,0 +1,540 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2007 by Daniel Ankers
+ *
+ * PP5002 and PP502x SoC threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
+/* Support a special workaround object for large-sector disks */
+#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
+#endif
+
+#if NUM_CORES > 1
+extern uintptr_t cpu_idlestackbegin[];
+extern uintptr_t cpu_idlestackend[];
+extern uintptr_t cop_idlestackbegin[];
+extern uintptr_t cop_idlestackend[];
+static uintptr_t * const idle_stacks[NUM_CORES] =
+{
+ [CPU] = cpu_idlestackbegin,
+ [COP] = cop_idlestackbegin
+};
+
+#if CONFIG_CPU == PP5002
+/* Bytes to emulate the PP502x mailbox bits */
+struct core_semaphores
+{
+ volatile uint8_t intend_wake; /* 00h */
+ volatile uint8_t stay_awake; /* 01h */
+ volatile uint8_t intend_sleep; /* 02h */
+ volatile uint8_t unused; /* 03h */
+};
+
+static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
+#endif /* CONFIG_CPU == PP5002 */
+
+#endif /* NUM_CORES */
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* Software core locks using Peterson's mutual exclusion algorithm */
+
+/*---------------------------------------------------------------------------
+ * Initialize the corelock structure.
+ *---------------------------------------------------------------------------
+ */
+void corelock_init(struct corelock *cl)
+{
+ memset(cl, 0, sizeof (*cl));
+}
+
+#if 1 /* Assembly locks to minimize overhead */
+/*---------------------------------------------------------------------------
+ * Wait for the corelock to become free and acquire it when it does.
+ *---------------------------------------------------------------------------
+ */
+void corelock_lock(struct corelock *cl) __attribute__((naked));
+void corelock_lock(struct corelock *cl)
+{
+ /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
+ asm volatile (
+ "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "ldrb r1, [r1] \n"
+ "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+ "eor r2, r1, #0xff \n" /* r2 = othercore */
+ "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+ "1: \n"
+ "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+ "cmp r3, #0 \n" /* yes? lock acquired */
+ "bxeq lr \n"
+ "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
+ "cmp r3, r1 \n"
+ "bxeq lr \n" /* yes? lock acquired */
+ "b 1b \n" /* keep trying */
+ : : "i"(&PROCESSOR_ID)
+ );
+ (void)cl;
+}
+
+/*---------------------------------------------------------------------------
+ * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
+ *---------------------------------------------------------------------------
+ */
+int corelock_try_lock(struct corelock *cl) __attribute__((naked));
+int corelock_try_lock(struct corelock *cl)
+{
+ /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
+ asm volatile (
+ "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "ldrb r1, [r1] \n"
+ "mov r3, r0 \n"
+ "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+ "eor r2, r1, #0xff \n" /* r2 = othercore */
+ "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+ "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+ "eors r0, r0, r2 \n" /* yes? lock acquired */
+ "bxne lr \n"
+ "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
+ "ands r0, r0, r1 \n"
+ "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
+ "bx lr \n" /* return result */
+ : : "i"(&PROCESSOR_ID)
+ );
+
+ return 0;
+ (void)cl;
+}
+
+/*---------------------------------------------------------------------------
+ * Release ownership of the corelock
+ *---------------------------------------------------------------------------
+ */
+void corelock_unlock(struct corelock *cl) __attribute__((naked));
+void corelock_unlock(struct corelock *cl)
+{
+ asm volatile (
+ "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "ldrb r1, [r1] \n"
+ "mov r2, #0 \n" /* cl->myl[core] = 0 */
+ "strb r2, [r0, r1, lsr #7] \n"
+ "bx lr \n"
+ : : "i"(&PROCESSOR_ID)
+ );
+ (void)cl;
+}
+#else /* C versions for reference */
+/*---------------------------------------------------------------------------
+ * Wait for the corelock to become free and acquire it when it does.
+ *---------------------------------------------------------------------------
+ */
+void corelock_lock(struct corelock *cl)
+{
+ const unsigned int core = CURRENT_CORE;
+ const unsigned int othercore = 1 - core;
+
+ cl->myl[core] = core;
+ cl->turn = othercore;
+
+ for (;;)
+ {
+ if (cl->myl[othercore] == 0 || cl->turn == core)
+ break;
+ }
+}
+
+/*---------------------------------------------------------------------------
+ * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
+ *---------------------------------------------------------------------------
+ */
+int corelock_try_lock(struct corelock *cl)
+{
+ const unsigned int core = CURRENT_CORE;
+ const unsigned int othercore = 1 - core;
+
+ cl->myl[core] = core;
+ cl->turn = othercore;
+
+ if (cl->myl[othercore] == 0 || cl->turn == core)
+ {
+ return 1;
+ }
+
+ cl->myl[core] = 0;
+ return 0;
+}
+
+/*---------------------------------------------------------------------------
+ * Release ownership of the corelock
+ *---------------------------------------------------------------------------
+ */
+void corelock_unlock(struct corelock *cl)
+{
+ cl->myl[CURRENT_CORE] = 0;
+}
+#endif /* ASM / C selection */
+
+#endif /* CONFIG_CORELOCK == SW_CORELOCK */
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
+#ifdef CPU_PP502x
+#if NUM_CORES == 1
+static inline void core_sleep(void)
+{
+ sleep_core(CURRENT_CORE);
+ enable_irq();
+}
+#else
+static inline void core_sleep(unsigned int core)
+{
+#if 1
+ asm volatile (
+ "mov r0, #4 \n" /* r0 = 0x4 << core */
+ "mov r0, r0, lsl %[c] \n"
+ "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
+ "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
+ "tst r1, r0, lsl #2 \n"
+ "moveq r1, #0x80000000 \n" /* Then sleep */
+ "streq r1, [%[ctl], %[c], lsl #2] \n"
+ "moveq r1, #0 \n" /* Clear control reg */
+ "streq r1, [%[ctl], %[c], lsl #2] \n"
+ "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
+ "str r1, [%[mbx], #8] \n"
+ "1: \n" /* Wait for wake procedure to finish */
+ "ldr r1, [%[mbx], #0] \n"
+ "tst r1, r0, lsr #2 \n"
+ "bne 1b \n"
+ :
+ : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
+ : "r0", "r1");
+#else /* C version for reference */
+ /* Signal intent to sleep */
+ MBX_MSG_SET = 0x4 << core;
+
+ /* Something waking or other processor intends to wake us? */
+ if ((MBX_MSG_STAT & (0x10 << core)) == 0)
+ {
+ sleep_core(core);
+ wake_core(core);
+ }
+
+ /* Signal wake - clear wake flag */
+ MBX_MSG_CLR = 0x14 << core;
+
+ /* Wait for other processor to finish wake procedure */
+ while (MBX_MSG_STAT & (0x1 << core));
+#endif /* ASM/C selection */
+ enable_irq();
+}
+#endif /* NUM_CORES */
+#elif CONFIG_CPU == PP5002
+#if NUM_CORES == 1
+static inline void core_sleep(void)
+{
+ sleep_core(CURRENT_CORE);
+ enable_irq();
+}
+#else
+/* PP5002 has no mailboxes - emulate using bytes */
+static inline void core_sleep(unsigned int core)
+{
+#if 1
+ asm volatile (
+ "mov r0, #1 \n" /* Signal intent to sleep */
+ "strb r0, [%[sem], #2] \n"
+ "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
+ "cmp r0, #0 \n"
+ "bne 2f \n"
+ /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
+ * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
+ * that the correct alternative is executed. Don't change the order
+ * of the next 4 instructions! */
+ "tst pc, #0x0c \n"
+ "mov r0, #0xca \n"
+ "strne r0, [%[ctl], %[c], lsl #2] \n"
+ "streq r0, [%[ctl], %[c], lsl #2] \n"
+ "nop \n" /* nop's needed because of pipeline */
+ "nop \n"
+ "nop \n"
+ "2: \n"
+ "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
+ "strb r0, [%[sem], #1] \n"
+ "strb r0, [%[sem], #2] \n"
+ "1: \n" /* Wait for wake procedure to finish */
+ "ldrb r0, [%[sem], #0] \n"
+ "cmp r0, #0 \n"
+ "bne 1b \n"
+ :
+ : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
+ [ctl]"r"(&CPU_CTL)
+ : "r0"
+ );
+#else /* C version for reference */
+ /* Signal intent to sleep */
+ core_semaphores[core].intend_sleep = 1;
+
+ /* Something waking or other processor intends to wake us? */
+ if (core_semaphores[core].stay_awake == 0)
+ {
+ sleep_core(core);
+ }
+
+ /* Signal wake - clear wake flag */
+ core_semaphores[core].stay_awake = 0;
+ core_semaphores[core].intend_sleep = 0;
+
+ /* Wait for other processor to finish wake procedure */
+ while (core_semaphores[core].intend_wake != 0);
+
+ /* Enable IRQ */
+#endif /* ASM/C selection */
+ enable_irq();
+}
+#endif /* NUM_CORES */
+#endif /* PP CPU type */
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping, or prevent it from doing so
+ * if it was about to. FIQ and IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES == 1
+/* Shared single-core build debugging version */
+void core_wake(void)
+{
+ /* No wakey - core already wakey */
+}
+#elif defined (CPU_PP502x)
+void core_wake(unsigned int othercore)
+{
+#if 1
+ /* avoid r0 since that contains othercore */
+ asm volatile (
+ "mrs r3, cpsr \n" /* Disable IRQ */
+ "orr r1, r3, #0x80 \n"
+ "msr cpsr_c, r1 \n"
+ "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
+ "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
+ "str r2, [%[mbx], #4] \n"
+ "1: \n" /* If it intends to sleep, let it first */
+ "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
+ "eor r1, r1, #0xc \n"
+ "tst r1, r2, lsr #2 \n"
+ "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
+ "tsteq r1, #0x80000000 \n"
+ "beq 1b \n" /* Wait for sleep or wake */
+ "tst r1, #0x80000000 \n" /* If sleeping, wake it */
+ "movne r1, #0x0 \n"
+ "strne r1, [%[ctl], %[oc], lsl #2] \n"
+ "mov r1, r2, lsr #4 \n"
+ "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
+ "msr cpsr_c, r3 \n" /* Restore IRQ */
+ :
+ : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
+ [oc]"r"(othercore)
+ : "r1", "r2", "r3");
+#else /* C version for reference */
+ /* Disable interrupts - avoid reentrancy from the tick */
+ int oldlevel = disable_irq_save();
+
+ /* Signal intent to wake other processor - set stay awake */
+ MBX_MSG_SET = 0x11 << othercore;
+
+ /* If it intends to sleep, wait until it does or aborts */
+ while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
+ (PROC_CTL(othercore) & PROC_SLEEP) == 0);
+
+ /* If sleeping, wake it up */
+ if (PROC_CTL(othercore) & PROC_SLEEP)
+ PROC_CTL(othercore) = 0;
+
+ /* Done with wake procedure */
+ MBX_MSG_CLR = 0x1 << othercore;
+ restore_irq(oldlevel);
+#endif /* ASM/C selection */
+}
+#elif CONFIG_CPU == PP5002
+/* PP5002 has no mailboxes - emulate using bytes */
+void core_wake(unsigned int othercore)
+{
+#if 1
+ /* avoid r0 since that contains othercore */
+ asm volatile (
+ "mrs r3, cpsr \n" /* Disable IRQ */
+ "orr r1, r3, #0x80 \n"
+ "msr cpsr_c, r1 \n"
+ "mov r1, #1 \n" /* Signal intent to wake other core */
+ "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
+ "strh r1, [%[sem], #0] \n"
+ "mov r2, #0x8000 \n"
+ "1: \n" /* If it intends to sleep, let it first */
+ "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
+ "cmp r1, #1 \n"
+ "ldr r1, [%[st]] \n" /* && not sleeping ? */
+ "tsteq r1, r2, lsr %[oc] \n"
+ "beq 1b \n" /* Wait for sleep or wake */
+ "tst r1, r2, lsr %[oc] \n"
+ "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
+ "movne r1, #0xce \n"
+ "strne r1, [r2, %[oc], lsl #2] \n"
+ "mov r1, #0 \n" /* Done with wake procedure */
+ "strb r1, [%[sem], #0] \n"
+ "msr cpsr_c, r3 \n" /* Restore IRQ */
+ :
+ : [sem]"r"(&core_semaphores[othercore]),
+ [st]"r"(&PROC_STAT),
+ [oc]"r"(othercore)
+ : "r1", "r2", "r3"
+ );
+#else /* C version for reference */
+ /* Disable interrupts - avoid reentrancy from the tick */
+ int oldlevel = disable_irq_save();
+
+ /* Signal intent to wake other processor - set stay awake */
+ core_semaphores[othercore].intend_wake = 1;
+ core_semaphores[othercore].stay_awake = 1;
+
+ /* If it intends to sleep, wait until it does or aborts */
+ while (core_semaphores[othercore].intend_sleep != 0 &&
+ (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+
+ /* If sleeping, wake it up */
+ if (PROC_STAT & PROC_SLEEPING(othercore))
+ wake_core(othercore);
+
+ /* Done with wake procedure */
+ core_semaphores[othercore].intend_wake = 0;
+ restore_irq(oldlevel);
+#endif /* ASM/C selection */
+}
+#endif /* CPU type */
+
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Switches to a stack that always resides in the Rockbox core.
+ *
+ * Needed when a thread suicides on a core other than the main CPU since the
+ * stack used when idling is the stack of the last thread to run. This stack
+ * may not reside in the core firmware in which case the core will continue
+ * to use a stack from an unloaded module until another thread runs on it.
+ *---------------------------------------------------------------------------
+ */
+static inline void switch_to_idle_stack(const unsigned int core)
+{
+ asm volatile (
+ "str sp, [%0] \n" /* save original stack pointer on idle stack */
+ "mov sp, %0 \n" /* switch stacks */
+ : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
+ (void)core;
+}
+
+/*---------------------------------------------------------------------------
+ * Perform core switch steps that need to take place inside switch_thread.
+ *
+ * These steps must take place after entering switch_thread and before
+ * changing the processor, since switch_thread may not do a normal return
+ * because the stack being used for anything the compiler saved will not belong
+ * to the thread's destination core and it may have been recycled for other
+ * purposes by the time a normal context load has taken place. switch_thread
+ * will also clobber anything stashed in the thread's context or stored in the
+ * nonvolatile registers if it is saved there before the call since the
+ * compiler's order of operations cannot be known for certain.
+ */
+static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
+{
+ /* Flush our data to ram */
+ cpucache_flush();
+ /* Stash thread in r4 slot */
+ thread->context.r[0] = (uint32_t)thread;
+ /* Stash restart address in r5 slot */
+ thread->context.r[1] = thread->context.start;
+ /* Save sp in context.sp while still running on old core */
+ thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
+}
+
+/*---------------------------------------------------------------------------
+ * Machine-specific helper function for switching the processor a thread is
+ * running on. Basically, the thread suicides on the departing core and is
+ * reborn on the destination. Were it not for gcc's ill-behavior regarding
+ * naked functions written in C where it actually clobbers non-volatile
+ * registers before the intended prologue code, this would all be much
+ * simpler. Generic setup is done in switch_core itself.
+ */
+
+/*---------------------------------------------------------------------------
+ * This actually performs the core switch.
+ */
+static void __attribute__((naked))
+ switch_thread_core(unsigned int core, struct thread_entry *thread)
+{
+ /* Pure asm for this because compiler behavior isn't sufficiently predictable.
+ * Stack access also isn't permitted until restoring the original stack and
+ * context. */
+ asm volatile (
+ "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
+ "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
+ "ldr r2, [r2, r0, lsl #2] \n"
+ "add r2, r2, %0*4 \n"
+ "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
+ "mov sp, r2 \n" /* switch stacks */
+ "adr r2, 1f \n" /* r2 = new core restart address */
+ "str r2, [r1, #40] \n" /* thread->context.start = r2 */
+ "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
+ "1: \n"
+ "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
+ "mov r1, #0 \n" /* Clear start address */
+ "str r1, [r0, #40] \n"
+ "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
+ "mov lr, pc \n"
+ "bx r0 \n"
+ "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
+ ".ltorg \n" /* Dump constant pool */
+ : : "i"(IDLE_STACK_WORDS)
+ );
+ (void)core; (void)thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Do any device-specific inits for the threads and synchronize the kernel
+ * initializations.
+ *---------------------------------------------------------------------------
+ */
+static void core_thread_init(unsigned int core) INIT_ATTR;
+static void core_thread_init(unsigned int core)
+{
+ if (core == CPU)
+ {
+ /* Wake up coprocessor and let it initialize kernel and threads */
+#ifdef CPU_PP502x
+ MBX_MSG_CLR = 0x3f;
+#endif
+ wake_core(COP);
+ /* Sleep until COP has finished */
+ sleep_core(CPU);
+ }
+ else
+ {
+ /* Wake the CPU and return */
+ wake_core(CPU);
+ }
+}
+#endif /* NUM_CORES */
+
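As a usage note, the Peterson corelock above is meant to bracket short critical sections shared by the CPU and COP. A minimal sketch under assumed names follows; the lock object and functions are illustrative and not part of this commit:

static struct corelock shared_cl;   /* hypothetical lock guarding shared data */

void shared_data_init(void)         /* hypothetical */
{
    corelock_init(&shared_cl);
}

void shared_data_update(void)       /* hypothetical */
{
    corelock_lock(&shared_cl);      /* spins until the other core releases it */
    /* ... modify data visible to both cores ... */
    corelock_unlock(&shared_cl);
}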
diff --git a/firmware/target/coldfire/thread-coldfire.c b/firmware/target/coldfire/thread-coldfire.c
new file mode 100644
index 0000000000..f151a971c7
--- /dev/null
+++ b/firmware/target/coldfire/thread-coldfire.c
@@ -0,0 +1,97 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2004 by Linus Nielsen Feltzing
+ *
+ * Coldfire processor threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+void start_thread(void); /* Provide C access to ASM label */
+static void __attribute__((used)) __start_thread(void)
+{
+ /* a0=macsr, a1=context */
+ asm volatile (
+ "start_thread: \n" /* Start here - no naked attribute */
+ "move.l %a0, %macsr \n" /* Set initial mac status reg */
+ "lea.l 48(%a1), %a1 \n"
+ "move.l (%a1)+, %sp \n" /* Set initial stack */
+ "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
+ "clr.l (%a1) \n" /* Mark thread running */
+ "jsr (%a2) \n" /* Call thread function */
+ );
+ thread_exit();
+}
+
+/* Set EMAC unit to fractional mode with saturation for each new thread,
+ * since that's what'll be the most useful for most things which the dsp
+ * will do. Codecs should still initialize their preferred modes
+ * explicitly. Context pointer is placed in d2 slot and start_thread
+ * pointer in d3 slot. thread function pointer is placed in context.start.
+ * See load_context for what happens when thread is initially going to
+ * run.
+ */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+ ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
+ (thread)->context.d[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.d[1] = (uint32_t)start_thread, \
+ (thread)->context.start = (uint32_t)(function); })
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+ asm volatile (
+ "move.l %%macsr,%%d0 \n"
+ "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
+ : : "a" (addr) : "d0" /* only! */
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+ asm volatile (
+ "move.l 52(%0), %%d0 \n" /* Get start address */
+ "beq.b 1f \n" /* NULL -> already running */
+ "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
+ "jmp (%%a2) \n" /* Start the thread */
+ "1: \n"
+ "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
+ "move.l %%d0, %%macsr \n"
+ : : "a" (addr) : "d0" /* only! */
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(void)
+{
+ /* Supervisor mode, interrupts enabled upon wakeup */
+ asm volatile ("stop #0x2000");
+};
diff --git a/firmware/target/mips/thread-mips32.c b/firmware/target/mips/thread-mips32.c
new file mode 100644
index 0000000000..e2fccb8022
--- /dev/null
+++ b/firmware/target/mips/thread-mips32.c
@@ -0,0 +1,133 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2008 by Maurus Cuelenaere
+ *
+ * 32-bit MIPS threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+
+void start_thread(void); /* Provide C access to ASM label */
+static void __attribute__((used)) _start_thread(void)
+{
+ /* t1 = context */
+ asm volatile (
+ "start_thread: \n"
+ ".set noreorder \n"
+ ".set noat \n"
+ "lw $8, 4($9) \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */
+ "lw $29, 36($9) \n" /* Set initial sp(=$29) */
+ "jalr $8 \n" /* Start the thread */
+ "sw $0, 44($9) \n" /* Clear start address */
+ ".set at \n"
+ ".set reorder \n"
+ );
+ thread_exit();
+}
+
+/* Place context pointer in s0 slot, function pointer in s1 slot, and
+ * start_thread pointer in context_start */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+ ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.r[1] = (uint32_t)(function), \
+ (thread)->context.start = (uint32_t)start_thread; })
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+ asm volatile (
+ ".set noreorder \n"
+ ".set noat \n"
+ "sw $16, 0(%0) \n" /* s0 */
+ "sw $17, 4(%0) \n" /* s1 */
+ "sw $18, 8(%0) \n" /* s2 */
+ "sw $19, 12(%0) \n" /* s3 */
+ "sw $20, 16(%0) \n" /* s4 */
+ "sw $21, 20(%0) \n" /* s5 */
+ "sw $22, 24(%0) \n" /* s6 */
+ "sw $23, 28(%0) \n" /* s7 */
+ "sw $30, 32(%0) \n" /* fp */
+ "sw $29, 36(%0) \n" /* sp */
+ "sw $31, 40(%0) \n" /* ra */
+ ".set at \n"
+ ".set reorder \n"
+ : : "r" (addr)
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+ asm volatile (
+ ".set noat \n"
+ ".set noreorder \n"
+ "lw $8, 44(%0) \n" /* Get start address ($8 = t0) */
+ "beqz $8, running \n" /* NULL -> already running */
+ "nop \n"
+ "jr $8 \n"
+ "move $9, %0 \n" /* t1 = context */
+ "running: \n"
+ "lw $16, 0(%0) \n" /* s0 */
+ "lw $17, 4(%0) \n" /* s1 */
+ "lw $18, 8(%0) \n" /* s2 */
+ "lw $19, 12(%0) \n" /* s3 */
+ "lw $20, 16(%0) \n" /* s4 */
+ "lw $21, 20(%0) \n" /* s5 */
+ "lw $22, 24(%0) \n" /* s6 */
+ "lw $23, 28(%0) \n" /* s7 */
+ "lw $30, 32(%0) \n" /* fp */
+ "lw $29, 36(%0) \n" /* sp */
+ "lw $31, 40(%0) \n" /* ra */
+ ".set at \n"
+ ".set reorder \n"
+ : : "r" (addr) : "t0", "t1"
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(void)
+{
+#if CONFIG_CPU == JZ4732
+ __cpm_idle_mode();
+#endif
+ asm volatile(".set mips32r2 \n"
+ "mfc0 $8, $12 \n" /* mfc t0, $12 */
+ "move $9, $8 \n" /* move t1, t0 */
+ "la $10, 0x8000000 \n" /* la t2, 0x8000000 */
+ "or $8, $8, $10 \n" /* Enable reduced power mode */
+ "mtc0 $8, $12 \n" /* mtc t0, $12 */
+ "wait \n"
+ "mtc0 $9, $12 \n" /* mtc t1, $12 */
+ ".set mips0 \n"
+ ::: "t0", "t1", "t2"
+ );
+ enable_irq();
+}
diff --git a/firmware/target/sh/thread-sh.c b/firmware/target/sh/thread-sh.c
new file mode 100644
index 0000000000..25e0aadf96
--- /dev/null
+++ b/firmware/target/sh/thread-sh.c
@@ -0,0 +1,109 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * SH processor threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+/*---------------------------------------------------------------------------
+ * Start the thread running and terminate it if it returns
+ *---------------------------------------------------------------------------
+ */
+void start_thread(void); /* Provide C access to ASM label */
+static void __attribute__((used)) __start_thread(void)
+{
+ /* r8 = context */
+ asm volatile (
+ "_start_thread: \n" /* Start here - no naked attribute */
+ "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
+ "mov.l @(28, r8), r15 \n" /* Set initial sp */
+ "mov #0, r1 \n" /* Start the thread */
+ "jsr @r0 \n"
+ "mov.l r1, @(36, r8) \n" /* Clear start address */
+ );
+ thread_exit();
+}
+
+/* Place context pointer in r8 slot, function pointer in r9 slot, and
+ * start_thread pointer in context_start */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+ ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.r[1] = (uint32_t)(function), \
+ (thread)->context.start = (uint32_t)start_thread; })
+
+/*---------------------------------------------------------------------------
+ * Store non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void store_context(void* addr)
+{
+ asm volatile (
+ "add #36, %0 \n" /* Start at last reg. By the time routine */
+ "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
+ "mov.l r15,@-%0 \n"
+ "mov.l r14,@-%0 \n"
+ "mov.l r13,@-%0 \n"
+ "mov.l r12,@-%0 \n"
+ "mov.l r11,@-%0 \n"
+ "mov.l r10,@-%0 \n"
+ "mov.l r9, @-%0 \n"
+ "mov.l r8, @-%0 \n"
+ : : "r" (addr)
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
+static inline void load_context(const void* addr)
+{
+ asm volatile (
+ "mov.l @(36, %0), r0 \n" /* Get start address */
+ "tst r0, r0 \n"
+ "bt .running \n" /* NULL -> already running */
+ "jmp @r0 \n" /* r8 = context */
+ ".running: \n"
+ "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
+ "mov.l @%0+, r9 \n"
+ "mov.l @%0+, r10 \n"
+ "mov.l @%0+, r11 \n"
+ "mov.l @%0+, r12 \n"
+ "mov.l @%0+, r13 \n"
+ "mov.l @%0+, r14 \n"
+ "mov.l @%0+, r15 \n"
+ "lds.l @%0+, pr \n"
+ : : "r" (addr) : "r0" /* only! */
+ );
+}
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(void)
+{
+ asm volatile (
+ "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
+ "mov #0, r1 \n" /* Enable interrupts */
+ "ldc r1, sr \n" /* Following instruction cannot be interrupted */
+ "sleep \n" /* Execute standby */
+ : : "z"(&SBYCR-GBR) : "r1");
+}
+
diff --git a/firmware/thread.c b/firmware/thread.c
index 54d966ffe5..5cad67b657 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -160,892 +160,27 @@ void switch_thread(void)
__attribute__((noinline));
/****************************************************************************
- * Processor-specific section
+ * Processor-specific section - include necessary core support
*/
-
-#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
-/* Support a special workaround object for large-sector disks */
-#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
-#else
-#define IF_NO_SKIP_YIELD(...)
-#endif
-
#if defined(CPU_ARM)
-/*---------------------------------------------------------------------------
- * Start the thread running and terminate it if it returns
- *---------------------------------------------------------------------------
- */
-static void __attribute__((naked,used)) start_thread(void)
-{
- /* r0 = context */
- asm volatile (
- "ldr sp, [r0, #32] \n" /* Load initial sp */
- "ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
- "mov r1, #0 \n" /* Mark thread as running */
- "str r1, [r0, #40] \n"
-#if NUM_CORES > 1
- "ldr r0, =cpucache_invalidate \n" /* Invalidate this core's cache. */
- "mov lr, pc \n" /* This could be the first entry into */
- "bx r0 \n" /* plugin or codec code for this core. */
-#endif
- "mov lr, pc \n" /* Call thread function */
- "bx r4 \n"
- ); /* No clobber list - new thread doesn't care */
- thread_exit();
- //asm volatile (".ltorg"); /* Dump constant pool */
-}
-
-/* For startup, place context pointer in r4 slot, start_thread pointer in r5
- * slot, and thread function pointer in context.start. See load_context for
- * what happens when thread is initially going to run. */
-#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
- (thread)->context.r[1] = (uint32_t)start_thread, \
- (thread)->context.start = (uint32_t)function; })
-
-/*---------------------------------------------------------------------------
- * Store non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void store_context(void* addr)
-{
- asm volatile(
- "stmia %0, { r4-r11, sp, lr } \n"
- : : "r" (addr)
- );
-}
-
-/*---------------------------------------------------------------------------
- * Load non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void load_context(const void* addr)
-{
- asm volatile(
- "ldr r0, [%0, #40] \n" /* Load start pointer */
- "cmp r0, #0 \n" /* Check for NULL */
- "ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
- "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
- : : "r" (addr) : "r0" /* only! */
- );
-}
-
+#include "thread-arm.c"
#if defined (CPU_PP)
-
-#if NUM_CORES > 1
-extern uintptr_t cpu_idlestackbegin[];
-extern uintptr_t cpu_idlestackend[];
-extern uintptr_t cop_idlestackbegin[];
-extern uintptr_t cop_idlestackend[];
-static uintptr_t * const idle_stacks[NUM_CORES] =
-{
- [CPU] = cpu_idlestackbegin,
- [COP] = cop_idlestackbegin
-};
-
-#if CONFIG_CPU == PP5002
-/* Bytes to emulate the PP502x mailbox bits */
-struct core_semaphores
-{
- volatile uint8_t intend_wake; /* 00h */
- volatile uint8_t stay_awake; /* 01h */
- volatile uint8_t intend_sleep; /* 02h */
- volatile uint8_t unused; /* 03h */
-};
-
-static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
-#endif /* CONFIG_CPU == PP5002 */
-
-#endif /* NUM_CORES */
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Software core locks using Peterson's mutual exclusion algorithm */
-
-/*---------------------------------------------------------------------------
- * Initialize the corelock structure.
- *---------------------------------------------------------------------------
- */
-void corelock_init(struct corelock *cl)
-{
- memset(cl, 0, sizeof (*cl));
-}
-
-#if 1 /* Assembly locks to minimize overhead */
-/*---------------------------------------------------------------------------
- * Wait for the corelock to become free and acquire it when it does.
- *---------------------------------------------------------------------------
- */
-void corelock_lock(struct corelock *cl) __attribute__((naked));
-void corelock_lock(struct corelock *cl)
-{
- /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
- asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
- "ldrb r1, [r1] \n"
- "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
- "eor r2, r1, #0xff \n" /* r2 = othercore */
- "strb r2, [r0, #2] \n" /* cl->turn = othercore */
- "1: \n"
- "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
- "cmp r3, #0 \n" /* yes? lock acquired */
- "bxeq lr \n"
- "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
- "cmp r3, r1 \n"
- "bxeq lr \n" /* yes? lock acquired */
- "b 1b \n" /* keep trying */
- : : "i"(&PROCESSOR_ID)
- );
- (void)cl;
-}
-
-/*---------------------------------------------------------------------------
- * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
- *---------------------------------------------------------------------------
- */
-int corelock_try_lock(struct corelock *cl) __attribute__((naked));
-int corelock_try_lock(struct corelock *cl)
-{
- /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
- asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
- "ldrb r1, [r1] \n"
- "mov r3, r0 \n"
- "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
- "eor r2, r1, #0xff \n" /* r2 = othercore */
- "strb r2, [r0, #2] \n" /* cl->turn = othercore */
- "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
- "eors r0, r0, r2 \n" /* yes? lock acquired */
- "bxne lr \n"
- "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
- "ands r0, r0, r1 \n"
- "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
- "bx lr \n" /* return result */
- : : "i"(&PROCESSOR_ID)
- );
-
- return 0;
- (void)cl;
-}
-
-/*---------------------------------------------------------------------------
- * Release ownership of the corelock
- *---------------------------------------------------------------------------
- */
-void corelock_unlock(struct corelock *cl) __attribute__((naked));
-void corelock_unlock(struct corelock *cl)
-{
- asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
- "ldrb r1, [r1] \n"
- "mov r2, #0 \n" /* cl->myl[core] = 0 */
- "strb r2, [r0, r1, lsr #7] \n"
- "bx lr \n"
- : : "i"(&PROCESSOR_ID)
- );
- (void)cl;
-}
-#else /* C versions for reference */
-/*---------------------------------------------------------------------------
- * Wait for the corelock to become free and aquire it when it does.
- *---------------------------------------------------------------------------
- */
-void corelock_lock(struct corelock *cl)
-{
- const unsigned int core = CURRENT_CORE;
- const unsigned int othercore = 1 - core;
-
- cl->myl[core] = core;
- cl->turn = othercore;
-
- for (;;)
- {
- if (cl->myl[othercore] == 0 || cl->turn == core)
- break;
- }
-}
-
-/*---------------------------------------------------------------------------
- * Try to aquire the corelock. If free, caller gets it, otherwise return 0.
- *---------------------------------------------------------------------------
- */
-int corelock_try_lock(struct corelock *cl)
-{
- const unsigned int core = CURRENT_CORE;
- const unsigned int othercore = 1 - core;
-
- cl->myl[core] = core;
- cl->turn = othercore;
-
- if (cl->myl[othercore] == 0 || cl->turn == core)
- {
- return 1;
- }
-
- cl->myl[core] = 0;
- return 0;
-}
-
-/*---------------------------------------------------------------------------
- * Release ownership of the corelock
- *---------------------------------------------------------------------------
- */
-void corelock_unlock(struct corelock *cl)
-{
- cl->myl[CURRENT_CORE] = 0;
-}
-#endif /* ASM / C selection */
-
-#endif /* CONFIG_CORELOCK == SW_CORELOCK */
-
-/*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated and if
- * no other core requested a wakeup for it to perform a task.
- *---------------------------------------------------------------------------
- */
-#ifdef CPU_PP502x
-#if NUM_CORES == 1
-static inline void core_sleep(void)
-{
- sleep_core(CURRENT_CORE);
- enable_irq();
-}
-#else
-static inline void core_sleep(unsigned int core)
-{
-#if 1
- asm volatile (
- "mov r0, #4 \n" /* r0 = 0x4 << core */
- "mov r0, r0, lsl %[c] \n"
- "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
- "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
- "tst r1, r0, lsl #2 \n"
- "moveq r1, #0x80000000 \n" /* Then sleep */
- "streq r1, [%[ctl], %[c], lsl #2] \n"
- "moveq r1, #0 \n" /* Clear control reg */
- "streq r1, [%[ctl], %[c], lsl #2] \n"
- "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
- "str r1, [%[mbx], #8] \n"
- "1: \n" /* Wait for wake procedure to finish */
- "ldr r1, [%[mbx], #0] \n"
- "tst r1, r0, lsr #2 \n"
- "bne 1b \n"
- :
- : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
- : "r0", "r1");
-#else /* C version for reference */
- /* Signal intent to sleep */
- MBX_MSG_SET = 0x4 << core;
-
- /* Something waking or other processor intends to wake us? */
- if ((MBX_MSG_STAT & (0x10 << core)) == 0)
- {
- sleep_core(core);
- wake_core(core);
- }
-
- /* Signal wake - clear wake flag */
- MBX_MSG_CLR = 0x14 << core;
-
- /* Wait for other processor to finish wake procedure */
- while (MBX_MSG_STAT & (0x1 << core));
-#endif /* ASM/C selection */
- enable_irq();
-}
-#endif /* NUM_CORES */
-#elif CONFIG_CPU == PP5002
-#if NUM_CORES == 1
-static inline void core_sleep(void)
-{
- sleep_core(CURRENT_CORE);
- enable_irq();
-}
-#else
-/* PP5002 has no mailboxes - emulate using bytes */
-static inline void core_sleep(unsigned int core)
-{
-#if 1
- asm volatile (
- "mov r0, #1 \n" /* Signal intent to sleep */
- "strb r0, [%[sem], #2] \n"
- "ldrb r0, [%[sem], #1] \n" /* && stay_awake == 0? */
- "cmp r0, #0 \n"
- "bne 2f \n"
- /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
- * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
- * that the correct alternative is executed. Don't change the order
- * of the next 4 instructions! */
- "tst pc, #0x0c \n"
- "mov r0, #0xca \n"
- "strne r0, [%[ctl], %[c], lsl #2] \n"
- "streq r0, [%[ctl], %[c], lsl #2] \n"
- "nop \n" /* nop's needed because of pipeline */
- "nop \n"
- "nop \n"
- "2: \n"
- "mov r0, #0 \n" /* Clear stay_awake and sleep intent */
- "strb r0, [%[sem], #1] \n"
- "strb r0, [%[sem], #2] \n"
- "1: \n" /* Wait for wake procedure to finish */
- "ldrb r0, [%[sem], #0] \n"
- "cmp r0, #0 \n"
- "bne 1b \n"
- :
- : [sem]"r"(&core_semaphores[core]), [c]"r"(core),
- [ctl]"r"(&CPU_CTL)
- : "r0"
- );
-#else /* C version for reference */
- /* Signal intent to sleep */
- core_semaphores[core].intend_sleep = 1;
-
- /* Something waking or other processor intends to wake us? */
- if (core_semaphores[core].stay_awake == 0)
- {
- sleep_core(core);
- }
-
- /* Signal wake - clear wake flag */
- core_semaphores[core].stay_awake = 0;
- core_semaphores[core].intend_sleep = 0;
-
- /* Wait for other processor to finish wake procedure */
- while (core_semaphores[core].intend_wake != 0);
-
- /* Enable IRQ */
-#endif /* ASM/C selection */
- enable_irq();
-}
-#endif /* NUM_CORES */
-#endif /* PP CPU type */
-
-/*---------------------------------------------------------------------------
- * Wake another processor core that is sleeping or prevent it from doing so
- * if it was already destined. FIQ, IRQ should be disabled before calling.
- *---------------------------------------------------------------------------
- */
-#if NUM_CORES == 1
-/* Shared single-core build debugging version */
-void core_wake(void)
-{
- /* No wakey - core already wakey */
-}
-#elif defined (CPU_PP502x)
-void core_wake(unsigned int othercore)
-{
-#if 1
- /* avoid r0 since that contains othercore */
- asm volatile (
- "mrs r3, cpsr \n" /* Disable IRQ */
- "orr r1, r3, #0x80 \n"
- "msr cpsr_c, r1 \n"
- "mov r2, #0x11 \n" /* r2 = (0x11 << othercore) */
- "mov r2, r2, lsl %[oc] \n" /* Signal intent to wake othercore */
- "str r2, [%[mbx], #4] \n"
- "1: \n" /* If it intends to sleep, let it first */
- "ldr r1, [%[mbx], #0] \n" /* (MSG_MSG_STAT & (0x4 << othercore)) != 0 ? */
- "eor r1, r1, #0xc \n"
- "tst r1, r2, lsr #2 \n"
- "ldr r1, [%[ctl], %[oc], lsl #2] \n" /* && (PROC_CTL(othercore) & PROC_SLEEP) == 0 ? */
- "tsteq r1, #0x80000000 \n"
- "beq 1b \n" /* Wait for sleep or wake */
- "tst r1, #0x80000000 \n" /* If sleeping, wake it */
- "movne r1, #0x0 \n"
- "strne r1, [%[ctl], %[oc], lsl #2] \n"
- "mov r1, r2, lsr #4 \n"
- "str r1, [%[mbx], #8] \n" /* Done with wake procedure */
- "msr cpsr_c, r3 \n" /* Restore IRQ */
- :
- : [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
- [oc]"r"(othercore)
- : "r1", "r2", "r3");
-#else /* C version for reference */
- /* Disable interrupts - avoid reentrancy from the tick */
- int oldlevel = disable_irq_save();
-
- /* Signal intent to wake other processor - set stay awake */
- MBX_MSG_SET = 0x11 << othercore;
-
- /* If it intends to sleep, wait until it does or aborts */
- while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
- (PROC_CTL(othercore) & PROC_SLEEP) == 0);
-
- /* If sleeping, wake it up */
- if (PROC_CTL(othercore) & PROC_SLEEP)
- PROC_CTL(othercore) = 0;
-
- /* Done with wake procedure */
- MBX_MSG_CLR = 0x1 << othercore;
- restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#elif CONFIG_CPU == PP5002
-/* PP5002 has no mailboxes - emulate using bytes */
-void core_wake(unsigned int othercore)
-{
-#if 1
- /* avoid r0 since that contains othercore */
- asm volatile (
- "mrs r3, cpsr \n" /* Disable IRQ */
- "orr r1, r3, #0x80 \n"
- "msr cpsr_c, r1 \n"
- "mov r1, #1 \n" /* Signal intent to wake other core */
- "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
- "strh r1, [%[sem], #0] \n"
- "mov r2, #0x8000 \n"
- "1: \n" /* If it intends to sleep, let it first */
- "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
- "cmp r1, #1 \n"
- "ldr r1, [%[st]] \n" /* && not sleeping ? */
- "tsteq r1, r2, lsr %[oc] \n"
- "beq 1b \n" /* Wait for sleep or wake */
- "tst r1, r2, lsr %[oc] \n"
- "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
- "movne r1, #0xce \n"
- "strne r1, [r2, %[oc], lsl #2] \n"
- "mov r1, #0 \n" /* Done with wake procedure */
- "strb r1, [%[sem], #0] \n"
- "msr cpsr_c, r3 \n" /* Restore IRQ */
- :
- : [sem]"r"(&core_semaphores[othercore]),
- [st]"r"(&PROC_STAT),
- [oc]"r"(othercore)
- : "r1", "r2", "r3"
- );
-#else /* C version for reference */
- /* Disable interrupts - avoid reentrancy from the tick */
- int oldlevel = disable_irq_save();
-
- /* Signal intent to wake other processor - set stay awake */
- core_semaphores[othercore].intend_wake = 1;
- core_semaphores[othercore].stay_awake = 1;
-
- /* If it intends to sleep, wait until it does or aborts */
- while (core_semaphores[othercore].intend_sleep != 0 &&
- (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
-
- /* If sleeping, wake it up */
- if (PROC_STAT & PROC_SLEEPING(othercore))
- wake_core(othercore);
-
- /* Done with wake procedure */
- core_semaphores[othercore].intend_wake = 0;
- restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#endif /* CPU type */
-
-#if NUM_CORES > 1
-/*---------------------------------------------------------------------------
- * Switches to a stack that always resides in the Rockbox core.
- *
- * Needed when a thread suicides on a core other than the main CPU since the
- * stack used when idling is the stack of the last thread to run. This stack
- * may not reside in the core firmware in which case the core will continue
- * to use a stack from an unloaded module until another thread runs on it.
- *---------------------------------------------------------------------------
- */
-static inline void switch_to_idle_stack(const unsigned int core)
-{
- asm volatile (
- "str sp, [%0] \n" /* save original stack pointer on idle stack */
- "mov sp, %0 \n" /* switch stacks */
- : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
- (void)core;
-}
-
-/*---------------------------------------------------------------------------
- * Perform core switch steps that need to take place inside switch_thread.
- *
- * These steps must take place while before changing the processor and after
- * having entered switch_thread since switch_thread may not do a normal return
- * because the stack being used for anything the compiler saved will not belong
- * to the thread's destination core and it may have been recycled for other
- * purposes by the time a normal context load has taken place. switch_thread
- * will also clobber anything stashed in the thread's context or stored in the
- * nonvolatile registers if it is saved there before the call since the
- * compiler's order of operations cannot be known for certain.
- */
-static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
-{
- /* Flush our data to ram */
- cpucache_flush();
- /* Stash thread in r4 slot */
- thread->context.r[0] = (uint32_t)thread;
- /* Stash restart address in r5 slot */
- thread->context.r[1] = thread->context.start;
- /* Save sp in context.sp while still running on old core */
- thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
-}
-
-/*---------------------------------------------------------------------------
- * Machine-specific helper function for switching the processor a thread is
- * running on. Basically, the thread suicides on the departing core and is
- * reborn on the destination. Were it not for gcc's ill-behavior regarding
- * naked functions written in C where it actually clobbers non-volatile
- * registers before the intended prologue code, this would all be much
- * simpler. Generic setup is done in switch_core itself.
- */
-
-/*---------------------------------------------------------------------------
- * This actually performs the core switch.
- */
-static void __attribute__((naked))
- switch_thread_core(unsigned int core, struct thread_entry *thread)
-{
- /* Pure asm for this because compiler behavior isn't sufficiently predictable.
- * Stack access also isn't permitted until restoring the original stack and
- * context. */
- asm volatile (
- "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
- "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
- "ldr r2, [r2, r0, lsl #2] \n"
- "add r2, r2, %0*4 \n"
- "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
- "mov sp, r2 \n" /* switch stacks */
- "adr r2, 1f \n" /* r2 = new core restart address */
- "str r2, [r1, #40] \n" /* thread->context.start = r2 */
- "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
- "1: \n"
- "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
- "mov r1, #0 \n" /* Clear start address */
- "str r1, [r0, #40] \n"
- "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
- "mov lr, pc \n"
- "bx r0 \n"
- "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
- ".ltorg \n" /* Dump constant pool */
- : : "i"(IDLE_STACK_WORDS)
- );
- (void)core; (void)thread;
-}
-
-/*---------------------------------------------------------------------------
- * Do any device-specific inits for the threads and synchronize the kernel
- * initializations.
- *---------------------------------------------------------------------------
- */
-static void core_thread_init(unsigned int core) INIT_ATTR;
-static void core_thread_init(unsigned int core)
-{
- if (core == CPU)
- {
- /* Wake up coprocessor and let it initialize kernel and threads */
-#ifdef CPU_PP502x
- MBX_MSG_CLR = 0x3f;
-#endif
- wake_core(COP);
- /* Sleep until COP has finished */
- sleep_core(CPU);
- }
- else
- {
- /* Wake the CPU and return */
- wake_core(CPU);
- }
-}
-#endif /* NUM_CORES */
-
-#elif defined(CPU_TCC780X) || defined(CPU_TCC77X) /* Single core only for now */ \
-|| CONFIG_CPU == IMX31L || CONFIG_CPU == DM320 || CONFIG_CPU == AS3525 \
-|| CONFIG_CPU == S3C2440 || CONFIG_CPU == S5L8701 || CONFIG_CPU == AS3525v2
-/* Use the generic ARMv4/v5/v6 wait for IRQ */
-static inline void core_sleep(void)
-{
- asm volatile (
- "mcr p15, 0, %0, c7, c0, 4 \n" /* Wait for interrupt */
-#if CONFIG_CPU == IMX31L
- "nop\n nop\n nop\n nop\n nop\n" /* Clean out the pipes */
-#endif
- : : "r"(0)
- );
- enable_irq();
-}
-#else
-static inline void core_sleep(void)
-{
- #warning core_sleep not implemented, battery life will be decreased
- enable_irq();
-}
-#endif /* CONFIG_CPU == */
-
+#include "thread-pp.c"
+#endif /* CPU_PP */
#elif defined(CPU_COLDFIRE)
-/*---------------------------------------------------------------------------
- * Start the thread running and terminate it if it returns
- *---------------------------------------------------------------------------
- */
-void start_thread(void); /* Provide C access to ASM label */
-static void __attribute__((used)) __start_thread(void)
-{
- /* a0=macsr, a1=context */
- asm volatile (
- "start_thread: \n" /* Start here - no naked attribute */
- "move.l %a0, %macsr \n" /* Set initial mac status reg */
- "lea.l 48(%a1), %a1 \n"
- "move.l (%a1)+, %sp \n" /* Set initial stack */
- "move.l (%a1), %a2 \n" /* Fetch thread function pointer */
- "clr.l (%a1) \n" /* Mark thread running */
- "jsr (%a2) \n" /* Call thread function */
- );
- thread_exit();
-}
-
-/* Set the EMAC unit to fractional mode with saturation for each new thread,
- * since that is the most useful mode for the DSP work most threads will do.
- * Codecs should still initialize their preferred modes explicitly. The
- * context pointer is placed in the d2 slot and the start_thread pointer in
- * the d3 slot. The thread function pointer is placed in context.start. See
- * load_context for what happens when the thread is initially going to run.
- */
-#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
- (thread)->context.d[0] = (uint32_t)&(thread)->context, \
- (thread)->context.d[1] = (uint32_t)start_thread, \
- (thread)->context.start = (uint32_t)(function); })
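The fixed offsets the ColdFire asm relies on (macsr first, the stack pointer at 48, the start address at 52) imply a context layout along the following lines. This is an illustrative reconstruction only; the real structure is defined in the firmware's thread headers.

#include <stdint.h>

/* Illustrative reconstruction of the layout implied by the offsets above. */
struct coldfire_regs_sketch
{
    uint32_t macsr;   /*  0 - EMAC status register */
    uint32_t d[6];    /*  4 - d2-d7 */
    uint32_t a[5];    /* 28 - a2-a6 */
    uint32_t sp;      /* 48 - stack pointer (a7) */
    uint32_t start;   /* 52 - thread start address, cleared once running */
};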
-
-/*---------------------------------------------------------------------------
- * Store non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void store_context(void* addr)
-{
- asm volatile (
- "move.l %%macsr,%%d0 \n"
- "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
- : : "a" (addr) : "d0" /* only! */
- );
-}
-
-/*---------------------------------------------------------------------------
- * Load non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void load_context(const void* addr)
-{
- asm volatile (
- "move.l 52(%0), %%d0 \n" /* Get start address */
- "beq.b 1f \n" /* NULL -> already running */
- "movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
- "jmp (%%a2) \n" /* Start the thread */
- "1: \n"
- "movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
- "move.l %%d0, %%macsr \n"
- : : "a" (addr) : "d0" /* only! */
- );
-}
-
-/*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated.
- *---------------------------------------------------------------------------
- */
-static inline void core_sleep(void)
-{
- /* Supervisor mode, interrupts enabled upon wakeup */
- asm volatile ("stop #0x2000");
-}
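The 0x2000 immediate handed to the STOP instruction above is the status register value the core holds while stopped and upon wakeup: only the supervisor bit is set and the interrupt priority mask is zero, which is what the "interrupts enabled upon wakeup" comment refers to. Spelled out with hypothetical names:

/* Hypothetical names for the fields in the STOP immediate above. */
#define SR_SUPERVISOR  0x2000   /* S bit (bit 13): remain in supervisor mode */
#define SR_IPL_FIELD   0x0700   /* I2-I0 (bits 10-8): zero here, so all interrupt levels are enabled */
/* stop #(SR_SUPERVISOR | 0x0000) is the same as stop #0x2000 */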
-
+#include "thread-coldfire.c"
#elif CONFIG_CPU == SH7034
-/*---------------------------------------------------------------------------
- * Start the thread running and terminate it if it returns
- *---------------------------------------------------------------------------
- */
-void start_thread(void); /* Provide C access to ASM label */
-static void __attribute__((used)) __start_thread(void)
-{
- /* r8 = context */
- asm volatile (
- "_start_thread: \n" /* Start here - no naked attribute */
- "mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
- "mov.l @(28, r8), r15 \n" /* Set initial sp */
- "mov #0, r1 \n" /* Zero for clearing the start address */
- "jsr @r0 \n" /* Start the thread */
- "mov.l r1, @(36, r8) \n" /* Clear start address (in the jsr delay slot) */
- );
- thread_exit();
-}
-
-/* Place context pointer in r8 slot, function pointer in r9 slot, and
- * start_thread pointer in context_start */
-#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
- (thread)->context.r[1] = (uint32_t)(function), \
- (thread)->context.start = (uint32_t)start_thread; })
-
-/*---------------------------------------------------------------------------
- * Store non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void store_context(void* addr)
-{
- asm volatile (
- "add #36, %0 \n" /* Start at last reg. By the time routine */
- "sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
- "mov.l r15,@-%0 \n"
- "mov.l r14,@-%0 \n"
- "mov.l r13,@-%0 \n"
- "mov.l r12,@-%0 \n"
- "mov.l r11,@-%0 \n"
- "mov.l r10,@-%0 \n"
- "mov.l r9, @-%0 \n"
- "mov.l r8, @-%0 \n"
- : : "r" (addr)
- );
-}
-
-/*---------------------------------------------------------------------------
- * Load non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void load_context(const void* addr)
-{
- asm volatile (
- "mov.l @(36, %0), r0 \n" /* Get start address */
- "tst r0, r0 \n"
- "bt .running \n" /* NULL -> already running */
- "jmp @r0 \n" /* r8 = context */
- ".running: \n"
- "mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
- "mov.l @%0+, r9 \n"
- "mov.l @%0+, r10 \n"
- "mov.l @%0+, r11 \n"
- "mov.l @%0+, r12 \n"
- "mov.l @%0+, r13 \n"
- "mov.l @%0+, r14 \n"
- "mov.l @%0+, r15 \n"
- "lds.l @%0+, pr \n"
- : : "r" (addr) : "r0" /* only! */
- );
-}
-
-/*---------------------------------------------------------------------------
- * Put core in a power-saving state.
- *---------------------------------------------------------------------------
- */
-static inline void core_sleep(void)
-{
- asm volatile (
- "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
- "mov #0, r1 \n" /* Enable interrupts */
- "ldc r1, sr \n" /* Following instruction cannot be interrupted */
- "sleep \n" /* Execute SLEEP; with SBY cleared this enters sleep mode */
- : : "z"(&SBYCR-GBR) : "r1");
-}
-
+#include "thread-sh.c"
#elif defined(CPU_MIPS) && CPU_MIPS == 32
+#include "thread-mips32.c"
+#else
+/* Wouldn't compile anyway */
+#error Processor not implemented.
+#endif /* CONFIG_CPU == */
-/*---------------------------------------------------------------------------
- * Start the thread running and terminate it if it returns
- *---------------------------------------------------------------------------
- */
-
-void start_thread(void); /* Provide C access to ASM label */
-static void __attribute__((used)) _start_thread(void)
-{
- /* t1 = context */
- asm volatile (
- "start_thread: \n"
- ".set noreorder \n"
- ".set noat \n"
- "lw $8, 4($9) \n" /* Fetch thread function pointer ($8 = t0, $9 = t1) */
- "lw $29, 36($9) \n" /* Set initial sp(=$29) */
- "jalr $8 \n" /* Start the thread */
- "sw $0, 44($9) \n" /* Clear start address */
- ".set at \n"
- ".set reorder \n"
- );
- thread_exit();
-}
-
-/* Place context pointer in s0 slot, function pointer in s1 slot, and
- * start_thread pointer in context_start */
-#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
- (thread)->context.r[1] = (uint32_t)(function), \
- (thread)->context.start = (uint32_t)start_thread; })
-
-/*---------------------------------------------------------------------------
- * Store non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void store_context(void* addr)
-{
- asm volatile (
- ".set noreorder \n"
- ".set noat \n"
- "sw $16, 0(%0) \n" /* s0 */
- "sw $17, 4(%0) \n" /* s1 */
- "sw $18, 8(%0) \n" /* s2 */
- "sw $19, 12(%0) \n" /* s3 */
- "sw $20, 16(%0) \n" /* s4 */
- "sw $21, 20(%0) \n" /* s5 */
- "sw $22, 24(%0) \n" /* s6 */
- "sw $23, 28(%0) \n" /* s7 */
- "sw $30, 32(%0) \n" /* fp */
- "sw $29, 36(%0) \n" /* sp */
- "sw $31, 40(%0) \n" /* ra */
- ".set at \n"
- ".set reorder \n"
- : : "r" (addr)
- );
-}
-
-/*---------------------------------------------------------------------------
- * Load non-volatile context.
- *---------------------------------------------------------------------------
- */
-static inline void load_context(const void* addr)
-{
- asm volatile (
- ".set noat \n"
- ".set noreorder \n"
- "lw $8, 44(%0) \n" /* Get start address ($8 = t0) */
- "beqz $8, running \n" /* NULL -> already running */
- "nop \n"
- "jr $8 \n"
- "move $9, %0 \n" /* t1 = context */
- "running: \n"
- "lw $16, 0(%0) \n" /* s0 */
- "lw $17, 4(%0) \n" /* s1 */
- "lw $18, 8(%0) \n" /* s2 */
- "lw $19, 12(%0) \n" /* s3 */
- "lw $20, 16(%0) \n" /* s4 */
- "lw $21, 20(%0) \n" /* s5 */
- "lw $22, 24(%0) \n" /* s6 */
- "lw $23, 28(%0) \n" /* s7 */
- "lw $30, 32(%0) \n" /* fp */
- "lw $29, 36(%0) \n" /* sp */
- "lw $31, 40(%0) \n" /* ra */
- ".set at \n"
- ".set reorder \n"
- : : "r" (addr) : "t0", "t1"
- );
-}
-
-/*---------------------------------------------------------------------------
- * Put core in a power-saving state.
- *---------------------------------------------------------------------------
- */
-static inline void core_sleep(void)
-{
-#if CONFIG_CPU == JZ4732
- __cpm_idle_mode();
+#ifndef IF_NO_SKIP_YIELD
+#define IF_NO_SKIP_YIELD(...)
#endif
- asm volatile(".set mips32r2 \n"
- "mfc0 $8, $12 \n" /* mfc t0, $12 */
- "move $9, $8 \n" /* move t1, t0 */
- "la $10, 0x8000000 \n" /* la t2, 0x8000000 */
- "or $8, $8, $10 \n" /* Enable reduced power mode */
- "mtc0 $8, $12 \n" /* mtc t0, $12 */
- "wait \n"
- "mtc0 $9, $12 \n" /* mtc t1, $12 */
- ".set mips0 \n"
- ::: "t0", "t1", "t2"
- );
- enable_irq();
-}
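The magic constant in the MIPS sleep sequence above is bit 27 of the CP0 Status register ($12), the RP flag that the "Enable reduced power mode" comment refers to. With a hypothetical name:

/* Hypothetical name for the constant loaded into t2 above. */
#define CP0_STATUS_RP  (1u << 27)   /* == 0x08000000, Status.RP (reduced power) */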
-
-
-#endif /* CONFIG_CPU == */
/*
* End Processor-specific section