path: root/firmware/kernel
author    Thomas Martitz <kugel@rockbox.org>  2013-12-04 17:06:17 +0100
committer Thomas Martitz <kugel@rockbox.org>  2014-03-03 18:11:57 +0100
commit    382d1861af12741af4ff235b9d18f179c0adc4c5 (patch)
tree      26166c130d2889bb1ae1082e8f7aba103534f49e /firmware/kernel
parent    8bae5f2644b5d5759499fbf1066b9c35c6f859ad (diff)
kernel: Break out kernel primitives into separate files and move to separate dir.
No code changed, just shuffling stuff around. This should make it easier to build only select parts of the kernel and use different implementations. Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
Diffstat (limited to 'firmware/kernel')
-rw-r--r--  firmware/kernel/corelock.c             40
-rw-r--r--  firmware/kernel/include/corelock.h     53
-rw-r--r--  firmware/kernel/include/kernel.h       69
-rw-r--r--  firmware/kernel/include/mutex.h        62
-rw-r--r--  firmware/kernel/include/queue.h       157
-rw-r--r--  firmware/kernel/include/semaphore.h    40
-rw-r--r--  firmware/kernel/include/thread.h      387
-rw-r--r--  firmware/kernel/include/tick.h         67
-rw-r--r--  firmware/kernel/include/timeout.h      46
-rw-r--r--  firmware/kernel/kernel-internal.h      49
-rw-r--r--  firmware/kernel/mutex.c               152
-rw-r--r--  firmware/kernel/queue.c               786
-rw-r--r--  firmware/kernel/semaphore.c           142
-rw-r--r--  firmware/kernel/thread-internal.h     357
-rw-r--r--  firmware/kernel/thread.c             2442
-rw-r--r--  firmware/kernel/tick.c                 74
-rw-r--r--  firmware/kernel/timeout.c              97
17 files changed, 5020 insertions, 0 deletions
diff --git a/firmware/kernel/corelock.c b/firmware/kernel/corelock.c
new file mode 100644
index 0000000000..53d08a9069
--- /dev/null
+++ b/firmware/kernel/corelock.c
@@ -0,0 +1,40 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2007 by Daniel Ankers
+ *
+ * PP5002 and PP502x SoC threading support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include <string.h>
+#include "corelock.h"
+
+/* Core locks using Peterson's mutual exclusion algorithm */
+
+
+/*---------------------------------------------------------------------------
+ * Initialize the corelock structure.
+ *---------------------------------------------------------------------------
+ */
+void corelock_init(struct corelock *cl)
+{
+ memset(cl, 0, sizeof (*cl));
+}
+
+/* other corelock methods are ASM-optimized */
+#include "asm/corelock.c"
diff --git a/firmware/kernel/include/corelock.h b/firmware/kernel/include/corelock.h
new file mode 100644
index 0000000000..79302e0e3c
--- /dev/null
+++ b/firmware/kernel/include/corelock.h
@@ -0,0 +1,53 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+
+#ifndef CORELOCK_H
+#define CORELOCK_H
+
+#include "config.h"
+
+#ifndef HAVE_CORELOCK_OBJECT
+
+/* No atomic corelock op needed or just none defined */
+#define corelock_init(cl)
+#define corelock_lock(cl)
+#define corelock_try_lock(cl)
+#define corelock_unlock(cl)
+
+#else
+
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+ volatile unsigned char myl[NUM_CORES];
+ volatile unsigned char turn;
+} __attribute__((packed));
+
+/* Too big to inline everywhere */
+extern void corelock_init(struct corelock *cl);
+extern void corelock_lock(struct corelock *cl);
+extern int corelock_try_lock(struct corelock *cl);
+extern void corelock_unlock(struct corelock *cl);
+
+#endif /* HAVE_CORELOCK_OBJECT */
+
+#endif /* CORELOCK_H */
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h
new file mode 100644
index 0000000000..fafff25ce4
--- /dev/null
+++ b/firmware/kernel/include/kernel.h
@@ -0,0 +1,69 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#ifndef KERNEL_H
+#define KERNEL_H
+
+#include "config.h"
+
+#include "system.h"
+#include "queue.h"
+#include "mutex.h"
+#include "tick.h"
+
+#ifdef INCLUDE_TIMEOUT_API
+#include "timeout.h"
+#endif
+
+#ifdef HAVE_SEMAPHORE_OBJECTS
+#include "semaphore.h"
+#endif
+
+#ifdef HAVE_CORELOCK_OBJECT
+#include "corelock.h"
+#endif
+
+#define OBJ_WAIT_TIMEDOUT (-1)
+#define OBJ_WAIT_FAILED 0
+#define OBJ_WAIT_SUCCEEDED 1
+
+#define TIMEOUT_BLOCK -1
+#define TIMEOUT_NOBLOCK 0
+
+static inline void kernel_init(void)
+{
+ /* Init the threading API */
+ init_threads();
+
+ /* Other processors will not reach this point in a multicore build.
+ * In a single-core build with multiple cores they fall-through and
+ * sleep in cop_main without returning. */
+ if (CURRENT_CORE == CPU)
+ {
+ init_queues();
+ init_tick();
+#ifdef KDEV_INIT
+ kernel_device_init();
+#endif
+ }
+}
+
+
+#endif /* KERNEL_H */
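A minimal boot-sequence sketch showing where kernel_init() fits (the main() and system_init() names are assumptions; real targets have more involved entry code and coprocessor handling):

    #include "system.h"
    #include "kernel.h"

    void main(void)
    {
        system_init();   /* target/system setup (assumed name) */
        kernel_init();   /* init_threads(), then queues, tick and KDEV_INIT on the CPU core */

        /* From here on, threads, queues, mutexes and tick tasks may be used. */
        while (1)
            sleep(HZ);
    }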
diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h
new file mode 100644
index 0000000000..bcf5701bd9
--- /dev/null
+++ b/firmware/kernel/include/mutex.h
@@ -0,0 +1,62 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifndef MUTEX_H
+#define MUTEX_H
+
+#include <stdbool.h>
+#include "config.h"
+#include "thread.h"
+
+struct mutex
+{
+ struct thread_entry *queue; /* waiter list */
+ int recursion; /* lock owner recursion count */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct blocker blocker; /* priority inheritance info
+ for waiters */
+ bool no_preempt; /* don't allow higher-priority thread
+ to be scheduled even if woken */
+#else
+ struct thread_entry *thread; /* Indicates owner thread - an owner
+ implies a locked state - same goes
+ for priority scheduling
+ (in blocker struct for that) */
+#endif
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
+};
+
+extern void mutex_init(struct mutex *m);
+extern void mutex_lock(struct mutex *m);
+extern void mutex_unlock(struct mutex *m);
+#ifdef HAVE_PRIORITY_SCHEDULING
+/* Deprecated temporary function to disable mutex preempting a thread on
+ * unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
+ * reliance on it is a bug! */
+static inline void mutex_set_preempt(struct mutex *m, bool preempt)
+ { m->no_preempt = !preempt; }
+#else
+/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
+static inline bool mutex_test(const struct mutex *m)
+ { return m->thread != NULL; }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+#endif /* MUTEX_H */
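A usage sketch of the mutex API (the counter and function names are made up for illustration):

    #include "kernel.h"

    static struct mutex counter_mutex;   /* SHAREDBSS_ATTR on multi-core targets */
    static int shared_counter;

    void counter_init(void)
    {
        mutex_init(&counter_mutex);      /* call once, before any contention */
    }

    void counter_bump(void)
    {
        mutex_lock(&counter_mutex);      /* blocks until ownership is acquired;
                                            a recursive lock by the owner only
                                            bumps the recursion count */
        shared_counter++;
        mutex_unlock(&counter_mutex);    /* owner only; wakes the next waiter */
    }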
diff --git a/firmware/kernel/include/queue.h b/firmware/kernel/include/queue.h
new file mode 100644
index 0000000000..1b404f8297
--- /dev/null
+++ b/firmware/kernel/include/queue.h
@@ -0,0 +1,157 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifndef QUEUE_H
+#define QUEUE_H
+
+#include <stdint.h>
+#include "config.h"
+#include "thread.h"
+
+/* System defined message ID's - |sign bit = 1|class|id| */
+/* Event class list */
+#define SYS_EVENT_CLS_QUEUE 0
+#define SYS_EVENT_CLS_USB 1
+#define SYS_EVENT_CLS_POWER 2
+#define SYS_EVENT_CLS_FILESYS 3
+#define SYS_EVENT_CLS_PLUG 4
+#define SYS_EVENT_CLS_MISC 5
+#define SYS_EVENT_CLS_PRIVATE 7 /* For use inside plugins */
+/* make sure SYS_EVENT_CLS_BITS has enough range */
+
+/* Bit 31->|S|c...c|i...i| */
+#define SYS_EVENT ((long)(int)(1 << 31))
+#define SYS_EVENT_CLS_BITS (3)
+#define SYS_EVENT_CLS_SHIFT (31-SYS_EVENT_CLS_BITS)
+#define SYS_EVENT_CLS_MASK (((1l << SYS_EVENT_CLS_BITS)-1) << SYS_EVENT_CLS_SHIFT)
+#define MAKE_SYS_EVENT(cls, id) (SYS_EVENT | ((long)(cls) << SYS_EVENT_CLS_SHIFT) | (long)(id))
+/* Macros for extracting codes */
+#define SYS_EVENT_CLS(e) (((e) & SYS_EVENT_CLS_MASK) >> SYS_EVENT_CLS_SHIFT)
+#define SYS_EVENT_ID(e) ((e) & ~(SYS_EVENT|SYS_EVENT_CLS_MASK))
+
+#define SYS_TIMEOUT MAKE_SYS_EVENT(SYS_EVENT_CLS_QUEUE, 0)
+#define SYS_USB_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0)
+#define SYS_USB_CONNECTED_ACK MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
+#define SYS_USB_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 2)
+#define SYS_USB_LUN_LOCKED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 4)
+#define SYS_USB_READ_DATA MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 5)
+#define SYS_USB_WRITE_DATA MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 6)
+#define SYS_POWEROFF MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 0)
+#define SYS_CHARGER_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 1)
+#define SYS_CHARGER_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 2)
+#define SYS_BATTERY_UPDATE MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 3)
+#define SYS_FS_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_FILESYS, 0)
+#define SYS_HOTSWAP_INSERTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 0)
+#define SYS_HOTSWAP_EXTRACTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 1)
+#define SYS_PHONE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 2)
+#define SYS_PHONE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 3)
+#define SYS_REMOTE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 4)
+#define SYS_REMOTE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 5)
+#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
+#define SYS_CALL_INCOMING MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 3)
+#define SYS_CALL_HUNG_UP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 4)
+#define SYS_VOLUME_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 5)
+
+#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
+
+#define MAX_NUM_QUEUES 32
+#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
+#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
+
+struct queue_event
+{
+ long id;
+ intptr_t data;
+};
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+struct queue_sender_list
+{
+ /* If non-NULL, there is a thread waiting for the corresponding event */
+ /* Must be statically allocated to put in non-cached ram. */
+ struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
+ struct thread_entry *list; /* list of senders in map */
+ /* Send info for last message dequeued or NULL if replied or not sent */
+ struct thread_entry * volatile curr_sender;
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct blocker blocker;
+#endif
+};
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define QUEUE_GET_THREAD(q) \
+ (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
+#else
+/* Queues without priority enabled have no owner provision _at this time_ */
+#define QUEUE_GET_THREAD(q) \
+ (NULL)
+#endif
+
+struct event_queue
+{
+ struct thread_entry *queue; /* waiter list */
+ struct queue_event events[QUEUE_LENGTH]; /* list of events */
+ unsigned int volatile read; /* head of queue */
+ unsigned int volatile write; /* tail of queue */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ struct queue_sender_list * volatile send; /* list of threads waiting for
+ reply to an event */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct blocker *blocker_p; /* priority inheritance info
+ for sync message senders */
+#endif
+#endif
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
+};
+
+extern void queue_init(struct event_queue *q, bool register_queue);
+extern void queue_delete(struct event_queue *q);
+extern void queue_wait(struct event_queue *q, struct queue_event *ev);
+extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
+ int ticks);
+extern void queue_post(struct event_queue *q, long id, intptr_t data);
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+extern void queue_enable_queue_send(struct event_queue *q,
+ struct queue_sender_list *send,
+ unsigned int owner_id);
+extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
+extern void queue_reply(struct event_queue *q, intptr_t retval);
+extern bool queue_in_queue_send(struct event_queue *q);
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+extern bool queue_empty(const struct event_queue* q);
+extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
+
+#define QPEEK_FILTER_COUNT_MASK (0xffu) /* 0x00=1 filter, 0xff=256 filters */
+#define QPEEK_FILTER_HEAD_ONLY (1u << 8) /* Ignored if no filters */
+#define QPEEK_REMOVE_EVENTS (1u << 9) /* Remove or discard events */
+extern bool queue_peek_ex(struct event_queue *q,
+ struct queue_event *ev,
+ unsigned int flags,
+ const long (*filters)[2]);
+
+extern void queue_clear(struct event_queue* q);
+extern void queue_remove_from_head(struct event_queue *q, long id);
+extern int queue_count(const struct event_queue *q);
+extern int queue_broadcast(long id, intptr_t data);
+extern void init_queues(void);
+
+#endif /* QUEUE_H */
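A usage sketch of the event-queue API as a typical thread main loop (the queue, thread and private message names are made up; real threads usually pass unhandled system events on to a default handler):

    #include "kernel.h"

    #define EXAMPLE_MSG_REFRESH 1               /* thread-private message ID */

    static struct event_queue example_queue;    /* normally SHAREDBSS_ATTR */

    static void example_thread(void)
    {
        struct queue_event ev;

        while (1)
        {
            /* Wake at least once per second even if nothing is posted */
            queue_wait_w_tmo(&example_queue, &ev, HZ);

            switch (ev.id)
            {
            case EXAMPLE_MSG_REFRESH:
                /* handle the request posted below */
                break;
            case SYS_TIMEOUT:
                /* periodic housekeeping */
                break;
            default:
                /* SYS_USB_CONNECTED etc. would be dealt with here */
                break;
            }
        }
    }

    void example_init(void)
    {
        queue_init(&example_queue, true);       /* register for queue_broadcast */
        queue_post(&example_queue, EXAMPLE_MSG_REFRESH, 0);
    }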
diff --git a/firmware/kernel/include/semaphore.h b/firmware/kernel/include/semaphore.h
new file mode 100644
index 0000000000..40e60bb88d
--- /dev/null
+++ b/firmware/kernel/include/semaphore.h
@@ -0,0 +1,40 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifndef SEMAPHORE_H
+#define SEMAPHORE_H
+
+#include "config.h"
+#include "thread.h"
+
+struct semaphore
+{
+ struct thread_entry *queue; /* Waiter list */
+ int volatile count; /* # of waits remaining before unsignaled */
+ int max; /* maximum # of waits to remain signaled */
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
+};
+
+extern void semaphore_init(struct semaphore *s, int max, int start);
+extern int semaphore_wait(struct semaphore *s, int timeout);
+extern void semaphore_release(struct semaphore *s);
+
+#endif /* SEMAPHORE_H */
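A usage sketch of the semaphore API as a completion signal from interrupt context (names are made up; requires HAVE_SEMAPHORE_OBJECTS):

    #include "kernel.h"

    static struct semaphore transfer_done;

    void transfer_init(void)
    {
        semaphore_init(&transfer_done, 1, 0);  /* max count 1, starts unsignalled */
    }

    void transfer_isr(void)                    /* hypothetical interrupt handler */
    {
        semaphore_release(&transfer_done);     /* wake the waiter or latch the count */
    }

    int transfer_wait(void)
    {
        /* Block for up to one second; OBJ_WAIT_SUCCEEDED or OBJ_WAIT_TIMEDOUT */
        return semaphore_wait(&transfer_done, HZ);
    }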
diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h
new file mode 100644
index 0000000000..9cc33b23ae
--- /dev/null
+++ b/firmware/kernel/include/thread.h
@@ -0,0 +1,387 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifndef THREAD_H
+#define THREAD_H
+
+#include "config.h"
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include "gcc_extensions.h"
+#include "corelock.h"
+
+/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
+ * by giving high priority threads more CPU time than lower priority threads
+ * when they need it. Priority is differential such that the priority
+ * difference between a lower priority runnable thread and the highest priority
+ * runnable thread determines the amount of aging necessary for the lower
+ * priority thread to be scheduled in order to prevent starvation.
+ *
+ * If software playback codec pcm buffer is going down to critical, codec
+ * can gradually raise its own priority to override user interface and
+ * prevent playback skipping.
+ */
+#define PRIORITY_RESERVED_HIGH 0 /* Reserved */
+#define PRIORITY_RESERVED_LOW 32 /* Reserved */
+#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
+#define LOWEST_PRIORITY 31 /* The lowest possible thread priority */
+/* Realtime range reserved for threads that will not allow threads of lower
+ * priority to age and run (future expansion) */
+#define PRIORITY_REALTIME_1 1
+#define PRIORITY_REALTIME_2 2
+#define PRIORITY_REALTIME_3 3
+#define PRIORITY_REALTIME_4 4
+#define PRIORITY_REALTIME 4 /* Lowest realtime range */
+#define PRIORITY_BUFFERING 15 /* Codec buffering thread */
+#define PRIORITY_USER_INTERFACE 16 /* The main thread */
+#define PRIORITY_RECORDING 16 /* Recording thread */
+#define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */
+#define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */
+#define PRIORITY_SYSTEM 18 /* All other firmware threads */
+#define PRIORITY_BACKGROUND 20 /* Normal application threads */
+#define NUM_PRIORITIES 32
+#define PRIORITY_IDLE 32 /* Priority representative of no tasks */
+
+#define IO_PRIORITY_IMMEDIATE 0
+#define IO_PRIORITY_BACKGROUND 32
+
+
+#if CONFIG_CODEC == SWCODEC
+# ifdef HAVE_HARDWARE_CLICK
+# define BASETHREADS 17
+# else
+# define BASETHREADS 16
+# endif
+#else
+# define BASETHREADS 11
+#endif /* CONFIG_CODEC == * */
+
+#ifndef TARGET_EXTRA_THREADS
+#define TARGET_EXTRA_THREADS 0
+#endif
+
+#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
+/*
+ * We need more stack when we run under a host OS, perhaps because C
+ * library functions are more expensive there.
+ *
+ * (The simulator possibly doesn't simulate stack usage anyway, but still...) */
+
+#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
+struct regs
+{
+ void *t; /* OS thread */
+ void *told; /* Last thread in slot (explained in thread-sdl.c) */
+ void *s; /* Semaphore for blocking and wakeup */
+ void (*start)(void); /* Start function */
+};
+
+#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
+#else
+#include "asm/thread.h"
+#endif /* HAVE_SDL_THREADS */
+
+/* NOTE: The use of the word "queue" may also refer to a linked list of
+ threads being maintained that are normally dealt with in FIFO order
+ and not necessarily a kernel event_queue */
+enum
+{
+ /* States without a timeout must be first */
+ STATE_KILLED = 0, /* Thread is killed (default) */
+ STATE_RUNNING, /* Thread is currently running */
+ STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
+ /* These states involve adding the thread to the tmo list */
+ STATE_SLEEPING, /* Thread is sleeping with a timeout */
+ STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
+ /* Miscellaneous states */
+ STATE_FROZEN, /* Thread is suspended and will not run until
+ thread_thaw is called with its ID */
+ THREAD_NUM_STATES,
+ TIMEOUT_STATE_FIRST = STATE_SLEEPING,
+};
+
+#if NUM_CORES > 1
+/* Pointer value for name field to indicate thread is being killed. Using
+ * an alternate STATE_* won't work since that would interfere with operation
+ * while the thread is still running. */
+#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
+#endif
+
+/* Link information for lists thread is in */
+struct thread_entry; /* forward */
+struct thread_list
+{
+ struct thread_entry *prev; /* Previous thread in a list */
+ struct thread_entry *next; /* Next thread in a list */
+};
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+struct blocker
+{
+ struct thread_entry * volatile thread; /* thread blocking other threads
+ (aka. object owner) */
+ int priority; /* highest priority waiter */
+ struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+};
+
+/* Choices of wakeup protocol */
+
+/* For transfer of object ownership by one thread to another thread by
+ * the owning thread itself (mutexes) */
+struct thread_entry *
+ wakeup_priority_protocol_transfer(struct thread_entry *thread);
+
+/* For release by owner where ownership doesn't change - other threads,
+ * interrupts, timeouts, etc. (mutex timeout, queues) */
+struct thread_entry *
+ wakeup_priority_protocol_release(struct thread_entry *thread);
+
+
+struct priority_distribution
+{
+ uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
+ uint32_t mask; /* Bitmask of hist entries that are not zero */
+};
+
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+/* Information kept in each thread slot
+ * members are arranged according to size - largest first - in order
+ * to ensure both alignment and packing at the same time.
+ */
+struct thread_entry
+{
+ struct regs context; /* Register context at switch -
+ _must_ be first member */
+ uintptr_t *stack; /* Pointer to top of stack */
+ const char *name; /* Thread name */
+ long tmo_tick; /* Tick when thread should be woken from
+ timeout -
+ states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
+ struct thread_list l; /* Links for blocked/waking/running -
+ circular linkage in both directions */
+ struct thread_list tmo; /* Links for timeout list -
+ Circular in reverse direction, NULL-terminated in
+ forward direction -
+ states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
+ struct thread_entry **bqp; /* Pointer to list variable in kernel
+ object where thread is blocked - used
+ for implicit unblock and explicit wake
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+#ifdef HAVE_CORELOCK_OBJECT
+ struct corelock *obj_cl; /* Object corelock where thread is blocked -
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+ struct corelock waiter_cl; /* Corelock for thread_wait */
+ struct corelock slot_cl; /* Corelock to lock thread slot */
+ unsigned char core; /* The core to which thread belongs */
+#endif
+ struct thread_entry *queue; /* List of threads waiting for thread to be
+ removed */
+#ifdef HAVE_WAKEUP_EXT_CB
+ void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
+ performs special steps needed when being
+ forced off of an object's wait queue that
+ go beyond the standard wait queue removal
+ and priority disinheritance */
+ /* Only enabled when using queue_send for now */
+#endif
+#if defined(HAVE_SEMAPHORE_OBJECTS) || \
+ defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
+ NUM_CORES > 1
+ volatile intptr_t retval; /* Return value from a blocked operation/
+ misc. use */
+#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Priority summary of owned objects that support inheritance */
+ struct blocker *blocker; /* Pointer to blocker when this thread is blocked
+ on an object that supports PIP -
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+ struct priority_distribution pdist; /* Priority summary of owned objects
+ that have blocked threads and thread's own
+ base priority */
+ int skip_count; /* Number of times skipped if higher priority
+ thread was running */
+ unsigned char base_priority; /* Base priority (set explicitly during
+ creation or thread_set_priority) */
+ unsigned char priority; /* Scheduled priority (higher of base or
+ all threads blocked by this one) */
+#endif
+ uint16_t id; /* Current slot id */
+ unsigned short stack_size; /* Size of stack in bytes */
+ unsigned char state; /* Thread slot state (STATE_*) */
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ unsigned char cpu_boost; /* CPU frequency boost flag */
+#endif
+#ifdef HAVE_IO_PRIORITY
+ unsigned char io_priority;
+#endif
+};
+
+/*** Macros for internal use ***/
+/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
+#define THREAD_ID_VERSION_SHIFT 8
+#define THREAD_ID_VERSION_MASK 0xff00
+#define THREAD_ID_SLOT_MASK 0x00ff
+#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
+
+#ifdef HAVE_CORELOCK_OBJECT
+/* Operations to be performed just before stopping a thread and starting
+ a new one if specified before calling switch_thread */
+enum
+{
+ TBOP_CLEAR = 0, /* No operation to do */
+ TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
+ TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
+};
+
+struct thread_blk_ops
+{
+ struct corelock *cl_p; /* pointer to corelock */
+ unsigned char flags; /* TBOP_* flags */
+};
+#endif /* HAVE_CORELOCK_OBJECT */
+
+/* Information kept for each core
+ * Members are arranged for the same reason as in thread_entry
+ */
+struct core_entry
+{
+ /* "Active" lists - core is constantly active on these and are never
+ locked and interrupts do not access them */
+ struct thread_entry *running; /* threads that are running (RTR) */
+ struct thread_entry *timeout; /* threads that are on a timeout before
+ running again */
+ struct thread_entry *block_task; /* Task going off running list */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct priority_distribution rtr; /* Summary of running and ready-to-run
+ threads */
+#endif
+ long next_tmo_check; /* soonest time to check tmo threads */
+#ifdef HAVE_CORELOCK_OBJECT
+ struct thread_blk_ops blk_ops; /* operations to perform when
+ blocking a thread */
+ struct corelock rtr_cl; /* Lock for rtr list */
+#endif /* HAVE_CORELOCK_OBJECT */
+};
+
+extern void yield(void);
+extern unsigned sleep(unsigned ticks);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define IF_PRIO(...) __VA_ARGS__
+#define IFN_PRIO(...)
+#else
+#define IF_PRIO(...)
+#define IFN_PRIO(...) __VA_ARGS__
+#endif
+
+void core_idle(void);
+void core_wake(IF_COP_VOID(unsigned int core));
+
+/* Initialize the scheduler */
+void init_threads(void) INIT_ATTR;
+
+/* Allocate a thread in the scheduler */
+#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
+unsigned int create_thread(void (*function)(void),
+ void* stack, size_t stack_size,
+ unsigned flags, const char *name
+ IF_PRIO(, int priority)
+ IF_COP(, unsigned int core));
+
+/* Set and clear the CPU frequency boost flag for the calling thread */
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+void trigger_cpu_boost(void);
+void cancel_cpu_boost(void);
+#else
+#define trigger_cpu_boost() do { } while(0)
+#define cancel_cpu_boost() do { } while(0)
+#endif
+/* Return thread entry from id */
+struct thread_entry *thread_id_entry(unsigned int thread_id);
+/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
+ * Has no effect on a thread that is not frozen. */
+void thread_thaw(unsigned int thread_id);
+/* Wait for a thread to exit */
+void thread_wait(unsigned int thread_id);
+/* Exit the current thread */
+void thread_exit(void) NORETURN_ATTR;
+#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
+#define ALLOW_REMOVE_THREAD
+/* Remove a thread from the scheduler */
+void remove_thread(unsigned int thread_id);
+#endif
+
+/* Switch to next runnable thread */
+void switch_thread(void);
+/* Blocks a thread for at least the specified number of ticks (0 = wait until
+ * next tick) */
+void sleep_thread(int ticks);
+/* Indefinitely blocks the current thread on a thread queue */
+void block_thread(struct thread_entry *current);
+/* Blocks the current thread on a thread queue until explicitly woken or
+ * the timeout is reached */
+void block_thread_w_tmo(struct thread_entry *current, int timeout);
+
+/* Return bit flags for thread wakeup */
+#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
+#define THREAD_OK 0x1 /* A thread was woken up */
+#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more threads of
+ higher priority than the current one were woken) */
+
+/* A convenience function for waking an entire queue of threads. */
+unsigned int thread_queue_wake(struct thread_entry **list);
+
+/* Wakeup a thread at the head of a list */
+unsigned int wakeup_thread(struct thread_entry **list);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+int thread_set_priority(unsigned int thread_id, int priority);
+int thread_get_priority(unsigned int thread_id);
+#endif /* HAVE_PRIORITY_SCHEDULING */
+#ifdef HAVE_IO_PRIORITY
+void thread_set_io_priority(unsigned int thread_id, int io_priority);
+int thread_get_io_priority(unsigned int thread_id);
+#endif /* HAVE_IO_PRIORITY */
+#if NUM_CORES > 1
+unsigned int switch_core(unsigned int new_core);
+#endif
+
+/* Return the id of the calling thread. */
+unsigned int thread_self(void);
+
+/* Return the thread_entry for the calling thread.
+ * INTERNAL: Intended for use by kernel and not for programs. */
+struct thread_entry* thread_self_entry(void);
+
+/* Debugging info - only! */
+int thread_stack_usage(const struct thread_entry *thread);
+#if NUM_CORES > 1
+int idle_stack_usage(unsigned int core);
+#endif
+void thread_get_name(char *buffer, int size,
+ struct thread_entry *thread);
+#ifdef RB_PROFILE
+void profile_thread(void);
+#endif
+
+#endif /* THREAD_H */
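A usage sketch of thread creation with the frozen-start pattern (stack size, names and priority are illustrative; IF_PRIO/IF_COP expand to nothing on builds without priority scheduling or a coprocessor):

    #include "kernel.h"

    static long example_stack[DEFAULT_STACK_SIZE / sizeof(long)];
    static const char example_thread_name[] = "example";
    static unsigned int example_thread_id;

    static void example_thread(void)
    {
        while (1)
            sleep(HZ);                          /* placeholder body */
    }

    void example_module_init(void)
    {
        example_thread_id = create_thread(example_thread,
                                          example_stack, sizeof(example_stack),
                                          CREATE_THREAD_FROZEN, example_thread_name
                                          IF_PRIO(, PRIORITY_BACKGROUND)
                                          IF_COP(, CPU));

        /* ... finish setting up state the thread depends on ... */

        thread_thaw(example_thread_id);         /* now make it runnable */
    }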
diff --git a/firmware/kernel/include/tick.h b/firmware/kernel/include/tick.h
new file mode 100644
index 0000000000..9810f4a1e5
--- /dev/null
+++ b/firmware/kernel/include/tick.h
@@ -0,0 +1,67 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#ifndef TICK_H
+#define TICK_H
+
+#include "config.h"
+#include "system.h" /* for NULL */
+extern void init_tick(void);
+
+#define HZ 100 /* number of ticks per second */
+
+#define MAX_NUM_TICK_TASKS 8
+
+/* global tick variable */
+#if defined(CPU_PP) && defined(BOOTLOADER) && \
+ !defined(HAVE_BOOTLOADER_USB_MODE)
+/* We don't enable interrupts in the PP bootloader unless USB mode is
+ enabled for it, so we need to fake the current_tick variable */
+#define current_tick (signed)(USEC_TIMER/10000)
+
+static inline void call_tick_tasks(void)
+{
+}
+#else
+extern volatile long current_tick;
+
+/* inline helper for implementing target interrupt handler */
+static inline void call_tick_tasks(void)
+{
+ extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
+ void (**p)(void) = tick_funcs;
+ void (*fn)(void);
+
+ current_tick++;
+
+ for(fn = *p; fn != NULL; fn = *(++p))
+ {
+ fn();
+ }
+}
+#endif
+
+/* implemented in target tree */
+extern void tick_start(unsigned int interval_in_ms) INIT_ATTR;
+
+extern int tick_add_task(void (*f)(void));
+extern int tick_remove_task(void (*f)(void));
+
+#endif /* TICK_H */
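A usage sketch of the tick-task API (names made up). Tick tasks run from the timer interrupt HZ times per second, so they must be short and must never block:

    #include "kernel.h"

    static volatile long poll_counter;          /* example state */

    static void example_tick_task(void)
    {
        poll_counter++;                          /* keep it minimal - IRQ context */
    }

    void example_poll_init(void)
    {
        tick_add_task(example_tick_task);        /* at most MAX_NUM_TICK_TASKS */
    }

    void example_poll_exit(void)
    {
        tick_remove_task(example_tick_task);
    }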
diff --git a/firmware/kernel/include/timeout.h b/firmware/kernel/include/timeout.h
new file mode 100644
index 0000000000..0b7c52ba4c
--- /dev/null
+++ b/firmware/kernel/include/timeout.h
@@ -0,0 +1,46 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#ifndef TIMEOUT_H
+#define TIMEOUT_H
+
+#include "config.h"
+
+struct timeout;
+
+/* timeout callback type
+ * tmo - pointer to struct timeout associated with event
+ * return next interval or <= 0 to stop event
+ */
+#define MAX_NUM_TIMEOUTS 8
+typedef int (* timeout_cb_type)(struct timeout *tmo);
+
+struct timeout
+{
+ timeout_cb_type callback; /* callback - returning <= 0 cancels */
+ intptr_t data; /* data passed to callback */
+ long expires; /* expiration tick */
+};
+
+void timeout_register(struct timeout *tmo, timeout_cb_type callback,
+ int ticks, intptr_t data);
+void timeout_cancel(struct timeout *tmo);
+
+#endif /* TIMEOUT_H */
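A usage sketch of the timeout API (names made up; requires INCLUDE_TIMEOUT_API). The callback runs from the tick, so it is subject to the same constraints as a tick task; returning a positive tick count re-arms it, returning 0 or less stops it:

    #include "kernel.h"

    static struct timeout blink_tmo;

    static int blink_callback(struct timeout *tmo)
    {
        /* 'data' is reused here as a remaining repeat count */
        if (tmo->data-- > 0)
            return HZ / 10;                     /* run again in 1/10 s */

        return 0;                               /* <= 0 cancels the timeout */
    }

    void start_blinking(void)
    {
        /* First callback after HZ/10 ticks, repeat count of 5 passed as data */
        timeout_register(&blink_tmo, blink_callback, HZ / 10, 5);
    }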
diff --git a/firmware/kernel/kernel-internal.h b/firmware/kernel/kernel-internal.h
new file mode 100644
index 0000000000..51c589ac8f
--- /dev/null
+++ b/firmware/kernel/kernel-internal.h
@@ -0,0 +1,49 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifndef KERNEL_INTERNAL_H
+#define KERNEL_INTERNAL_H
+
+#include "config.h"
+#include "debug.h"
+
+/* Make this nonzero to enable more elaborate checks on objects */
+#if defined(DEBUG) || defined(SIMULATOR)
+#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
+#else
+#define KERNEL_OBJECT_CHECKS 0
+#endif
+
+#if KERNEL_OBJECT_CHECKS
+#ifdef SIMULATOR
+#include <stdlib.h>
+#define KERNEL_ASSERT(exp, msg...) \
+ ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
+#else
+#define KERNEL_ASSERT(exp, msg...) \
+ ({ if (!({ exp; })) panicf(msg); })
+#endif
+#else
+#define KERNEL_ASSERT(exp, msg...) ({})
+#endif
+
+
+#endif /* KERNEL_INTERNAL_H */
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
new file mode 100644
index 0000000000..f1e4b3c722
--- /dev/null
+++ b/firmware/kernel/mutex.c
@@ -0,0 +1,152 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+
+/****************************************************************************
+ * Simple mutex functions ;)
+ ****************************************************************************/
+
+#include <stdbool.h>
+#include "config.h"
+#include "system.h"
+#include "mutex.h"
+#include "corelock.h"
+#include "thread-internal.h"
+#include "kernel-internal.h"
+
+static inline void __attribute__((always_inline))
+mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
+{
+#ifdef HAVE_PRIORITY_SCHEDULING
+ mtx->blocker.thread = td;
+#else
+ mtx->thread = td;
+#endif
+}
+
+static inline struct thread_entry * __attribute__((always_inline))
+mutex_get_thread(volatile struct mutex *mtx)
+{
+#ifdef HAVE_PRIORITY_SCHEDULING
+ return mtx->blocker.thread;
+#else
+ return mtx->thread;
+#endif
+}
+
+/* Initialize a mutex object - call before any use and do not call again once
+ * the object is available to other threads */
+void mutex_init(struct mutex *m)
+{
+ corelock_init(&m->cl);
+ m->queue = NULL;
+ m->recursion = 0;
+ mutex_set_thread(m, NULL);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ m->blocker.priority = PRIORITY_IDLE;
+ m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
+ m->no_preempt = false;
+#endif
+}
+
+/* Gain ownership of a mutex object or block until it becomes free */
+void mutex_lock(struct mutex *m)
+{
+ struct thread_entry *current = thread_self_entry();
+
+ if(current == mutex_get_thread(m))
+ {
+ /* current thread already owns this mutex */
+ m->recursion++;
+ return;
+ }
+
+ /* lock out other cores */
+ corelock_lock(&m->cl);
+
+ /* must read thread again inside cs (a multiprocessor concern really) */
+ if(LIKELY(mutex_get_thread(m) == NULL))
+ {
+ /* lock is open */
+ mutex_set_thread(m, current);
+ corelock_unlock(&m->cl);
+ return;
+ }
+
+ /* block until the lock is open... */
+ IF_COP( current->obj_cl = &m->cl; )
+ IF_PRIO( current->blocker = &m->blocker; )
+ current->bqp = &m->queue;
+
+ disable_irq();
+ block_thread(current);
+
+ corelock_unlock(&m->cl);
+
+ /* ...and turn control over to next thread */
+ switch_thread();
+}
+
+/* Release ownership of a mutex object - only owning thread must call this */
+void mutex_unlock(struct mutex *m)
+{
+ /* unlocker not being the owner is an unlocking violation */
+ KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
+ "mutex_unlock->wrong thread (%s != %s)\n",
+ mutex_get_thread(m)->name,
+ thread_self_entry()->name);
+
+ if(m->recursion > 0)
+ {
+ /* this thread still owns lock */
+ m->recursion--;
+ return;
+ }
+
+ /* lock out other cores */
+ corelock_lock(&m->cl);
+
+ /* transfer to next queued thread if any */
+ if(LIKELY(m->queue == NULL))
+ {
+ /* no threads waiting - open the lock */
+ mutex_set_thread(m, NULL);
+ corelock_unlock(&m->cl);
+ return;
+ }
+ else
+ {
+ const int oldlevel = disable_irq_save();
+ /* Transfer of the owning thread is handled in the wakeup protocol
+ * if priorities are enabled; otherwise just set it from the
+ * queue head. */
+ IFN_PRIO( mutex_set_thread(m, m->queue); )
+ IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
+ restore_irq(oldlevel);
+
+ corelock_unlock(&m->cl);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if((result & THREAD_SWITCH) && !m->no_preempt)
+ switch_thread();
+#endif
+ }
+}
diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c
new file mode 100644
index 0000000000..379e3f62c8
--- /dev/null
+++ b/firmware/kernel/queue.c
@@ -0,0 +1,786 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include <string.h>
+#include "config.h"
+#include "kernel.h"
+#include "system.h"
+#include "queue.h"
+#include "corelock.h"
+#include "kernel-internal.h"
+#include "general.h"
+#include "panic.h"
+
+/* This array holds all queues that are initiated. It is used for broadcast. */
+static struct
+{
+ struct event_queue *queues[MAX_NUM_QUEUES+1];
+#ifdef HAVE_CORELOCK_OBJECT
+ struct corelock cl;
+#endif
+} all_queues SHAREDBSS_ATTR;
+
+/****************************************************************************
+ * Queue handling stuff
+ ****************************************************************************/
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+/****************************************************************************
+ * Sender thread queue structure that aids implementation of priority
+ * inheritance on queues because the send list structure is the same as
+ * for all other kernel objects:
+ *
+ * Example state:
+ * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
+ * E3 was posted with queue_post
+ * 4 events remain enqueued (E1-E4)
+ *
+ * rd wr
+ * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
+ * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
+ * \/ \/ \/
+ * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
+ * q->send->curr_sender: /\
+ *
+ * Thread has E0 in its own struct queue_event.
+ *
+ ****************************************************************************/
+
+/* Puts the specified return value in the waiting thread's return value
+ * and wakes the thread.
+ *
+ * A sender should be confirmed to exist before calling, which makes it
+ * more efficient to reject the majority of cases that don't need this
+ * called.
+ */
+static void queue_release_sender(struct thread_entry * volatile * sender,
+ intptr_t retval)
+{
+ struct thread_entry *thread = *sender;
+
+ *sender = NULL; /* Clear slot. */
+#ifdef HAVE_WAKEUP_EXT_CB
+ thread->wakeup_ext_cb = NULL; /* Clear callback. */
+#endif
+ thread->retval = retval; /* Assign thread-local return value. */
+ *thread->bqp = thread; /* Move blocking queue head to thread since
+ wakeup_thread wakes the first thread in
+ the list. */
+ wakeup_thread(thread->bqp);
+}
+
+/* Releases any waiting threads that are queued with queue_send -
+ * reply with 0.
+ */
+static void queue_release_all_senders(struct event_queue *q)
+{
+ if(q->send)
+ {
+ unsigned int i;
+ for(i = q->read; i != q->write; i++)
+ {
+ struct thread_entry **spp =
+ &q->send->senders[i & QUEUE_LENGTH_MASK];
+
+ if(*spp)
+ {
+ queue_release_sender(spp, 0);
+ }
+ }
+ }
+}
+
+/* Callback to do extra forced removal steps from sender list in addition
+ * to the normal blocking queue removal and priority dis-inherit */
+static void queue_remove_sender_thread_cb(struct thread_entry *thread)
+{
+ *((struct thread_entry **)thread->retval) = NULL;
+#ifdef HAVE_WAKEUP_EXT_CB
+ thread->wakeup_ext_cb = NULL;
+#endif
+ thread->retval = 0;
+}
+
+/* Enables queue_send on the specified queue - caller allocates the extra
+ * data structure. Only queues which are taken to be owned by a thread should
+ * enable this; an official owner is not compulsory, but one must be
+ * specified for priority inheritance to operate.
+ *
+ * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
+ * messages results in an undefined order of message replies or possible default
+ * replies if two or more waits happen before a reply is done.
+ */
+void queue_enable_queue_send(struct event_queue *q,
+ struct queue_sender_list *send,
+ unsigned int owner_id)
+{
+ int oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ if(send != NULL && q->send == NULL)
+ {
+ memset(send, 0, sizeof(*send));
+#ifdef HAVE_PRIORITY_SCHEDULING
+ send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
+ send->blocker.priority = PRIORITY_IDLE;
+ if(owner_id != 0)
+ {
+ send->blocker.thread = thread_id_entry(owner_id);
+ q->blocker_p = &send->blocker;
+ }
+#endif
+ q->send = send;
+ }
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+
+ (void)owner_id;
+}
+
+/* Unblock a blocked thread at a given event index */
+static inline void queue_do_unblock_sender(struct queue_sender_list *send,
+ unsigned int i)
+{
+ if(send)
+ {
+ struct thread_entry **spp = &send->senders[i];
+
+ if(UNLIKELY(*spp))
+ {
+ queue_release_sender(spp, 0);
+ }
+ }
+}
+
+/* Perform the auto-reply sequence */
+static inline void queue_do_auto_reply(struct queue_sender_list *send)
+{
+ if(send && send->curr_sender)
+ {
+ /* auto-reply */
+ queue_release_sender(&send->curr_sender, 0);
+ }
+}
+
+/* Moves the waiting thread's reference from the senders array to
+ * curr_sender, which represents the thread waiting for a response to the
+ * last message removed from the queue. This also protects the thread from
+ * being bumped due to overflow, which would not be a valid action since its
+ * message _is_ being processed at this point. */
+static inline void queue_do_fetch_sender(struct queue_sender_list *send,
+ unsigned int rd)
+{
+ if(send)
+ {
+ struct thread_entry **spp = &send->senders[rd];
+
+ if(*spp)
+ {
+ /* Move thread reference from array to the next thread
+ that queue_reply will release */
+ send->curr_sender = *spp;
+ (*spp)->retval = (intptr_t)spp;
+ *spp = NULL;
+ }
+ /* else message was posted asynchronously with queue_post */
+ }
+}
+#else
+/* Empty macros for when synchronous sending is not built in */
+#define queue_release_all_senders(q)
+#define queue_do_unblock_sender(send, i)
+#define queue_do_auto_reply(send)
+#define queue_do_fetch_sender(send, rd)
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+/* Queue must not be available for use during this call */
+void queue_init(struct event_queue *q, bool register_queue)
+{
+ int oldlevel = disable_irq_save();
+
+ if(register_queue)
+ {
+ corelock_lock(&all_queues.cl);
+ }
+
+ corelock_init(&q->cl);
+ q->queue = NULL;
+ /* Whatever garbage is in 'write' is irrelevant because of the masking
+ * design - any other functions that empty the queue do this as well, so
+ * that queue_count and queue_empty return sane values in the case of a
+ * concurrent change without locking inside them. */
+ q->read = q->write;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ q->send = NULL; /* No message sending by default */
+ IF_PRIO( q->blocker_p = NULL; )
+#endif
+
+ if(register_queue)
+ {
+ void **queues = (void **)all_queues.queues;
+ void **p = find_array_ptr(queues, q);
+
+ if(p - queues >= MAX_NUM_QUEUES)
+ {
+ panicf("queue_init->out of queues");
+ }
+
+ if(*p == NULL)
+ {
+ /* Add it to the all_queues array */
+ *p = q;
+ }
+
+ /* Unlock even if the queue was already registered */
+ corelock_unlock(&all_queues.cl);
+ }
+
+ restore_irq(oldlevel);
+}
+
+/* Queue must not be available for use during this call */
+void queue_delete(struct event_queue *q)
+{
+ int oldlevel = disable_irq_save();
+ corelock_lock(&all_queues.cl);
+ corelock_lock(&q->cl);
+
+ /* Remove the queue if registered */
+ remove_array_ptr((void **)all_queues.queues, q);
+
+ corelock_unlock(&all_queues.cl);
+
+ /* Release thread(s) waiting on queue head */
+ thread_queue_wake(&q->queue);
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ if(q->send)
+ {
+ /* Release threads waiting for replies */
+ queue_release_all_senders(q);
+
+ /* Reply to any dequeued message waiting for one */
+ queue_do_auto_reply(q->send);
+
+ q->send = NULL;
+ IF_PRIO( q->blocker_p = NULL; )
+ }
+#endif
+
+ q->read = q->write;
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
+
+/* NOTE: multiple threads waiting on a queue head cannot have a well-
+ defined release order if timeouts are used. If multiple threads must
+ access the queue head, use a dispatcher or queue_wait only. */
+void queue_wait(struct event_queue *q, struct queue_event *ev)
+{
+ int oldlevel;
+ unsigned int rd;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
+ QUEUE_GET_THREAD(q) == thread_self_entry(),
+ "queue_wait->wrong thread\n");
+#endif
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
+ queue_do_auto_reply(q->send);
+#endif
+
+ while(1)
+ {
+ struct thread_entry *current;
+
+ rd = q->read;
+ if (rd != q->write) /* A waking message could disappear */
+ break;
+
+ current = thread_self_entry();
+
+ IF_COP( current->obj_cl = &q->cl; )
+ current->bqp = &q->queue;
+
+ block_thread(current);
+
+ corelock_unlock(&q->cl);
+ switch_thread();
+
+ disable_irq();
+ corelock_lock(&q->cl);
+ }
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ if(ev)
+#endif
+ {
+ q->read = rd + 1;
+ rd &= QUEUE_LENGTH_MASK;
+ *ev = q->events[rd];
+
+ /* Get data for a waiting thread if one */
+ queue_do_fetch_sender(q->send, rd);
+ }
+ /* else just waiting on non-empty */
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
+
+void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
+{
+ int oldlevel;
+ unsigned int rd, wr;
+
+ /* this function works only with a positive number (or zero) of ticks */
+ if (ticks == TIMEOUT_BLOCK)
+ {
+ queue_wait(q, ev);
+ return;
+ }
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
+ QUEUE_GET_THREAD(q) == thread_self_entry(),
+ "queue_wait_w_tmo->wrong thread\n");
+#endif
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
+ queue_do_auto_reply(q->send);
+#endif
+
+ rd = q->read;
+ wr = q->write;
+ if (rd == wr && ticks > 0)
+ {
+ struct thread_entry *current = thread_self_entry();
+
+ IF_COP( current->obj_cl = &q->cl; )
+ current->bqp = &q->queue;
+
+ block_thread_w_tmo(current, ticks);
+ corelock_unlock(&q->cl);
+
+ switch_thread();
+
+ disable_irq();
+ corelock_lock(&q->cl);
+
+ rd = q->read;
+ wr = q->write;
+ }
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ if(ev)
+#endif
+ {
+ /* no worry about a removed message here - status is checked inside
+ locks - perhaps verify if timeout or false alarm */
+ if (rd != wr)
+ {
+ q->read = rd + 1;
+ rd &= QUEUE_LENGTH_MASK;
+ *ev = q->events[rd];
+ /* Get data for a waiting thread if one */
+ queue_do_fetch_sender(q->send, rd);
+ }
+ else
+ {
+ ev->id = SYS_TIMEOUT;
+ }
+ }
+ /* else just waiting on non-empty */
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
+
+void queue_post(struct event_queue *q, long id, intptr_t data)
+{
+ int oldlevel;
+ unsigned int wr;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ wr = q->write++ & QUEUE_LENGTH_MASK;
+
+ KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
+ "queue_post ovf q=%08lX", (long)q);
+
+ q->events[wr].id = id;
+ q->events[wr].data = data;
+
+ /* overflow protect - unblock any thread waiting at this index */
+ queue_do_unblock_sender(q->send, wr);
+
+ /* Wakeup a waiting thread if any */
+ wakeup_thread(&q->queue);
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+/* IRQ handlers are not allowed to use this function - we only aim to
+ protect queue integrity by turning interrupts off. */
+intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
+{
+ int oldlevel;
+ unsigned int wr;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ wr = q->write++ & QUEUE_LENGTH_MASK;
+
+ KERNEL_ASSERT((q->write - q->read) <= QUEUE_LENGTH,
+ "queue_send ovf q=%08lX", (long)q);
+
+ q->events[wr].id = id;
+ q->events[wr].data = data;
+
+ if(LIKELY(q->send))
+ {
+ struct queue_sender_list *send = q->send;
+ struct thread_entry **spp = &send->senders[wr];
+ struct thread_entry *current = thread_self_entry();
+
+ if(UNLIKELY(*spp))
+ {
+ /* overflow protect - unblock any thread waiting at this index */
+ queue_release_sender(spp, 0);
+ }
+
+ /* Wakeup a waiting thread if any */
+ wakeup_thread(&q->queue);
+
+ /* Save thread in slot, add to list and wait for reply */
+ *spp = current;
+ IF_COP( current->obj_cl = &q->cl; )
+ IF_PRIO( current->blocker = q->blocker_p; )
+#ifdef HAVE_WAKEUP_EXT_CB
+ current->wakeup_ext_cb = queue_remove_sender_thread_cb;
+#endif
+ current->retval = (intptr_t)spp;
+ current->bqp = &send->list;
+
+ block_thread(current);
+
+ corelock_unlock(&q->cl);
+ switch_thread();
+
+ return current->retval;
+ }
+
+ /* Function as queue_post if sending is not enabled */
+ wakeup_thread(&q->queue);
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+
+ return 0;
+}
+
+#if 0 /* not used now but probably will be later */
+/* Query if the last message dequeued was added by queue_send or not */
+bool queue_in_queue_send(struct event_queue *q)
+{
+ bool in_send;
+
+#if NUM_CORES > 1
+ int oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+#endif
+
+ in_send = q->send && q->send->curr_sender;
+
+#if NUM_CORES > 1
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+#endif
+
+ return in_send;
+}
+#endif
+
+/* Replies with retval to the last dequeued message sent with queue_send */
+void queue_reply(struct event_queue *q, intptr_t retval)
+{
+ if(q->send && q->send->curr_sender)
+ {
+ struct queue_sender_list *sender;
+
+ int oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ sender = q->send;
+
+ /* Double-check locking */
+ if(LIKELY(sender && sender->curr_sender))
+ queue_release_sender(&sender->curr_sender, retval);
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+ }
+}
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+/* Scan the event queue from head to tail, returning any event from the
+ filter list that was found, optionally removing the event. If an
+ event is returned, synchronous events are handled in the same manner as
+ with queue_wait(_w_tmo); if discarded, then as queue_clear.
+ If filters are NULL, any event matches. If filters exist, the default
+ is to search the full queue depth.
+ Earlier filters take precedence.
+
+ Return true if an event was found, false otherwise. */
+bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
+ unsigned int flags, const long (*filters)[2])
+{
+ bool have_msg;
+ unsigned int rd, wr;
+ int oldlevel;
+
+ if(LIKELY(q->read == q->write))
+ return false; /* Empty: do nothing further */
+
+ have_msg = false;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ /* Starting at the head, find first match */
+ for(rd = q->read, wr = q->write; rd != wr; rd++)
+ {
+ struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];
+
+ if(filters)
+ {
+ /* Have filters - find the first thing that passes */
+ const long (* f)[2] = filters;
+ const long (* const f_last)[2] =
+ &filters[flags & QPEEK_FILTER_COUNT_MASK];
+ long id = e->id;
+
+ do
+ {
+ if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
+ goto passed_filter;
+ }
+ while(++f <= f_last);
+
+ if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
+ continue; /* No match; test next event */
+ else
+ break; /* Only check the head */
+ }
+ /* else - anything passes */
+
+ passed_filter:
+
+ /* Found a matching event */
+ have_msg = true;
+
+ if(ev)
+ *ev = *e; /* Caller wants the event */
+
+ if(flags & QPEEK_REMOVE_EVENTS)
+ {
+ /* Do event removal */
+ unsigned int r = q->read;
+ q->read = r + 1; /* Advance head */
+
+ if(ev)
+ {
+ /* Auto-reply */
+ queue_do_auto_reply(q->send);
+ /* Get the thread waiting for reply, if any */
+ queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
+ }
+ else
+ {
+ /* Release any thread waiting on this message */
+ queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
+ }
+
+ /* Slide messages forward into the gap if not at the head */
+ while(rd != r)
+ {
+ unsigned int dst = rd & QUEUE_LENGTH_MASK;
+ unsigned int src = --rd & QUEUE_LENGTH_MASK;
+
+ q->events[dst] = q->events[src];
+ /* Keep sender wait list in sync */
+ if(q->send)
+ q->send->senders[dst] = q->send->senders[src];
+ }
+ }
+
+ break;
+ }
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+
+ return have_msg;
+}
+
+bool queue_peek(struct event_queue *q, struct queue_event *ev)
+{
+ return queue_peek_ex(q, ev, 0, NULL);
+}
+
+void queue_remove_from_head(struct event_queue *q, long id)
+{
+ const long f[2] = { id, id };
+ while (queue_peek_ex(q, NULL,
+ QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
+}
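+
+/* Usage sketch (illustrative; EV_RANGE_FIRST/EV_RANGE_LAST and handle_ev()
+ * are hypothetical): peek for, and remove, the first queued event whose id
+ * falls in one inclusive range. The low bits of 'flags'
+ * (QPEEK_FILTER_COUNT_MASK) hold the index of the last filter pair, so a
+ * single pair passes 0 there:
+ *
+ *   const long f[1][2] = { { EV_RANGE_FIRST, EV_RANGE_LAST } };
+ *   struct queue_event ev;
+ *   if(queue_peek_ex(&q, &ev, QPEEK_REMOVE_EVENTS, f))
+ *       handle_ev(&ev);
+ */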
+#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */
+/* The more powerful routines aren't required */
+bool queue_peek(struct event_queue *q, struct queue_event *ev)
+{
+ unsigned int rd;
+
+ if(q->read == q->write)
+ return false;
+
+ bool have_msg = false;
+
+ int oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ rd = q->read;
+ if(rd != q->write)
+ {
+ *ev = q->events[rd & QUEUE_LENGTH_MASK];
+ have_msg = true;
+ }
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+
+ return have_msg;
+}
+
+void queue_remove_from_head(struct event_queue *q, long id)
+{
+ int oldlevel;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ while(q->read != q->write)
+ {
+ unsigned int rd = q->read & QUEUE_LENGTH_MASK;
+
+ if(q->events[rd].id != id)
+ {
+ break;
+ }
+
+ /* Release any thread waiting on this message */
+ queue_do_unblock_sender(q->send, rd);
+
+ q->read++;
+ }
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+/* Poll queue to see if a message exists - be careful using the result if
+ * queue_remove_from_head is called while messages are being posted - possibly
+ * use queue_wait_w_tmo(&q, 0) in that case, or else a removed message that
+ * unsignals the queue may cause an unwanted block */
+bool queue_empty(const struct event_queue* q)
+{
+ return ( q->read == q->write );
+}
+
+void queue_clear(struct event_queue* q)
+{
+ int oldlevel;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ /* Release all threads waiting in the queue for a reply -
+ dequeued sent message will be handled by owning thread */
+ queue_release_all_senders(q);
+
+ q->read = q->write;
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
+
+/**
+ * The number of events waiting in the queue.
+ *
+ * @param q Pointer to the event_queue
+ * @return number of events in the queue
+ */
+int queue_count(const struct event_queue *q)
+{
+ return q->write - q->read;
+}
+
+int queue_broadcast(long id, intptr_t data)
+{
+ struct event_queue **p = all_queues.queues;
+ struct event_queue *q;
+
+#if NUM_CORES > 1
+ int oldlevel = disable_irq_save();
+ corelock_lock(&all_queues.cl);
+#endif
+
+ for(q = *p; q != NULL; q = *(++p))
+ {
+ queue_post(q, id, data);
+ }
+
+#if NUM_CORES > 1
+ corelock_unlock(&all_queues.cl);
+ restore_irq(oldlevel);
+#endif
+
+ return p - all_queues.queues;
+}
+
+void init_queues(void)
+{
+ corelock_init(&all_queues.cl);
+}
diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c
new file mode 100644
index 0000000000..f9ff0ad987
--- /dev/null
+++ b/firmware/kernel/semaphore.c
@@ -0,0 +1,142 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+
+#include <stdbool.h>
+#include "config.h"
+#include "kernel.h"
+#include "semaphore.h"
+#include "kernel-internal.h"
+#include "thread-internal.h"
+
+/****************************************************************************
+ * Simple semaphore functions ;)
+ ****************************************************************************/
+/* Initialize the semaphore object.
+ * max = maximum up count the semaphore may assume (max >= 1)
+ * start = initial count of semaphore (0 <= count <= max) */
+void semaphore_init(struct semaphore *s, int max, int start)
+{
+ KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
+ "semaphore_init->inv arg\n");
+ s->queue = NULL;
+ s->max = max;
+ s->count = start;
+ corelock_init(&s->cl);
+}
+
+/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
+ * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
+ * safely be used in an ISR. */
+int semaphore_wait(struct semaphore *s, int timeout)
+{
+ int ret;
+ int oldlevel;
+ int count;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&s->cl);
+
+ count = s->count;
+
+ if(LIKELY(count > 0))
+ {
+ /* count is not zero; down it */
+ s->count = count - 1;
+ ret = OBJ_WAIT_SUCCEEDED;
+ }
+ else if(timeout == 0)
+ {
+ /* just polling it */
+ ret = OBJ_WAIT_TIMEDOUT;
+ }
+ else
+ {
+ /* count is zero - block until it is upped... */
+ struct thread_entry * current = thread_self_entry();
+ IF_COP( current->obj_cl = &s->cl; )
+ current->bqp = &s->queue;
+ /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
+ * explicit in semaphore_release */
+ current->retval = OBJ_WAIT_TIMEDOUT;
+
+ if(timeout > 0)
+ block_thread_w_tmo(current, timeout); /* ...or timed out... */
+ else
+ block_thread(current); /* -timeout = infinite */
+
+ corelock_unlock(&s->cl);
+
+ /* ...and turn control over to next thread */
+ switch_thread();
+
+ return current->retval;
+ }
+
+ corelock_unlock(&s->cl);
+ restore_irq(oldlevel);
+
+ return ret;
+}
+
+/* Up the semaphore's count and release any thread waiting at the head of the
+ * queue. The count is saturated to the value of the 'max' parameter specified
+ * in 'semaphore_init'. */
+void semaphore_release(struct semaphore *s)
+{
+ unsigned int result = THREAD_NONE;
+ int oldlevel;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&s->cl);
+
+ if(LIKELY(s->queue != NULL))
+ {
+ /* a thread was queued - wake it up and keep count at 0 */
+ KERNEL_ASSERT(s->count == 0,
+ "semaphore_release->threads queued but count=%d!\n", s->count);
+ s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
+ result = wakeup_thread(&s->queue);
+ }
+ else
+ {
+ int count = s->count;
+ if(count < s->max)
+ {
+ /* nothing waiting - up it */
+ s->count = count + 1;
+ }
+ }
+
+ corelock_unlock(&s->cl);
+ restore_irq(oldlevel);
+
+#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
+ /* No thread switch if not thread context */
+ if((result & THREAD_SWITCH) && is_thread_context())
+ switch_thread();
+#endif
+ (void)result;
+}
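+
+/* Usage sketch (illustrative; dma_done, dma_init(), dma_complete_isr() and
+ * dma_wait() are hypothetical): a binary semaphore used to signal a worker
+ * thread that a transfer completed. The release side skips the thread switch
+ * when not in thread context, so posting from an interrupt handler works as
+ * shown.
+ *
+ *   static struct semaphore dma_done;
+ *
+ *   void dma_init(void)
+ *   {
+ *       semaphore_init(&dma_done, 1, 0);  // max count 1, initially 0
+ *   }
+ *
+ *   void dma_complete_isr(void)
+ *   {
+ *       semaphore_release(&dma_done);     // wake a waiter if any, else up the count
+ *   }
+ *
+ *   int dma_wait(void)
+ *   {
+ *       // negative timeout = block indefinitely (see semaphore_wait above)
+ *       return semaphore_wait(&dma_done, -1) == OBJ_WAIT_SUCCEEDED;
+ *   }
+ */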
diff --git a/firmware/kernel/thread-internal.h b/firmware/kernel/thread-internal.h
new file mode 100644
index 0000000000..c2acdfbaa9
--- /dev/null
+++ b/firmware/kernel/thread-internal.h
@@ -0,0 +1,357 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifndef THREAD_H
+#define THREAD_H
+
+#include "config.h"
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include "gcc_extensions.h"
+
+/*
+ * We need more stack when running under a host OS, perhaps because the
+ * C library functions are more expensive there.
+ *
+ * The simulator (probably) doesn't track stack usage anyway, but still... */
+
+#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
+struct regs
+{
+ void *t; /* OS thread */
+ void *told; /* Last thread in slot (explained in thread-sdl.c) */
+ void *s; /* Semaphore for blocking and wakeup */
+ void (*start)(void); /* Start function */
+};
+
+#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
+#else
+#include "asm/thread.h"
+#endif /* HAVE_SDL_THREADS || __PCTOOL__ */
+
+#ifdef CPU_PP
+#ifdef HAVE_CORELOCK_OBJECT
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+ volatile unsigned char myl[NUM_CORES];
+ volatile unsigned char turn;
+} __attribute__((packed));
+
+/* Too big to inline everywhere */
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#endif /* HAVE_CORELOCK_OBJECT */
+#endif /* CPU_PP */
+
+/* NOTE: The use of the word "queue" may also refer to a linked list of
+ threads being maintained that are normally dealt with in FIFO order
+ and not necessarily kernel event_queue */
+enum
+{
+ /* States without a timeout must be first */
+ STATE_KILLED = 0, /* Thread is killed (default) */
+ STATE_RUNNING, /* Thread is currently running */
+ STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
+ /* These states involve adding the thread to the tmo list */
+ STATE_SLEEPING, /* Thread is sleeping with a timeout */
+ STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
+ /* Miscellaneous states */
+ STATE_FROZEN, /* Thread is suspended and will not run until
+ thread_thaw is called with its ID */
+ THREAD_NUM_STATES,
+ TIMEOUT_STATE_FIRST = STATE_SLEEPING,
+};
+
+#if NUM_CORES > 1
+/* Pointer value for name field to indicate thread is being killed. Using
+ * an alternate STATE_* won't work since that would interfere with operation
+ * while the thread is still running. */
+#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
+#endif
+
+/* Link information for lists thread is in */
+struct thread_entry; /* forward */
+struct thread_list
+{
+ struct thread_entry *prev; /* Previous thread in a list */
+ struct thread_entry *next; /* Next thread in a list */
+};
+
+#ifndef HAVE_CORELOCK_OBJECT
+/* No atomic corelock op needed or just none defined */
+#define corelock_init(cl)
+#define corelock_lock(cl)
+#define corelock_try_lock(cl)
+#define corelock_unlock(cl)
+#endif /* HAVE_CORELOCK_OBJECT */
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+struct blocker
+{
+ struct thread_entry * volatile thread; /* thread blocking other threads
+ (aka. object owner) */
+ int priority; /* highest priority waiter */
+ struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+};
+
+/* Choices of wakeup protocol */
+
+/* For transfer of object ownership by one thread to another thread by
+ * the owning thread itself (mutexes) */
+struct thread_entry *
+ wakeup_priority_protocol_transfer(struct thread_entry *thread);
+
+/* For release by owner where ownership doesn't change - other threads,
+ * interrupts, timeouts, etc. (mutex timeout, queues) */
+struct thread_entry *
+ wakeup_priority_protocol_release(struct thread_entry *thread);
+
+
+struct priority_distribution
+{
+ uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
+ uint32_t mask; /* Bitmask of hist entries that are not zero */
+};
+
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+/* Information kept in each thread slot
+ * members are arranged according to size - largest first - in order
+ * to ensure both alignment and packing at the same time.
+ */
+struct thread_entry
+{
+ struct regs context; /* Register context at switch -
+ _must_ be first member */
+ uintptr_t *stack; /* Pointer to top of stack */
+ const char *name; /* Thread name */
+ long tmo_tick; /* Tick when thread should be woken from
+ timeout -
+ states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
+ struct thread_list l; /* Links for blocked/waking/running -
+ circular linkage in both directions */
+ struct thread_list tmo; /* Links for timeout list -
+ Circular in reverse direction, NULL-terminated in
+ forward direction -
+ states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
+ struct thread_entry **bqp; /* Pointer to list variable in kernel
+ object where thread is blocked - used
+ for implicit unblock and explicit wake
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+#ifdef HAVE_CORELOCK_OBJECT
+ struct corelock *obj_cl; /* Object corelock where thread is blocked -
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+ struct corelock waiter_cl; /* Corelock for thread_wait */
+ struct corelock slot_cl; /* Corelock to lock thread slot */
+ unsigned char core; /* The core to which thread belongs */
+#endif
+ struct thread_entry *queue; /* List of threads waiting for thread to be
+ removed */
+#ifdef HAVE_WAKEUP_EXT_CB
+ void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
+ performs special steps needed when being
+ forced off of an object's wait queue that
+ go beyond the standard wait queue removal
+ and priority disinheritance */
+ /* Only enabled when using queue_send for now */
+#endif
+#if defined(HAVE_SEMAPHORE_OBJECTS) || \
+ defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
+ NUM_CORES > 1
+ volatile intptr_t retval; /* Return value from a blocked operation/
+ misc. use */
+#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Priority summary of owned objects that support inheritance */
+ struct blocker *blocker; /* Pointer to blocker when this thread is blocked
+ on an object that supports PIP -
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+ struct priority_distribution pdist; /* Priority summary of owned objects
+ that have blocked threads and thread's own
+ base priority */
+ int skip_count; /* Number of times skipped if higher priority
+ thread was running */
+ unsigned char base_priority; /* Base priority (set explicitly during
+ creation or thread_set_priority) */
+ unsigned char priority; /* Scheduled priority (higher of base or
+ all threads blocked by this one) */
+#endif
+ uint16_t id; /* Current slot id */
+ unsigned short stack_size; /* Size of stack in bytes */
+ unsigned char state; /* Thread slot state (STATE_*) */
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ unsigned char cpu_boost; /* CPU frequency boost flag */
+#endif
+#ifdef HAVE_IO_PRIORITY
+ unsigned char io_priority;
+#endif
+};
+
+/*** Macros for internal use ***/
+/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
+#define THREAD_ID_VERSION_SHIFT 8
+#define THREAD_ID_VERSION_MASK 0xff00
+#define THREAD_ID_SLOT_MASK 0x00ff
+#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
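+
+/* Worked example (illustrative): THREAD_ID_INIT(3) == 0x0103, i.e. slot 3
+ * with version 1. The version byte is meant to be bumped whenever the slot
+ * is reused, so a stale ID held by a caller (say 0x0103 when the slot is now
+ * at 0x0203) no longer matches and operations on a dead thread can be
+ * rejected. */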
+
+#ifdef HAVE_CORELOCK_OBJECT
+/* Operations to be performed just before stopping a thread and starting
+ a new one if specified before calling switch_thread */
+enum
+{
+ TBOP_CLEAR = 0, /* No operation to do */
+ TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
+ TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
+};
+
+struct thread_blk_ops
+{
+ struct corelock *cl_p; /* pointer to corelock */
+ unsigned char flags; /* TBOP_* flags */
+};
+#endif /* HAVE_CORELOCK_OBJECT */
+
+/* Information kept for each core
+ * Members are arranged for the same reason as in thread_entry
+ */
+struct core_entry
+{
+ /* "Active" lists - core is constantly active on these and are never
+ locked and interrupts do not access them */
+ struct thread_entry *running; /* threads that are running (RTR) */
+ struct thread_entry *timeout; /* threads that are on a timeout before
+ running again */
+ struct thread_entry *block_task; /* Task going off running list */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct priority_distribution rtr; /* Summary of running and ready-to-run
+ threads */
+#endif
+ long next_tmo_check; /* soonest time to check tmo threads */
+#ifdef HAVE_CORELOCK_OBJECT
+ struct thread_blk_ops blk_ops; /* operations to perform when
+ blocking a thread */
+ struct corelock rtr_cl; /* Lock for rtr list */
+#endif /* HAVE_CORELOCK_OBJECT */
+};
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define IF_PRIO(...) __VA_ARGS__
+#define IFN_PRIO(...)
+#else
+#define IF_PRIO(...)
+#define IFN_PRIO(...) __VA_ARGS__
+#endif
+
+void core_idle(void);
+void core_wake(IF_COP_VOID(unsigned int core));
+
+/* Initialize the scheduler */
+void init_threads(void) INIT_ATTR;
+
+/* Allocate a thread in the scheduler */
+#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
+unsigned int create_thread(void (*function)(void),
+ void* stack, size_t stack_size,
+ unsigned flags, const char *name
+ IF_PRIO(, int priority)
+ IF_COP(, unsigned int core));
+
+/* Set and clear the CPU frequency boost flag for the calling thread */
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+void trigger_cpu_boost(void);
+void cancel_cpu_boost(void);
+#else
+#define trigger_cpu_boost() do { } while(0)
+#define cancel_cpu_boost() do { } while(0)
+#endif
+/* Return thread entry from id */
+struct thread_entry *thread_id_entry(unsigned int thread_id);
+/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
+ * Has no effect on a thread not frozen. */
+void thread_thaw(unsigned int thread_id);
+/* Wait for a thread to exit */
+void thread_wait(unsigned int thread_id);
+/* Exit the current thread */
+void thread_exit(void) NORETURN_ATTR;
+#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
+#define ALLOW_REMOVE_THREAD
+/* Remove a thread from the scheduler */
+void remove_thread(unsigned int thread_id);
+#endif
+
+/* Switch to next runnable thread */
+void switch_thread(void);
+/* Blocks a thread for at least the specified number of ticks (0 = wait until
+ * next tick) */
+void sleep_thread(int ticks);
+/* Indefinitely blocks the current thread on a thread queue */
+void block_thread(struct thread_entry *current);
+/* Blocks the current thread on a thread queue until explicitly woken or
+ * the timeout is reached */
+void block_thread_w_tmo(struct thread_entry *current, int timeout);
+
+/* Return bit flags for thread wakeup */
+#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
+#define THREAD_OK 0x1 /* A thread was woken up */
+#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
+ higher priority than current were woken) */
+
+/* A convenience function for waking an entire queue of threads. */
+unsigned int thread_queue_wake(struct thread_entry **list);
+
+/* Wakeup a thread at the head of a list */
+unsigned int wakeup_thread(struct thread_entry **list);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+int thread_set_priority(unsigned int thread_id, int priority);
+int thread_get_priority(unsigned int thread_id);
+#endif /* HAVE_PRIORITY_SCHEDULING */
+#ifdef HAVE_IO_PRIORITY
+void thread_set_io_priority(unsigned int thread_id, int io_priority);
+int thread_get_io_priority(unsigned int thread_id);
+#endif /* HAVE_IO_PRIORITY */
+#if NUM_CORES > 1
+unsigned int switch_core(unsigned int new_core);
+#endif
+
+/* Return the id of the calling thread. */
+unsigned int thread_self(void);
+
+/* Return the thread_entry for the calling thread.
+ * INTERNAL: Intended for use by kernel and not for programs. */
+struct thread_entry* thread_self_entry(void);
+
+/* Debugging info - only! */
+int thread_stack_usage(const struct thread_entry *thread);
+#if NUM_CORES > 1
+int idle_stack_usage(unsigned int core);
+#endif
+void thread_get_name(char *buffer, int size,
+ struct thread_entry *thread);
+#ifdef RB_PROFILE
+void profile_thread(void);
+#endif
+
+#endif /* THREAD_H */
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
new file mode 100644
index 0000000000..43ff584a68
--- /dev/null
+++ b/firmware/kernel/thread.c
@@ -0,0 +1,2442 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Ulf Ralberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+
+#ifdef HAVE_SIGALTSTACK_THREADS
+/*
+ * The sp check in glibc __longjmp_chk() will cause
+ * a fatal error when switching threads via longjmp().
+ */
+#undef _FORTIFY_SOURCE
+#endif
+
+#include <stdbool.h>
+#include <stdio.h>
+#include "thread.h"
+#include "panic.h"
+#include "system.h"
+#include "kernel.h"
+#include "cpu.h"
+#include "string.h"
+#ifdef RB_PROFILE
+#include <profile.h>
+#endif
+#include "core_alloc.h"
+#include "gcc_extensions.h"
+#include "corelock.h"
+
+/****************************************************************************
+ * ATTENTION!! *
+ * See notes below on implementing processor-specific portions! *
+ ***************************************************************************/
+
+/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
+#ifdef DEBUG
+#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
+#else
+#define THREAD_EXTRA_CHECKS 0
+#endif
+
+/**
+ * General locking order to guarantee progress. The order must be observed,
+ * but not all stages are necessarily required; going straight from 1) to 3)
+ * is perfectly legal.
+ *
+ * 1) IRQ
+ * This is first because of the likelihood of having an interrupt occur that
+ * also accesses one of the objects farther down the list. Any non-blocking
+ * synchronization done may already have a lock on something during normal
+ * execution and if an interrupt handler running on the same processor as
+ * the one that has the resource locked were to attempt to access the
+ * resource, the interrupt handler would wait forever waiting for an unlock
+ * that will never happen. There is no danger if the interrupt occurs on
+ * a different processor because the one that has the lock will eventually
+ * unlock and the other processor's handler may proceed at that time. Not
+ * necessary when the resource in question is definitely not available to
+ * interrupt handlers.
+ *
+ * 2) Kernel Object
+ * Stage 1) may be needed beforehand if the kernel object allows dual-use
+ * such as event queues. The kernel object must have a scheme to protect
+ * itself from access by another processor and is responsible for serializing
+ * the calls to block_thread(_w_tmo) and wakeup_thread both to themselves and
+ * to each other. Objects' queues are also protected here.
+ *
+ * 3) Thread Slot
+ * This locks access to the thread's slot such that its state cannot be
+ * altered by another processor when a state change is in progress such as
+ * when it is in the process of going on a blocked list. An attempt to wake
+ * a thread while it is still blocking will likely desync its state with
+ * the other resources used for that state.
+ *
+ * 4) Core Lists
+ * These lists are specific to a particular processor core and are accessible
+ * by all processor cores and interrupt handlers. The running (rtr) list is
+ * the prime example where a thread may be added by any means.
+ */
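+
+/* Condensed sketch of the order above as it appears in a blocking call such
+ * as queue_send() (error handling omitted):
+ *
+ *   oldlevel = disable_irq_save();    // 1) IRQ
+ *   corelock_lock(&q->cl);            // 2) kernel object
+ *   ...
+ *   block_thread(current);            // 3) thread slot and 4) core RTR
+ *                                     //    list, both taken internally
+ *   corelock_unlock(&q->cl);
+ *   switch_thread();                  // re-enables interrupts on switch
+ */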
+
+/*---------------------------------------------------------------------------
+ * Processor specific: core_sleep/core_wake/misc. notes
+ *
+ * ARM notes:
+ * FIQ is not dealt with by the scheduler code and is simply restored if it
+ * must be masked for some reason - because threading modifies a register
+ * that FIQ may also modify and there's no way to accomplish that atomically.
+ * s3c2440 is such a case.
+ *
+ * Audio interrupts are generally treated at a higher priority than others,
+ * so use of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
+ * is not in general safe. Special cases may be constructed on a per-
+ * source basis and blocking operations are not available.
+ *
+ * The core_sleep procedure to implement for any CPU should ensure that an
+ * asynchronous wakeup never results in a wait until the next tick (up to
+ * 10000uS!). It may require assembly and careful instruction ordering.
+ *
+ * 1) On multicore, stay awake if directed to do so by another. If so, goto
+ * step 4.
+ * 2) If processor requires, atomically reenable interrupts and perform step
+ * 3.
+ * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
+ * on Coldfire) goto step 5.
+ * 4) Enable interrupts.
+ * 5) Exit procedure.
+ *
+ * core_wake and multiprocessor notes for sleep/wake coordination:
+ * If possible, to wake up another processor, the forcing of an interrupt on
+ * the woken core by the waker core is the easiest way to ensure a non-
+ * delayed wake and immediate execution of any woken threads. If that isn't
+ * available then some careful non-blocking synchronization is needed (as on
+ * PP targets at the moment).
+ *---------------------------------------------------------------------------
+ */
+
+/* Cast to the machine pointer size, which could be smaller or larger
+ * than 32 bits (someday :). */
+#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
+static struct core_entry cores[NUM_CORES] IBSS_ATTR;
+struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
+
+static const char main_thread_name[] = "main";
+#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
+extern uintptr_t stackbegin[];
+extern uintptr_t stackend[];
+#else
+extern uintptr_t *stackbegin;
+extern uintptr_t *stackend;
+#endif
+
+static inline void core_sleep(IF_COP_VOID(unsigned int core))
+ __attribute__((always_inline));
+
+void check_tmo_threads(void)
+ __attribute__((noinline));
+
+static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
+ __attribute__((always_inline));
+
+static void add_to_list_tmo(struct thread_entry *thread)
+ __attribute__((noinline));
+
+static void core_schedule_wakeup(struct thread_entry *thread)
+ __attribute__((noinline));
+
+#if NUM_CORES > 1
+static inline void run_blocking_ops(
+ unsigned int core, struct thread_entry *thread)
+ __attribute__((always_inline));
+#endif
+
+static void thread_stkov(struct thread_entry *thread)
+ __attribute__((noinline));
+
+static inline void store_context(void* addr)
+ __attribute__((always_inline));
+
+static inline void load_context(const void* addr)
+ __attribute__((always_inline));
+
+#if NUM_CORES > 1
+static void thread_final_exit_do(struct thread_entry *current)
+ __attribute__((noinline)) NORETURN_ATTR USED_ATTR;
+#else
+static inline void thread_final_exit(struct thread_entry *current)
+ __attribute__((always_inline)) NORETURN_ATTR;
+#endif
+
+void switch_thread(void)
+ __attribute__((noinline));
+
+/****************************************************************************
+ * Processor/OS-specific section - include necessary core support
+ */
+
+
+#include "asm/thread.c"
+
+#if defined (CPU_PP)
+#include "thread-pp.c"
+#endif /* CPU_PP */
+
+#ifndef IF_NO_SKIP_YIELD
+#define IF_NO_SKIP_YIELD(...)
+#endif
+
+/*
+ * End Processor-specific section
+ ***************************************************************************/
+
+#if THREAD_EXTRA_CHECKS
+static void thread_panicf(const char *msg, struct thread_entry *thread)
+{
+ IF_COP( const unsigned int core = thread->core; )
+ static char name[32];
+ thread_get_name(name, 32, thread);
+ panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
+}
+static void thread_stkov(struct thread_entry *thread)
+{
+ thread_panicf("Stkov", thread);
+}
+#define THREAD_PANICF(msg, thread) \
+ thread_panicf(msg, thread)
+#define THREAD_ASSERT(exp, msg, thread) \
+ ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
+#else
+static void thread_stkov(struct thread_entry *thread)
+{
+ IF_COP( const unsigned int core = thread->core; )
+ static char name[32];
+ thread_get_name(name, 32, thread);
+ panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
+}
+#define THREAD_PANICF(msg, thread)
+#define THREAD_ASSERT(exp, msg, thread)
+#endif /* THREAD_EXTRA_CHECKS */
+
+/* Thread locking */
+#if NUM_CORES > 1
+#define LOCK_THREAD(thread) \
+ ({ corelock_lock(&(thread)->slot_cl); })
+#define TRY_LOCK_THREAD(thread) \
+ ({ corelock_try_lock(&(thread)->slot_cl); })
+#define UNLOCK_THREAD(thread) \
+ ({ corelock_unlock(&(thread)->slot_cl); })
+#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
+ ({ unsigned int _core = (thread)->core; \
+ cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
+ cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
+#else
+#define LOCK_THREAD(thread) \
+ ({ })
+#define TRY_LOCK_THREAD(thread) \
+ ({ })
+#define UNLOCK_THREAD(thread) \
+ ({ })
+#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
+ ({ })
+#endif
+
+/* RTR list */
+#define RTR_LOCK(core) \
+ ({ corelock_lock(&cores[core].rtr_cl); })
+#define RTR_UNLOCK(core) \
+ ({ corelock_unlock(&cores[core].rtr_cl); })
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define rtr_add_entry(core, priority) \
+ prio_add_entry(&cores[core].rtr, (priority))
+
+#define rtr_subtract_entry(core, priority) \
+ prio_subtract_entry(&cores[core].rtr, (priority))
+
+#define rtr_move_entry(core, from, to) \
+ prio_move_entry(&cores[core].rtr, (from), (to))
+#else
+#define rtr_add_entry(core, priority)
+#define rtr_add_entry_inl(core, priority)
+#define rtr_subtract_entry(core, priority)
+#define rtr_subtract_entry_inl(core, priority)
+#define rtr_move_entry(core, from, to)
+#define rtr_move_entry_inl(core, from, to)
+#endif
+
+/*---------------------------------------------------------------------------
+ * Thread list structure - circular:
+ * +------------------------------+
+ * | |
+ * +--+---+<-+---+<-+---+<-+---+<-+
+ * Head->| T | | T | | T | | T |
+ * +->+---+->+---+->+---+->+---+--+
+ * | |
+ * +------------------------------+
+ *---------------------------------------------------------------------------
+ */
+
+/*---------------------------------------------------------------------------
+ * Adds a thread to a list of threads using "insert last". Uses the "l"
+ * links.
+ *---------------------------------------------------------------------------
+ */
+static void add_to_list_l(struct thread_entry **list,
+ struct thread_entry *thread)
+{
+ struct thread_entry *l = *list;
+
+ if (l == NULL)
+ {
+ /* Insert into unoccupied list */
+ thread->l.prev = thread;
+ thread->l.next = thread;
+ *list = thread;
+ return;
+ }
+
+ /* Insert last */
+ thread->l.prev = l->l.prev;
+ thread->l.next = l;
+ l->l.prev->l.next = thread;
+ l->l.prev = thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Removes a thread from a list of threads. Uses the "l" links.
+ *---------------------------------------------------------------------------
+ */
+static void remove_from_list_l(struct thread_entry **list,
+ struct thread_entry *thread)
+{
+ struct thread_entry *prev, *next;
+
+ next = thread->l.next;
+
+ if (thread == next)
+ {
+ /* The only item */
+ *list = NULL;
+ return;
+ }
+
+ if (thread == *list)
+ {
+ /* List becomes next item */
+ *list = next;
+ }
+
+ prev = thread->l.prev;
+
+ /* Fix links to jump over the removed entry. */
+ next->l.prev = prev;
+ prev->l.next = next;
+}
+
+/*---------------------------------------------------------------------------
+ * Timeout list structure - circular reverse (to make "remove item" O(1)),
+ * NULL-terminated forward (to ease the far more common forward traversal):
+ * +------------------------------+
+ * | |
+ * +--+---+<-+---+<-+---+<-+---+<-+
+ * Head->| T | | T | | T | | T |
+ * +---+->+---+->+---+->+---+-X
+ *---------------------------------------------------------------------------
+ */
+
+/*---------------------------------------------------------------------------
+ * Add a thread to the core's timeout list by linking the pointers in its
+ * tmo structure.
+ *---------------------------------------------------------------------------
+ */
+static void add_to_list_tmo(struct thread_entry *thread)
+{
+ struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
+ THREAD_ASSERT(thread->tmo.prev == NULL,
+ "add_to_list_tmo->already listed", thread);
+
+ thread->tmo.next = NULL;
+
+ if (tmo == NULL)
+ {
+ /* Insert into unoccupied list */
+ thread->tmo.prev = thread;
+ cores[IF_COP_CORE(thread->core)].timeout = thread;
+ return;
+ }
+
+ /* Insert Last */
+ thread->tmo.prev = tmo->tmo.prev;
+ tmo->tmo.prev->tmo.next = thread;
+ tmo->tmo.prev = thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Remove a thread from the core's timeout list by unlinking the pointers in
+ * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
+ * is cancelled.
+ *---------------------------------------------------------------------------
+ */
+static void remove_from_list_tmo(struct thread_entry *thread)
+{
+ struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
+ struct thread_entry *prev = thread->tmo.prev;
+ struct thread_entry *next = thread->tmo.next;
+
+ THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
+
+ if (next != NULL)
+ next->tmo.prev = prev;
+
+ if (thread == *list)
+ {
+ /* List becomes next item and empty if next == NULL */
+ *list = next;
+ /* Mark as unlisted */
+ thread->tmo.prev = NULL;
+ }
+ else
+ {
+ if (next == NULL)
+ (*list)->tmo.prev = prev;
+ prev->tmo.next = next;
+ /* Mark as unlisted */
+ thread->tmo.prev = NULL;
+ }
+}
+
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+/*---------------------------------------------------------------------------
+ * Priority distribution structure (one category for each possible priority):
+ *
+ * +----+----+----+ ... +-----+
+ * hist: | F0 | F1 | F2 | | F31 |
+ * +----+----+----+ ... +-----+
+ * mask: | b0 | b1 | b2 | | b31 |
+ * +----+----+----+ ... +-----+
+ *
+ * F = count of threads at priority category n (frequency)
+ * b = bitmask of non-zero priority categories (occupancy)
+ *
+ *          / if hist[n] != 0 : 1
+ * b[n] = |
+ *          \ else            : 0
+ *
+ *---------------------------------------------------------------------------
+ * Basic priority inheritance protocol (PIP):
+ *
+ * Mn = mutex n, Tn = thread n
+ *
+ * A lower priority thread inherits the priority of the highest priority
+ * thread blocked waiting for it to complete an action (such as release a
+ * mutex or respond to a message via queue_send):
+ *
+ * 1) T2->M1->T1
+ *
+ * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
+ * priority than T1 then T1 inherits the priority of T2.
+ *
+ * 2) T3
+ * \/
+ * T2->M1->T1
+ *
+ * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
+ * T1 inherits the higher of T2 and T3.
+ *
+ * 3) T3->M2->T2->M1->T1
+ *
+ * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
+ * then T1 inherits the priority of T3 through T2.
+ *
+ * Blocking chains can grow arbitrarily complex (though it's best that they
+ * not form at all very often :) and build up from these units.
+ *---------------------------------------------------------------------------
+ */
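+
+/* Worked example (illustrative): a thread with base priority 16 owning two
+ * mutexes whose highest-priority waiters are at 8 and 12 carries
+ *
+ *   pdist.hist[8] = hist[12] = hist[16] = 1
+ *   pdist.mask    = (1 << 8) | (1 << 12) | (1 << 16)
+ *
+ * and is scheduled at find_first_set_bit(pdist.mask) == 8 (each blocker
+ * contributes only its single highest waiter priority). When ownership of
+ * the priority-8 mutex is transferred away, that entry is subtracted and the
+ * thread falls back to 12. */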
+
+/*---------------------------------------------------------------------------
+ * Increment frequency at category "priority"
+ *---------------------------------------------------------------------------
+ */
+static inline unsigned int prio_add_entry(
+ struct priority_distribution *pd, int priority)
+{
+ unsigned int count;
+ /* Enough size/instruction count difference for ARM makes it worth it to
+ * use different code (192 bytes for ARM). Only thing better is ASM. */
+#ifdef CPU_ARM
+ count = pd->hist[priority];
+ if (++count == 1)
+ pd->mask |= 1 << priority;
+ pd->hist[priority] = count;
+#else /* This one's better for Coldfire */
+ if ((count = ++pd->hist[priority]) == 1)
+ pd->mask |= 1 << priority;
+#endif
+
+ return count;
+}
+
+/*---------------------------------------------------------------------------
+ * Decrement frequency at category "priority"
+ *---------------------------------------------------------------------------
+ */
+static inline unsigned int prio_subtract_entry(
+ struct priority_distribution *pd, int priority)
+{
+ unsigned int count;
+
+#ifdef CPU_ARM
+ count = pd->hist[priority];
+ if (--count == 0)
+ pd->mask &= ~(1 << priority);
+ pd->hist[priority] = count;
+#else
+ if ((count = --pd->hist[priority]) == 0)
+ pd->mask &= ~(1 << priority);
+#endif
+
+ return count;
+}
+
+/*---------------------------------------------------------------------------
+ * Remove from one category and add to another
+ *---------------------------------------------------------------------------
+ */
+static inline void prio_move_entry(
+ struct priority_distribution *pd, int from, int to)
+{
+ uint32_t mask = pd->mask;
+
+#ifdef CPU_ARM
+ unsigned int count;
+
+ count = pd->hist[from];
+ if (--count == 0)
+ mask &= ~(1 << from);
+ pd->hist[from] = count;
+
+ count = pd->hist[to];
+ if (++count == 1)
+ mask |= 1 << to;
+ pd->hist[to] = count;
+#else
+ if (--pd->hist[from] == 0)
+ mask &= ~(1 << from);
+
+ if (++pd->hist[to] == 1)
+ mask |= 1 << to;
+#endif
+
+ pd->mask = mask;
+}
+
+/*---------------------------------------------------------------------------
+ * Change the priority and rtr entry for a running thread
+ *---------------------------------------------------------------------------
+ */
+static inline void set_running_thread_priority(
+ struct thread_entry *thread, int priority)
+{
+ const unsigned int core = IF_COP_CORE(thread->core);
+ RTR_LOCK(core);
+ rtr_move_entry(core, thread->priority, priority);
+ thread->priority = priority;
+ RTR_UNLOCK(core);
+}
+
+/*---------------------------------------------------------------------------
+ * Finds the highest priority thread in a list of threads. If the list is
+ * empty, the PRIORITY_IDLE is returned.
+ *
+ * It is possible to use the struct priority_distribution within an object
+ * instead of scanning the remaining threads in the list but as a compromise,
+ * the resulting per-object memory overhead is saved at a slight speed
+ * penalty under high contention.
+ *---------------------------------------------------------------------------
+ */
+static int find_highest_priority_in_list_l(
+ struct thread_entry * const thread)
+{
+ if (LIKELY(thread != NULL))
+ {
+ /* Go through the list until ending up back at the initial thread */
+ int highest_priority = thread->priority;
+ struct thread_entry *curr = thread;
+
+ do
+ {
+ int priority = curr->priority;
+
+ if (priority < highest_priority)
+ highest_priority = priority;
+
+ curr = curr->l.next;
+ }
+ while (curr != thread);
+
+ return highest_priority;
+ }
+
+ return PRIORITY_IDLE;
+}
+
+/*---------------------------------------------------------------------------
+ * Register priority with blocking system and bubble it down the chain if
+ * any until we reach the end or something is already equal or higher.
+ *
+ * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
+ * targets but that same action also guarantees a circular block anyway and
+ * those are prevented, right? :-)
+ *---------------------------------------------------------------------------
+ */
+static struct thread_entry *
+ blocker_inherit_priority(struct thread_entry *current)
+{
+ const int priority = current->priority;
+ struct blocker *bl = current->blocker;
+ struct thread_entry * const tstart = current;
+ struct thread_entry *bl_t = bl->thread;
+
+ /* Blocker cannot change since the object protection is held */
+ LOCK_THREAD(bl_t);
+
+ for (;;)
+ {
+ struct thread_entry *next;
+ int bl_pr = bl->priority;
+
+ if (priority >= bl_pr)
+ break; /* Object priority already high enough */
+
+ bl->priority = priority;
+
+ /* Add this one */
+ prio_add_entry(&bl_t->pdist, priority);
+
+ if (bl_pr < PRIORITY_IDLE)
+ {
+ /* Not first waiter - subtract old one */
+ prio_subtract_entry(&bl_t->pdist, bl_pr);
+ }
+
+ if (priority >= bl_t->priority)
+ break; /* Thread priority high enough */
+
+ if (bl_t->state == STATE_RUNNING)
+ {
+ /* Blocking thread is a running thread therefore there are no
+ * further blockers. Change the "run queue" on which it
+ * resides. */
+ set_running_thread_priority(bl_t, priority);
+ break;
+ }
+
+ bl_t->priority = priority;
+
+ /* If blocking thread has a blocker, apply transitive inheritance */
+ bl = bl_t->blocker;
+
+ if (bl == NULL)
+ break; /* End of chain or object doesn't support inheritance */
+
+ next = bl->thread;
+
+ if (UNLIKELY(next == tstart))
+ break; /* Full-circle - deadlock! */
+
+ UNLOCK_THREAD(current);
+
+#if NUM_CORES > 1
+ for (;;)
+ {
+ LOCK_THREAD(next);
+
+ /* Blocker could change - retest condition */
+ if (LIKELY(bl->thread == next))
+ break;
+
+ UNLOCK_THREAD(next);
+ next = bl->thread;
+ }
+#endif
+ current = bl_t;
+ bl_t = next;
+ }
+
+ UNLOCK_THREAD(bl_t);
+
+ return current;
+}
+
+/*---------------------------------------------------------------------------
+ * Readjust priorities when waking a thread blocked waiting for another
+ * in essence "releasing" the thread's effect on the object owner. Can be
+ * performed from any context.
+ *---------------------------------------------------------------------------
+ */
+struct thread_entry *
+ wakeup_priority_protocol_release(struct thread_entry *thread)
+{
+ const int priority = thread->priority;
+ struct blocker *bl = thread->blocker;
+ struct thread_entry * const tstart = thread;
+ struct thread_entry *bl_t = bl->thread;
+
+ /* Blocker cannot change since object will be locked */
+ LOCK_THREAD(bl_t);
+
+ thread->blocker = NULL; /* Thread not blocked */
+
+ for (;;)
+ {
+ struct thread_entry *next;
+ int bl_pr = bl->priority;
+
+ if (priority > bl_pr)
+ break; /* Object priority higher */
+
+ next = *thread->bqp;
+
+ if (next == NULL)
+ {
+ /* No more threads in queue */
+ prio_subtract_entry(&bl_t->pdist, bl_pr);
+ bl->priority = PRIORITY_IDLE;
+ }
+ else
+ {
+ /* Check list for highest remaining priority */
+ int queue_pr = find_highest_priority_in_list_l(next);
+
+ if (queue_pr == bl_pr)
+ break; /* Object priority not changing */
+
+ /* Change queue priority */
+ prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
+ bl->priority = queue_pr;
+ }
+
+ if (bl_pr > bl_t->priority)
+ break; /* thread priority is higher */
+
+ bl_pr = find_first_set_bit(bl_t->pdist.mask);
+
+ if (bl_pr == bl_t->priority)
+ break; /* Thread priority not changing */
+
+ if (bl_t->state == STATE_RUNNING)
+ {
+ /* No further blockers */
+ set_running_thread_priority(bl_t, bl_pr);
+ break;
+ }
+
+ bl_t->priority = bl_pr;
+
+ /* If blocking thread has a blocker, apply transitive inheritance */
+ bl = bl_t->blocker;
+
+ if (bl == NULL)
+ break; /* End of chain or object doesn't support inheritance */
+
+ next = bl->thread;
+
+ if (UNLIKELY(next == tstart))
+ break; /* Full-circle - deadlock! */
+
+ UNLOCK_THREAD(thread);
+
+#if NUM_CORES > 1
+ for (;;)
+ {
+ LOCK_THREAD(next);
+
+ /* Blocker could change - retest condition */
+ if (LIKELY(bl->thread == next))
+ break;
+
+ UNLOCK_THREAD(next);
+ next = bl->thread;
+ }
+#endif
+ thread = bl_t;
+ bl_t = next;
+ }
+
+ UNLOCK_THREAD(bl_t);
+
+#if NUM_CORES > 1
+ if (UNLIKELY(thread != tstart))
+ {
+ /* Relock original if it changed */
+ LOCK_THREAD(tstart);
+ }
+#endif
+
+ return cores[CURRENT_CORE].running;
+}
+
+/*---------------------------------------------------------------------------
+ * Transfer ownership to a thread waiting for an object and transfer
+ * inherited priority boost from other waiters. This algorithm knows that
+ * blocking chains may only unblock from the very end.
+ *
+ * Only the owning thread itself may call this and so the assumption that
+ * it is the running thread is made.
+ *---------------------------------------------------------------------------
+ */
+struct thread_entry *
+ wakeup_priority_protocol_transfer(struct thread_entry *thread)
+{
+ /* Waking thread inherits priority boost from object owner */
+ struct blocker *bl = thread->blocker;
+ struct thread_entry *bl_t = bl->thread;
+ struct thread_entry *next;
+ int bl_pr;
+
+ THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t,
+ "UPPT->wrong thread", cores[CURRENT_CORE].running);
+
+ LOCK_THREAD(bl_t);
+
+ bl_pr = bl->priority;
+
+ /* Remove the object's boost from the owning thread */
+ if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
+ bl_pr <= bl_t->priority)
+ {
+ /* No more threads at this priority are waiting and the old level is
+ * at least the thread level */
+ int priority = find_first_set_bit(bl_t->pdist.mask);
+
+ if (priority != bl_t->priority)
+ {
+ /* Adjust this thread's priority */
+ set_running_thread_priority(bl_t, priority);
+ }
+ }
+
+ next = *thread->bqp;
+
+ if (LIKELY(next == NULL))
+ {
+ /* Expected shortcut - no more waiters */
+ bl_pr = PRIORITY_IDLE;
+ }
+ else
+ {
+ if (thread->priority <= bl_pr)
+ {
+ /* Need to scan threads remaining in queue */
+ bl_pr = find_highest_priority_in_list_l(next);
+ }
+
+ if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
+ bl_pr < thread->priority)
+ {
+ /* Thread priority must be raised */
+ thread->priority = bl_pr;
+ }
+ }
+
+ bl->thread = thread; /* This thread now owns the object */
+ bl->priority = bl_pr; /* Save highest blocked priority */
+ thread->blocker = NULL; /* Thread not blocked */
+
+ UNLOCK_THREAD(bl_t);
+
+ return bl_t;
+}
+
+/*---------------------------------------------------------------------------
+ * No threads may be left blocked waiting on this thread except those waiting
+ * for it to exit.
+ * The alternative is more elaborate cleanup and object registration code.
+ * Check this for risk of silent data corruption when objects with
+ * inheritable blocking are abandoned by the owner - not precise but may
+ * catch something.
+ *---------------------------------------------------------------------------
+ */
+static void __attribute__((noinline)) check_for_obj_waiters(
+ const char *function, struct thread_entry *thread)
+{
+ /* Only one bit in the mask should be set, with a frequency of 1, which
+ * represents the thread's own base priority */
+ uint32_t mask = thread->pdist.mask;
+ if ((mask & (mask - 1)) != 0 ||
+ thread->pdist.hist[find_first_set_bit(mask)] > 1)
+ {
+ char name[32];
+ thread_get_name(name, 32, thread);
+ panicf("%s->%s with obj. waiters", function, name);
+ }
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+/*---------------------------------------------------------------------------
+ * Move a thread back to a running state on its core.
+ *---------------------------------------------------------------------------
+ */
+static void core_schedule_wakeup(struct thread_entry *thread)
+{
+ const unsigned int core = IF_COP_CORE(thread->core);
+
+ RTR_LOCK(core);
+
+ thread->state = STATE_RUNNING;
+
+ add_to_list_l(&cores[core].running, thread);
+ rtr_add_entry(core, thread->priority);
+
+ RTR_UNLOCK(core);
+
+#if NUM_CORES > 1
+ if (core != CURRENT_CORE)
+ core_wake(core);
+#endif
+}
+
+/*---------------------------------------------------------------------------
+ * Check the core's timeout list when at least one thread is due to wake.
+ * Filtering for the condition is done before making the call. Resets the
+ * tick when the next check will occur.
+ *---------------------------------------------------------------------------
+ */
+void check_tmo_threads(void)
+{
+ const unsigned int core = CURRENT_CORE;
+ const long tick = current_tick; /* snapshot the current tick */
+ long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
+ struct thread_entry *next = cores[core].timeout;
+
+ /* If there are no processes waiting for a timeout, just keep the check
+ tick from falling into the past. */
+
+ /* Break the loop once we have walked through the list of all
+ * sleeping processes or have removed them all. */
+ while (next != NULL)
+ {
+ /* Check sleeping threads. Allow interrupts between checks. */
+ enable_irq();
+
+ struct thread_entry *curr = next;
+
+ next = curr->tmo.next;
+
+ /* Lock thread slot against explicit wakeup */
+ disable_irq();
+ LOCK_THREAD(curr);
+
+ unsigned state = curr->state;
+
+ if (state < TIMEOUT_STATE_FIRST)
+ {
+ /* Cleanup threads no longer on a timeout but still on the
+ * list. */
+ remove_from_list_tmo(curr);
+ }
+ else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
+ {
+ /* Timeout still pending - this will be the usual case */
+ if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
+ {
+ /* Earliest timeout found so far - move the next check up
+ to its time */
+ next_tmo_check = curr->tmo_tick;
+ }
+ }
+ else
+ {
+ /* Sleep timeout has been reached so bring the thread back to
+ * life again. */
+ if (state == STATE_BLOCKED_W_TMO)
+ {
+#ifdef HAVE_CORELOCK_OBJECT
+ /* Lock the waiting thread's kernel object */
+ struct corelock *ocl = curr->obj_cl;
+
+ if (UNLIKELY(corelock_try_lock(ocl) == 0))
+ {
+ /* Need to retry in the correct order though the need is
+ * unlikely */
+ UNLOCK_THREAD(curr);
+ corelock_lock(ocl);
+ LOCK_THREAD(curr);
+
+ if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
+ {
+ /* Thread was woken or removed explicitly while the slot
+ * was unlocked */
+ corelock_unlock(ocl);
+ remove_from_list_tmo(curr);
+ UNLOCK_THREAD(curr);
+ continue;
+ }
+ }
+#endif /* HAVE_CORELOCK_OBJECT */
+
+ remove_from_list_l(curr->bqp, curr);
+
+#ifdef HAVE_WAKEUP_EXT_CB
+ if (curr->wakeup_ext_cb != NULL)
+ curr->wakeup_ext_cb(curr);
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (curr->blocker != NULL)
+ wakeup_priority_protocol_release(curr);
+#endif
+ corelock_unlock(ocl);
+ }
+ /* else state == STATE_SLEEPING */
+
+ remove_from_list_tmo(curr);
+
+ RTR_LOCK(core);
+
+ curr->state = STATE_RUNNING;
+
+ add_to_list_l(&cores[core].running, curr);
+ rtr_add_entry(core, curr->priority);
+
+ RTR_UNLOCK(core);
+ }
+
+ UNLOCK_THREAD(curr);
+ }
+
+ cores[core].next_tmo_check = next_tmo_check;
+}
+
+/*---------------------------------------------------------------------------
+ * Performs operations that must be done before blocking a thread but after
+ * the state is saved.
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES > 1
+static inline void run_blocking_ops(
+ unsigned int core, struct thread_entry *thread)
+{
+ struct thread_blk_ops *ops = &cores[core].blk_ops;
+ const unsigned flags = ops->flags;
+
+ if (LIKELY(flags == TBOP_CLEAR))
+ return;
+
+ switch (flags)
+ {
+ case TBOP_SWITCH_CORE:
+ core_switch_blk_op(core, thread);
+ /* Fall-through */
+ case TBOP_UNLOCK_CORELOCK:
+ corelock_unlock(ops->cl_p);
+ break;
+ }
+
+ ops->flags = TBOP_CLEAR;
+}
+#endif /* NUM_CORES > 1 */
+
+#ifdef RB_PROFILE
+void profile_thread(void)
+{
+ profstart(cores[CURRENT_CORE].running - threads);
+}
+#endif
+
+/*---------------------------------------------------------------------------
+ * Prepares a thread to block on an object's list and/or for a specified
+ * duration - expects object and slot to be appropriately locked if needed
+ * and interrupts to be masked.
+ *---------------------------------------------------------------------------
+ */
+static inline void block_thread_on_l(struct thread_entry *thread,
+ unsigned state)
+{
+ /* If inlined, unreachable branches will be pruned with no size penalty
+ because state is passed as a constant parameter. */
+ const unsigned int core = IF_COP_CORE(thread->core);
+
+ /* Remove the thread from the list of running threads. */
+ RTR_LOCK(core);
+ remove_from_list_l(&cores[core].running, thread);
+ rtr_subtract_entry(core, thread->priority);
+ RTR_UNLOCK(core);
+
+ /* Add a timeout to the block if not infinite */
+ switch (state)
+ {
+ case STATE_BLOCKED:
+ case STATE_BLOCKED_W_TMO:
+ /* Put the thread into a new list of inactive threads. */
+ add_to_list_l(thread->bqp, thread);
+
+ if (state == STATE_BLOCKED)
+ break;
+
+ /* Fall-through */
+ case STATE_SLEEPING:
+ /* If this thread times out sooner than any other thread, update
+ next_tmo_check to its timeout */
+ if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
+ {
+ cores[core].next_tmo_check = thread->tmo_tick;
+ }
+
+ if (thread->tmo.prev == NULL)
+ {
+ add_to_list_tmo(thread);
+ }
+ /* else thread was never removed from list - just keep it there */
+ break;
+ }
+
+ /* Remember the next thread about to block. */
+ cores[core].block_task = thread;
+
+ /* Report new state. */
+ thread->state = state;
+}
+
+/*---------------------------------------------------------------------------
+ * Switch thread in round robin fashion for any given priority. Any thread
+ * that removed itself from the running list first must specify itself in
+ * the paramter.
+ *
+ * INTERNAL: Intended for use by kernel and not for programs.
+ *---------------------------------------------------------------------------
+ */
+void switch_thread(void)
+{
+
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *block = cores[core].block_task;
+ struct thread_entry *thread = cores[core].running;
+
+ /* Get context to save - next thread to run is unknown until all wakeups
+ * are evaluated */
+ if (block != NULL)
+ {
+ cores[core].block_task = NULL;
+
+#if NUM_CORES > 1
+ if (UNLIKELY(thread == block))
+ {
+ /* This was the last thread running and another core woke us before
+ * reaching here. Force next thread selection to give tmo threads or
+ * other threads woken before this block a first chance. */
+ block = NULL;
+ }
+ else
+#endif
+ {
+ /* Blocking task is the old one */
+ thread = block;
+ }
+ }
+
+#ifdef RB_PROFILE
+#ifdef CPU_COLDFIRE
+ _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
+#else
+ profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
+#endif
+#endif
+
+ /* Begin task switching by saving our current context so that we can
+ * restore the state of the current thread later to the point prior
+ * to this call. */
+ store_context(&thread->context);
+
+#ifdef DEBUG
+ /* Check core_ctx buflib integrity */
+ core_check_valid();
+#endif
+
+ /* Check if the current thread stack is overflown */
+ if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
+ thread_stkov(thread);
+
+#if NUM_CORES > 1
+ /* Run any blocking operations requested before switching/sleeping */
+ run_blocking_ops(core, thread);
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
+ /* Reset the value of thread's skip count */
+ thread->skip_count = 0;
+#endif
+
+ for (;;)
+ {
+ /* If there are threads on a timeout and the earliest wakeup is due,
+ * check the list and wake any threads that need to start running
+ * again. */
+ if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
+ {
+ check_tmo_threads();
+ }
+
+ disable_irq();
+ RTR_LOCK(core);
+
+ thread = cores[core].running;
+
+ if (UNLIKELY(thread == NULL))
+ {
+ /* Enter sleep mode to reduce power usage - woken up on interrupt
+ * or wakeup request from another core - expected to enable
+ * interrupts. */
+ RTR_UNLOCK(core);
+ core_sleep(IF_COP(core));
+ }
+ else
+ {
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Select the new task based on priorities and the last time a
+ * process got CPU time relative to the highest priority runnable
+ * task. */
+ struct priority_distribution *pd = &cores[core].rtr;
+ int max = find_first_set_bit(pd->mask);
+
+ if (block == NULL)
+ {
+ /* Not switching on a block, tentatively select next thread */
+ thread = thread->l.next;
+ }
+
+ for (;;)
+ {
+ int priority = thread->priority;
+ int diff;
+
+ /* This ridiculously simple method of aging seems to work
+ * suspiciously well. It does tend to reward CPU hogs that
+ * under-yield, but that's generally not desirable at all. On
+ * the plus side, relative to other threads, it penalizes
+ * excess yielding, which is good if some high-priority thread
+ * is performing no useful work, such as polling for a device
+ * to be ready. Of course, aging is only employed when higher
+ * and lower priority threads are runnable. The highest
+ * priority runnable thread(s) are never skipped unless a
+ * lower-priority process has aged sufficiently. Priorities
+ * of REALTIME class are run strictly according to priority
+ * thus are not subject to switchout due to lower-priority
+ * processes aging; they must give up the processor by going
+ * off the run list. */
+ if (LIKELY(priority <= max) ||
+ IF_NO_SKIP_YIELD( thread->skip_count == -1 || )
+ (priority > PRIORITY_REALTIME &&
+ (diff = priority - max,
+ ++thread->skip_count > diff*diff)))
+ {
+ cores[core].running = thread;
+ break;
+ }
+
+ thread = thread->l.next;
+ }
+#else
+ /* Without priority use a simple FCFS algorithm */
+ if (block == NULL)
+ {
+ /* Not switching on a block, select next thread */
+ thread = thread->l.next;
+ cores[core].running = thread;
+ }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+ RTR_UNLOCK(core);
+ enable_irq();
+ break;
+ }
+ }
+
+ /* And finally give control to the next thread. */
+ load_context(&thread->context);
+
+#ifdef RB_PROFILE
+ profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
+#endif
+
+}
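+
+/* A worked example of the aging rule above (illustrative numbers only):
+ * with the highest runnable priority max = 16, a runnable thread at
+ * priority 18 has diff = 2. Each time it is examined and passed over its
+ * skip_count increments, and once skip_count exceeds diff*diff = 4 it is
+ * selected - roughly one selection per five examinations. Threads at or
+ * above the REALTIME class are scheduled strictly by priority and never
+ * benefit from this aging. */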
+
+/*---------------------------------------------------------------------------
+ * Sleeps a thread for at least a specified number of ticks with zero being
+ * a wait until the next tick.
+ *
+ * INTERNAL: Intended for use by kernel and not for programs.
+ *---------------------------------------------------------------------------
+ */
+void sleep_thread(int ticks)
+{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
+ LOCK_THREAD(current);
+
+ /* Set our timeout, remove from run list and join timeout list. */
+ current->tmo_tick = current_tick + ticks + 1;
+ block_thread_on_l(current, STATE_SLEEPING);
+
+ UNLOCK_THREAD(current);
+}
+
+/*---------------------------------------------------------------------------
+ * Indefinitely block a thread on a blocking queue for explicit wakeup.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+void block_thread(struct thread_entry *current)
+{
+ /* Set the state to blocked and take us off of the run queue until we
+ * are explicitly woken */
+ LOCK_THREAD(current);
+
+ /* Set the list for explicit wakeup */
+ block_thread_on_l(current, STATE_BLOCKED);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (current->blocker != NULL)
+ {
+ /* Object supports PIP */
+ current = blocker_inherit_priority(current);
+ }
+#endif
+
+ UNLOCK_THREAD(current);
+}
+
+/*---------------------------------------------------------------------------
+ * Block a thread on a blocking queue for a specified time interval or until
+ * explicitly woken - whichever happens first.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+void block_thread_w_tmo(struct thread_entry *current, int timeout)
+{
+ /* Get the entry for the current running thread. */
+ LOCK_THREAD(current);
+
+ /* Set the state to blocked with the specified timeout */
+ current->tmo_tick = current_tick + timeout;
+
+ /* Set the list for explicit wakeup */
+ block_thread_on_l(current, STATE_BLOCKED_W_TMO);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (current->blocker != NULL)
+ {
+ /* Object supports PIP */
+ current = blocker_inherit_priority(current);
+ }
+#endif
+
+ UNLOCK_THREAD(current);
+}
+
+/*---------------------------------------------------------------------------
+ * Explicitly wake up a thread on a blocking queue. Only affects threads in
+ * STATE_BLOCKED or STATE_BLOCKED_W_TMO.
+ *
+ * This code should be considered a critical section by the caller meaning
+ * that the object's corelock should be held.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+unsigned int wakeup_thread(struct thread_entry **list)
+{
+ struct thread_entry *thread = *list;
+ unsigned int result = THREAD_NONE;
+
+ /* Check if there is a blocked thread at all. */
+ if (thread == NULL)
+ return result;
+
+ LOCK_THREAD(thread);
+
+ /* Determine thread's current state. */
+ switch (thread->state)
+ {
+ case STATE_BLOCKED:
+ case STATE_BLOCKED_W_TMO:
+ remove_from_list_l(list, thread);
+
+ result = THREAD_OK;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct thread_entry *current;
+ struct blocker *bl = thread->blocker;
+
+ if (bl == NULL)
+ {
+ /* No inheritance - just boost the thread by aging */
+ IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
+ thread->skip_count = thread->priority;
+ current = cores[CURRENT_CORE].running;
+ }
+ else
+ {
+ /* Call the specified unblocking PIP */
+ current = bl->wakeup_protocol(thread);
+ }
+
+ if (current != NULL &&
+ find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask)
+ < current->priority)
+ {
+ /* There is a thread ready to run of higher or same priority on
+ * the same core as the current one; recommend a task switch.
+ * Knowing if this is an interrupt call would be helpful here. */
+ result |= THREAD_SWITCH;
+ }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+ core_schedule_wakeup(thread);
+ break;
+
+ /* Nothing to do. State is not blocked. */
+#if THREAD_EXTRA_CHECKS
+ default:
+ THREAD_PANICF("wakeup_thread->block invalid", thread);
+ case STATE_RUNNING:
+ case STATE_KILLED:
+ break;
+#endif
+ }
+
+ UNLOCK_THREAD(thread);
+ return result;
+}
+
+/*---------------------------------------------------------------------------
+ * Wake up an entire queue of threads - returns the bitwise OR of the result
+ * of each wakeup operation, or THREAD_NONE if nothing was awakened. The
+ * object owning the queue must be locked first.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+unsigned int thread_queue_wake(struct thread_entry **list)
+{
+ unsigned result = THREAD_NONE;
+
+ for (;;)
+ {
+ unsigned int rc = wakeup_thread(list);
+
+ if (rc == THREAD_NONE)
+ break; /* No more threads */
+
+ result |= rc;
+ }
+
+ return result;
+}
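+
+/* A minimal sketch of how a kernel object might use this, assuming a
+ * hypothetical object with a corelock `cl` and a wait list `queue`
+ * (names invented for illustration):
+ *
+ *   corelock_lock(&obj->cl);
+ *   unsigned int rc = thread_queue_wake(&obj->queue);
+ *   corelock_unlock(&obj->cl);
+ *
+ *   if (rc & THREAD_SWITCH)
+ *       switch_thread();  // a woken thread has higher or equal priority
+ */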
+
+/*---------------------------------------------------------------------------
+ * Assign the thread slot a new ID. Version is 1-255.
+ *---------------------------------------------------------------------------
+ */
+static void new_thread_id(unsigned int slot_num,
+ struct thread_entry *thread)
+{
+ unsigned int version =
+ (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
+ & THREAD_ID_VERSION_MASK;
+
+ /* If wrapped to 0, make it 1 */
+ if (version == 0)
+ version = 1u << THREAD_ID_VERSION_SHIFT;
+
+ thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
+}
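+
+/* Sketch of the ID layout implied above (masks defined in the thread
+ * headers):
+ *
+ *   slot    = id & THREAD_ID_SLOT_MASK;     // index into threads[]
+ *   version = id & THREAD_ID_VERSION_MASK;  // bumped each time a slot is reused
+ *
+ * A stale handle to a recycled slot then fails the `thread->id == thread_id`
+ * checks used throughout this file because the version no longer matches. */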
+
+/*---------------------------------------------------------------------------
+ * Find an empty thread slot, or NULL if none is found. The slot returned
+ * will be locked on multicore.
+ *---------------------------------------------------------------------------
+ */
+static struct thread_entry * find_empty_thread_slot(void)
+{
+ /* Any slot could be on an interrupt-accessible list */
+ IF_COP( int oldlevel = disable_irq_save(); )
+ struct thread_entry *thread = NULL;
+ int n;
+
+ for (n = 0; n < MAXTHREADS; n++)
+ {
+ /* Obtain current slot state - lock it on multicore */
+ struct thread_entry *t = &threads[n];
+ LOCK_THREAD(t);
+
+ if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
+ {
+ /* Slot is empty - leave it locked and caller will unlock */
+ thread = t;
+ break;
+ }
+
+ /* Finished examining slot - no longer busy - unlock on multicore */
+ UNLOCK_THREAD(t);
+ }
+
+ IF_COP( restore_irq(oldlevel); ) /* Re-enable interrupts - this slot is
+ not accessible to them yet */
+ return thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Return the thread_entry pointer for a thread_id. The slot is selected by
+ * masking off the version bits; callers must still validate the full ID.
+ *---------------------------------------------------------------------------
+ */
+struct thread_entry * thread_id_entry(unsigned int thread_id)
+{
+ return &threads[thread_id & THREAD_ID_SLOT_MASK];
+}
+
+/*---------------------------------------------------------------------------
+ * Return the thread id of the calling thread
+ * --------------------------------------------------------------------------
+ */
+unsigned int thread_self(void)
+{
+ return cores[CURRENT_CORE].running->id;
+}
+
+/*---------------------------------------------------------------------------
+ * Return the thread entry of the calling thread.
+ *
+ * INTERNAL: Intended for use by kernel and not for programs.
+ *---------------------------------------------------------------------------
+ */
+struct thread_entry* thread_self_entry(void)
+{
+ return cores[CURRENT_CORE].running;
+}
+
+/*---------------------------------------------------------------------------
+ * Place the current core in idle mode - woken up on interrupt or wake
+ * request from another core.
+ *---------------------------------------------------------------------------
+ */
+void core_idle(void)
+{
+ IF_COP( const unsigned int core = CURRENT_CORE; )
+ disable_irq();
+ core_sleep(IF_COP(core));
+}
+
+/*---------------------------------------------------------------------------
+ * Create a thread. If using a dual core architecture, specify which core to
+ * start the thread on.
+ *
+ * Returns the thread ID if a slot could be allocated, else 0.
+ *---------------------------------------------------------------------------
+ */
+unsigned int create_thread(void (*function)(void),
+ void* stack, size_t stack_size,
+ unsigned flags, const char *name
+ IF_PRIO(, int priority)
+ IF_COP(, unsigned int core))
+{
+ unsigned int i;
+ unsigned int stack_words;
+ uintptr_t stackptr, stackend;
+ struct thread_entry *thread;
+ unsigned state;
+ int oldlevel;
+
+ thread = find_empty_thread_slot();
+ if (thread == NULL)
+ {
+ return 0;
+ }
+
+ oldlevel = disable_irq_save();
+
+ /* Munge the stack to make it easy to spot stack overflows */
+ stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
+ stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
+ stack_size = stackend - stackptr;
+ stack_words = stack_size / sizeof (uintptr_t);
+
+ for (i = 0; i < stack_words; i++)
+ {
+ ((uintptr_t *)stackptr)[i] = DEADBEEF;
+ }
+
+ /* Store interesting information */
+ thread->name = name;
+ thread->stack = (uintptr_t *)stackptr;
+ thread->stack_size = stack_size;
+ thread->queue = NULL;
+#ifdef HAVE_WAKEUP_EXT_CB
+ thread->wakeup_ext_cb = NULL;
+#endif
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ thread->cpu_boost = 0;
+#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+ memset(&thread->pdist, 0, sizeof(thread->pdist));
+ thread->blocker = NULL;
+ thread->base_priority = priority;
+ thread->priority = priority;
+ thread->skip_count = priority;
+ prio_add_entry(&thread->pdist, priority);
+#endif
+
+#ifdef HAVE_IO_PRIORITY
+ /* Default to high (foreground) priority */
+ thread->io_priority = IO_PRIORITY_IMMEDIATE;
+#endif
+
+#if NUM_CORES > 1
+ thread->core = core;
+
+ /* Writeback stack munging or anything else before starting */
+ if (core != CURRENT_CORE)
+ {
+ commit_dcache();
+ }
+#endif
+
+ /* Thread is not on any timeout list but be a bit paranoid */
+ thread->tmo.prev = NULL;
+
+ state = (flags & CREATE_THREAD_FROZEN) ?
+ STATE_FROZEN : STATE_RUNNING;
+
+ thread->context.sp = (typeof (thread->context.sp))stackend;
+
+ /* Load the thread's context structure with needed startup information */
+ THREAD_STARTUP_INIT(core, thread, function);
+
+ thread->state = state;
+ i = thread->id; /* Snapshot while locked */
+
+ if (state == STATE_RUNNING)
+ core_schedule_wakeup(thread);
+
+ UNLOCK_THREAD(thread);
+ restore_irq(oldlevel);
+
+ return i;
+}
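+
+/* A minimal usage sketch, not part of this change. The stack size and
+ * priority are illustrative; DEFAULT_STACK_SIZE and PRIORITY_BACKGROUND are
+ * assumed to be provided by the usual firmware headers.
+ *
+ *   static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];
+ *
+ *   static void demo_thread(void)
+ *   {
+ *       while (1)
+ *           sleep(HZ);  // periodic work goes here
+ *   }
+ *
+ *   // e.g. from an init routine:
+ *   unsigned int id = create_thread(demo_thread, demo_stack,
+ *                                   sizeof(demo_stack), 0, "demo"
+ *                                   IF_PRIO(, PRIORITY_BACKGROUND)
+ *                                   IF_COP(, CPU));
+ */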
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+/*---------------------------------------------------------------------------
+ * Change the boost state of a thread boosting or unboosting the CPU
+ * as required.
+ *---------------------------------------------------------------------------
+ */
+static inline void boost_thread(struct thread_entry *thread, bool boost)
+{
+ if ((thread->cpu_boost != 0) != boost)
+ {
+ thread->cpu_boost = boost;
+ cpu_boost(boost);
+ }
+}
+
+void trigger_cpu_boost(void)
+{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+ boost_thread(current, true);
+}
+
+void cancel_cpu_boost(void)
+{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+ boost_thread(current, false);
+}
+#endif /* HAVE_SCHEDULER_BOOSTCTRL */
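+
+/* Typical pairing (sketch): boost for a burst of work, then release. The
+ * work function name is hypothetical.
+ *
+ *   trigger_cpu_boost();
+ *   decode_audio_chunk();
+ *   cancel_cpu_boost();
+ *
+ * The boost is tracked per thread, so any boost still held is dropped when
+ * the thread exits (see thread_exit() below). */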
+
+/*---------------------------------------------------------------------------
+ * Block the current thread until another thread terminates. A thread may
+ * wait on itself to terminate, but it will then never run again and must
+ * be killed externally.
+ * Parameter is the ID as returned from create_thread().
+ *---------------------------------------------------------------------------
+ */
+void thread_wait(unsigned int thread_id)
+{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+ struct thread_entry *thread = thread_id_entry(thread_id);
+
+ /* Lock thread-as-waitable-object lock */
+ corelock_lock(&thread->waiter_cl);
+
+ /* Be sure it hasn't been killed yet */
+ if (thread->id == thread_id && thread->state != STATE_KILLED)
+ {
+ IF_COP( current->obj_cl = &thread->waiter_cl; )
+ current->bqp = &thread->queue;
+
+ disable_irq();
+ block_thread(current);
+
+ corelock_unlock(&thread->waiter_cl);
+
+ switch_thread();
+ return;
+ }
+
+ corelock_unlock(&thread->waiter_cl);
+}
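+
+/* Sketch: join a worker created earlier (identifiers hypothetical):
+ *
+ *   unsigned int id = create_thread(worker_main, worker_stack,
+ *                                   sizeof(worker_stack), 0, "worker"
+ *                                   IF_PRIO(, PRIORITY_BACKGROUND)
+ *                                   IF_COP(, CPU));
+ *   ...
+ *   thread_wait(id);  // returns once the worker has called thread_exit()
+ */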
+
+/*---------------------------------------------------------------------------
+ * Exit the current thread. The Right Way to Do Things (TM).
+ *---------------------------------------------------------------------------
+ */
+/* This is done to foil optimizations that may require the current stack,
+ * such as optimizing subexpressions that put variables on the stack that
+ * get used after switching stacks. */
+#if NUM_CORES > 1
+/* Called by ASM stub */
+static void thread_final_exit_do(struct thread_entry *current)
+#else
+/* No special procedure is required before calling */
+static inline void thread_final_exit(struct thread_entry *current)
+#endif
+{
+ /* At this point, this thread isn't using resources allocated for
+ * execution except the slot itself. */
+
+ /* Signal this thread */
+ thread_queue_wake(&current->queue);
+ corelock_unlock(&current->waiter_cl);
+ switch_thread();
+ /* This should never and must never be reached - if it is, the
+ * state is corrupted */
+ THREAD_PANICF("thread_exit->K:*R", current);
+ while (1);
+}
+
+void thread_exit(void)
+{
+ register struct thread_entry * current = cores[CURRENT_CORE].running;
+
+ /* Cancel CPU boost if any */
+ cancel_cpu_boost();
+
+ disable_irq();
+
+ corelock_lock(&current->waiter_cl);
+ LOCK_THREAD(current);
+
+#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
+ if (current->name == THREAD_DESTRUCT)
+ {
+ /* Thread being killed - become a waiter */
+ unsigned int id = current->id;
+ UNLOCK_THREAD(current);
+ corelock_unlock(&current->waiter_cl);
+ thread_wait(id);
+ THREAD_PANICF("thread_exit->WK:*R", current);
+ }
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ check_for_obj_waiters("thread_exit", current);
+#endif
+
+ if (current->tmo.prev != NULL)
+ {
+ /* Cancel pending timeout list removal */
+ remove_from_list_tmo(current);
+ }
+
+ /* Switch tasks and never return */
+ block_thread_on_l(current, STATE_KILLED);
+
+ /* Slot must be unusable until thread is really gone */
+ UNLOCK_THREAD_AT_TASK_SWITCH(current);
+
+ /* Update ID for this slot */
+ new_thread_id(current->id, current);
+ current->name = NULL;
+
+ /* Do final cleanup and remove the thread */
+ thread_final_exit(current);
+}
+
+#ifdef ALLOW_REMOVE_THREAD
+/*---------------------------------------------------------------------------
+ * Remove a thread from the scheduler. Not The Right Way to Do Things in
+ * normal programs.
+ *
+ * Parameter is the ID as returned from create_thread().
+ *
+ * Use with care on threads that are not under careful control as this may
+ * leave various objects in an undefined state.
+ *---------------------------------------------------------------------------
+ */
+void remove_thread(unsigned int thread_id)
+{
+#ifdef HAVE_CORELOCK_OBJECT
+ /* core is not constant here because of core switching */
+ unsigned int core = CURRENT_CORE;
+ unsigned int old_core = NUM_CORES;
+ struct corelock *ocl = NULL;
+#else
+ const unsigned int core = CURRENT_CORE;
+#endif
+ struct thread_entry *current = cores[core].running;
+ struct thread_entry *thread = thread_id_entry(thread_id);
+
+ unsigned state;
+ int oldlevel;
+
+ if (thread == current)
+ thread_exit(); /* Current thread - do normal exit */
+
+ oldlevel = disable_irq_save();
+
+ corelock_lock(&thread->waiter_cl);
+ LOCK_THREAD(thread);
+
+ state = thread->state;
+
+ if (thread->id != thread_id || state == STATE_KILLED)
+ goto thread_killed;
+
+#if NUM_CORES > 1
+ if (thread->name == THREAD_DESTRUCT)
+ {
+ /* Thread being killed - become a waiter */
+ UNLOCK_THREAD(thread);
+ corelock_unlock(&thread->waiter_cl);
+ restore_irq(oldlevel);
+ thread_wait(thread_id);
+ return;
+ }
+
+ thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ check_for_obj_waiters("remove_thread", thread);
+#endif
+
+ if (thread->core != core)
+ {
+ /* Switch cores and safely extract the thread there */
+ /* Slot HAS to be unlocked or a deadlock could occur which means other
+ * threads have to be guided into becoming thread waiters if they
+ * attempt to remove it. */
+ unsigned int new_core = thread->core;
+
+ corelock_unlock(&thread->waiter_cl);
+
+ UNLOCK_THREAD(thread);
+ restore_irq(oldlevel);
+
+ old_core = switch_core(new_core);
+
+ oldlevel = disable_irq_save();
+
+ corelock_lock(&thread->waiter_cl);
+ LOCK_THREAD(thread);
+
+ state = thread->state;
+ core = new_core;
+ /* Perform the extraction and switch ourselves back to the original
+ processor */
+ }
+#endif /* NUM_CORES > 1 */
+
+ if (thread->tmo.prev != NULL)
+ {
+ /* Clean thread off the timeout list if a timeout check hasn't
+ * run yet */
+ remove_from_list_tmo(thread);
+ }
+
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ /* Cancel CPU boost if any */
+ boost_thread(thread, false);
+#endif
+
+IF_COP( retry_state: )
+
+ switch (state)
+ {
+ case STATE_RUNNING:
+ RTR_LOCK(core);
+ /* Remove thread from ready to run tasks */
+ remove_from_list_l(&cores[core].running, thread);
+ rtr_subtract_entry(core, thread->priority);
+ RTR_UNLOCK(core);
+ break;
+ case STATE_BLOCKED:
+ case STATE_BLOCKED_W_TMO:
+ /* Remove thread from the queue it's blocked on - including its
+ * own if waiting there */
+#if NUM_CORES > 1
+ if (&thread->waiter_cl != thread->obj_cl)
+ {
+ ocl = thread->obj_cl;
+
+ if (UNLIKELY(corelock_try_lock(ocl) == 0))
+ {
+ UNLOCK_THREAD(thread);
+ corelock_lock(ocl);
+ LOCK_THREAD(thread);
+
+ if (UNLIKELY(thread->state != state))
+ {
+ /* Something woke the thread */
+ state = thread->state;
+ corelock_unlock(ocl);
+ goto retry_state;
+ }
+ }
+ }
+#endif
+ remove_from_list_l(thread->bqp, thread);
+
+#ifdef HAVE_WAKEUP_EXT_CB
+ if (thread->wakeup_ext_cb != NULL)
+ thread->wakeup_ext_cb(thread);
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (thread->blocker != NULL)
+ {
+ /* Remove thread's priority influence from its chain */
+ wakeup_priority_protocol_release(thread);
+ }
+#endif
+
+#if NUM_CORES > 1
+ if (ocl != NULL)
+ corelock_unlock(ocl);
+#endif
+ break;
+ /* Otherwise thread is frozen and hasn't run yet */
+ }
+
+ new_thread_id(thread_id, thread);
+ thread->state = STATE_KILLED;
+
+ /* If thread was waiting on itself, it will have been removed above.
+ * The wrong order would result in waking the thread first and deadlocking
+ * since the slot is already locked. */
+ thread_queue_wake(&thread->queue);
+
+ thread->name = NULL;
+
+thread_killed: /* Thread was already killed */
+ /* Removal complete - safe to unlock and reenable interrupts */
+ corelock_unlock(&thread->waiter_cl);
+ UNLOCK_THREAD(thread);
+ restore_irq(oldlevel);
+
+#if NUM_CORES > 1
+ if (old_core < NUM_CORES)
+ {
+ /* Did a removal on another processor's thread - switch back to
+ native core */
+ switch_core(old_core);
+ }
+#endif
+}
+#endif /* ALLOW_REMOVE_THREAD */
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+/*---------------------------------------------------------------------------
+ * Sets the thread's relative base priority for the core it runs on. Any
+ * required priority-inheritance changes are also propagated.
+ *---------------------------------------------------------------------------
+ */
+int thread_set_priority(unsigned int thread_id, int priority)
+{
+ int old_base_priority = -1;
+ struct thread_entry *thread = thread_id_entry(thread_id);
+
+ /* A little safety measure */
+ if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
+ return -1;
+
+ /* Thread could be on any list and therefore on an interrupt accessible
+ one - disable interrupts */
+ int oldlevel = disable_irq_save();
+
+ LOCK_THREAD(thread);
+
+ /* Make sure it's not killed */
+ if (thread->id == thread_id && thread->state != STATE_KILLED)
+ {
+ int old_priority = thread->priority;
+
+ old_base_priority = thread->base_priority;
+ thread->base_priority = priority;
+
+ prio_move_entry(&thread->pdist, old_base_priority, priority);
+ priority = find_first_set_bit(thread->pdist.mask);
+
+ if (old_priority == priority)
+ {
+ /* No priority change - do nothing */
+ }
+ else if (thread->state == STATE_RUNNING)
+ {
+ /* This thread is running - change location on the run
+ * queue. No transitive inheritance needed. */
+ set_running_thread_priority(thread, priority);
+ }
+ else
+ {
+ thread->priority = priority;
+
+ if (thread->blocker != NULL)
+ {
+ /* Bubble new priority down the chain */
+ struct blocker *bl = thread->blocker; /* Blocker struct */
+ struct thread_entry *bl_t = bl->thread; /* Blocking thread */
+ struct thread_entry * const tstart = thread; /* Initial thread */
+ const int highest = MIN(priority, old_priority); /* Higher of new or old */
+
+ for (;;)
+ {
+ struct thread_entry *next; /* Next thread to check */
+ int bl_pr; /* Highest blocked thread */
+ int queue_pr; /* New highest blocked thread */
+#if NUM_CORES > 1
+ /* Owner can change but thread cannot be dislodged - thread
+ * may not be the first in the queue which allows other
+ * threads ahead in the list to be given ownership during the
+ * operation. If thread is next then the waker will have to
+ * wait for us and the owner of the object will remain fixed.
+ * If we successfully grab the owner -- which at some point
+ * is guaranteed -- then the queue remains fixed until we
+ * pass by. */
+ for (;;)
+ {
+ LOCK_THREAD(bl_t);
+
+ /* Double-check the owner - retry if it changed */
+ if (LIKELY(bl->thread == bl_t))
+ break;
+
+ UNLOCK_THREAD(bl_t);
+ bl_t = bl->thread;
+ }
+#endif
+ bl_pr = bl->priority;
+
+ if (highest > bl_pr)
+ break; /* Object priority won't change */
+
+ /* This will include the thread being set */
+ queue_pr = find_highest_priority_in_list_l(*thread->bqp);
+
+ if (queue_pr == bl_pr)
+ break; /* Object priority not changing */
+
+ /* Update thread boost for this object */
+ bl->priority = queue_pr;
+ prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
+ bl_pr = find_first_set_bit(bl_t->pdist.mask);
+
+ if (bl_t->priority == bl_pr)
+ break; /* Blocking thread priority not changing */
+
+ if (bl_t->state == STATE_RUNNING)
+ {
+ /* Thread not blocked - we're done */
+ set_running_thread_priority(bl_t, bl_pr);
+ break;
+ }
+
+ bl_t->priority = bl_pr;
+ bl = bl_t->blocker; /* Blocking thread has a blocker? */
+
+ if (bl == NULL)
+ break; /* End of chain */
+
+ next = bl->thread;
+
+ if (UNLIKELY(next == tstart))
+ break; /* Full-circle */
+
+ UNLOCK_THREAD(thread);
+
+ thread = bl_t;
+ bl_t = next;
+ } /* for (;;) */
+
+ UNLOCK_THREAD(bl_t);
+ }
+ }
+ }
+
+ UNLOCK_THREAD(thread);
+
+ restore_irq(oldlevel);
+
+ return old_base_priority;
+}
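+
+/* Sketch: temporarily raise a thread's base priority and restore it from
+ * the returned value.
+ *
+ *   int old = thread_set_priority(id, PRIORITY_REALTIME);
+ *   ...                           // time-critical section
+ *   if (old >= 0)
+ *       thread_set_priority(id, old);
+ */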
+
+/*---------------------------------------------------------------------------
+ * Returns the current base priority for a thread.
+ *---------------------------------------------------------------------------
+ */
+int thread_get_priority(unsigned int thread_id)
+{
+ struct thread_entry *thread = thread_id_entry(thread_id);
+ int base_priority = thread->base_priority;
+
+ /* Simply check without locking slot. It may or may not be valid by the
+ * time the function returns anyway. If all tests pass, it is the
+ * correct value for when it was valid. */
+ if (thread->id != thread_id || thread->state == STATE_KILLED)
+ base_priority = -1;
+
+ return base_priority;
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
+#ifdef HAVE_IO_PRIORITY
+int thread_get_io_priority(unsigned int thread_id)
+{
+ struct thread_entry *thread = thread_id_entry(thread_id);
+ return thread->io_priority;
+}
+
+void thread_set_io_priority(unsigned int thread_id,int io_priority)
+{
+ struct thread_entry *thread = thread_id_entry(thread_id);
+ thread->io_priority = io_priority;
+}
+#endif
+
+/*---------------------------------------------------------------------------
+ * Starts a frozen thread - similar semantics to wakeup_thread except that
+ * the thread is on no scheduler or wakeup queue at all. It exists simply by
+ * virtue of the slot having a state of STATE_FROZEN.
+ *---------------------------------------------------------------------------
+ */
+void thread_thaw(unsigned int thread_id)
+{
+ struct thread_entry *thread = thread_id_entry(thread_id);
+ int oldlevel = disable_irq_save();
+
+ LOCK_THREAD(thread);
+
+ /* If thread is the current one, it cannot be frozen, therefore
+ * there is no need to check that. */
+ if (thread->id == thread_id && thread->state == STATE_FROZEN)
+ core_schedule_wakeup(thread);
+
+ UNLOCK_THREAD(thread);
+ restore_irq(oldlevel);
+}
+
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Switch the processor that the currently executing thread runs on.
+ *---------------------------------------------------------------------------
+ */
+unsigned int switch_core(unsigned int new_core)
+{
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *current = cores[core].running;
+
+ if (core == new_core)
+ {
+ /* No change - just return same core */
+ return core;
+ }
+
+ int oldlevel = disable_irq_save();
+ LOCK_THREAD(current);
+
+ if (current->name == THREAD_DESTRUCT)
+ {
+ /* Thread being killed - deactivate and let process complete */
+ unsigned int id = current->id;
+ UNLOCK_THREAD(current);
+ restore_irq(oldlevel);
+ thread_wait(id);
+ /* Should never be reached */
+ THREAD_PANICF("switch_core->D:*R", current);
+ }
+
+ /* Get us off the running list for the current core */
+ RTR_LOCK(core);
+ remove_from_list_l(&cores[core].running, current);
+ rtr_subtract_entry(core, current->priority);
+ RTR_UNLOCK(core);
+
+ /* Stash return value (old core) in a safe place */
+ current->retval = core;
+
+ /* If a timeout hadn't yet been cleaned-up it must be removed now or
+ * the other core will likely attempt a removal from the wrong list! */
+ if (current->tmo.prev != NULL)
+ {
+ remove_from_list_tmo(current);
+ }
+
+ /* Change the core number for this thread slot */
+ current->core = new_core;
+
+ /* Do not use core_schedule_wakeup here since this will result in
+ * the thread starting to run on the other core before being finished on
+ * this one. Delay the list unlock to keep the other core stuck
+ * until this thread is ready. */
+ RTR_LOCK(new_core);
+
+ rtr_add_entry(new_core, current->priority);
+ add_to_list_l(&cores[new_core].running, current);
+
+ /* Make a callback into device-specific code, unlock the wakeup list so
+ * that execution may resume on the new core, unlock our slot and finally
+ * restore the interrupt level */
+ cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
+ cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
+ cores[core].block_task = current;
+
+ UNLOCK_THREAD(current);
+
+ /* Alert other core to activity */
+ core_wake(new_core);
+
+ /* Do the stack switching, cache maintenance and switch_thread call -
+ requires native code */
+ switch_thread_core(core, current);
+
+ /* Finally return the old core to caller */
+ return current->retval;
+}
+#endif /* NUM_CORES > 1 */
+
+/*---------------------------------------------------------------------------
+ * Initialize threading API. This assumes interrupts are not yet enabled. On
+ * multicore setups, no core is allowed to proceed until create_thread calls
+ * are safe to perform.
+ *---------------------------------------------------------------------------
+ */
+void init_threads(void)
+{
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *thread;
+
+ if (core == CPU)
+ {
+ /* Initialize core locks and IDs in all slots */
+ int n;
+ for (n = 0; n < MAXTHREADS; n++)
+ {
+ thread = &threads[n];
+ corelock_init(&thread->waiter_cl);
+ corelock_init(&thread->slot_cl);
+ thread->id = THREAD_ID_INIT(n);
+ }
+ }
+
+ /* CPU will initialize first and then sleep */
+ thread = find_empty_thread_slot();
+
+ if (thread == NULL)
+ {
+ /* WTF? There really must be a slot available at this stage.
+ * This can fail if, for example, .bss isn't zeroed out by the loader
+ * or the threads array is in the wrong section. */
+ THREAD_PANICF("init_threads->no slot", NULL);
+ }
+
+ /* Initialize initially non-zero members of core */
+ cores[core].next_tmo_check = current_tick; /* Something not in the past */
+
+ /* Initialize initially non-zero members of slot */
+ UNLOCK_THREAD(thread); /* No sync worries yet */
+ thread->name = main_thread_name;
+ thread->state = STATE_RUNNING;
+ IF_COP( thread->core = core; )
+#ifdef HAVE_PRIORITY_SCHEDULING
+ corelock_init(&cores[core].rtr_cl);
+ thread->base_priority = PRIORITY_USER_INTERFACE;
+ prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
+ thread->priority = PRIORITY_USER_INTERFACE;
+ rtr_add_entry(core, PRIORITY_USER_INTERFACE);
+#endif
+
+ add_to_list_l(&cores[core].running, thread);
+
+ if (core == CPU)
+ {
+ thread->stack = stackbegin;
+ thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
+#if NUM_CORES > 1 /* This code path will not be run on single core targets */
+ /* Wait for other processors to finish their inits since create_thread
+ * isn't safe to call until the kernel inits are done. The first
+ * threads created in the system must of course be created by CPU.
+ * Another possible approach is to initialize all cores and slots
+ * for each core by CPU, let the remainder proceed in parallel and
+ * signal CPU when all are finished. */
+ core_thread_init(CPU);
+ }
+ else
+ {
+ /* Initial stack is the idle stack */
+ thread->stack = idle_stacks[core];
+ thread->stack_size = IDLE_STACK_SIZE;
+ /* After last processor completes, it should signal all others to
+ * proceed or may signal the next and call thread_exit(). The last one
+ * to finish will signal CPU. */
+ core_thread_init(core);
+ /* Other cores do not have a main thread - go idle inside switch_thread
+ * until a thread can run on the core. */
+ thread_exit();
+#endif /* NUM_CORES */
+ }
+#ifdef INIT_MAIN_THREAD
+ init_main_thread(&thread->context);
+#endif
+}
+
+/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
+#if NUM_CORES == 1
+static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
+#else
+static int stack_usage(uintptr_t *stackptr, size_t stack_size)
+#endif
+{
+ unsigned int stack_words = stack_size / sizeof (uintptr_t);
+ unsigned int i;
+ int usage = 0;
+
+ for (i = 0; i < stack_words; i++)
+ {
+ if (stackptr[i] != DEADBEEF)
+ {
+ usage = ((stack_words - i) * 100) / stack_words;
+ break;
+ }
+ }
+
+ return usage;
+}
+
+/*---------------------------------------------------------------------------
+ * Returns the maximum percentage of stack a thread ever used while running.
+ * NOTE: Some large buffer allocations that don't use enough of the buffer to
+ * overwrite stackptr[0] will not be seen.
+ *---------------------------------------------------------------------------
+ */
+int thread_stack_usage(const struct thread_entry *thread)
+{
+ if (LIKELY(thread->stack_size > 0))
+ return stack_usage(thread->stack, thread->stack_size);
+ return 0;
+}
+
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Returns the maximum percentage of the core's idle stack ever used during
+ * runtime.
+ *---------------------------------------------------------------------------
+ */
+int idle_stack_usage(unsigned int core)
+{
+ return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
+}
+#endif
+
+/*---------------------------------------------------------------------------
+ * Fills in the buffer with the specified thread's name. If the name is NULL,
+ * empty, or the thread is in destruct state, a formatted ID is written
+ * instead.
+ *---------------------------------------------------------------------------
+ */
+void thread_get_name(char *buffer, int size,
+ struct thread_entry *thread)
+{
+ if (size <= 0)
+ return;
+
+ *buffer = '\0';
+
+ if (thread)
+ {
+ /* Display thread name if one or ID if none */
+ const char *name = thread->name;
+ const char *fmt = "%s";
+ if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
+ {
+ name = (const char *)(uintptr_t)thread->id;
+ fmt = "%04lX";
+ }
+ snprintf(buffer, size, fmt, name);
+ }
+}
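+
+/* Sketch: a small buffer comfortably holds either a name or the "%04lX"
+ * ID form.
+ *
+ *   char name[32];
+ *   thread_get_name(name, sizeof(name), thread_self_entry());
+ */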
+
+/* Unless otherwise defined, do nothing */
+#ifndef YIELD_KERNEL_HOOK
+#define YIELD_KERNEL_HOOK() false
+#endif
+#ifndef SLEEP_KERNEL_HOOK
+#define SLEEP_KERNEL_HOOK(ticks) false
+#endif
+
+/*---------------------------------------------------------------------------
+ * Suspends a thread's execution for at least the specified number of ticks.
+ *
+ * May result in CPU core entering wait-for-interrupt mode if no other thread
+ * may be scheduled.
+ *
+ * NOTE: sleep(0) sleeps until the end of the current tick
+ * sleep(n) that doesn't result in rescheduling:
+ * n <= ticks suspended < n + 1
+ * n to n+1 is a lower bound. Other factors may affect the actual time
+ * a thread is suspended before it runs again.
+ *---------------------------------------------------------------------------
+ */
+unsigned sleep(unsigned ticks)
+{
+ /* In certain situations, certain bootloaders in particular, a normal
+ * threading call is inappropriate. */
+ if (SLEEP_KERNEL_HOOK(ticks))
+ return 0; /* Handled */
+
+ disable_irq();
+ sleep_thread(ticks);
+ switch_thread();
+ return 0;
+}
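+
+/* Sketch: delays are expressed in ticks of the system timer; with the usual
+ * HZ of 100 a tick is 10 ms.
+ *
+ *   sleep(HZ / 10);  // suspend for at least ~100 ms
+ *   sleep(0);        // give up the remainder of the current tick
+ */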
+
+/*---------------------------------------------------------------------------
+ * Elects another thread to run or, if no other thread may be made ready to
+ * run, immediately returns control back to the calling thread.
+ *---------------------------------------------------------------------------
+ */
+void yield(void)
+{
+ /* In certain situations, certain bootloaders in particular, a normal
+ * threading call is inappropriate. */
+ if (YIELD_KERNEL_HOOK())
+ return; /* handled */
+
+ switch_thread();
+}
diff --git a/firmware/kernel/tick.c b/firmware/kernel/tick.c
new file mode 100644
index 0000000000..c524560687
--- /dev/null
+++ b/firmware/kernel/tick.c
@@ -0,0 +1,74 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Björn Stenberg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include "config.h"
+#include "tick.h"
+#include "general.h"
+#include "panic.h"
+
+/****************************************************************************
+ * Timer tick
+ *****************************************************************************/
+
+
+/* List of tick tasks - final element always NULL for termination */
+void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
+
+#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
+ defined(HAVE_BOOTLOADER_USB_MODE)
+volatile long current_tick SHAREDDATA_ATTR = 0;
+#endif
+
+/* - Timer initialization and interrupt handler is defined at
+ * the target level: tick_start() is implemented in the target tree */
+
+int tick_add_task(void (*f)(void))
+{
+ int oldlevel = disable_irq_save();
+ void **arr = (void **)tick_funcs;
+ void **p = find_array_ptr(arr, f);
+
+ /* Add a task if there is room */
+ if(p - arr < MAX_NUM_TICK_TASKS)
+ {
+ *p = f; /* If already in list, no problem. */
+ }
+ else
+ {
+ panicf("Error! tick_add_task(): out of tasks");
+ }
+
+ restore_irq(oldlevel);
+ return 0;
+}
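+
+/* Sketch: a tick task runs from the timer tick (interrupt context) once per
+ * system tick, so it must be short and must never block.
+ *
+ *   static void counter_tick(void)
+ *   {
+ *       static volatile long ticks_seen;
+ *       ticks_seen++;
+ *   }
+ *
+ *   tick_add_task(counter_tick);
+ *   ...
+ *   tick_remove_task(counter_tick);
+ */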
+
+int tick_remove_task(void (*f)(void))
+{
+ int oldlevel = disable_irq_save();
+ int rc = remove_array_ptr((void **)tick_funcs, f);
+ restore_irq(oldlevel);
+ return rc;
+}
+
+void init_tick(void)
+{
+ tick_start(1000/HZ);
+}
diff --git a/firmware/kernel/timeout.c b/firmware/kernel/timeout.c
new file mode 100644
index 0000000000..8039e56ffb
--- /dev/null
+++ b/firmware/kernel/timeout.c
@@ -0,0 +1,97 @@
+
+/****************************************************************************
+ * Tick-based interval timers/one-shots - be mindful this is not really
+ * intended for continuous timers but for events that need to run for a short
+ * time and be cancelled without further software intervention.
+ ****************************************************************************/
+
+#include "config.h"
+#include "system.h" /* TIME_AFTER */
+#include "kernel.h"
+#include "timeout.h"
+#include "general.h"
+
+/* list of active timeout events */
+static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
+
+/* timeout tick task - calls event handlers when they expire
+ * Event handlers may alter expiration, callback and data during operation.
+ */
+static void timeout_tick(void)
+{
+ unsigned long tick = current_tick;
+ struct timeout **p = tmo_list;
+ struct timeout *curr;
+
+ for(curr = *p; curr != NULL; curr = *(++p))
+ {
+ int ticks;
+
+ if(TIME_BEFORE(tick, curr->expires))
+ continue;
+
+ /* this event has expired - call callback */
+ ticks = curr->callback(curr);
+ if(ticks > 0)
+ {
+ curr->expires = tick + ticks; /* reload */
+ }
+ else
+ {
+ timeout_cancel(curr); /* cancel */
+ }
+ }
+}
+
+/* Cancels a timeout callback - can be called from the ISR */
+void timeout_cancel(struct timeout *tmo)
+{
+ int oldlevel = disable_irq_save();
+ int rc = remove_array_ptr((void **)tmo_list, tmo);
+
+ if(rc >= 0 && *tmo_list == NULL)
+ {
+ tick_remove_task(timeout_tick); /* Last one - remove task */
+ }
+
+ restore_irq(oldlevel);
+}
+
+/* Adds a timeout callback - calling with an active timeout resets the
+ interval - can be called from the ISR */
+void timeout_register(struct timeout *tmo, timeout_cb_type callback,
+ int ticks, intptr_t data)
+{
+ int oldlevel;
+ void **arr, **p;
+
+ if(tmo == NULL)
+ return;
+
+ oldlevel = disable_irq_save();
+
+ /* See if this one is already registered */
+ arr = (void **)tmo_list;
+ p = find_array_ptr(arr, tmo);
+
+ if(p - arr < MAX_NUM_TIMEOUTS)
+ {
+ /* Vacancy */
+ if(*p == NULL)
+ {
+ /* Not present */
+ if(*tmo_list == NULL)
+ {
+ tick_add_task(timeout_tick); /* First one - add task */
+ }
+
+ *p = tmo;
+ }
+
+ tmo->callback = callback;
+ tmo->data = data;
+ tmo->expires = current_tick + ticks;
+ }
+
+ restore_irq(oldlevel);
+}
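+
+/* Sketch: a callback that re-fires a fixed number of times, using the data
+ * field as a countdown. Returning a positive tick count re-arms the timeout;
+ * returning 0 cancels it. Callbacks run from the tick, so keep them short.
+ *
+ *   static int blink_cb(struct timeout *tmo)
+ *   {
+ *       return (--tmo->data > 0) ? HZ/2 : 0;
+ *   }
+ *
+ *   static struct timeout blink_tmo;
+ *   timeout_register(&blink_tmo, blink_cb, HZ/2, 2);  // fires twice
+ */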