author    Michael Sevakis <jethead71@rockbox.org>  2007-10-16 01:25:17 +0000
committer Michael Sevakis <jethead71@rockbox.org>  2007-10-16 01:25:17 +0000
commit    a9b2fb5ee3114fe835f6515b6aeae7454f66d821 (patch)
tree      fc4e96d0c1f215565918406c8827b16b806c1345 /firmware
parent    a3fbbc9fa7e12fd3fce122bbd235dc362050e024 (diff)
download  rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.tar.gz
          rockbox-a9b2fb5ee3114fe835f6515b6aeae7454f66d821.zip
Finally full multicore support for PortalPlayer 502x targets, with an eye towards the possibility of other types. All SVN targets get the low-lag code to speed up blocking operations. Most files are modified here simply due to a name change to actually support a real event object and a parameter change to create_thread. Add some use of the new features but just sit on things for a bit and leave full integration for later. Work will continue on addressing size on sensitive targets and simplifying things where possible. Any PP target having problems with SWP can easily be changed to software corelocks with one #define change in config.h; only PP5020 has shown an issue so far, and with software corelocks it seems to work without any difficulties.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15134 a1c6a512-1295-4272-9138-f99709370657
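
For reference, the one-line corelock switch the message refers to lives in firmware/export/config.h (see the hunk further down). A sketch of what selecting software corelocks for a problematic PP target looks like:

    /* firmware/export/config.h -- per-target corelock selection (sketch,
     * mirroring the config.h hunk below). SW_CORELOCK uses Peterson's
     * algorithm; CORELOCK_SWAP uses the native swap/exchange instruction. */
    #if CONFIG_CPU == PP5020
    #define CONFIG_CORELOCK SW_CORELOCK   /* SWP(B) is broken */
    #else
    #define CONFIG_CORELOCK CORELOCK_SWAP
    #endif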
Diffstat (limited to 'firmware')
-rw-r--r--  firmware/backlight.c                            |    8
-rw-r--r--  firmware/common/dircache.c                      |   11
-rw-r--r--  firmware/drivers/ata.c                          |   62
-rw-r--r--  firmware/drivers/ata_mmc.c                      |    7
-rw-r--r--  firmware/drivers/button.c                       |    6
-rw-r--r--  firmware/drivers/fat.c                          |    2
-rw-r--r--  firmware/export/config.h                        |   53
-rw-r--r--  firmware/export/i2c-pp.h                        |    4
-rw-r--r--  firmware/export/kernel.h                        |  103
-rw-r--r--  firmware/export/pp5002.h                        |    2
-rw-r--r--  firmware/export/pp5020.h                        |   15
-rw-r--r--  firmware/export/system.h                        |    4
-rw-r--r--  firmware/export/thread.h                        |  552
-rw-r--r--  firmware/kernel.c                               |  731
-rw-r--r--  firmware/mpeg.c                                 |    7
-rw-r--r--  firmware/pcm_record.c                           |   10
-rw-r--r--  firmware/powermgmt.c                            |    4
-rw-r--r--  firmware/rolo.c                                 |    4
-rw-r--r--  firmware/scroll_engine.c                        |    8
-rw-r--r--  firmware/system.c                               |   74
-rw-r--r--  firmware/target/arm/i2c-pp.c                    |   16
-rw-r--r--  firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c   |    8
-rw-r--r--  firmware/target/arm/sandisk/adc-c200_e200.c     |    7
-rw-r--r--  firmware/target/arm/sandisk/ata-c200_e200.c     |   27
-rw-r--r--  firmware/target/arm/system-pp502x.c             |   55
-rw-r--r--  firmware/target/arm/system-target.h             |    5
-rw-r--r--  firmware/test/i2c/main.c                        |    4
-rw-r--r--  firmware/test/kernel/main.c                     |    6
-rw-r--r--  firmware/thread.c                               | 2323
-rw-r--r--  firmware/usb.c                                  |   12
30 files changed, 3295 insertions, 835 deletions
diff --git a/firmware/backlight.c b/firmware/backlight.c
index 7cbdeb45e8..cfe87b387e 100644
--- a/firmware/backlight.c
+++ b/firmware/backlight.c
@@ -95,7 +95,7 @@ const signed char backlight_timeout_value[19] =
static void backlight_thread(void);
static long backlight_stack[DEFAULT_STACK_SIZE/sizeof(long)];
static const char backlight_thread_name[] = "backlight";
-static struct event_queue backlight_queue;
+static struct event_queue backlight_queue NOCACHEBSS_ATTR;
static int backlight_timer;
static int backlight_timeout;
@@ -465,7 +465,7 @@ static void remote_backlight_update_state(void)
void backlight_thread(void)
{
- struct event ev;
+ struct queue_event ev;
bool locked = false;
while(1)
@@ -627,9 +627,9 @@ void backlight_init(void)
* status if necessary. */
create_thread(backlight_thread, backlight_stack,
- sizeof(backlight_stack), backlight_thread_name
+ sizeof(backlight_stack), 0, backlight_thread_name
IF_PRIO(, PRIORITY_SYSTEM)
- IF_COP(, CPU, false));
+ IF_COP(, CPU));
tick_add_task(backlight_tick);
}
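
The hunk above shows the new create_thread() parameter order: a flags word now sits between the stack size and the thread name, and the per-core "fallback" boolean is gone. A minimal sketch of what the flags enable, based on the thread.h declarations later in this diff (the two-step startup is illustrative, not what backlight.c does):

    /* Sketch only: create a thread suspended, then release it later.
     * CREATE_THREAD_FROZEN and thread_thaw() come from the new thread.h. */
    struct thread_entry *t =
        create_thread(backlight_thread, backlight_stack,
                      sizeof(backlight_stack),
                      CREATE_THREAD_FROZEN,      /* do not run yet */
                      backlight_thread_name
                      IF_PRIO(, PRIORITY_SYSTEM)
                      IF_COP(, CPU));
    /* ... finish initialization ... */
    thread_thaw(t);                              /* let it run */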
diff --git a/firmware/common/dircache.c b/firmware/common/dircache.c
index c39dd7f7b5..b92d8fe974 100644
--- a/firmware/common/dircache.c
+++ b/firmware/common/dircache.c
@@ -62,7 +62,7 @@ static unsigned long reserve_used = 0;
static unsigned int cache_build_ticks = 0;
static char dircache_cur_path[MAX_PATH*2];
-static struct event_queue dircache_queue;
+static struct event_queue dircache_queue NOCACHEBSS_ATTR;
static long dircache_stack[(DEFAULT_STACK_SIZE + 0x900)/sizeof(long)];
static const char dircache_thread_name[] = "dircache";
@@ -147,7 +147,7 @@ static struct travel_data dir_recursion[MAX_SCAN_DEPTH];
*/
static bool check_event_queue(void)
{
- struct event ev;
+ struct queue_event ev;
queue_wait_w_tmo(&dircache_queue, &ev, 0);
switch (ev.id)
@@ -598,7 +598,7 @@ static int dircache_do_rebuild(void)
*/
static void dircache_thread(void)
{
- struct event ev;
+ struct queue_event ev;
while (1)
{
@@ -701,8 +701,9 @@ void dircache_init(void)
queue_init(&dircache_queue, true);
create_thread(dircache_thread, dircache_stack,
- sizeof(dircache_stack), dircache_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
- IF_COP(, CPU, false));
+ sizeof(dircache_stack), 0, dircache_thread_name
+ IF_PRIO(, PRIORITY_BACKGROUND)
+ IF_COP(, CPU));
}
/**
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c
index 2c8033a533..2119216234 100644
--- a/firmware/drivers/ata.c
+++ b/firmware/drivers/ata.c
@@ -66,7 +66,7 @@
#define ATA_POWER_OFF_TIMEOUT 2*HZ
#endif
-static struct mutex ata_mtx;
+static struct spinlock ata_spinlock NOCACHEBSS_ATTR;
int ata_device; /* device 0 (master) or 1 (slave) */
int ata_spinup_time = 0;
@@ -83,7 +83,7 @@ static bool lba48 = false; /* set for 48 bit addressing */
#endif
static long ata_stack[(DEFAULT_STACK_SIZE*3)/sizeof(long)];
static const char ata_thread_name[] = "ata";
-static struct event_queue ata_queue;
+static struct event_queue ata_queue NOCACHEBSS_ATTR;
static bool initialized = false;
static long last_user_activity = -1;
@@ -234,7 +234,7 @@ int ata_read_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
#endif
last_disk_activity = current_tick;
@@ -246,14 +246,14 @@ int ata_read_sectors(IF_MV2(int drive,)
spinup = true;
if (poweroff) {
if (ata_power_on()) {
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
}
else {
if (perform_soft_reset()) {
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
@@ -265,7 +265,7 @@ int ata_read_sectors(IF_MV2(int drive,)
SET_REG(ATA_SELECT, ata_device);
if (!wait_for_rdy())
{
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
ata_led(false);
return -2;
}
@@ -376,7 +376,7 @@ int ata_read_sectors(IF_MV2(int drive,)
ata_led(false);
#ifndef MAX_PHYS_SECTOR_SIZE
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
#endif
return ret;
@@ -442,7 +442,7 @@ int ata_write_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
#endif
last_disk_activity = current_tick;
@@ -454,14 +454,14 @@ int ata_write_sectors(IF_MV2(int drive,)
spinup = true;
if (poweroff) {
if (ata_power_on()) {
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
}
else {
if (perform_soft_reset()) {
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
@@ -471,7 +471,7 @@ int ata_write_sectors(IF_MV2(int drive,)
SET_REG(ATA_SELECT, ata_device);
if (!wait_for_rdy())
{
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
ata_led(false);
return -2;
}
@@ -534,7 +534,7 @@ int ata_write_sectors(IF_MV2(int drive,)
ata_led(false);
#ifndef MAX_PHYS_SECTOR_SIZE
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
#endif
return ret;
@@ -580,7 +580,7 @@ int ata_read_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
offset = start & (phys_sector_mult - 1);
@@ -630,7 +630,7 @@ int ata_read_sectors(IF_MV2(int drive,)
}
error:
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
return rc;
}
@@ -646,7 +646,7 @@ int ata_write_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
offset = start & (phys_sector_mult - 1);
@@ -707,7 +707,7 @@ int ata_write_sectors(IF_MV2(int drive,)
}
error:
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
return rc;
}
@@ -767,13 +767,13 @@ static int ata_perform_sleep(void)
{
int ret = 0;
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
SET_REG(ATA_SELECT, ata_device);
if(!wait_for_rdy()) {
DEBUGF("ata_perform_sleep() - not RDY\n");
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
return -1;
}
@@ -786,7 +786,7 @@ static int ata_perform_sleep(void)
}
sleeping = true;
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
return ret;
}
@@ -797,7 +797,7 @@ void ata_sleep(void)
void ata_sleepnow(void)
{
- if (!spinup && !sleeping && !ata_mtx.locked && initialized)
+ if (!spinup && !sleeping && !ata_spinlock.locked && initialized)
{
call_ata_idle_notifys(false);
ata_perform_sleep();
@@ -812,14 +812,14 @@ void ata_spin(void)
static void ata_thread(void)
{
static long last_sleep = 0;
- struct event ev;
+ struct queue_event ev;
static long last_seen_mtx_unlock = 0;
while (1) {
while ( queue_empty( &ata_queue ) ) {
if (!spinup && !sleeping)
{
- if (!ata_mtx.locked)
+ if (!ata_spinlock.locked)
{
if (!last_seen_mtx_unlock)
last_seen_mtx_unlock = current_tick;
@@ -844,9 +844,9 @@ static void ata_thread(void)
if ( !spinup && sleeping && !poweroff &&
TIME_AFTER( current_tick, last_sleep + ATA_POWER_OFF_TIMEOUT ))
{
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
ide_power_enable(false);
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
poweroff = true;
}
#endif
@@ -858,11 +858,11 @@ static void ata_thread(void)
#ifndef USB_NONE
case SYS_USB_CONNECTED:
if (poweroff) {
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
ata_led(true);
ata_power_on();
ata_led(false);
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
}
/* Tell the USB thread that we are safe */
@@ -936,11 +936,11 @@ int ata_soft_reset(void)
{
int ret;
- spinlock_lock(&ata_mtx);
+ spinlock_lock(&ata_spinlock);
ret = perform_soft_reset();
- spinlock_unlock(&ata_mtx);
+ spinlock_unlock(&ata_spinlock);
return ret;
}
@@ -1131,7 +1131,7 @@ int ata_init(void)
bool coldstart = ata_is_coldstart();
/* must be called before ata_device_init() */
- spinlock_init(&ata_mtx);
+ spinlock_init(&ata_spinlock IF_COP(, SPINLOCK_TASK_SWITCH));
ata_led(false);
ata_device_init();
@@ -1205,9 +1205,9 @@ int ata_init(void)
last_disk_activity = current_tick;
create_thread(ata_thread, ata_stack,
- sizeof(ata_stack), ata_thread_name
+ sizeof(ata_stack), 0, ata_thread_name
IF_PRIO(, PRIORITY_SYSTEM)
- IF_COP(, CPU, false));
+ IF_COP(, CPU));
initialized = true;
}
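
The ata driver above is a straightforward client of the renamed spinlock API. A condensed sketch of the pattern, using the declarations added to firmware/export/kernel.h in this commit (names are illustrative):

    /* Sketch of the spinlock usage pattern seen in drivers/ata.c above. */
    static struct spinlock my_lock NOCACHEBSS_ATTR;

    void my_driver_init(void)
    {
        /* allow task switching while waiting for the lock */
        spinlock_init(&my_lock IF_COP(, SPINLOCK_TASK_SWITCH));
    }

    void my_driver_op(void)
    {
        spinlock_lock(&my_lock);
        /* ... touch hardware / shared state ... */
        spinlock_unlock(&my_lock);
    }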
diff --git a/firmware/drivers/ata_mmc.c b/firmware/drivers/ata_mmc.c
index 66e60ead1d..604d1dde34 100644
--- a/firmware/drivers/ata_mmc.c
+++ b/firmware/drivers/ata_mmc.c
@@ -959,7 +959,7 @@ void ata_spin(void)
static void mmc_thread(void)
{
- struct event ev;
+ struct queue_event ev;
bool idle_notified = false;
while (1) {
@@ -1153,8 +1153,9 @@ int ata_init(void)
queue_init(&mmc_queue, true);
create_thread(mmc_thread, mmc_stack,
- sizeof(mmc_stack), mmc_thread_name IF_PRIO(, PRIORITY_SYSTEM)
- IF_COP(, CPU, false));
+ sizeof(mmc_stack), 0, mmc_thread_name
+ IF_PRIO(, PRIORITY_SYSTEM)
+ IF_COP(, CPU));
tick_add_task(mmc_tick);
initialized = true;
}
diff --git a/firmware/drivers/button.c b/firmware/drivers/button.c
index 851b5b9b20..25c590323a 100644
--- a/firmware/drivers/button.c
+++ b/firmware/drivers/button.c
@@ -46,7 +46,7 @@
#define MAX_EVENT_AGE HZ
#endif
-struct event_queue button_queue;
+struct event_queue button_queue NOCACHEBSS_ATTR;
static long lastbtn; /* Last valid button status */
static long last_read; /* Last button status, for debouncing/filtering */
@@ -300,7 +300,7 @@ int button_queue_count( void )
long button_get(bool block)
{
- struct event ev;
+ struct queue_event ev;
int pending_count = queue_count(&button_queue);
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
@@ -330,7 +330,7 @@ long button_get(bool block)
long button_get_w_tmo(int ticks)
{
- struct event ev;
+ struct queue_event ev;
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
/* Be sure to keep boosted state. */
diff --git a/firmware/drivers/fat.c b/firmware/drivers/fat.c
index a4fa7aa933..cfd4767032 100644
--- a/firmware/drivers/fat.c
+++ b/firmware/drivers/fat.c
@@ -197,7 +197,7 @@ struct fat_cache_entry
static char fat_cache_sectors[FAT_CACHE_SIZE][SECTOR_SIZE];
static struct fat_cache_entry fat_cache[FAT_CACHE_SIZE];
-static struct mutex cache_mutex;
+static struct mutex cache_mutex NOCACHEBSS_ATTR;
static long cluster2sec(IF_MV2(struct bpb* fat_bpb,) long cluster)
{
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 46d4336e70..46c4d3dfd2 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -282,9 +282,13 @@
#define HAVE_EXTENDED_MESSAGING_AND_NAME
#endif
-#if (CONFIG_CODEC == SWCODEC) && !defined(SIMULATOR) && !defined(BOOTLOADER)
+#if (CONFIG_CODEC == SWCODEC) && !defined(BOOTLOADER)
+#ifndef SIMULATOR
#define HAVE_PRIORITY_SCHEDULING
#define HAVE_SCHEDULER_BOOSTCTRL
+#endif /* SIMULATOR */
+#define HAVE_SEMAPHORE_OBJECTS
+#define HAVE_EVENT_OBJECTS
#endif
/* define for all cpus from SH family */
@@ -363,31 +367,70 @@
#define IRAM_LCDFRAMEBUFFER
#endif
+/* Change this if you want to build a single-core firmware for a multicore
+ * target for debugging */
+#if defined(BOOTLOADER)
+#define FORCE_SINGLE_CORE
+#endif
+
+/* Core locking types - specifies type of atomic operation */
+#define CORELOCK_NONE 0
+#define SW_CORELOCK 1 /* Mutual exclusion provided by a software algorithm
+ and not a special semaphore instruction */
+#define CORELOCK_SWAP 2 /* A swap (exchange) instruction */
+
/* Dual core support - not yet working on the 1G/2G and 3G iPod */
#if defined(CPU_PP)
#define IDLE_STACK_SIZE 0x80
#define IDLE_STACK_WORDS 0x20
-#if !defined(BOOTLOADER) && CONFIG_CPU != PP5002
+#if !defined(FORCE_SINGLE_CORE) && CONFIG_CPU != PP5002
+
#define NUM_CORES 2
#define CURRENT_CORE current_core()
-/* Hopefully at some point we will learn how to mark areas of main memory as
- * not to be cached. Until then, use IRAM for variables shared across cores */
+/* Use IRAM for variables shared across cores - large memory buffers should
+ * use UNCACHED_ADDR(a) and be appropriately aligned and padded */
#define NOCACHEBSS_ATTR IBSS_ATTR
#define NOCACHEDATA_ATTR IDATA_ATTR
-#define IF_COP(...) __VA_ARGS__
+#define IF_COP(...) __VA_ARGS__
+#define IF_COP_VOID(...) __VA_ARGS__
+#define IF_COP_CORE(core) core
+
+#if CONFIG_CPU == PP5020
+#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
+#else
+#define CONFIG_CORELOCK CORELOCK_SWAP
+#endif
+
#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */
+
#endif /* CPU_PP */
+#ifndef CONFIG_CORELOCK
+#define CONFIG_CORELOCK CORELOCK_NONE
+#endif
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+#define IF_SWCL(...) __VA_ARGS__
+#define IFN_SWCL(...)
+#else
+#define IF_SWCL(...)
+#define IFN_SWCL(...) __VA_ARGS__
+#endif /* CONFIG_CORELOCK == */
+
#ifndef NUM_CORES
/* Default to single core */
#define NUM_CORES 1
#define CURRENT_CORE CPU
#define NOCACHEBSS_ATTR
#define NOCACHEDATA_ATTR
+#define CONFIG_CORELOCK CORELOCK_NONE
#define IF_COP(...)
+#define IF_COP_VOID(...) void
+#define IF_COP_CORE(core) CURRENT_CORE
+
#endif /* NUM_CORES */
#endif /* __CONFIG_H__ */
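
The new IF_COP_VOID()/IF_COP_CORE() helpers let a function take a core argument only on multicore builds; on single-core builds the argument list collapses to void and the core expands to CURRENT_CORE. core_wake() in the new thread.h is declared exactly this way; a sketch of the pattern (my_wake/do_wake are placeholders):

    /* Declaration from the new thread.h: */
    void core_wake(IF_COP_VOID(unsigned int core));

    /* Illustrative definition pattern: expands to
     *   void my_wake(unsigned int core)  on dual-core builds,
     *   void my_wake(void)               on single-core builds,
     * where IF_COP_CORE(core) then evaluates to CURRENT_CORE. */
    void my_wake(IF_COP_VOID(unsigned int core))
    {
        do_wake(IF_COP_CORE(core));   /* do_wake() is a placeholder */
    }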
diff --git a/firmware/export/i2c-pp.h b/firmware/export/i2c-pp.h
index 3048acbaba..908db22554 100644
--- a/firmware/export/i2c-pp.h
+++ b/firmware/export/i2c-pp.h
@@ -45,6 +45,10 @@
/* TODO: Fully implement i2c driver */
+/* To be used by drivers that need to do multiple i2c operations
+ atomically */
+extern struct spinlock i2c_spin;
+
void i2c_init(void);
int i2c_readbyte(unsigned int dev_addr, int addr);
int pp_i2c_send(unsigned int addr, int data0, int data1);
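
The comment above explains what the exported i2c_spin is for; a sketch of a driver grouping two transfers into one atomic sequence (device address and register values are placeholders):

    #include "i2c-pp.h"

    /* Placeholder addresses/data -- only the locking pattern matters. */
    void codec_write_pair(void)
    {
        spinlock_lock(&i2c_spin);
        pp_i2c_send(0x1a, 0x02, 0x40);
        pp_i2c_send(0x1a, 0x03, 0x10);
        spinlock_unlock(&i2c_spin);
    }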
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 3d70e49a4c..a72e004b33 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -23,6 +23,8 @@
#include <inttypes.h>
#include "config.h"
+#include "thread.h"
+
/* wrap-safe macros for tick comparison */
#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
@@ -31,6 +33,7 @@
#define MAX_NUM_TICK_TASKS 8
+#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
@@ -72,7 +75,7 @@
#define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
-struct event
+struct queue_event
{
long id;
intptr_t data;
@@ -91,20 +94,66 @@ struct queue_sender_list
struct event_queue
{
- struct event events[QUEUE_LENGTH];
- struct thread_entry *thread;
- unsigned int read;
- unsigned int write;
+ struct thread_queue queue; /* Waiter list */
+ struct queue_event events[QUEUE_LENGTH]; /* list of events */
+ unsigned int read; /* head of queue */
+ unsigned int write; /* tail of queue */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- struct queue_sender_list *send;
+ struct queue_sender_list *send; /* list of threads waiting for
+ reply to an event */
+#endif
+#if NUM_CORES > 1
+ struct corelock cl; /* inter-core sync */
#endif
};
struct mutex
{
- uint32_t locked;
- struct thread_entry *thread;
+ struct thread_entry *queue; /* Waiter list */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct corelock cl; /* inter-core sync */
+#endif
+ struct thread_entry *thread; /* thread that owns lock */
+ int count; /* lock owner recursion count */
+ unsigned char locked; /* locked semaphore */
+};
+
+struct spinlock
+{
+#if NUM_CORES > 1
+ struct corelock cl; /* inter-core sync */
+#endif
+ struct thread_entry *thread; /* lock owner */
+ int count; /* lock owner recursion count */
+ unsigned char locked; /* is locked if nonzero */
+#if NUM_CORES > 1
+ unsigned char task_switch; /* can task switch? */
+#endif
+};
+
+#ifdef HAVE_SEMAPHORE_OBJECTS
+struct semaphore
+{
+ struct thread_entry *queue; /* Waiter list */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct corelock cl; /* inter-core sync */
+#endif
+ int count; /* # of waits remaining before unsignaled */
+ int max; /* maximum # of waits to remain signaled */
+};
+#endif
+
+#ifdef HAVE_EVENT_OBJECTS
+struct event
+{
+ struct thread_entry *queues[2]; /* waiters for each state */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct corelock cl; /* inter-core sync */
+#endif
+ unsigned char automatic; /* event performs auto-reset */
+ unsigned char state; /* state: 1 = signaled */
};
+#endif
/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER)
@@ -127,6 +176,7 @@ extern void yield(void);
extern void sleep(int ticks);
int tick_add_task(void (*f)(void));
int tick_remove_task(void (*f)(void));
+extern void tick_start(unsigned int interval_in_ms);
struct timeout;
@@ -150,10 +200,17 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);
+#define STATE_NONSIGNALED 0
+#define STATE_SIGNALED 1
+
+#define WAIT_TIMEDOUT (-1)
+#define WAIT_SUCCEEDED 1
+
extern void queue_init(struct event_queue *q, bool register_queue);
extern void queue_delete(struct event_queue *q);
-extern void queue_wait(struct event_queue *q, struct event *ev);
-extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);
+extern void queue_wait(struct event_queue *q, struct queue_event *ev);
+extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
+ int ticks);
extern void queue_post(struct event_queue *q, long id, intptr_t data);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
@@ -168,14 +225,26 @@ extern int queue_count(const struct event_queue *q);
extern int queue_broadcast(long id, intptr_t data);
extern void mutex_init(struct mutex *m);
-static inline void spinlock_init(struct mutex *m)
-{ mutex_init(m); } /* Same thing for now */
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
-extern void spinlock_lock(struct mutex *m);
-extern void spinlock_unlock(struct mutex *m);
-extern void tick_start(unsigned int interval_in_ms);
-
+#define SPINLOCK_TASK_SWITCH 0x10
+#define SPINLOCK_NO_TASK_SWITCH 0x00
+extern void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags));
+extern void spinlock_lock(struct spinlock *l);
+extern void spinlock_unlock(struct spinlock *l);
+extern int spinlock_lock_w_tmo(struct spinlock *l, int ticks);
+#ifdef HAVE_SEMAPHORE_OBJECTS
+extern void semaphore_init(struct semaphore *s, int max, int start);
+extern void semaphore_wait(struct semaphore *s);
+extern void semaphore_release(struct semaphore *s);
+#endif /* HAVE_SEMAPHORE_OBJECTS */
+#ifdef HAVE_EVENT_OBJECTS
+#define EVENT_AUTOMATIC 0x10
+#define EVENT_MANUAL 0x00
+extern void event_init(struct event *e, unsigned int flags);
+extern void event_wait(struct event *e, unsigned int for_state);
+extern void event_set_state(struct event *e, unsigned int state);
+#endif /* HAVE_EVENT_OBJECTS */
#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
-#endif
+#endif /* _KERNEL_H_ */
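
The commit message's "real event object" and the optional semaphores are the structs and functions declared above. A minimal usage sketch under HAVE_SEMAPHORE_OBJECTS and HAVE_EVENT_OBJECTS (buf_sem and dma_done are made-up names for the example):

    static struct semaphore buf_sem;
    static struct event dma_done;

    void example_init(void)
    {
        semaphore_init(&buf_sem, 1, 1);        /* max one wait, start signaled */
        event_init(&dma_done, EVENT_AUTOMATIC | STATE_NONSIGNALED);
    }

    void example_producer(void)
    {
        semaphore_wait(&buf_sem);              /* acquire */
        /* ... fill the shared buffer ... */
        semaphore_release(&buf_sem);           /* release */
        event_set_state(&dma_done, STATE_SIGNALED);
    }

    void example_consumer(void)
    {
        /* automatic event: resets to nonsignaled after waking a waiter */
        event_wait(&dma_done, STATE_SIGNALED);
    }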
diff --git a/firmware/export/pp5002.h b/firmware/export/pp5002.h
index b2e02f6174..021c248690 100644
--- a/firmware/export/pp5002.h
+++ b/firmware/export/pp5002.h
@@ -139,6 +139,8 @@
#define CPU_CTL (*(volatile unsigned char *)(0xcf004054))
#define COP_CTL (*(volatile unsigned char *)(0xcf004058))
+#define PROC_CTL(core) ((&CPU_CTL)[(core)*4])
+
#define PROC_SLEEP 0xca
#define PROC_WAKE 0xce
diff --git a/firmware/export/pp5020.h b/firmware/export/pp5020.h
index 5654a7de63..b591bce695 100644
--- a/firmware/export/pp5020.h
+++ b/firmware/export/pp5020.h
@@ -34,11 +34,15 @@
/* Each processor has two mailboxes it can write to and two which
it can read from. We define the first to be for sending messages
and the second for replying to messages */
-#define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000))
-#define COP_MESSAGE (*(volatile unsigned long *)(0x60001004))
-#define CPU_REPLY (*(volatile unsigned long *)(0x60001008))
-#define COP_REPLY (*(volatile unsigned long *)(0x6000100c))
-#define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010))
+#define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000))
+#define COP_MESSAGE (*(volatile unsigned long *)(0x60001004))
+#define CPU_REPLY (*(volatile unsigned long *)(0x60001008))
+#define COP_REPLY (*(volatile unsigned long *)(0x6000100c))
+#define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010))
+
+/* Simple convenient array-like access */
+#define PROC_MESSAGE(core) ((&CPU_MESSAGE)[core])
+#define PROC_REPLY(core) ((&CPU_REPLY)[core])
/* Interrupts */
#define CPU_INT_STAT (*(volatile unsigned long*)(0x60004000))
@@ -142,6 +146,7 @@
/* Processors Control */
#define CPU_CTL (*(volatile unsigned long *)(0x60007000))
#define COP_CTL (*(volatile unsigned long *)(0x60007004))
+#define PROC_CTL(core) ((&CPU_CTL)[core])
#define PROC_SLEEP 0x80000000
#define PROC_WAIT 0x40000000
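
The array-style accessors make per-core register access uniform. A sketch of the kind of code they enable, modeled on the PROC_SLEEP handling this commit removes from TIMER1() in firmware/kernel.c further down:

    /* Sketch: kick a core out of sleep using the new accessors.
     * Mirrors the COP_CTL wake logic removed from kernel.c's TIMER1(). */
    static inline void wake_core(unsigned int core)
    {
        unsigned long ctl = PROC_CTL(core);
        if (ctl & PROC_SLEEP)
            PROC_CTL(core) = ctl & ~PROC_SLEEP;
    }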
diff --git a/firmware/export/system.h b/firmware/export/system.h
index 24e1a2d861..dc10c4545f 100644
--- a/firmware/export/system.h
+++ b/firmware/export/system.h
@@ -45,6 +45,10 @@ bool detect_original_firmware(void);
#endif
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
+#if NUM_CORES > 1
+extern struct spinlock boostctrl_spin;
+#endif
+void cpu_boost_init(void);
#define FREQ cpu_frequency
void set_cpu_frequency(long frequency);
#ifdef CPU_BOOST_LOGGING
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 7c683ddde5..20cde1a8e3 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -21,6 +21,7 @@
#include "config.h"
#include <inttypes.h>
+#include <stddef.h>
#include <stdbool.h>
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
@@ -31,13 +32,15 @@
* can change it own priority to REALTIME to override user interface and
* prevent playback skipping.
*/
+#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
+#define LOWEST_PRIORITY 100 /* The lowest possible thread priority */
#define PRIORITY_REALTIME 1
-#define PRIORITY_USER_INTERFACE 4 /* The main thread */
-#define PRIORITY_RECORDING 4 /* Recording thread */
-#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
-#define PRIORITY_BUFFERING 4 /* Codec buffering thread */
-#define PRIORITY_SYSTEM 6 /* All other firmware threads */
-#define PRIORITY_BACKGROUND 8 /* Normal application threads */
+#define PRIORITY_USER_INTERFACE 4 /* The main thread */
+#define PRIORITY_RECORDING 4 /* Recording thread */
+#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
+#define PRIORITY_BUFFERING 4 /* Codec buffering thread */
+#define PRIORITY_SYSTEM 6 /* All other firmware threads */
+#define PRIORITY_BACKGROUND 8 /* Normal application threads */
#if CONFIG_CODEC == SWCODEC
#define MAXTHREADS 16
@@ -47,6 +50,46 @@
#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
+/**
+ * "Busy" values that can be swapped into a variable to indicate
+ * that the variable or object pointed to is in use by another processor
+ * core. When accessed, the busy value is swapped-in while the current
+ * value is atomically returned. If the swap returns the busy value,
+ * the processor should retry the operation until some other value is
+ * returned. When modification is finished, the new value should be
+ * written which unlocks it and updates it atomically.
+ *
+ * Procedure:
+ * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
+ *
+ * Modify/examine object at mem location or variable. Create "new_value"
+ * as suitable.
+ *
+ * variable = new_value or curr_value;
+ *
+ * To check a value for busy and perform an operation if not:
+ * curr_value = swap(&variable, BUSY_VALUE);
+ *
+ * if (curr_value != BUSY_VALUE)
+ * {
+ * Modify/examine object at mem location or variable. Create "new_value"
+ * as suitable.
+ * variable = new_value or curr_value;
+ * }
+ * else
+ * {
+ * Do nothing - already busy
+ * }
+ *
+ * Only ever restore when an actual value is returned or else it could leave
+ * the variable locked permanently if another processor unlocked in the
+ * meantime. The next access attempt would deadlock for all processors since
+ * an abandoned busy status would be left behind.
+ */
+#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
+#define STATE_BUSYu8 UINT8_MAX
+#define STATE_BUSYi INT_MIN
+
#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
* needs them. */
@@ -58,7 +101,7 @@ struct regs
unsigned int a[5]; /* 28-44 - a2-a6 */
void *sp; /* 48 - Stack pointer (a7) */
void *start; /* 52 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
#elif CONFIG_CPU == SH7034
struct regs
{
@@ -66,7 +109,7 @@ struct regs
void *sp; /* 28 - Stack pointer (r15) */
void *pr; /* 32 - Procedure register */
void *start; /* 36 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
#elif defined(CPU_ARM)
struct regs
{
@@ -74,7 +117,7 @@ struct regs
void *sp; /* 32 - Stack pointer (r13) */
unsigned int lr; /* 36 - r14 (lr) */
void *start; /* 40 - Thread start address, or NULL when started */
-} __attribute__((packed));
+};
#endif /* CONFIG_CPU */
#else
struct regs
@@ -85,58 +128,206 @@ struct regs
};
#endif /* !SIMULATOR */
-#define STATE_RUNNING 0x00000000
-#define STATE_BLOCKED 0x20000000
-#define STATE_SLEEPING 0x40000000
-#define STATE_BLOCKED_W_TMO 0x60000000
-
-#define THREAD_STATE_MASK 0x60000000
-#define STATE_ARG_MASK 0x1FFFFFFF
-
-#define GET_STATE_ARG(state) (state & STATE_ARG_MASK)
-#define GET_STATE(state) (state & THREAD_STATE_MASK)
-#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
-#define CLEAR_STATE_ARG(var) (var &= ~STATE_ARG_MASK)
-
-#define STATE_BOOSTED 0x80000000
-#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
-#define SET_BOOST_STATE(var) (var |= STATE_BOOSTED)
-
-struct thread_entry {
- struct regs context;
- const char *name;
- void *stack;
- unsigned long statearg;
- unsigned short stack_size;
-# if NUM_CORES > 1
- unsigned char core; /* To which core threads belongs to. */
-# endif
-#ifdef HAVE_PRIORITY_SCHEDULING
- unsigned char priority;
- unsigned char priority_x;
- long last_run;
+/* NOTE: The use of the word "queue" may also refer to a linked list of
+ threads being maintained that are normally dealt with in FIFO order
+ and not necessarily a kernel event_queue */
+enum
+{
+ /* States without a timeout must be first */
+ STATE_KILLED = 0, /* Thread is killed (default) */
+ STATE_RUNNING, /* Thread is currently running */
+ STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
+ /* These states involve adding the thread to the tmo list */
+ STATE_SLEEPING, /* Thread is sleeping with a timeout */
+ STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
+ /* Miscellaneous states */
+ STATE_FROZEN, /* Thread is suspended and will not run until
+ thread_thaw is called with its ID */
+ THREAD_NUM_STATES,
+ TIMEOUT_STATE_FIRST = STATE_SLEEPING,
+#if NUM_CORES > 1
+ STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
#endif
- struct thread_entry *next, *prev;
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- intptr_t retval;
+};
+
+#if NUM_CORES > 1
+#define THREAD_DESTRUCT ((const char *)0x84905617)
#endif
+
+/* Link information for lists thread is in */
+struct thread_entry; /* forward */
+struct thread_list
+{
+ struct thread_entry *prev; /* Previous thread in a list */
+ struct thread_entry *next; /* Next thread in a list */
};
-struct core_entry {
- struct thread_entry *running;
- struct thread_entry *sleeping;
- struct thread_entry *waking;
- struct thread_entry **wakeup_list;
+/* Small objects for core-wise mutual exclusion */
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+ volatile unsigned char myl[NUM_CORES];
+ volatile unsigned char turn;
+} __attribute__((packed));
+
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+/* Use native atomic swap/exchange instruction */
+struct corelock
+{
+ unsigned char locked;
+} __attribute__((packed));
+
+#define corelock_init(cl) \
+ ({ (cl)->locked = 0; })
+#define corelock_lock(cl) \
+ ({ while (test_and_set(&(cl)->locked, 1)); })
+#define corelock_try_lock(cl) \
+ ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
+#define corelock_unlock(cl) \
+ ({ (cl)->locked = 0; })
+#else
+/* No atomic corelock op needed or just none defined */
+#define corelock_init(cl)
+#define corelock_lock(cl)
+#define corelock_try_lock(cl)
+#define corelock_unlock(cl)
+#endif /* core locking selection */
+
+struct thread_queue
+{
+ struct thread_entry *queue; /* list of threads waiting -
+ _must_ be first member */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct corelock cl; /* lock for atomic list operations */
+#endif
+};
+
+/* Information kept in each thread slot
+ * members are arranged according to size - largest first - in order
+ * to ensure both alignment and packing at the same time.
+ */
+struct thread_entry
+{
+ struct regs context; /* Register context at switch -
+ _must_ be first member */
+ void *stack; /* Pointer to top of stack */
+ const char *name; /* Thread name */
+ long tmo_tick; /* Tick when thread should be woken from
+ timeout */
+ struct thread_list l; /* Links for blocked/waking/running -
+ circular linkage in both directions */
+ struct thread_list tmo; /* Links for timeout list -
+ Self-pointer-terminated in reverse direction,
+ NULL-terminated in forward direction */
+ struct thread_queue *bqp; /* Pointer to list variable in kernel
+ object where thread is blocked - used
+ for implicit unblock and explicit wake */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct thread_entry **bqnlp; /* Pointer to list variable in kernel
+ object where thread is blocked - non-locked
+ operations will be used */
+#endif
+ struct thread_entry *queue; /* List of threads waiting for thread to be
+ removed */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ intptr_t retval; /* Return value from a blocked operation */
+#endif
+#ifdef HAVE_PRIORITY_SCHEDULING
+ long last_run; /* Last tick when started */
+#endif
+ unsigned short stack_size; /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
- long highest_priority;
+ unsigned char priority; /* Current priority */
+ unsigned char priority_x; /* Inherited priority - right now just a
+ runtime guarantee flag */
#endif
+ unsigned char state; /* Thread slot state (STATE_*) */
#if NUM_CORES > 1
- volatile bool lock_issued;
- volatile bool kernel_running;
+ unsigned char core; /* The core to which thread belongs */
+#endif
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ unsigned char boosted; /* CPU frequency boost flag */
+#endif
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct corelock cl; /* Corelock to lock thread slot */
+#endif
+};
+
+#if NUM_CORES > 1
+/* Operations to be performed just before stopping a thread and starting
+ a new one if specified before calling switch_thread */
+#define TBOP_UNLOCK_LIST 0x01 /* Set a pointer variable address var_ptrp */
+#if CONFIG_CORELOCK == CORELOCK_SWAP
+#define TBOP_SET_VARi 0x02 /* Set an int at address var_ip */
+#define TBOP_SET_VARu8 0x03 /* Set an unsigned char at address var_u8p */
+#define TBOP_VAR_TYPE_MASK 0x03 /* Mask for variable type*/
+#endif /* CONFIG_CORELOCK */
+#define TBOP_UNLOCK_CORELOCK 0x04
+#define TBOP_UNLOCK_THREAD 0x08 /* Unlock a thread's slot */
+#define TBOP_UNLOCK_CURRENT 0x10 /* Unlock the current thread's slot */
+#define TBOP_IRQ_LEVEL 0x20 /* Set a new irq level */
+#define TBOP_SWITCH_CORE 0x40 /* Call the core switch preparation routine */
+
+struct thread_blk_ops
+{
+ int irq_level; /* new IRQ level to set */
+#if CONFIG_CORELOCK != SW_CORELOCK
+ union
+ {
+ int var_iv; /* int variable value to set */
+ uint8_t var_u8v; /* unsigned char value to set */
+ struct thread_entry *list_v; /* list pointer queue value to set */
+ };
+#endif
+ union
+ {
+#if CONFIG_CORELOCK != SW_CORELOCK
+ int *var_ip; /* pointer to int variable */
+ uint8_t *var_u8p; /* pointer to unsigned char variable */
+#endif
+ struct thread_queue *list_p; /* pointer to list variable */
+ };
+#if CONFIG_CORELOCK == SW_CORELOCK
+ struct corelock *cl_p; /* corelock to unlock */
+ struct thread_entry *thread; /* thread to unlock */
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ unsigned char state; /* new thread state (performs unlock) */
+#endif /* SOFTWARE_CORELOCK */
+ unsigned char flags; /* TBOP_* flags */
+};
+#endif /* NUM_CORES > 1 */
+
+/* Information kept for each core
+ * Members are arranged for the same reason as in thread_entry
+ */
+struct core_entry
+{
+ /* "Active" lists - core is constantly active on these and are never
+ locked and interrupts do not access them */
+ struct thread_entry *running; /* threads that are running */
+ struct thread_entry *timeout; /* threads that are on a timeout before
+ running again */
+ /* "Shared" lists - cores interact in a synchronized manner - access
+ is locked between cores and interrupts */
+ struct thread_queue waking; /* intermediate locked list that
+ holds threads the other core should wake up
+ on next task switch */
+ long next_tmo_check; /* soonest time to check tmo threads */
+#if NUM_CORES > 1
+ struct thread_blk_ops blk_ops; /* operations to perform when
+ blocking a thread */
+#else
+ #define STAY_IRQ_LEVEL (-1)
+ int irq_level; /* sets the irq level to irq_level */
+#endif /* NUM_CORES */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ unsigned char highest_priority;
#endif
- long last_tick;
- int switch_to_irq_level;
- #define STAY_IRQ_LEVEL -1
};
#ifdef HAVE_PRIORITY_SCHEDULING
@@ -145,82 +336,210 @@ struct core_entry {
#define IF_PRIO(...)
#endif
-/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
- * Just use it for ARM, Coldfire and whatever else well...why not?
- */
-
/* Macros generate better code than an inline function in this case */
-#if (defined (CPU_PP) || defined (CPU_ARM)) && CONFIG_CPU != PP5020
-#define test_and_set(x_, v_) \
-({ \
- uint32_t old; \
- asm volatile ( \
- "swpb %[old], %[v], [%[x]] \r\n" \
- : [old]"=r"(old) \
- : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
- ); \
- old; \
- })
+#if (defined (CPU_PP) || defined (CPU_ARM))
+/* atomic */
+#ifdef SOFTWARE_CORELOCK
+#define test_and_set(a, v, cl) \
+ xchg8((a), (v), (cl))
+/* atomic */
+#define xchg8(a, v, cl) \
+({ uint32_t o; \
+ corelock_lock(cl); \
+ o = *(uint8_t *)(a); \
+ *(uint8_t *)(a) = (v); \
+ corelock_unlock(cl); \
+ o; })
+#define xchg32(a, v, cl) \
+({ uint32_t o; \
+ corelock_lock(cl); \
+ o = *(uint32_t *)(a); \
+ *(uint32_t *)(a) = (v); \
+ corelock_unlock(cl); \
+ o; })
+#define xchgptr(a, v, cl) \
+({ typeof (*(a)) o; \
+ corelock_lock(cl); \
+ o = *(a); \
+ *(a) = (v); \
+ corelock_unlock(cl); \
+ o; })
+#else
+/* atomic */
+#define test_and_set(a, v, ...) \
+ xchg8((a), (v))
+#define xchg8(a, v, ...) \
+({ uint32_t o; \
+ asm volatile( \
+ "swpb %0, %1, [%2]" \
+ : "=r"(o) \
+ : "r"(v), \
+ "r"((uint8_t*)(a))); \
+ o; })
+/* atomic */
+#define xchg32(a, v, ...) \
+({ uint32_t o; \
+ asm volatile( \
+ "swp %0, %1, [%2]" \
+ : "=r"(o) \
+ : "r"((uint32_t)(v)), \
+ "r"((uint32_t*)(a))); \
+ o; })
+/* atomic */
+#define xchgptr(a, v, ...) \
+({ typeof (*(a)) o; \
+ asm volatile( \
+ "swp %0, %1, [%2]" \
+ : "=r"(o) \
+ : "r"(v), "r"(a)); \
+ o; })
+#endif /* SOFTWARE_CORELOCK */
#elif defined (CPU_COLDFIRE)
-#define test_and_set(x_, v_) \
-({ \
- uint8_t old; \
- asm volatile ( \
- "bset.l %[v], (%[x]) \r\n" \
- "sne.b %[old] \r\n" \
- : [old]"=d,d"(old) \
- : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
- ); \
- old; \
- })
+/* atomic */
+/* one branch will be optimized away if v is a constant expression */
+#define test_and_set(a, v, ...) \
+({ uint32_t o = 0; \
+ if (v) { \
+ asm volatile ( \
+ "bset.b #0, (%0)" \
+ : : "a"((uint8_t*)(a)) \
+ : "cc"); \
+ } else { \
+ asm volatile ( \
+ "bclr.b #0, (%0)" \
+ : : "a"((uint8_t*)(a)) \
+ : "cc"); \
+ } \
+ asm volatile ("sne.b %0" \
+ : "+d"(o)); \
+ o; })
#elif CONFIG_CPU == SH7034
-#define test_and_set(x_, v_) \
-({ \
- uint32_t old; \
- asm volatile ( \
- "tas.b @%[x] \r\n" \
- "mov #-1, %[old] \r\n" \
- "negc %[old], %[old] \r\n" \
- : [old]"=r"(old) \
- : [v]"M"((uint32_t)v_), /* Value of v_ must be 1 */ \
- [x]"r"((uint8_t *)x_) \
- ); \
- old; \
- })
-#else
-/* default for no asm version */
-#define test_and_set(x_, v_) \
-({ \
- uint32_t old = *(uint32_t *)x_; \
- *(uint32_t *)x_ = v_; \
- old; \
- })
-#endif
+/* atomic */
+#define test_and_set(a, v, ...) \
+({ uint32_t o; \
+ asm volatile ( \
+ "tas.b @%2 \n" \
+ "mov #-1, %0 \n" \
+ "negc %0, %0 \n" \
+ : "=r"(o) \
+ : "M"((uint32_t)(v)), /* Value of_v must be 1 */ \
+ "r"((uint8_t *)(a))); \
+ o; })
+#endif /* CONFIG_CPU == */
+
+/* defaults for no asm version */
+#ifndef test_and_set
+/* not atomic */
+#define test_and_set(a, v, ...) \
+({ uint32_t o = *(uint8_t *)(a); \
+ *(uint8_t *)(a) = (v); \
+ o; })
+#endif /* test_and_set */
+#ifndef xchg8
+/* not atomic */
+#define xchg8(a, v, ...) \
+({ uint32_t o = *(uint8_t *)(a); \
+ *(uint8_t *)(a) = (v); \
+ o; })
+#endif /* xchg8 */
+#ifndef xchg32
+/* not atomic */
+#define xchg32(a, v, ...) \
+({ uint32_t o = *(uint32_t *)(a); \
+ *(uint32_t *)(a) = (v); \
+ o; })
+#endif /* xchg32 */
+#ifndef xchgptr
+/* not atomic */
+#define xchgptr(a, v, ...) \
+({ typeof (*(a)) o = *(a); \
+ *(a) = (v); \
+ o; })
+#endif /* xchgptr */
+void core_idle(void);
+void core_wake(IF_COP_VOID(unsigned int core));
+
+#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
struct thread_entry*
create_thread(void (*function)(void), void* stack, int stack_size,
- const char *name IF_PRIO(, int priority)
- IF_COP(, unsigned int core, bool fallback));
+ unsigned flags, const char *name
+ IF_PRIO(, int priority)
+ IF_COP(, unsigned int core));
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
#else
#define trigger_cpu_boost()
#endif
-
+void thread_thaw(struct thread_entry *thread);
+void thread_wait(struct thread_entry *thread);
void remove_thread(struct thread_entry *thread);
-void switch_thread(bool save_context, struct thread_entry **blocked_list);
+void switch_thread(struct thread_entry *old);
void sleep_thread(int ticks);
-void block_thread(struct thread_entry **thread);
-void block_thread_w_tmo(struct thread_entry **thread, int timeout);
-void set_irq_level_and_block_thread(struct thread_entry **thread, int level);
-void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
- int timeout, int level);
-void wakeup_thread(struct thread_entry **thread);
-void wakeup_thread_irq_safe(struct thread_entry **thread);
+
+/**
+ * Setup to allow using thread queues as locked or non-locked without speed
+ * sacrifices in both core locking types.
+ *
+ * The blocking/waking functions inline two different versions of the real
+ * function into the stubs when a software or other separate core locking
+ * mechanism is employed.
+ *
+ * When a simple test-and-set or similar instruction is available, locking
+ * has no cost and so one version is used and the internal worker is called
+ * directly.
+ *
+ * CORELOCK_NONE is treated the same as when an atomic instruction can be
+ * used.
+ */
+
+/* Blocks the current thread on a thread queue */
+#if CONFIG_CORELOCK == SW_CORELOCK
+void block_thread(struct thread_queue *tq);
+void block_thread_no_listlock(struct thread_entry **list);
+#else
+void _block_thread(struct thread_queue *tq);
+static inline void block_thread(struct thread_queue *tq)
+ { _block_thread(tq); }
+static inline void block_thread_no_listlock(struct thread_entry **list)
+ { _block_thread((struct thread_queue *)list); }
+#endif /* CONFIG_CORELOCK */
+
+/* Blocks the current thread on a thread queue for a max amount of time
+ * There is no "_no_listlock" version because timeout blocks without sync on
+ * the blocking queues is not permitted since either core could access the
+ * list at any time to do an implicit wake. In other words, objects with
+ * timeout support require lockable queues. */
+void block_thread_w_tmo(struct thread_queue *tq, int timeout);
+
+/* Wakes up the thread at the head of the queue */
+#define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL)
+#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
+#if CONFIG_CORELOCK == SW_CORELOCK
+struct thread_entry * wakeup_thread(struct thread_queue *tq);
+struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
+#else
+struct thread_entry * _wakeup_thread(struct thread_queue *list);
+static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
+ { return _wakeup_thread(tq); }
+static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
+ { return _wakeup_thread((struct thread_queue *)list); }
+#endif /* CONFIG_CORELOCK */
+
+/* Initialize a thread_queue object. */
+static inline void thread_queue_init(struct thread_queue *tq)
+ { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
+/* A convenience function for waking an entire queue of threads. */
+static inline void thread_queue_wake(struct thread_queue *tq)
+ { while (wakeup_thread(tq) != NULL); }
+/* The no-listlock version of thread_queue_wake() */
+static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
+ { while (wakeup_thread_no_listlock(list) != NULL); }
+
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
-int thread_get_priority(struct thread_entry *thread);
+int thread_get_priority(struct thread_entry *thread);
/* Yield that guarantees thread execution once per round regardless of
thread's scheduler priority - basically a transient realtime boost
without altering the scheduler's thread precedence. */
@@ -228,17 +547,20 @@ void priority_yield(void);
#else
#define priority_yield yield
#endif /* HAVE_PRIORITY_SCHEDULING */
+#if NUM_CORES > 1
+unsigned int switch_core(unsigned int new_core);
+#endif
struct thread_entry * thread_get_current(void);
void init_threads(void);
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
-int thread_get_status(const struct thread_entry *thread);
+unsigned thread_get_status(const struct thread_entry *thread);
void thread_get_name(char *buffer, int size,
struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
-#endif
+#endif /* THREAD_H */
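
The "busy value" protocol documented in the long header comment above can be made concrete with the xchgptr() macro from the same file. A sketch (owner is an illustrative shared variable; on SW_CORELOCK builds xchgptr takes a third corelock argument):

    static struct thread_entry *owner;   /* illustrative shared pointer */

    void claim_or_inspect(struct thread_entry *self)
    {
        struct thread_entry *curr;

        /* Spin until something other than the busy marker is returned */
        while ((curr = xchgptr(&owner, STATE_BUSYuptr)) == STATE_BUSYuptr);

        /* Examine/modify while other cores see the busy marker, then
         * always write a real value back -- never leave the marker behind */
        owner = (curr == NULL) ? self : curr;
    }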
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 1b6e9f933b..4e56c2919a 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -28,15 +28,37 @@
#include "avic-imx31.h"
#endif
+/* Make this nonzero to enable more elaborate checks on objects */
+#ifdef DEBUG
+#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
+#else
+#define KERNEL_OBJECT_CHECKS 0
+#endif
+
+#if KERNEL_OBJECT_CHECKS
+#define KERNEL_ASSERT(exp, msg...) \
+ ({ if (!({ exp; })) panicf(msg); })
+#else
+#define KERNEL_ASSERT(exp, msg...) ({})
+#endif
+
#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif
void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
+extern struct core_entry cores[NUM_CORES];
+
/* This array holds all queues that are initiated. It is used for broadcast. */
-static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
-static int num_queues NOCACHEBSS_ATTR;
+static struct
+{
+ int count;
+ struct event_queue *queues[MAX_NUM_QUEUES];
+#if NUM_CORES > 1
+ struct corelock cl;
+#endif
+} all_queues NOCACHEBSS_ATTR;
/****************************************************************************
* Standard kernel stuff
@@ -52,8 +74,8 @@ void kernel_init(void)
if (CURRENT_CORE == CPU)
{
memset(tick_funcs, 0, sizeof(tick_funcs));
- num_queues = 0;
- memset(all_queues, 0, sizeof(all_queues));
+ memset(&all_queues, 0, sizeof(all_queues));
+ corelock_init(&all_queues.cl);
tick_start(1000/HZ);
}
}
@@ -77,7 +99,7 @@ void sleep(int ticks)
#elif defined(CPU_PP) && defined(BOOTLOADER)
unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
while (TIME_BEFORE(USEC_TIMER, stop))
- switch_thread(true,NULL);
+ switch_thread(NULL);
#else
sleep_thread(ticks);
#endif
@@ -88,7 +110,7 @@ void yield(void)
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
/* Some targets don't like yielding in the bootloader */
#else
- switch_thread(true, NULL);
+ switch_thread(NULL);
#endif
}
@@ -104,7 +126,7 @@ static void queue_fetch_sender(struct queue_sender_list *send,
{
struct thread_entry **spp = &send->senders[i];
- if (*spp)
+ if(*spp)
{
send->curr_sender = *spp;
*spp = NULL;
@@ -124,18 +146,16 @@ static void queue_release_sender(struct thread_entry **sender,
intptr_t retval)
{
(*sender)->retval = retval;
- wakeup_thread_irq_safe(sender);
-#if 0
+ wakeup_thread_no_listlock(sender);
/* This should _never_ happen - there must never be multiple
threads in this list and it is a corrupt state */
- if (*sender != NULL)
- panicf("Queue: send slot ovf");
-#endif
+ KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
}
/* Releases any waiting threads that are queued with queue_send -
* reply with 0.
- * Disable IRQs before calling since it uses queue_release_sender.
+ * Disable IRQs and lock before calling since it uses
+ * queue_release_sender.
*/
static void queue_release_all_senders(struct event_queue *q)
{
@@ -156,79 +176,114 @@ static void queue_release_all_senders(struct event_queue *q)
}
/* Enables queue_send on the specified queue - caller allocates the extra
- data structure */
+ data structure. Only queues which are taken to be owned by a thread should
+ enable this. Public waiting is not permitted. */
void queue_enable_queue_send(struct event_queue *q,
struct queue_sender_list *send)
{
- q->send = send;
- memset(send, 0, sizeof(struct queue_sender_list));
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
+
+ q->send = NULL;
+ if(send != NULL)
+ {
+ memset(send, 0, sizeof(*send));
+ q->send = send;
+ }
+
+ corelock_unlock(&q->cl);
+ set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
-
+/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ if(register_queue)
+ {
+ corelock_lock(&all_queues.cl);
+ }
+
+ corelock_init(&q->cl);
+ thread_queue_init(&q->queue);
q->read = 0;
q->write = 0;
- q->thread = NULL;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
q->send = NULL; /* No message sending by default */
#endif
if(register_queue)
{
+ if(all_queues.count >= MAX_NUM_QUEUES)
+ {
+ panicf("queue_init->out of queues");
+ }
/* Add it to the all_queues array */
- all_queues[num_queues++] = q;
+ all_queues.queues[all_queues.count++] = q;
+ corelock_unlock(&all_queues.cl);
}
+
+ set_irq_level(oldlevel);
}
+/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
+ int oldlevel;
int i;
- bool found = false;
-
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- /* Release theads waiting on queue */
- wakeup_thread(&q->thread);
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&all_queues.cl);
+ corelock_lock(&q->cl);
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- /* Release waiting threads and reply to any dequeued message
- waiting for one. */
- queue_release_all_senders(q);
- queue_reply(q, 0);
-#endif
-
/* Find the queue to be deleted */
- for(i = 0;i < num_queues;i++)
+ for(i = 0;i < all_queues.count;i++)
{
- if(all_queues[i] == q)
+ if(all_queues.queues[i] == q)
{
- found = true;
+ /* Move the following queues up in the list */
+ all_queues.count--;
+
+ for(;i < all_queues.count;i++)
+ {
+ all_queues.queues[i] = all_queues.queues[i+1];
+ }
+
break;
}
}
- if(found)
- {
- /* Move the following queues up in the list */
- for(;i < num_queues-1;i++)
- {
- all_queues[i] = all_queues[i+1];
- }
-
- num_queues--;
- }
-
+ corelock_unlock(&all_queues.cl);
+
+ /* Release threads waiting on queue head */
+ thread_queue_wake(&q->queue);
+
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ /* Release waiting threads for reply and reply to any dequeued
+ message waiting for one. */
+ queue_release_all_senders(q);
+ queue_reply(q, 0);
+#endif
+
+ q->read = 0;
+ q->write = 0;
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
}
-void queue_wait(struct event_queue *q, struct event *ev)
+/* NOTE: multiple threads waiting on a queue head cannot have a well-
+ defined release order if timeouts are used. If multiple threads must
+ access the queue head, use a dispatcher or queue_wait only. */
+void queue_wait(struct event_queue *q, struct queue_event *ev)
{
int oldlevel;
unsigned int rd;
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
if(q->send && q->send->curr_sender)
@@ -240,8 +295,28 @@ void queue_wait(struct event_queue *q, struct event *ev)
if (q->read == q->write)
{
- set_irq_level_and_block_thread(&q->thread, oldlevel);
- oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ do
+ {
+#if CONFIG_CORELOCK == CORELOCK_NONE
+ cores[CURRENT_CORE].irq_level = oldlevel;
+#elif CONFIG_CORELOCK == SW_CORELOCK
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.cl_p = &q->cl;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.var_u8p = &q->cl.locked;
+ cores[core].blk_ops.var_u8v = 0;
+#endif /* CONFIG_CORELOCK */
+ block_thread(&q->queue);
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
+ }
+ /* A message that woke us could now be gone */
+ while (q->read == q->write);
}
rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -254,13 +329,17 @@ void queue_wait(struct event_queue *q, struct event *ev)
queue_fetch_sender(q->send, rd);
}
#endif
-
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
}
-void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
+void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ int oldlevel;
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
if (q->send && q->send->curr_sender)
@@ -269,13 +348,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
queue_release_sender(&q->send->curr_sender, 0);
}
#endif
-
+
if (q->read == q->write && ticks > 0)
{
- set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
+#if CONFIG_CORELOCK == CORELOCK_NONE
+ cores[CURRENT_CORE].irq_level = oldlevel;
+#elif CONFIG_CORELOCK == SW_CORELOCK
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.cl_p = &q->cl;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.var_u8p = &q->cl.locked;
+ cores[core].blk_ops.var_u8v = 0;
+#endif
+ block_thread_w_tmo(&q->queue, ticks);
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
}
+ /* no worry about a removed message here - status is checked inside
+ locks - perhaps verify if timeout or false alarm */
if (q->read != q->write)
{
unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -293,15 +389,19 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
ev->id = SYS_TIMEOUT;
}
-
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ int oldlevel;
unsigned int wr;
-
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
+
wr = q->write++ & QUEUE_LENGTH_MASK;
q->events[wr].id = id;
@@ -320,20 +420,24 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
}
#endif
- wakeup_thread_irq_safe(&q->thread);
+ /* Wakeup a waiting thread if any */
+ wakeup_thread(&q->queue);
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
-
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
- use of this function - we only aim to protect the queue integrity by
- turning them off. */
+/* IRQ handlers are not allowed use of this function - we only aim to
+ protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ int oldlevel;
unsigned int wr;
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
+
wr = q->write++ & QUEUE_LENGTH_MASK;
q->events[wr].id = id;
@@ -341,21 +445,38 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
if(q->send)
{
+ const unsigned int core = CURRENT_CORE;
struct thread_entry **spp = &q->send->senders[wr];
- if (*spp)
+ if(*spp)
{
/* overflow protect - unblock any thread waiting at this index */
queue_release_sender(spp, 0);
}
- wakeup_thread(&q->thread);
- set_irq_level_and_block_thread(spp, oldlevel);
- return thread_get_current()->retval;
+ /* Wakeup a waiting thread if any */
+ wakeup_thread(&q->queue);
+
+#if CONFIG_CORELOCK == CORELOCK_NONE
+ cores[core].irq_level = oldlevel;
+#elif CONFIG_CORELOCK == SW_CORELOCK
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.cl_p = &q->cl;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.var_u8p = &q->cl.locked;
+ cores[core].blk_ops.var_u8v = 0;
+#endif
+ block_thread_no_listlock(spp);
+ return cores[core].running->retval;
}
/* Function as queue_post if sending is not enabled */
- wakeup_thread(&q->thread);
+ wakeup_thread(&q->queue);
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
return 0;
@@ -365,21 +486,52 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
- return q->send && q->send->curr_sender;
+ bool in_send;
+
+#if NUM_CORES > 1
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
+#endif
+
+ in_send = q->send && q->send->curr_sender;
+
+#if NUM_CORES > 1
+ corelock_unlock(&q->cl);
+ set_irq_level(oldlevel);
+#endif
+
+ return in_send;
}
#endif
-/* Replies with retval to any dequeued message sent with queue_send */
+/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
- /* No IRQ lock here since IRQs cannot change this */
if(q->send && q->send->curr_sender)
{
- queue_release_sender(&q->send->curr_sender, retval);
+#if NUM_CORES > 1
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
+ /* Double-check locking */
+ if(q->send && q->send->curr_sender)
+ {
+#endif
+
+ queue_release_sender(&q->send->curr_sender, retval);
+
+#if NUM_CORES > 1
+ }
+ corelock_unlock(&q->cl);
+ set_irq_level(oldlevel);
+#endif
}
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
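
To make the send/reply pairing above concrete, a hedged sketch follows; DEMO_REQUEST, do_work() and demo_queue are invented, and the queue is assumed to have been registered with queue_enable_queue_send().

    /* Sending side: blocks until the owning thread replies, then returns the
     * value that thread passed to queue_reply() */
    static intptr_t demo_request(void *buffer)
    {
        return queue_send(&demo_queue, DEMO_REQUEST, (intptr_t)buffer);
    }

    /* Owning thread's event loop */
    static void demo_owner_thread(void)
    {
        struct queue_event ev;

        while (1)
        {
            queue_wait(&demo_queue, &ev);

            if (ev.id == DEMO_REQUEST)
                queue_reply(&demo_queue, do_work((void *)ev.data));
        }
    }
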
+/* Poll queue to see if a message exists - careful in using the result if
+ * queue_remove_from_head is called when messages are posted - possibly use
+ * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
+ * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
return ( q->read == q->write );
@@ -387,23 +539,30 @@ bool queue_empty(const struct event_queue* q)
void queue_clear(struct event_queue* q)
{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ int oldlevel;
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- /* Release all thread waiting in the queue for a reply -
+ /* Release all threads waiting in the queue for a reply -
dequeued sent message will be handled by owning thread */
queue_release_all_senders(q);
#endif
q->read = 0;
q->write = 0;
-
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
}
void queue_remove_from_head(struct event_queue *q, long id)
{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ int oldlevel;
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&q->cl);
while(q->read != q->write)
{
@@ -428,7 +587,8 @@ void queue_remove_from_head(struct event_queue *q, long id)
#endif
q->read++;
}
-
+
+ corelock_unlock(&q->cl);
set_irq_level(oldlevel);
}
@@ -446,13 +606,23 @@ int queue_count(const struct event_queue *q)
int queue_broadcast(long id, intptr_t data)
{
int i;
+
+#if NUM_CORES > 1
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ corelock_lock(&all_queues.cl);
+#endif
- for(i = 0;i < num_queues;i++)
+ for(i = 0;i < all_queues.count;i++)
{
- queue_post(all_queues[i], id, data);
+ queue_post(all_queues.queues[i], id, data);
}
+
+#if NUM_CORES > 1
+ corelock_unlock(&all_queues.cl);
+ set_irq_level(oldlevel);
+#endif
- return num_queues;
+ return i;
}
/****************************************************************************
@@ -567,6 +737,7 @@ void TIMER1(void)
{
int i;
+ /* Run through the list of tick tasks (using main core) */
TIMER1_VAL; /* Read value to ack IRQ */
/* Run through the list of tick tasks using main CPU core -
@@ -580,24 +751,8 @@ void TIMER1(void)
}
#if NUM_CORES > 1
-#ifdef CPU_PP502x
- {
- /* If COP is sleeping - give it a kick */
- /* TODO: Use a mailbox in addition to make sure it doesn't go to
- * sleep if kicked just as it's headed to rest to make sure its
- * tick checks won't be jittery. Don't bother at all if it owns no
- * threads. */
- unsigned int cop_ctl;
-
- cop_ctl = COP_CTL;
- if (cop_ctl & PROC_SLEEP)
- {
- COP_CTL = cop_ctl & ~PROC_SLEEP;
- }
- }
-#else
- /* TODO: PP5002 */
-#endif
+ /* Pulse the COP */
+ core_wake(COP);
#endif /* NUM_CORES */
current_tick++;
@@ -837,49 +992,391 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
#endif /* INCLUDE_TIMEOUT_API */
-#ifndef SIMULATOR
-/*
- * Simulator versions in uisimulator/SIMVER/
- */
-
/****************************************************************************
- * Simple mutex functions
+ * Simple mutex functions ;)
****************************************************************************/
void mutex_init(struct mutex *m)
{
- m->locked = false;
+ m->queue = NULL;
m->thread = NULL;
+ m->count = 0;
+ m->locked = 0;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_init(&m->cl);
+#endif
}
void mutex_lock(struct mutex *m)
{
- if (test_and_set(&m->locked, 1))
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *const thread = cores[core].running;
+
+ if(thread == m->thread)
{
- /* Wait until the lock is open... */
- block_thread(&m->thread);
+ m->count++;
+ return;
}
+
+ /* Some code is repeated here because handling every variation in one
+ path would be too difficult to read */
+#if CONFIG_CORELOCK == CORELOCK_SWAP
+ /* peek at lock until it's no longer busy */
+ unsigned int locked;
+ while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
+ if(locked == 0)
+ {
+ m->thread = thread;
+ m->locked = 1;
+ return;
+ }
+
+ /* Block until the lock is open... */
+ cores[core].blk_ops.flags = TBOP_SET_VARu8;
+ cores[core].blk_ops.var_u8p = &m->locked;
+ cores[core].blk_ops.var_u8v = 1;
+#else
+ corelock_lock(&m->cl);
+ if (m->locked == 0)
+ {
+ m->locked = 1;
+ m->thread = thread;
+ corelock_unlock(&m->cl);
+ return;
+ }
+
+ /* Block until the lock is open... */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
+ cores[core].blk_ops.cl_p = &m->cl;
+#endif
+#endif /* CONFIG_CORELOCK */
+
+ block_thread_no_listlock(&m->queue);
}
void mutex_unlock(struct mutex *m)
{
- if (m->thread == NULL)
- m->locked = 0;
+ /* unlocker not being the owner is an unlocking violation */
+ KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
+ "mutex_unlock->wrong thread (recurse)");
+
+ if(m->count > 0)
+ {
+ /* this thread still owns lock */
+ m->count--;
+ return;
+ }
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+ /* lock out other cores */
+ corelock_lock(&m->cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ /* wait for peeker to move on */
+ while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
+#endif
+
+ /* transfer to next queued thread if any */
+ m->thread = wakeup_thread_no_listlock(&m->queue);
+
+ if(m->thread == NULL)
+ {
+ m->locked = 0; /* release lock */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&m->cl);
+#endif
+ }
+ else /* another thread is waiting - remain locked */
+ {
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&m->cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ m->locked = 1;
+#endif
+ }
+}
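
As a quick orientation for the reworked mutex, a minimal usage sketch; the buffer names are invented, while the recursion and ownership hand-off behave as implemented above.

    static struct mutex buf_mtx;

    void buf_init(void)
    {
        mutex_init(&buf_mtx);
    }

    void buf_append(void)
    {
        mutex_lock(&buf_mtx);   /* blocks on m->queue while another thread owns
                                   it; the owner re-locking only bumps m->count */
        /* ... modify shared buffer state ... */
        mutex_unlock(&buf_mtx); /* hands ownership to the next queued thread,
                                   if any, otherwise releases the lock */
    }
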
+
+/****************************************************************************
+ * Simpl-er mutex functions ;)
+ ****************************************************************************/
+void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags))
+{
+ l->locked = 0;
+ l->thread = NULL;
+ l->count = 0;
+#if NUM_CORES > 1
+ l->task_switch = flags & SPINLOCK_TASK_SWITCH;
+ corelock_init(&l->cl);
+#endif
+}
+
+void spinlock_lock(struct spinlock *l)
+{
+ struct thread_entry *const thread = cores[CURRENT_CORE].running;
+
+ if (l->thread == thread)
+ {
+ l->count++;
+ return;
+ }
+
+#if NUM_CORES > 1
+ if (l->task_switch != 0)
+#endif
+ {
+ /* Let other threads run until the lock is free */
+ while(test_and_set(&l->locked, 1, &l->cl) != 0)
+ {
+ /* spin and switch until the lock is open... */
+ switch_thread(NULL);
+ }
+ }
+#if NUM_CORES > 1
else
- wakeup_thread(&m->thread);
+ {
+ /* Use the corelock purely */
+ corelock_lock(&l->cl);
+ }
+#endif
+
+ l->thread = thread;
}
-void spinlock_lock(struct mutex *m)
+void spinlock_unlock(struct spinlock *l)
{
- while (test_and_set(&m->locked, 1))
+ /* unlocker not being the owner is an unlocking violation */
+ KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
+ "spinlock_unlock->wrong thread");
+
+ if (l->count > 0)
+ {
+ /* this thread still owns lock */
+ l->count--;
+ return;
+ }
+
+ /* clear owner */
+ l->thread = NULL;
+
+#if NUM_CORES > 1
+ if (l->task_switch != 0)
+#endif
{
- /* wait until the lock is open... */
- switch_thread(true, NULL);
+ /* release lock */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ /* This must be done since our unlock could be missed by the
+ test_and_set and leave the object locked permanently */
+ corelock_lock(&l->cl);
+#endif
+ l->locked = 0;
}
+
+#if NUM_CORES > 1
+ corelock_unlock(&l->cl);
+#endif
}
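
A hedged usage sketch of the new spinlock flavours; the driver names are invented, while the flag values mirror how i2c_spin and boostctrl_spin are set up elsewhere in this patch.

    static struct spinlock drv_spin NOCACHEBSS_ATTR;

    void drv_init(void)
    {
        /* SPINLOCK_TASK_SWITCH lets waiters yield while spinning; pass
         * SPINLOCK_NO_TASK_SWITCH instead for a pure corelock that must only
         * guard very short sections (as boostctrl_spin does) */
        spinlock_init(&drv_spin IF_COP(, SPINLOCK_TASK_SWITCH));
    }

    void drv_transfer(void)
    {
        spinlock_lock(&drv_spin);   /* recursive for the owning thread */
        /* ... exclusive hardware access ... */
        spinlock_unlock(&drv_spin);
    }
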
-void spinlock_unlock(struct mutex *m)
+/****************************************************************************
+ * Simple semaphore functions ;)
+ ****************************************************************************/
+#ifdef HAVE_SEMAPHORE_OBJECTS
+void semaphore_init(struct semaphore *s, int max, int start)
{
- m->locked = 0;
+ KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
+ "semaphore_init->inv arg");
+ s->queue = NULL;
+ s->max = max;
+ s->count = start;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_init(&s->cl);
+#endif
}
-#endif /* ndef SIMULATOR */
+void semaphore_wait(struct semaphore *s)
+{
+#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ corelock_lock(&s->cl);
+ if(--s->count >= 0)
+ {
+ corelock_unlock(&s->cl);
+ return;
+ }
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ int count;
+ while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
+ if(--count >= 0)
+ {
+ s->count = count;
+ return;
+ }
+#endif
+
+ /* too many waits - block until dequeued */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
+ cores[core].blk_ops.cl_p = &s->cl;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.flags = TBOP_SET_VARi;
+ cores[core].blk_ops.var_ip = &s->count;
+ cores[core].blk_ops.var_iv = count;
+#endif
+ block_thread_no_listlock(&s->queue);
+}
+
+void semaphore_release(struct semaphore *s)
+{
+#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ corelock_lock(&s->cl);
+ if (s->count < s->max)
+ {
+ if (++s->count <= 0)
+ {
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ int count;
+ while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
+ if(count < s->max)
+ {
+ if(++count <= 0)
+ {
+#endif /* CONFIG_CORELOCK */
+
+ /* there should be threads in this queue */
+ KERNEL_ASSERT(s->queue.queue != NULL, "semaphore->wakeup");
+ /* a thread was queued - wake it up */
+ wakeup_thread_no_listlock(&s->queue);
+ }
+ }
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&s->cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ s->count = count;
+#endif
+}
+#endif /* HAVE_SEMAPHORE_OBJECTS */
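
A minimal producer/consumer sketch of the semaphore API; the item-buffer side is left abstract and all names here are invented.

    static struct semaphore item_sem;

    void items_init(void)
    {
        semaphore_init(&item_sem, 8, 0); /* at most 8 outstanding, none at start */
    }

    void items_put(void)
    {
        /* ... enqueue an item ... */
        semaphore_release(&item_sem);    /* wakes one waiter if the count was
                                            negative, else counts up (up to max) */
    }

    void items_get(void)
    {
        semaphore_wait(&item_sem);       /* blocks while no items are available */
        /* ... dequeue an item ... */
    }
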
+
+/****************************************************************************
+ * Simple event functions ;)
+ ****************************************************************************/
+#ifdef HAVE_EVENT_OBJECTS
+void event_init(struct event *e, unsigned int flags)
+{
+ e->queues[STATE_NONSIGNALED] = NULL;
+ e->queues[STATE_SIGNALED] = NULL;
+ e->state = flags & STATE_SIGNALED;
+ e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_init(&e->cl);
+#endif
+}
+
+void event_wait(struct event *e, unsigned int for_state)
+{
+ unsigned int last_state;
+#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ corelock_lock(&e->cl);
+ last_state = e->state;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
+#endif
+
+ if(e->automatic != 0)
+ {
+ /* a wait for nonsignaled is satisfied by definition, and a wait for
+ signaled is satisfied when the event is currently signaled */
+ if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
+ {
+ /* automatic - unsignal */
+ e->state = STATE_NONSIGNALED;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&e->cl);
+#endif
+ return;
+ }
+ /* block until state matches */
+ }
+ else if(for_state == last_state)
+ {
+ /* the state being waited for is the current state */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&e->cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ e->state = last_state;
+#endif
+ return;
+ }
+
+ {
+ /* current state does not match wait-for state */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
+ cores[core].blk_ops.cl_p = &e->cl;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ const unsigned int core = CURRENT_CORE;
+ cores[core].blk_ops.flags = TBOP_SET_VARu8;
+ cores[core].blk_ops.var_u8p = &e->state;
+ cores[core].blk_ops.var_u8v = last_state;
+#endif
+ block_thread_no_listlock(&e->queues[for_state]);
+ }
+}
+
+void event_set_state(struct event *e, unsigned int state)
+{
+ unsigned int last_state;
+#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ corelock_lock(&e->cl);
+ last_state = e->state;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
+#endif
+
+ if(last_state == state)
+ {
+ /* no change */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&e->cl);
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ e->state = last_state;
+#endif
+ return;
+ }
+
+ if(state == STATE_SIGNALED)
+ {
+ if(e->automatic != 0)
+ {
+ struct thread_entry *thread;
+ /* no thread should have ever blocked for unsignaled */
+ KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL,
+ "set_event_state->queue[NS]:S");
+ /* pass to next thread and keep unsignaled - "pulse" */
+ thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
+ e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
+ }
+ else
+ {
+ /* release all threads waiting for signaled */
+ thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
+ e->state = STATE_SIGNALED;
+ }
+ }
+ else
+ {
+ /* release all threads waiting for unsignaled */
+
+ /* no thread should have ever blocked if automatic */
+ KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL ||
+ e->automatic == 0, "set_event_state->queue[NS]:NS");
+
+ thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
+ e->state = STATE_NONSIGNALED;
+ }
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+ corelock_unlock(&e->cl);
+#endif
+}
+#endif /* HAVE_EVENT_OBJECTS */
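
To show the new event object in use, a hedged sketch of an auto-reset completion flag; the dma_* names are invented and only the event_* calls come from this patch.

    static struct event dma_done;

    void dma_init(void)
    {
        /* automatic (auto-reset) event, initially nonsignaled */
        event_init(&dma_done, EVENT_AUTOMATIC);
    }

    void dma_complete(void)   /* called by the thread finishing the transfer */
    {
        /* wakes one thread waiting for the signaled state ("pulse"), or leaves
         * the event signaled if nobody is waiting */
        event_set_state(&dma_done, STATE_SIGNALED);
    }

    void dma_wait(void)
    {
        /* blocks until signaled; the automatic flag unsignals it again */
        event_wait(&dma_done, STATE_SIGNALED);
    }
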
diff --git a/firmware/mpeg.c b/firmware/mpeg.c
index 65fb024db3..a4632aae38 100644
--- a/firmware/mpeg.c
+++ b/firmware/mpeg.c
@@ -1230,7 +1230,7 @@ static void mpeg_thread(void)
{
static int pause_tick = 0;
static unsigned int pause_track = 0;
- struct event ev;
+ struct queue_event ev;
int len;
int free_space_left;
int unplayed_space_left;
@@ -2910,8 +2910,9 @@ void audio_init(void)
queue_init(&mpeg_queue, true);
#endif /* !SIMULATOR */
create_thread(mpeg_thread, mpeg_stack,
- sizeof(mpeg_stack), mpeg_thread_name IF_PRIO(, PRIORITY_SYSTEM)
- IF_COP(, CPU, false));
+ sizeof(mpeg_stack), 0, mpeg_thread_name
+ IF_PRIO(, PRIORITY_SYSTEM)
+ IF_COP(, CPU));
memset(trackdata, sizeof(trackdata), 0);
diff --git a/firmware/pcm_record.c b/firmware/pcm_record.c
index 361689de3a..c2d2719d05 100644
--- a/firmware/pcm_record.c
+++ b/firmware/pcm_record.c
@@ -213,8 +213,8 @@ enum
/***************************************************************************/
-static struct event_queue pcmrec_queue;
-static struct queue_sender_list pcmrec_queue_send;
+static struct event_queue pcmrec_queue NOCACHEBSS_ATTR;
+static struct queue_sender_list pcmrec_queue_send NOCACHEBSS_ATTR;
static long pcmrec_stack[3*DEFAULT_STACK_SIZE/sizeof(long)];
static const char pcmrec_thread_name[] = "pcmrec";
static struct thread_entry *pcmrec_thread_p;
@@ -365,8 +365,8 @@ void pcm_rec_init(void)
queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send);
pcmrec_thread_p =
create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack),
- pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING)
- IF_COP(, CPU, false));
+ 0, pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING)
+ IF_COP(, CPU));
} /* pcm_rec_init */
/** audio_* group **/
@@ -1437,7 +1437,7 @@ static void pcmrec_resume(void)
static void pcmrec_thread(void) __attribute__((noreturn));
static void pcmrec_thread(void)
{
- struct event ev;
+ struct queue_event ev;
logf("thread pcmrec start");
diff --git a/firmware/powermgmt.c b/firmware/powermgmt.c
index bb88fce318..fcc3030861 100644
--- a/firmware/powermgmt.c
+++ b/firmware/powermgmt.c
@@ -1103,9 +1103,9 @@ void powermgmt_init(void)
{
/* init history to 0 */
memset(power_history, 0x00, sizeof(power_history));
- create_thread(power_thread, power_stack, sizeof(power_stack),
+ create_thread(power_thread, power_stack, sizeof(power_stack), 0,
power_thread_name IF_PRIO(, PRIORITY_SYSTEM)
- IF_COP(, CPU, false));
+ IF_COP(, CPU));
}
#endif /* SIMULATOR */
diff --git a/firmware/rolo.c b/firmware/rolo.c
index fa1748341b..2a4b753948 100644
--- a/firmware/rolo.c
+++ b/firmware/rolo.c
@@ -63,8 +63,8 @@ void rolo_restart_cop(void)
{
/* There should be free thread slots aplenty */
create_thread(rolo_restart_cop, cop_idlestackbegin, IDLE_STACK_SIZE,
- "rolo COP" IF_PRIO(, PRIORITY_REALTIME)
- IF_COP(, COP, false));
+ 0, "rolo COP" IF_PRIO(, PRIORITY_REALTIME)
+ IF_COP(, COP));
return;
}
diff --git a/firmware/scroll_engine.c b/firmware/scroll_engine.c
index 63ca8883de..7c66601d6a 100644
--- a/firmware/scroll_engine.c
+++ b/firmware/scroll_engine.c
@@ -46,7 +46,7 @@ struct scrollinfo lcd_scroll[LCD_SCROLLABLE_LINES];
#ifdef HAVE_REMOTE_LCD
struct scrollinfo lcd_remote_scroll[LCD_REMOTE_SCROLLABLE_LINES];
-struct event_queue scroll_queue;
+struct event_queue scroll_queue NOCACHEBSS_ATTR;
#endif
struct scroll_screen_info lcd_scroll_info =
@@ -150,7 +150,7 @@ static void sync_display_ticks(void)
static bool scroll_process_message(int delay)
{
- struct event ev;
+ struct queue_event ev;
do
{
@@ -268,7 +268,7 @@ void scroll_init(void)
queue_init(&scroll_queue, true);
#endif
create_thread(scroll_thread, scroll_stack,
- sizeof(scroll_stack), scroll_name
+ sizeof(scroll_stack), 0, scroll_name
IF_PRIO(, PRIORITY_USER_INTERFACE)
- IF_COP(, CPU, false));
+ IF_COP(, CPU));
}
diff --git a/firmware/system.c b/firmware/system.c
index 6ff0dbb5d1..0b5ae1719e 100644
--- a/firmware/system.c
+++ b/firmware/system.c
@@ -35,6 +35,13 @@ long cpu_frequency NOCACHEBSS_ATTR = CPU_FREQ;
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
static int boost_counter NOCACHEBSS_ATTR = 0;
static bool cpu_idle NOCACHEBSS_ATTR = false;
+#if NUM_CORES > 1
+struct spinlock boostctrl_spin NOCACHEBSS_ATTR;
+void cpu_boost_init(void)
+{
+ spinlock_init(&boostctrl_spin, SPINLOCK_NO_TASK_SWITCH);
+}
+#endif
int get_cpu_boost_counter(void)
{
@@ -52,25 +59,51 @@ int cpu_boost_log_getcount(void)
}
char * cpu_boost_log_getlog_first(void)
{
+ char *first;
+#if NUM_CORES > 1
+ spinlock_lock(&boostctrl_spin);
+#endif
+
+ first = NULL;
+
if (cpu_boost_calls_count)
{
cpu_boost_track_message = 1;
- return cpu_boost_calls[cpu_boost_first];
+ first = cpu_boost_calls[cpu_boost_first];
}
- else return NULL;
+
+#if NUM_CORES > 1
+ spinlock_unlock(&boostctrl_spin);
+#endif
}
char * cpu_boost_log_getlog_next(void)
{
- int message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG;
+ int message;
+ char *next;
+
+#if NUM_CORES > 1
+ spinlock_lock(&boostctrl_spin);
+#endif
+
+ message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG;
+ next = NULL;
+
if (cpu_boost_track_message < cpu_boost_calls_count)
{
cpu_boost_track_message++;
- return cpu_boost_calls[message];
+ next = cpu_boost_calls[message];
}
- else return NULL;
+
+#if NUM_CORES > 1
+ spinlock_unlock(&boostctrl_spin);
+#endif
}
void cpu_boost_(bool on_off, char* location, int line)
{
+#if NUM_CORES > 1
+ spinlock_lock(&boostctrl_spin);
+#endif
+
if (cpu_boost_calls_count == MAX_BOOST_LOG)
{
cpu_boost_first = (cpu_boost_first+1)%MAX_BOOST_LOG;
@@ -88,32 +121,46 @@ void cpu_boost_(bool on_off, char* location, int line)
#else
void cpu_boost(bool on_off)
{
+#if NUM_CORES > 1
+ spinlock_lock(&boostctrl_spin);
#endif
+
+#endif /* CPU_BOOST_LOGGING */
if(on_off)
{
/* Boost the frequency if not already boosted */
- if(boost_counter++ == 0)
+ if(++boost_counter == 1)
set_cpu_frequency(CPUFREQ_MAX);
}
else
{
/* Lower the frequency if the counter reaches 0 */
- if(--boost_counter == 0)
+ if(--boost_counter <= 0)
{
if(cpu_idle)
set_cpu_frequency(CPUFREQ_DEFAULT);
else
set_cpu_frequency(CPUFREQ_NORMAL);
- }
- /* Safety measure */
- if(boost_counter < 0)
- boost_counter = 0;
+ /* Safety measure */
+ if (boost_counter < 0)
+ {
+ boost_counter = 0;
+ }
+ }
}
+
+#if NUM_CORES > 1
+ spinlock_unlock(&boostctrl_spin);
+#endif
}
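
For reference, the counted-boost contract seen from a caller; the codec function is invented, and the pairing requirement is what the counter logic above enforces.

    void codec_decode_chunk(void)
    {
        cpu_boost(true);    /* first boost switches to CPUFREQ_MAX */
        /* ... CPU-intensive work ... */
        cpu_boost(false);   /* last unboost returns to CPUFREQ_NORMAL, or
                               CPUFREQ_DEFAULT when idle; calls must be paired */
    }
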
void cpu_idle_mode(bool on_off)
{
+#if NUM_CORES > 1
+ spinlock_lock(&boostctrl_spin);
+#endif
+
cpu_idle = on_off;
/* We need to adjust the frequency immediately if the CPU
@@ -125,6 +172,10 @@ void cpu_idle_mode(bool on_off)
else
set_cpu_frequency(CPUFREQ_NORMAL);
}
+
+#if NUM_CORES > 1
+ spinlock_unlock(&boostctrl_spin);
+#endif
}
#endif /* HAVE_ADJUSTABLE_CPU_FREQ */
@@ -199,6 +250,7 @@ void UIE(unsigned int pc, unsigned int num)
/* TODO: perhaps add button handling in here when we get a polling
driver some day.
*/
+ core_idle();
}
}
diff --git a/firmware/target/arm/i2c-pp.c b/firmware/target/arm/i2c-pp.c
index 1cc25a1a10..e5813f9f9a 100644
--- a/firmware/target/arm/i2c-pp.c
+++ b/firmware/target/arm/i2c-pp.c
@@ -132,18 +132,18 @@ static int pp_i2c_send_byte(unsigned int addr, int data0)
}
/* Public functions */
-static struct mutex i2c_mutex;
+struct spinlock i2c_spin NOCACHEBSS_ATTR;
int i2c_readbytes(unsigned int dev_addr, int addr, int len, unsigned char *data) {
unsigned int temp;
int i;
- spinlock_lock(&i2c_mutex);
+ spinlock_lock(&i2c_spin);
pp_i2c_send_byte(dev_addr, addr);
for (i = 0; i < len; i++) {
pp_i2c_read_byte(dev_addr, &temp);
data[i] = temp;
}
- spinlock_unlock(&i2c_mutex);
+ spinlock_unlock(&i2c_spin);
return i;
}
@@ -151,10 +151,10 @@ int i2c_readbyte(unsigned int dev_addr, int addr)
{
int data;
- spinlock_lock(&i2c_mutex);
+ spinlock_lock(&i2c_spin);
pp_i2c_send_byte(dev_addr, addr);
pp_i2c_read_byte(dev_addr, &data);
- spinlock_unlock(&i2c_mutex);
+ spinlock_unlock(&i2c_spin);
return data;
}
@@ -167,9 +167,9 @@ int pp_i2c_send(unsigned int addr, int data0, int data1)
data[0] = data0;
data[1] = data1;
- spinlock_lock(&i2c_mutex);
+ spinlock_lock(&i2c_spin);
retval = pp_i2c_send_bytes(addr, 2, data);
- spinlock_unlock(&i2c_mutex);
+ spinlock_unlock(&i2c_spin);
return retval;
}
@@ -221,7 +221,7 @@ void i2c_init(void)
#endif
#endif
- spinlock_init(&i2c_mutex);
+ spinlock_init(&i2c_spin IF_COP(, SPINLOCK_TASK_SWITCH));
i2c_readbyte(0x8, 0);
}
diff --git a/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c b/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c
index 8866c3dcde..3a854afcdc 100644
--- a/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c
+++ b/firmware/target/arm/ipod/1g2g/adc-ipod-1g2g.c
@@ -22,7 +22,7 @@
#include "hwcompat.h"
#include "kernel.h"
-static struct mutex adc_mutex NOCACHEBSS_ATTR;
+static struct spinlock adc_spin NOCACHEBSS_ATTR;
/* used in the 2nd gen ADC interrupt */
static unsigned int_data;
@@ -33,7 +33,7 @@ unsigned short adc_scan(int channel)
unsigned short data = 0;
(void)channel; /* there is only one */
- spinlock_lock(&adc_mutex);
+ spinlock_lock(&adc_spin);
if ((IPOD_HW_REVISION >> 16) == 1)
{
@@ -69,7 +69,7 @@ unsigned short adc_scan(int channel)
data = int_data & 0xff;
}
- spinlock_unlock(&adc_mutex);
+ spinlock_unlock(&adc_spin);
return data;
}
@@ -100,7 +100,7 @@ void ipod_2g_adc_int(void)
void adc_init(void)
{
- spinlock_init(&adc_mutex);
+ spinlock_init(&adc_spin IF_COP(, SPINLOCK_TASK_SWITCH));
GPIOB_ENABLE |= 0x1e; /* enable B1..B4 */
diff --git a/firmware/target/arm/sandisk/adc-c200_e200.c b/firmware/target/arm/sandisk/adc-c200_e200.c
index 31321ece37..9dc8f3aabb 100644
--- a/firmware/target/arm/sandisk/adc-c200_e200.c
+++ b/firmware/target/arm/sandisk/adc-c200_e200.c
@@ -21,8 +21,6 @@
#include "i2c-pp.h"
#include "as3514.h"
-static struct mutex adc_mutex NOCACHEBSS_ATTR;
-
/* Read 10-bit channel data */
unsigned short adc_read(int channel)
{
@@ -30,7 +28,7 @@ unsigned short adc_read(int channel)
if ((unsigned)channel < NUM_ADC_CHANNELS)
{
- spinlock_lock(&adc_mutex);
+ spinlock_lock(&i2c_spin);
/* Select channel */
if (pp_i2c_send( AS3514_I2C_ADDR, ADC_0, (channel << 4)) >= 0)
@@ -44,7 +42,7 @@ unsigned short adc_read(int channel)
}
}
- spinlock_unlock(&adc_mutex);
+ spinlock_unlock(&i2c_spin);
}
return data;
@@ -52,5 +50,4 @@ unsigned short adc_read(int channel)
void adc_init(void)
{
- spinlock_init(&adc_mutex);
}
diff --git a/firmware/target/arm/sandisk/ata-c200_e200.c b/firmware/target/arm/sandisk/ata-c200_e200.c
index 14be27e19d..8e17152e6f 100644
--- a/firmware/target/arm/sandisk/ata-c200_e200.c
+++ b/firmware/target/arm/sandisk/ata-c200_e200.c
@@ -162,7 +162,7 @@ static struct sd_card_status sd_status[NUM_VOLUMES] =
/* Shoot for around 75% usage */
static long sd_stack [(DEFAULT_STACK_SIZE*2 + 0x1c0)/sizeof(long)];
static const char sd_thread_name[] = "ata/sd";
-static struct mutex sd_mtx;
+static struct spinlock sd_spin NOCACHEBSS_ATTR;
static struct event_queue sd_queue;
/* Posted when card plugged status has changed */
@@ -801,7 +801,7 @@ int ata_read_sectors(IF_MV2(int drive,) unsigned long start, int incount,
/* TODO: Add DMA support. */
- spinlock_lock(&sd_mtx);
+ spinlock_lock(&sd_spin);
ata_led(true);
@@ -888,7 +888,7 @@ ata_read_retry:
while (1)
{
ata_led(false);
- spinlock_unlock(&sd_mtx);
+ spinlock_unlock(&sd_spin);
return ret;
@@ -916,7 +916,7 @@ int ata_write_sectors(IF_MV2(int drive,) unsigned long start, int count,
const unsigned char *buf, *buf_end;
int bank;
- spinlock_lock(&sd_mtx);
+ spinlock_lock(&sd_spin);
ata_led(true);
@@ -1016,7 +1016,7 @@ ata_write_retry:
while (1)
{
ata_led(false);
- spinlock_unlock(&sd_mtx);
+ spinlock_unlock(&sd_spin);
return ret;
@@ -1034,7 +1034,7 @@ ata_write_error:
static void sd_thread(void) __attribute__((noreturn));
static void sd_thread(void)
{
- struct event ev;
+ struct queue_event ev;
bool idle_notified = false;
while (1)
@@ -1050,10 +1050,9 @@ static void sd_thread(void)
/* Lock to keep us from messing with this variable while an init
may be in progress */
- spinlock_lock(&sd_mtx);
+ spinlock_lock(&sd_spin);
card_info[1].initialized = 0;
sd_status[1].retry = 0;
- spinlock_unlock(&sd_mtx);
/* Either unmount because the card was pulled or unmount and
remount if already mounted since multiple messages may be
@@ -1073,6 +1072,8 @@ static void sd_thread(void)
if (action != SDA_NONE)
queue_broadcast(SYS_FS_CHANGED, 0);
+
+ spinlock_unlock(&sd_spin);
break;
} /* SD_HOTSWAP */
#endif /* HAVE_HOTSWAP */
@@ -1155,9 +1156,9 @@ int ata_init(void)
{
initialized = true;
- spinlock_init(&sd_mtx);
+ spinlock_init(&sd_spin IF_COP(, SPINLOCK_TASK_SWITCH));
- spinlock_lock(&sd_mtx);
+ spinlock_lock(&sd_spin);
/* init controller */
outl(inl(0x70000088) & ~(0x4), 0x70000088);
@@ -1181,8 +1182,8 @@ int ata_init(void)
ret = currcard->initialized;
queue_init(&sd_queue, true);
- create_thread(sd_thread, sd_stack, sizeof(sd_stack),
- sd_thread_name IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU, false));
+ create_thread(sd_thread, sd_stack, sizeof(sd_stack), 0,
+ sd_thread_name IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU));
/* enable interupt for the mSD card */
sleep(HZ/10);
@@ -1195,7 +1196,7 @@ int ata_init(void)
GPIOA_INT_CLR = 0x80;
GPIOA_INT_EN |= 0x80;
#endif
- spinlock_unlock(&sd_mtx);
+ spinlock_unlock(&sd_spin);
}
return ret;
diff --git a/firmware/target/arm/system-pp502x.c b/firmware/target/arm/system-pp502x.c
index 576459d6c1..d24d19f747 100644
--- a/firmware/target/arm/system-pp502x.c
+++ b/firmware/target/arm/system-pp502x.c
@@ -21,10 +21,6 @@
#include "i2s.h"
#include "i2c-pp.h"
-#if NUM_CORES > 1
-struct mutex boostctrl_mtx NOCACHEBSS_ATTR;
-#endif
-
#ifndef BOOTLOADER
extern void TIMER1(void);
extern void TIMER2(void);
@@ -129,16 +125,42 @@ static void init_cache(void)
}
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
+void scale_suspend_core(bool suspend) ICODE_ATTR;
+void scale_suspend_core(bool suspend)
+{
+ unsigned int core = CURRENT_CORE;
+ unsigned int othercore = 1 - core;
+ static unsigned long proc_bits IBSS_ATTR;
+ static int oldstatus IBSS_ATTR;
+
+ if (suspend)
+ {
+ oldstatus = set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
+ proc_bits = PROC_CTL(othercore) & 0xc0000000;
+ PROC_CTL(othercore) = 0x40000000; nop;
+ PROC_CTL(core) = 0x48000003; nop;
+ }
+ else
+ {
+ PROC_CTL(core) = 0x4800001f; nop;
+ if (proc_bits == 0)
+ PROC_CTL(othercore) = 0;
+ set_interrupt_status(oldstatus, IRQ_FIQ_STATUS);
+ }
+}
+
+void set_cpu_frequency(long frequency) ICODE_ATTR;
void set_cpu_frequency(long frequency)
#else
static void pp_set_cpu_frequency(long frequency)
#endif
{
#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
- /* Using mutex or spinlock isn't safe here. */
- while (test_and_set(&boostctrl_mtx.locked, 1)) ;
+ spinlock_lock(&boostctrl_spin);
#endif
+ scale_suspend_core(true);
+
cpu_frequency = frequency;
switch (frequency)
@@ -149,17 +171,20 @@ static void pp_set_cpu_frequency(long frequency)
* have this limitation (and the post divider?) */
case CPUFREQ_MAX:
CLOCK_SOURCE = 0x10007772; /* source #1: 24MHz, #2, #3, #4: PLL */
- DEV_TIMING1 = 0x00000808;
+ DEV_TIMING1 = 0x00000303;
#if CONFIG_CPU == PP5020
PLL_CONTROL = 0x8a020a03; /* 10/3 * 24MHz */
PLL_STATUS = 0xd19b; /* unlock frequencies > 66MHz */
PLL_CONTROL = 0x8a020a03; /* repeat setup */
+ scale_suspend_core(false);
udelay(500); /* wait for relock */
#elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024)
PLL_CONTROL = 0x8a121403; /* (20/3 * 24MHz) / 2 */
+ scale_suspend_core(false);
udelay(250);
while (!(PLL_STATUS & 0x80000000)); /* wait for relock */
#endif
+ scale_suspend_core(true);
break;
case CPUFREQ_NORMAL:
@@ -167,18 +192,23 @@ static void pp_set_cpu_frequency(long frequency)
DEV_TIMING1 = 0x00000303;
#if CONFIG_CPU == PP5020
PLL_CONTROL = 0x8a020504; /* 5/4 * 24MHz */
+ scale_suspend_core(false);
udelay(500); /* wait for relock */
#elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024)
PLL_CONTROL = 0x8a220501; /* (5/1 * 24MHz) / 4 */
+ scale_suspend_core(false);
udelay(250);
while (!(PLL_STATUS & 0x80000000)); /* wait for relock */
#endif
+ scale_suspend_core(true);
break;
case CPUFREQ_SLEEP:
CLOCK_SOURCE = 0x10002202; /* source #2: 32kHz, #1, #3, #4: 24MHz */
PLL_CONTROL &= ~0x80000000; /* disable PLL */
+ scale_suspend_core(false);
udelay(10000); /* let 32kHz source stabilize? */
+ scale_suspend_core(true);
break;
default:
@@ -186,12 +216,19 @@ static void pp_set_cpu_frequency(long frequency)
DEV_TIMING1 = 0x00000303;
PLL_CONTROL &= ~0x80000000; /* disable PLL */
cpu_frequency = CPUFREQ_DEFAULT;
+ PROC_CTL(CURRENT_CORE) = 0x4800001f; nop;
break;
}
+
+ if (frequency == CPUFREQ_MAX)
+ DEV_TIMING1 = 0x00000808;
+
CLOCK_SOURCE = (CLOCK_SOURCE & ~0xf0000000) | 0x20000000; /* select source #2 */
+ scale_suspend_core(false);
+
#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
- boostctrl_mtx.locked = 0;
+ spinlock_unlock(&boostctrl_spin);
#endif
}
#endif /* !BOOTLOADER */
@@ -256,7 +293,7 @@ void system_init(void)
#ifdef HAVE_ADJUSTABLE_CPU_FREQ
#if NUM_CORES > 1
- spinlock_init(&boostctrl_mtx);
+ cpu_boost_init();
#endif
#else
pp_set_cpu_frequency(CPUFREQ_MAX);
diff --git a/firmware/target/arm/system-target.h b/firmware/target/arm/system-target.h
index 7a1ff4f79a..6e433be9d5 100644
--- a/firmware/target/arm/system-target.h
+++ b/firmware/target/arm/system-target.h
@@ -46,6 +46,10 @@
#define inw(a) (*(volatile unsigned short *) (a))
#define outw(a,b) (*(volatile unsigned short *) (b) = (a))
+#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && NUM_CORES > 1
+extern struct spinlock boostctrl_spin;
+#endif
+
static inline void udelay(unsigned usecs)
{
unsigned stop = USEC_TIMER + usecs;
@@ -107,7 +111,6 @@ void flush_icache(void);
#endif /* CPU_PP502x */
-
#endif /* CPU_PP */
#endif /* SYSTEM_TARGET_H */
diff --git a/firmware/test/i2c/main.c b/firmware/test/i2c/main.c
index 88aa9151d6..0d54da5dea 100644
--- a/firmware/test/i2c/main.c
+++ b/firmware/test/i2c/main.c
@@ -708,7 +708,7 @@ int main(void)
- create_thread(mpeg_thread, stack - 0x2000, 0x4000);
+ create_thread(mpeg_thread, stack - 0x2000, 0x4000, 0);
@@ -1004,7 +1004,7 @@ void mpeg_thread(void)
{
- struct event ev;
+ struct queue_event ev;
int len;
diff --git a/firmware/test/kernel/main.c b/firmware/test/kernel/main.c
index b651324ae1..99642c867d 100644
--- a/firmware/test/kernel/main.c
+++ b/firmware/test/kernel/main.c
@@ -44,7 +44,7 @@ int main(void)
char buf[40];
char str[32];
int i=0;
- struct event *ev;
+ struct queue_event *ev;
/* Clear it all! */
SSR1 &= ~(SCI_RDRF | SCI_ORER | SCI_PER | SCI_FER);
@@ -69,8 +69,8 @@ int main(void)
queue_init(&main_q);
- create_thread(t1, s1, 1024);
- create_thread(t2, s2, 1024);
+ create_thread(t1, s1, 1024, 0);
+ create_thread(t2, s2, 1024, 0);
while(1)
{
diff --git a/firmware/thread.c b/firmware/thread.c
index 619a1e135a..c9ce049ea1 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -29,43 +29,150 @@
#include <profile.h>
#endif
-#if NUM_CORES > 1
-# define IF_COP2(x) x
+/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
+#ifdef DEBUG
+#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
-# define IF_COP2(x) CURRENT_CORE
+#define THREAD_EXTRA_CHECKS 0
#endif
+/**
+ * General locking order to guarantee progress. The order must be observed, but
+ * not all stages are necessarily required. Going from 1) to 3) is
+ * perfectly legal.
+ *
+ * 1) IRQ
+ * This is first because of the likelihood of having an interrupt occur that
+ * also accesses one of the objects farther down the list. Any non-blocking
+ * synchronization done may already have a lock on something during normal
+ * execution and if an interrupt handler running on the same processor as
+ * the one that has the resource locked were to attempt to access the
+ * resource, the interrupt handler would wait forever waiting for an unlock
+ * that will never happen. There is no danger if the interrupt occurs on
+ * a different processor because the one that has the lock will eventually
+ * unlock and the other processor's handler may proceed at that time. Not
+ * nescessary when the resource in question is definitely not available to
+ * interrupt handlers.
+ *
+ * 2) Kernel Object
+ * 1) May be needed beforehand if the kernel object allows dual-use such as
+ * event queues. The kernel object must have a scheme to protect itself from
+ * access by another processor and is responsible for serializing the calls
+ * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
+ * other. If a thread blocks on an object, it must fill in the blk_ops members
+ * for its core to unlock _after_ the thread's context has been saved; the
+ * unlocking is done in reverse order of this hierarchy.
+ *
+ * 3) Thread Slot
+ * This locks access to the thread's slot such that its state cannot be
+ * altered by another processor when a state change is in progress such as
+ * when it is in the process of going on a blocked list. An attempt to wake
+ * a thread while it is still blocking will likely desync its state with
+ * the other resources used for that state.
+ *
+ * 4) Lists
+ * Usually referring to a list (aka. queue) that a thread will be blocking
+ * on that belongs to some object and is shareable amongst multiple
+ * processors. Parts of the scheduler may have access to them without actually
+ * locking the kernel object such as when a thread is blocked with a timeout
+ * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
+ * its lists locked when the thread blocks so that all object list access is
+ * synchronized. Failure to do so would corrupt the list links.
+ *
+ * 5) Core Lists
+ * These lists are specific to a particular processor core and are accessible
+ * by all processor cores and interrupt handlers. They are used when an
+ * operation may only be performed by the thread's own core in a normal
+ * execution context. The wakeup list is the prime example where a thread
+ * may be added by any means and the thread's own core will remove it from
+ * the wakeup list and put it on the running list (which is only ever
+ * accessible by its own processor).
+ */
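
Condensed into a sketch, a blocking path that honours this order might look like the following pseudo-sequence (illustrative only, not a function from this file):

    /*  set_irq_level(HIGHEST_IRQ_LEVEL);        -- 1) IRQ
     *  corelock_lock(&object->cl);              -- 2) kernel object
     *  GET_THREAD_STATE(thread);                -- 3) thread slot
     *  add the thread to the object's queue     -- 4) lists
     *  core_schedule_wakeup() when waking       -- 5) core lists
     *
     *  Unlocking proceeds in reverse, partly via the blk_ops performed after
     *  the thread's context has been saved. */
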
#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the the machine int type, whose size could be < 4. */
-
struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
static int boosted_threads IBSS_ATTR;
#endif
-/* Define to enable additional checks for blocking violations etc. */
-#define THREAD_EXTRA_CHECKS 0
-
static const char main_thread_name[] = "main";
-
extern int stackbegin[];
extern int stackend[];
-/* Conserve IRAM
-static void add_to_list(struct thread_entry **list,
- struct thread_entry *thread) ICODE_ATTR;
-static void remove_from_list(struct thread_entry **list,
- struct thread_entry *thread) ICODE_ATTR;
-*/
+/* core_sleep procedure to implement for any CPU to ensure an asynchronous wakeup
+ * never results in requiring a wait until the next tick (up to 10000uS!). Likely
+ * requires assembly and careful instruction ordering. Multicore requires
+ * carefully timed sections in order to have synchronization without locking of
+ * any sort.
+ *
+ * 1) Disable all interrupts (FIQ and IRQ for ARM for instance)
+ * 2) Check *waking == NULL.
+ * 3) *waking not NULL? Goto step 7.
+ * 4) On multicore, stay awake if directed to do so by another. If so, goto step 7.
+ * 5) If processor requires, atomically reenable interrupts and perform step 6.
+ * 6) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
+ * goto step 8.
+ * 7) Reenable interrupts.
+ * 8) Exit procedure.
+ */
+static inline void core_sleep(
+ IF_COP(unsigned int core,) struct thread_entry **waking)
+ __attribute__((always_inline));
+
+static void check_tmo_threads(void)
+ __attribute__((noinline));
+
+static inline void block_thread_on_l(
+ struct thread_queue *list, struct thread_entry *thread, unsigned state)
+ __attribute__((always_inline));
+
+static inline void block_thread_on_l_no_listlock(
+ struct thread_entry **list, struct thread_entry *thread, unsigned state)
+ __attribute__((always_inline));
+
+static inline void _block_thread_on_l(
+ struct thread_queue *list, struct thread_entry *thread,
+ unsigned state IF_SWCL(, const bool single))
+ __attribute__((always_inline));
+
+IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
+ struct thread_queue *list IF_SWCL(, const bool nolock))
+ __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
+
+IF_SWCL(static inline) void _block_thread(
+ struct thread_queue *list IF_SWCL(, const bool nolock))
+ __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
+
+static void add_to_list_tmo(struct thread_entry *thread)
+ __attribute__((noinline));
+
+static void core_schedule_wakeup(struct thread_entry *thread)
+ __attribute__((noinline));
+
+static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
+ __attribute__((always_inline));
+
+static inline void run_blocking_ops(
+ IF_COP_VOID(unsigned int core, struct thread_entry *thread))
+ __attribute__((always_inline));
+
+static void thread_stkov(struct thread_entry *thread)
+ __attribute__((noinline));
-void switch_thread(bool save_context, struct thread_entry **blocked_list)
- ICODE_ATTR;
+static inline void store_context(void* addr)
+ __attribute__((always_inline));
-static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
- __attribute__ ((always_inline));
-static inline void core_sleep(void) __attribute__((always_inline));
+ __attribute__((always_inline));
+
+void switch_thread(struct thread_entry *old)
+ __attribute__((noinline));
+
+
+/****************************************************************************
+ * Processor-specific section
+ */
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
@@ -94,6 +201,14 @@ static void start_thread(void)
); /* No clobber list - new thread doesn't care */
}
+/* For startup, place context pointer in r4 slot, start_thread pointer in r5
+ * slot, and thread function pointer in context.start. See load_context for
+ * what happens when thread is initially going to run. */
+#define THREAD_STARTUP_INIT(core, thread, function) \
+ ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
+ (thread)->context.r[1] = (unsigned int)start_thread, \
+ (thread)->context.start = (void *)function; })
+
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
@@ -106,14 +221,10 @@ static inline void store_context(void* addr)
);
}
-/* For startup, place context pointer in r4 slot, start_thread pointer in r5
- * slot, and thread function pointer in context.start. See load_context for
- * what happens when thread is initially going to run. */
-#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
- (thread)->context.r[1] = (unsigned int)start_thread, \
- (thread)->context.start = (void *)function; })
-
+/*---------------------------------------------------------------------------
+ * Load non-volatile context.
+ *---------------------------------------------------------------------------
+ */
static inline void load_context(const void* addr)
{
asm volatile(
@@ -139,14 +250,226 @@ static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
};
#endif /* NUM_CORES */
-static inline void core_sleep(void)
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* Software core locks using Peterson's mutual exclusion algorithm */
+
+/*---------------------------------------------------------------------------
+ * Initialize the corelock structure.
+ *---------------------------------------------------------------------------
+ */
+void corelock_init(struct corelock *cl)
{
- /* This should sleep the CPU. It appears to wake by itself on
- interrupts */
- if (CURRENT_CORE == CPU)
- CPU_CTL = PROC_SLEEP;
- else
- COP_CTL = PROC_SLEEP;
+ memset(cl, 0, sizeof (*cl));
+}
+
+#if 1 /* Assembly locks to minimize overhead */
+/*---------------------------------------------------------------------------
+ * Wait for the corelock to become free and acquire it when it does.
+ *---------------------------------------------------------------------------
+ */
+void corelock_lock(struct corelock *cl) __attribute__((naked));
+void corelock_lock(struct corelock *cl)
+{
+ asm volatile (
+ "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "ldrb r1, [r1] \n"
+ "mov r3, #1 \n" /* cl->myl[core] = 1 */
+ "strb r3, [r0, r1, lsr #7] \n"
+ "and r2, r1, #1 \n" /* r2 = othercore */
+ "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+ "1: \n"
+ "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */
+ "cmp r3, #1 \n"
+ "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore ? */
+ "cmpeq r3, r2 \n"
+ "bxne lr \n" /* no? lock acquired */
+ "b 1b \n" /* keep trying */
+ : : "i"(&PROCESSOR_ID)
+ );
+ (void)cl;
+}
+
+/*---------------------------------------------------------------------------
+ * Try to acquire the corelock. If free, the caller gets it; otherwise return 0.
+ *---------------------------------------------------------------------------
+ */
+int corelock_try_lock(struct corelock *cl) __attribute__((naked));
+int corelock_try_lock(struct corelock *cl)
+{
+ asm volatile (
+ "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "ldrb r1, [r1] \n"
+ "mov r3, #1 \n" /* cl->myl[core] = 1 */
+ "strb r3, [r0, r1, lsr #7] \n"
+ "and r2, r1, #1 \n" /* r2 = othercore */
+ "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+ "1: \n"
+ "ldrb r3, [r0, r2] \n" /* cl->myl[othercore] == 1 ? */
+ "cmp r3, #1 \n"
+ "ldreqb r3, [r0, #2] \n" /* && cl->turn == othercore? */
+ "cmpeq r3, r2 \n"
+ "movne r0, #1 \n" /* no? lock acquired */
+ "bxne lr \n"
+ "mov r2, #0 \n" /* cl->myl[core] = 0 */
+ "strb r2, [r0, r1, lsr #7] \n"
+ "mov r0, r2 \n"
+ "bx lr \n" /* acquisition failed */
+ : : "i"(&PROCESSOR_ID)
+ );
+
+ return 0;
+ (void)cl;
+}
+
+/*---------------------------------------------------------------------------
+ * Release ownership of the corelock
+ *---------------------------------------------------------------------------
+ */
+void corelock_unlock(struct corelock *cl) __attribute__((naked));
+void corelock_unlock(struct corelock *cl)
+{
+ asm volatile (
+ "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "ldrb r1, [r1] \n"
+ "mov r2, #0 \n" /* cl->myl[core] = 0 */
+ "strb r2, [r0, r1, lsr #7] \n"
+ "bx lr \n"
+ : : "i"(&PROCESSOR_ID)
+ );
+ (void)cl;
+}
+#else /* C versions for reference */
+/*---------------------------------------------------------------------------
+ * Wait for the corelock to become free and acquire it when it does.
+ *---------------------------------------------------------------------------
+ */
+void corelock_lock(struct corelock *cl)
+{
+ const unsigned int core = CURRENT_CORE;
+ const unsigned int othercore = 1 - core;
+
+ cl->myl[core] = 1;
+ cl->turn = othercore;
+
+ while (cl->myl[othercore] == 1 && cl->turn == othercore);
+}
+
+/*---------------------------------------------------------------------------
+ * Try to acquire the corelock. If free, the caller gets it; otherwise return 0.
+ *---------------------------------------------------------------------------
+ */
+int corelock_try_lock(struct corelock *cl)
+{
+ const unsigned int core = CURRENT_CORE;
+ const unsigned int othercore = 1 - core;
+
+ cl->myl[core] = 1;
+ cl->turn = othercore;
+
+ if (cl->myl[othercore] == 1 && cl->turn == othercore)
+ {
+ cl->myl[core] = 0;
+ return 0;
+ }
+
+ return 1;
+}
+
+/*---------------------------------------------------------------------------
+ * Release ownership of the corelock
+ *---------------------------------------------------------------------------
+ */
+void corelock_unlock(struct corelock *cl)
+{
+ cl->myl[CURRENT_CORE] = 0;
+}
+#endif /* ASM / C selection */
+
+#endif /* CONFIG_CORELOCK == SW_CORELOCK */
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(IF_COP(unsigned int core,) struct thread_entry **waking)
+{
+#if NUM_CORES > 1
+#ifdef CPU_PP502x
+ /* Disabling IRQ and FIQ is important to making the fixed-time sequence
+ * non-interruptable */
+ asm volatile (
+ "mrs r2, cpsr \n" /* Disable IRQ, FIQ */
+ "orr r2, r2, #0xc0 \n"
+ "msr cpsr_c, r2 \n"
+ "ldr r0, [%[w]] \n" /* Check *waking */
+ "cmp r0, #0 \n" /* != NULL -> exit */
+ "bne 1f \n"
+ /* ------ fixed-time sequence ----- */
+ "ldr r0, [%[ms], %[oc], lsl #2] \n" /* Stay-awake requested? */
+ "mov r1, #0x80000000 \n"
+ "tst r0, #1 \n"
+ "streq r1, [%[ct], %[c], lsl #2] \n" /* Sleep if not */
+ "nop \n"
+ "mov r0, #0 \n"
+ "str r0, [%[ct], %[c], lsl #2] \n" /* Clear control reg */
+ /* -------------------------------- */
+ "1: \n"
+ "mov r0, #1 \n"
+ "add r1, %[ms], #8 \n"
+ "str r0, [r1, %[oc], lsl #2] \n" /* Clear mailbox */
+ "bic r2, r2, #0xc0 \n" /* Enable interrupts */
+ "msr cpsr_c, r2 \n"
+ :
+ : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)),
+ [c]"r" (core), [oc]"r"(1-core), [w]"r"(waking)
+ : "r0", "r1", "r2");
+#else
+ /* TODO: PP5002 */
+#endif /* CONFIG_CPU == */
+#else
+ set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
+ if (*waking == NULL)
+ {
+ PROC_CTL(IF_COP_CORE(core)) = PROC_SLEEP;
+ }
+ set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
+#endif /* NUM_CORES */
+}
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+void core_wake(IF_COP_VOID(unsigned int othercore))
+{
+#if NUM_CORES == 1
+ /* No wakey - core already wakey */
+#elif defined (CPU_PP502x)
+ /* avoid r0 since that contains othercore */
+ asm volatile (
+ "mrs r2, cpsr \n"
+ "orr r1, r2, #0xc0 \n"
+ "msr cpsr_c, r1 \n"
+ "mov r1, #1 \n"
+ /* ------ fixed-time sequence ----- */
+ "str r1, [%[ms], %[oc], lsl #2] \n" /* Send stay-awake message */
+ "nop \n"
+ "nop \n"
+ "ldr r1, [%[ct], %[oc], lsl #2] \n" /* Wake other core if asleep */
+ "tst r1, #0x80000000 \n"
+ "bic r1, r1, #0x80000000 \n"
+ "strne r1, [%[ct], %[oc], lsl #2] \n"
+ /* -------------------------------- */
+ "msr cpsr_c, r2 \n"
+ :
+ : [ct]"r"(&PROC_CTL(CPU)), [ms]"r"(&PROC_MESSAGE(CPU)),
+ [oc]"r" (othercore)
+ : "r1", "r2");
+#else
+ PROC_CTL(othercore) = PROC_WAKE;
+#endif
}
#if NUM_CORES > 1
@@ -167,22 +490,120 @@ static inline void switch_to_idle_stack(const unsigned int core)
: : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
(void)core;
}
+
+/*---------------------------------------------------------------------------
+ * Perform core switch steps that need to take place inside switch_thread.
+ *
+ * These steps must take place before changing the processor and after
+ * having entered switch_thread since switch_thread may not do a normal return
+ * because the stack being used for anything the compiler saved will not belong
+ * to the thread's destination core and it may have been recycled for other
+ * purposes by the time a normal context load has taken place. switch_thread
+ * will also clobber anything stashed in the thread's context or stored in the
+ * nonvolatile registers if it is saved there before the call since the
+ * compiler's order of operations cannot be known for certain.
+ */
+static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
+{
+ /* Flush our data to ram */
+ flush_icache();
+ /* Stash thread in r4 slot */
+ thread->context.r[0] = (unsigned int)thread;
+ /* Stash restart address in r5 slot */
+ thread->context.r[1] = (unsigned int)thread->context.start;
+ /* Save sp in context.sp while still running on old core */
+ thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
+}
+
+/*---------------------------------------------------------------------------
+ * Machine-specific helper function for switching the processor a thread is
+ * running on. Basically, the thread suicides on the departing core and is
+ * reborn on the destination. Were it not for gcc's ill-behavior regarding
+ * naked functions written in C where it actually clobbers non-volatile
+ * registers before the intended prologue code, this would all be much
+ * simpler. Generic setup is done in switch_core itself.
+ */
+
+/*---------------------------------------------------------------------------
+ * This actually performs the core switch.
+ */
+static void switch_thread_core(unsigned int core, struct thread_entry *thread)
+ __attribute__((naked));
+static void switch_thread_core(unsigned int core, struct thread_entry *thread)
+{
+ /* Pure asm for this because compiler behavior isn't sufficiently predictable.
+ * Stack access also isn't permitted until restoring the original stack and
+ * context. */
+ asm volatile (
+ "stmfd sp!, { r4-r12, lr } \n" /* Stack all non-volatile context on current core */
+ "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
+ "ldr r2, [r2, r0, lsl #2] \n"
+ "add r2, r2, %0*4 \n"
+ "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
+ "mov sp, r2 \n" /* switch stacks */
+ "adr r2, 1f \n" /* r2 = new core restart address */
+ "str r2, [r1, #40] \n" /* thread->context.start = r2 */
+ "mov r0, r1 \n" /* switch_thread(thread) */
+ "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
+ "1: \n"
+ "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
+ "mov r1, #0 \n" /* Clear start address */
+ "str r1, [r0, #40] \n"
+ "ldr r0, =invalidate_icache \n" /* Invalidate new core's cache */
+ "mov lr, pc \n"
+ "bx r0 \n"
+ "ldmfd sp!, { r4-r12, pc } \n" /* Restore non-volatile context to new core and return */
+ ".ltorg \n" /* Dump constant pool */
+ : : "i"(IDLE_STACK_WORDS)
+ );
+ (void)core; (void)thread;
+}
#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440
-static inline void core_sleep(void)
+
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(struct thread_entry **waking)
{
- int i;
- CLKCON |= (1 << 2); /* set IDLE bit */
- for(i=0; i<10; i++); /* wait for IDLE */
- CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
+ /* FIQ also changes the CLKCON register so FIQ must be disabled
+ when changing it here */
+ asm volatile (
+ "mrs r0, cpsr \n" /* Disable IRQ, FIQ */
+ "orr r0, r0, #0xc0 \n"
+ "msr cpsr_c, r0 \n"
+ "ldr r1, [%0] \n" /* Check *waking */
+ "cmp r1, #0 \n"
+ "bne 2f \n" /* != NULL -> exit */
+ "bic r0, r0, #0xc0 \n" /* Prepare IRQ, FIQ enable */
+ "mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
+ "ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
+ "orr r2, r2, #4 \n"
+ "str r2, [r1, #0xc] \n"
+ "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
+ "mov r3, #0 \n" /* wait for IDLE */
+ "1: \n"
+ "add r3, r3, #1 \n"
+ "cmp r3, #10 \n"
+ "bne 1b \n"
+ "orr r0, r0, #0xc0 \n" /* Disable IRQ, FIQ */
+ "msr cpsr_c, r0 \n"
+ "ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
+ "bic r2, r2, #4 \n"
+ "str r2, [r1, #0xc] \n"
+ "2: \n"
+ "bic r0, r0, #0xc0 \n" /* Enable IRQ, FIQ */
+ "msr cpsr_c, r0 \n"
+ : : "r"(waking) : "r0", "r1", "r2", "r3");
}
#else
static inline void core_sleep(void)
{
}
-#endif
+#endif /* CONFIG_CPU == */
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
@@ -252,17 +673,28 @@ static inline void load_context(const void* addr)
);
}
-static inline void core_sleep(void)
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(struct thread_entry **waking)
{
- asm volatile ("stop #0x2000");
-}
-
-/* Set EMAC unit to fractional mode with saturation for each new thread,
- since that's what'll be the most useful for most things which the dsp
- will do. Codecs should still initialize their preferred modes
- explicitly. */
-#define THREAD_CPU_INIT(core, thread) \
- ({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
+ asm volatile (
+ "moveq.l %1, %%d0 \n" /* Disable interrupts (not audio DMA) */
+ "lsl.l #8, %%d0 \n"
+ "move.w %%d0, %%sr \n"
+ "tst.l (%0) \n" /* Check *waking */
+ "beq.b 1f \n" /* != NULL -> exit */
+ "moveq.l #0x20, %%d0 \n" /* Enable interrupts */
+ "lsl.l #8, %%d0 \n"
+ "move.w %%d0, %%sr \n"
+ ".word 0x51fb \n" /* tpf.l - eat stop instruction */
+ "1: \n"
+ "stop #0x2000 \n" /* Supervisor mode, interrupts enabled
+ upon wakeup */
+ : : "a"(waking), "i"((0x2000 | HIGHEST_IRQ_LEVEL) >> 8) : "d0"
+ );
+};
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
@@ -342,18 +774,37 @@ static inline void load_context(const void* addr)
);
}
-static inline void core_sleep(void)
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_sleep(struct thread_entry **waking)
{
- and_b(0x7F, &SBYCR);
- asm volatile ("sleep");
+ asm volatile (
+ "mov %2, r1 \n" /* Disable interrupts */
+ "ldc r1, sr \n"
+ "mov.l @%1, r1 \n" /* Check *waking */
+ "tst r1, r1 \n"
+ "bf 1f \n" /* *waking != NULL ? exit */
+ "and.b #0x7f, @(r0, gbr) \n" /* Clear SBY (bit 7) in SBYCR */
+ "mov #0, r1 \n" /* Enable interrupts */
+ "ldc r1, sr \n" /* Following instruction cannot be interrupted */
+ "bra 2f \n" /* bra and sleep are executed at once */
+ "sleep \n" /* Execute standby */
+ "1: \n"
+ "mov #0, r1 \n" /* Enable interrupts */
+ "ldc r1, sr \n"
+ "2: \n"
+ :
+ : "z"(&SBYCR-GBR), "r"(waking), "i"(HIGHEST_IRQ_LEVEL)
+ : "r1");
}
-#endif
+#endif /* CONFIG_CPU == */
-#ifndef THREAD_CPU_INIT
-/* No cpu specific init - make empty */
-#define THREAD_CPU_INIT(core, thread)
-#endif
+/*
+ * End Processor-specific section
+ ***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
@@ -387,462 +838,1030 @@ static void thread_stkov(struct thread_entry *thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
-static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
+/*---------------------------------------------------------------------------
+ * Lock a list pointer and returns its value
+ *---------------------------------------------------------------------------
+ */
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* Separate locking function versions */
+
+/* Thread locking */
+#define GET_THREAD_STATE(thread) \
+ ({ corelock_lock(&(thread)->cl); (thread)->state; })
+#define TRY_GET_THREAD_STATE(thread) \
+ ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
+#define UNLOCK_THREAD(thread, state) \
+ ({ corelock_unlock(&(thread)->cl); })
+#define UNLOCK_THREAD_SET_STATE(thread, _state) \
+ ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
+
+/* List locking */
+#define LOCK_LIST(tqp) \
+ ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
+#define UNLOCK_LIST(tqp, mod) \
+ ({ corelock_unlock(&(tqp)->cl); })
+#define UNLOCK_LIST_SET_PTR(tqp, mod) \
+ ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
+
+/* Select the queue pointer directly */
+#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
+ ({ add_to_list_l(&(tqp)->queue, (thread)); })
+#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
+ ({ remove_from_list_l(&(tqp)->queue, (thread)); })
+
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+/* Native swap/exchange versions */
+
+/* Thread locking */
+#define GET_THREAD_STATE(thread) \
+ ({ unsigned _s; \
+ while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
+ _s; })
+#define TRY_GET_THREAD_STATE(thread) \
+ ({ xchg8(&(thread)->state, STATE_BUSY); })
+#define UNLOCK_THREAD(thread, _state) \
+ ({ (thread)->state = (_state); })
+#define UNLOCK_THREAD_SET_STATE(thread, _state) \
+ ({ (thread)->state = (_state); })
+
+/* List locking */
+#define LOCK_LIST(tqp) \
+ ({ struct thread_entry *_l; \
+ while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
+ _l; })
+#define UNLOCK_LIST(tqp, mod) \
+ ({ (tqp)->queue = (mod); })
+#define UNLOCK_LIST_SET_PTR(tqp, mod) \
+ ({ (tqp)->queue = (mod); })
+
+/* Select the local queue pointer copy returned from LOCK_LIST */
+#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
+ ({ add_to_list_l(&(tc), (thread)); })
+#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
+ ({ remove_from_list_l(&(tc), (thread)); })
+
+#else
+/* Single-core/non-locked versions */
+
+/* Threads */
+#define GET_THREAD_STATE(thread) \
+ ({ (thread)->state; })
+#define UNLOCK_THREAD(thread, _state)
+#define UNLOCK_THREAD_SET_STATE(thread, _state) \
+ ({ (thread)->state = (_state); })
+
+/* Lists */
+#define LOCK_LIST(tqp) \
+ ({ (tqp)->queue; })
+#define UNLOCK_LIST(tqp, mod)
+#define UNLOCK_LIST_SET_PTR(tqp, mod) \
+ ({ (tqp)->queue = (mod); })
+
+/* Select the queue pointer directly */
+#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
+ ({ add_to_list_l(&(tqp)->queue, (thread)); })
+#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
+ ({ remove_from_list_l(&(tqp)->queue, (thread)); })
+
+#endif /* locking selection */
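
Whichever variant is selected, the calling contract is the same: every GET_THREAD_STATE() must be balanced by exactly one UNLOCK_THREAD() or UNLOCK_THREAD_SET_STATE() on that slot. A minimal illustrative sketch (demo_thread_is_blocked is a hypothetical helper, not part of the API):

static bool demo_thread_is_blocked(struct thread_entry *thread)
{
    unsigned state = GET_THREAD_STATE(thread);  /* lock slot, read state */
    bool blocked = (state == STATE_BLOCKED || state == STATE_BLOCKED_W_TMO);
    UNLOCK_THREAD(thread, state);               /* release, state unchanged */
    return blocked;
}
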
+
+#if THREAD_EXTRA_CHECKS
+/*---------------------------------------------------------------------------
+ * Lock the thread slot to obtain the state and then unlock it. Waits for
+ * it not to be busy. Used for debugging.
+ *---------------------------------------------------------------------------
+ */
+static unsigned peek_thread_state(struct thread_entry *thread)
+{
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ unsigned state = GET_THREAD_STATE(thread);
+ UNLOCK_THREAD(thread, state);
+ set_irq_level(oldlevel);
+ return state;
+}
+#endif /* THREAD_EXTRA_CHECKS */
+
+/*---------------------------------------------------------------------------
+ * Adds a thread to a list of threads using "insert last". Uses the "l"
+ * links.
+ *---------------------------------------------------------------------------
+ */
+static void add_to_list_l(struct thread_entry **list,
+ struct thread_entry *thread)
{
- if (*list == NULL)
+ struct thread_entry *l = *list;
+
+ if (l == NULL)
{
- thread->next = thread;
- thread->prev = thread;
+ /* Insert into unoccupied list */
+ thread->l.next = thread;
+ thread->l.prev = thread;
*list = thread;
+ return;
}
- else
- {
- /* Insert last */
- thread->next = *list;
- thread->prev = (*list)->prev;
- thread->prev->next = thread;
- (*list)->prev = thread;
-
- /* Insert next
- thread->next = (*list)->next;
- thread->prev = *list;
- thread->next->prev = thread;
- (*list)->next = thread;
- */
- }
+
+ /* Insert last */
+ thread->l.next = l;
+ thread->l.prev = l->l.prev;
+ thread->l.prev->l.next = thread;
+ l->l.prev = thread;
+
+ /* Insert next
+ thread->l.next = l->l.next;
+ thread->l.prev = l;
+ thread->l.next->l.prev = thread;
+ l->l.next = thread;
+ */
+}
+
+/*---------------------------------------------------------------------------
+ * Locks a list, adds the thread entry and unlocks the list on multicore.
+ * Defined as add_to_list_l on single-core.
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES > 1
+static void add_to_list_l_locked(struct thread_queue *tq,
+ struct thread_entry *thread)
+{
+ struct thread_entry *t = LOCK_LIST(tq);
+ ADD_TO_LIST_L_SELECT(t, tq, thread);
+ UNLOCK_LIST(tq, t);
+ (void)t;
}
+#else
+#define add_to_list_l_locked(tq, thread) \
+ add_to_list_l(&(tq)->queue, (thread))
+#endif
-static void remove_from_list(struct thread_entry **list,
- struct thread_entry *thread)
+/*---------------------------------------------------------------------------
+ * Removes a thread from a list of threads. Uses the "l" links.
+ *---------------------------------------------------------------------------
+ */
+static void remove_from_list_l(struct thread_entry **list,
+ struct thread_entry *thread)
{
- if (list != NULL)
+ struct thread_entry *prev, *next;
+
+ next = thread->l.next;
+
+ if (thread == next)
{
- if (thread == thread->next)
- {
- *list = NULL;
- return;
- }
-
- if (thread == *list)
- *list = thread->next;
+ /* The only item */
+ *list = NULL;
+ return;
+ }
+
+ if (thread == *list)
+ {
+ /* List becomes next item */
+ *list = next;
}
+
+ prev = thread->l.prev;
/* Fix links to jump over the removed entry. */
- thread->prev->next = thread->next;
- thread->next->prev = thread->prev;
+ prev->l.next = next;
+ next->l.prev = prev;
}
-static void check_sleepers(void) __attribute__ ((noinline));
-static void check_sleepers(void)
+/*---------------------------------------------------------------------------
+ * Locks a list, removes the thread entry and unlocks the list on multicore.
+ * Defined as remove_from_list_l on single-core.
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES > 1
+static void remove_from_list_l_locked(struct thread_queue *tq,
+ struct thread_entry *thread)
{
- const unsigned int core = CURRENT_CORE;
- struct thread_entry *current, *next;
-
- /* Check sleeping threads. */
- current = cores[core].sleeping;
-
- for (;;)
+ struct thread_entry *t = LOCK_LIST(tq);
+ REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
+ UNLOCK_LIST(tq, t);
+ (void)t;
+}
+#else
+#define remove_from_list_l_locked(tq, thread) \
+ remove_from_list_l(&(tq)->queue, (thread))
+#endif
+
+/*---------------------------------------------------------------------------
+ * Add a thread to the core's timeout list by linking the pointers in its
+ * tmo structure.
+ *---------------------------------------------------------------------------
+ */
+static void add_to_list_tmo(struct thread_entry *thread)
+{
+ /* Insert first */
+ struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
+
+ thread->tmo.prev = thread;
+ thread->tmo.next = t;
+
+ if (t != NULL)
+ {
+ /* Fix second item's prev pointer to point to this thread */
+ t->tmo.prev = thread;
+ }
+
+ cores[IF_COP_CORE(thread->core)].timeout = thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Remove a thread from the core's timeout list by unlinking the pointers in
+ * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
+ * is cancelled.
+ *---------------------------------------------------------------------------
+ */
+static void remove_from_list_tmo(struct thread_entry *thread)
+{
+ struct thread_entry *next = thread->tmo.next;
+ struct thread_entry *prev;
+
+ if (thread == cores[IF_COP_CORE(thread->core)].timeout)
{
- next = current->next;
-
- if ((unsigned)current_tick >= GET_STATE_ARG(current->statearg))
+ /* Next item becomes list head */
+ cores[IF_COP_CORE(thread->core)].timeout = next;
+
+ if (next != NULL)
{
- /* Sleep timeout has been reached so bring the thread
- * back to life again. */
- remove_from_list(&cores[core].sleeping, current);
- add_to_list(&cores[core].running, current);
- current->statearg = 0;
-
- /* If there is no more processes in the list, break the loop. */
- if (cores[core].sleeping == NULL)
- break;
-
- current = next;
- continue;
+ /* Fix new list head's prev to point to itself. */
+ next->tmo.prev = next;
}
-
- current = next;
-
- /* Break the loop once we have walked through the list of all
- * sleeping processes. */
- if (current == cores[core].sleeping)
- break;
+
+ thread->tmo.prev = NULL;
+ return;
+ }
+
+ prev = thread->tmo.prev;
+
+ if (next != NULL)
+ {
+ next->tmo.prev = prev;
}
+
+ prev->tmo.next = next;
+ thread->tmo.prev = NULL;
}
-/* Safely finish waking all threads potentialy woken by interrupts -
- * statearg already zeroed in wakeup_thread. */
-static void wake_list_awaken(void) __attribute__ ((noinline));
-static void wake_list_awaken(void)
+/*---------------------------------------------------------------------------
+ * Schedules a thread wakeup on the specified core. Threads will be made
+ * ready to run when the next task switch occurs. Note that this does not
+ * introduce an on-core delay, since the soonest any new thread could run is
+ * that task switch anyway. Other cores and on-core interrupts may only ever
+ * add to the list.
+ *---------------------------------------------------------------------------
+ */
+static void core_schedule_wakeup(struct thread_entry *thread)
{
- const unsigned int core = CURRENT_CORE;
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ const unsigned int core = IF_COP_CORE(thread->core);
+ add_to_list_l_locked(&cores[core].waking, thread);
+#if NUM_CORES > 1
+ if (core != CURRENT_CORE)
+ {
+ core_wake(core);
+ }
+#endif
+ set_irq_level(oldlevel);
+}
- /* No need for another check in the IRQ lock since IRQs are allowed
- only to add threads to the waking list. They won't be adding more
- until we're done here though. */
-
- struct thread_entry *waking = cores[core].waking;
- struct thread_entry *running = cores[core].running;
+/*---------------------------------------------------------------------------
+ * If the waking list was populated, move all threads on it onto the running
+ * list so they may be run ASAP.
+ *---------------------------------------------------------------------------
+ */
+static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
+{
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
+ struct thread_entry *r = cores[IF_COP_CORE(core)].running;
- if (running != NULL)
+    /* Transfer all threads on waking list to running list in one
+ swoop */
+ if (r != NULL)
{
/* Place waking threads at the end of the running list. */
struct thread_entry *tmp;
- waking->prev->next = running;
- running->prev->next = waking;
- tmp = running->prev;
- running->prev = waking->prev;
- waking->prev = tmp;
+ w->l.prev->l.next = r;
+ r->l.prev->l.next = w;
+ tmp = r->l.prev;
+ r->l.prev = w->l.prev;
+ w->l.prev = tmp;
}
else
{
- /* Just transfer the list as-is - just came out of a core
- * sleep. */
- cores[core].running = waking;
+ /* Just transfer the list as-is */
+ cores[IF_COP_CORE(core)].running = w;
}
+ /* Just leave any timeout threads on the timeout list. If a timeout check
+ * is due, they will be removed there. If they do a timeout again before
+ * being removed, they will just stay on the list with a new expiration
+ * tick. */
- /* Done with waking list */
- cores[core].waking = NULL;
+ /* Waking list is clear - NULL and unlock it */
+ UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
set_irq_level(oldlevel);
}
-static inline void sleep_core(void)
+/*---------------------------------------------------------------------------
+ * Check the core's timeout list when at least one thread is due to wake.
+ * Filtering for the condition is done before making the call. Resets the
+ * tick when the next check will occur.
+ *---------------------------------------------------------------------------
+ */
+static void check_tmo_threads(void)
{
const unsigned int core = CURRENT_CORE;
+ const long tick = current_tick; /* snapshot the current tick */
+ long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
+ struct thread_entry *next = cores[core].timeout;
- for (;;)
+ /* If there are no processes waiting for a timeout, just keep the check
+ tick from falling into the past. */
+ if (next != NULL)
{
- /* We want to do these ASAP as it may change the decision to sleep
- the core or the core has woken because an interrupt occurred
- and posted a message to a queue. */
- if (cores[core].waking != NULL)
- wake_list_awaken();
+ /* Check sleeping threads. */
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- if (cores[core].last_tick != current_tick)
+ do
{
- if (cores[core].sleeping != NULL)
- check_sleepers();
- cores[core].last_tick = current_tick;
+            /* Must make sure no one else is examining the state, wait until
+ slot is no longer busy */
+ struct thread_entry *curr = next;
+ next = curr->tmo.next;
+
+ unsigned state = GET_THREAD_STATE(curr);
+
+ if (state < TIMEOUT_STATE_FIRST)
+ {
+ /* Cleanup threads no longer on a timeout but still on the
+ * list. */
+ remove_from_list_tmo(curr);
+ UNLOCK_THREAD(curr, state); /* Unlock thread slot */
+ }
+ else if (TIME_BEFORE(tick, curr->tmo_tick))
+ {
+ /* Timeout still pending - this will be the usual case */
+ if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
+ {
+ /* Earliest timeout found so far - move the next check up
+ to its time */
+ next_tmo_check = curr->tmo_tick;
+ }
+ UNLOCK_THREAD(curr, state); /* Unlock thread slot */
+ }
+ else
+ {
+ /* Sleep timeout has been reached so bring the thread back to
+ * life again. */
+ if (state == STATE_BLOCKED_W_TMO)
+ {
+ remove_from_list_l_locked(curr->bqp, curr);
+ }
+
+ remove_from_list_tmo(curr);
+ add_to_list_l(&cores[core].running, curr);
+ UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
+ }
+
+ /* Break the loop once we have walked through the list of all
+ * sleeping processes or have removed them all. */
}
-
- /* We must sleep until there is at least one process in the list
- * of running processes. */
- if (cores[core].running != NULL)
- break;
+ while (next != NULL);
- /* Enter sleep mode to reduce power usage, woken up on interrupt */
- core_sleep();
+ set_irq_level(oldlevel);
}
+
+ cores[core].next_tmo_check = next_tmo_check;
}
-#ifdef RB_PROFILE
-static int get_threadnum(struct thread_entry *thread)
+/*---------------------------------------------------------------------------
+ * Performs operations that must be done before blocking a thread but after
+ * the state is saved - follows reverse of locking order. blk_ops.flags is
+ * assumed to be nonzero.
+ *---------------------------------------------------------------------------
+ */
+static inline void run_blocking_ops(
+ IF_COP_VOID(unsigned int core, struct thread_entry *thread))
{
- int i;
-
- for (i = 0; i < MAXTHREADS; i++)
+#if NUM_CORES > 1
+ struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
+ const unsigned flags = ops->flags;
+
+ if (flags == 0)
+ return;
+
+ if (flags & TBOP_SWITCH_CORE)
{
- if (&threads[i] == thread)
- return i;
+ core_switch_blk_op(core, thread);
}
-
- return -1;
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+ if (flags & TBOP_UNLOCK_LIST)
+ {
+ UNLOCK_LIST(ops->list_p, NULL);
+ }
+
+ if (flags & TBOP_UNLOCK_CORELOCK)
+ {
+ corelock_unlock(ops->cl_p);
+ }
+
+ if (flags & TBOP_UNLOCK_THREAD)
+ {
+ UNLOCK_THREAD(ops->thread, 0);
+ }
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ /* Write updated variable value into memory location */
+ switch (flags & TBOP_VAR_TYPE_MASK)
+ {
+ case TBOP_UNLOCK_LIST:
+ UNLOCK_LIST(ops->list_p, ops->list_v);
+ break;
+ case TBOP_SET_VARi:
+ *ops->var_ip = ops->var_iv;
+ break;
+ case TBOP_SET_VARu8:
+ *ops->var_u8p = ops->var_u8v;
+ break;
+ }
+#endif /* CONFIG_CORELOCK == */
+
+ /* Unlock thread's slot */
+ if (flags & TBOP_UNLOCK_CURRENT)
+ {
+ UNLOCK_THREAD(thread, ops->state);
+ }
+
+ /* Reset the IRQ level */
+ if (flags & TBOP_IRQ_LEVEL)
+ {
+ set_irq_level(ops->irq_level);
+ }
+
+ ops->flags = 0;
+#else
+ int level = cores[CURRENT_CORE].irq_level;
+ if (level == STAY_IRQ_LEVEL)
+ return;
+
+ cores[CURRENT_CORE].irq_level = STAY_IRQ_LEVEL;
+ set_irq_level(level);
+#endif /* NUM_CORES */
}
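
A hedged sketch of how a blocking path hands run_blocking_ops() a deferred IRQ-level restore; 'demo_queue' is a hypothetical struct thread_queue, and the slot/irq_level handling mirrors the kernel paths further below:

    struct thread_entry *current = cores[CURRENT_CORE].running;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

#if NUM_CORES > 1
    GET_THREAD_STATE(current);                     /* lock our own slot */
    cores[CURRENT_CORE].blk_ops.flags = TBOP_IRQ_LEVEL;
    cores[CURRENT_CORE].blk_ops.irq_level = oldlevel;
#else
    cores[CURRENT_CORE].irq_level = oldlevel;      /* restored in switch_thread */
#endif
    block_thread_on_l(&demo_queue, current, STATE_BLOCKED);
    switch_thread(current);   /* run_blocking_ops() restores the IRQ level */
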
-void profile_thread(void) {
- profstart(get_threadnum(cores[CURRENT_CORE].running));
+
+/*---------------------------------------------------------------------------
+ * Runs any operations that may cause threads to be ready to run and then
+ * sleeps the processor core until the next interrupt if none are.
+ *---------------------------------------------------------------------------
+ */
+static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
+{
+ for (;;)
+ {
+        /* Do these ASAP as they may change the decision to sleep the core,
+         * or the core may have woken because an interrupt occurred and
+         * posted a message to a queue. */
+ if (cores[IF_COP_CORE(core)].waking.queue != NULL)
+ {
+ core_perform_wakeup(IF_COP(core));
+ }
+
+ /* If there are threads on a timeout and the earliest wakeup is due,
+ * check the list and wake any threads that need to start running
+ * again. */
+ if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
+ {
+ check_tmo_threads();
+ }
+
+ /* If there is a ready to run task, return its ID and keep core
+ * awake. */
+ if (cores[IF_COP_CORE(core)].running != NULL)
+ {
+ return cores[IF_COP_CORE(core)].running;
+ }
+
+ /* Enter sleep mode to reduce power usage - woken up on interrupt or
+ * wakeup request from another core. May abort if the waking list
+ * became populated (again). See beginning of this file for the
+ * algorithm to atomically determine this. */
+ core_sleep(IF_COP(core, ) &cores[IF_COP_CORE(core)].waking.queue);
+ }
+}
+
+#ifdef RB_PROFILE
+void profile_thread(void)
+{
+ profstart(cores[CURRENT_CORE].running - threads);
}
#endif
-static void change_thread_state(struct thread_entry **blocked_list) __attribute__ ((noinline));
-static void change_thread_state(struct thread_entry **blocked_list)
+/*---------------------------------------------------------------------------
+ * Prepares a thread to block on an object's list and/or for a specified
+ * duration - expects object and slot to be appropriately locked if needed.
+ *---------------------------------------------------------------------------
+ */
+static inline void _block_thread_on_l(struct thread_queue *list,
+ struct thread_entry *thread,
+ unsigned state
+ IF_SWCL(, const bool nolock))
{
- const unsigned int core = CURRENT_CORE;
- struct thread_entry *old;
- unsigned long new_state;
-
+ /* If inlined, unreachable branches will be pruned with no size penalty
+ because constant params are used for state and nolock. */
+ const unsigned int core = IF_COP_CORE(thread->core);
+
/* Remove the thread from the list of running threads. */
- old = cores[core].running;
- new_state = GET_STATE(old->statearg);
+ remove_from_list_l(&cores[core].running, thread);
- /* Check if a thread state change has been requested. */
- if (new_state)
+ /* Add a timeout to the block if not infinite */
+ switch (state)
{
- /* Change running thread state and switch to next thread. */
- remove_from_list(&cores[core].running, old);
-
- /* And put the thread into a new list of inactive threads. */
- if (new_state == STATE_BLOCKED)
- add_to_list(blocked_list, old);
+ case STATE_BLOCKED:
+ /* Put the thread into a new list of inactive threads. */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ if (nolock)
+ {
+ thread->bqp = NULL; /* Indicate nolock list */
+ thread->bqnlp = (struct thread_entry **)list;
+ add_to_list_l((struct thread_entry **)list, thread);
+ }
+ else
+#endif
+ {
+ thread->bqp = list;
+ add_to_list_l_locked(list, thread);
+ }
+ break;
+ case STATE_BLOCKED_W_TMO:
+ /* Put the thread into a new list of inactive threads. */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ if (nolock)
+ {
+ thread->bqp = NULL; /* Indicate nolock list */
+ thread->bqnlp = (struct thread_entry **)list;
+ add_to_list_l((struct thread_entry **)list, thread);
+ }
else
- add_to_list(&cores[core].sleeping, old);
-
-#ifdef HAVE_PRIORITY_SCHEDULING
- /* Reset priorities */
- if (old->priority == cores[core].highest_priority)
- cores[core].highest_priority = 100;
#endif
+ {
+ thread->bqp = list;
+ add_to_list_l_locked(list, thread);
+ }
+ /* Fall-through */
+ case STATE_SLEEPING:
+ /* If this thread times out sooner than any other thread, update
+ next_tmo_check to its timeout */
+ if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
+ {
+ cores[core].next_tmo_check = thread->tmo_tick;
+ }
+
+ if (thread->tmo.prev == NULL)
+ {
+ add_to_list_tmo(thread);
+ }
+ /* else thread was never removed from list - just keep it there */
+ break;
}
- else
- /* Switch to the next running thread. */
- cores[core].running = old->next;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Reset priorities */
+ if (thread->priority == cores[core].highest_priority)
+ cores[core].highest_priority = LOWEST_PRIORITY;
+#endif
+
+#if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
+ /* Safe to set state now */
+ thread->state = state;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ cores[core].blk_ops.state = state;
+#endif
+
+#if NUM_CORES > 1
+ /* Delay slot unlock until task switch */
+ cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
+#endif
+}
+
+static inline void block_thread_on_l(
+ struct thread_queue *list, struct thread_entry *thread, unsigned state)
+{
+ _block_thread_on_l(list, thread, state IF_SWCL(, false));
+}
+
+static inline void block_thread_on_l_no_listlock(
+ struct thread_entry **list, struct thread_entry *thread, unsigned state)
+{
+ _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
}
/*---------------------------------------------------------------------------
- * Switch thread in round robin fashion.
+ * Switch thread in round robin fashion for any given priority. Any thread
+ * that removed itself from the running list first must specify itself in
+ * the parameter.
+ *
+ * INTERNAL: Intended for use by kernel and not for programs.
*---------------------------------------------------------------------------
*/
-void switch_thread(bool save_context, struct thread_entry **blocked_list)
+void switch_thread(struct thread_entry *old)
{
const unsigned int core = CURRENT_CORE;
+ struct thread_entry *thread = cores[core].running;
+
+ if (old == NULL)
+ {
+ /* Move to next thread */
+ old = thread;
+ cores[core].running = old->l.next;
+ }
+ /* else running list is already at next thread */
#ifdef RB_PROFILE
- profile_thread_stopped(get_threadnum(cores[core].running));
+ profile_thread_stopped(old - threads);
#endif
- unsigned int *stackptr;
-
-#ifdef SIMULATOR
- /* Do nothing */
-#else
/* Begin task switching by saving our current context so that we can
* restore the state of the current thread later to the point prior
* to this call. */
- if (save_context)
- {
- store_context(&cores[core].running->context);
+ store_context(&old->context);
- /* Check if the current thread stack is overflown */
- stackptr = cores[core].running->stack;
- if(stackptr[0] != DEADBEEF)
- thread_stkov(cores[core].running);
-
- /* Rearrange thread lists as needed */
- change_thread_state(blocked_list);
+ /* Check if the current thread stack is overflown */
+ if(((unsigned int *)old->stack)[0] != DEADBEEF)
+ thread_stkov(old);
+
+ /* Run any blocking operations requested before switching/sleeping */
+ run_blocking_ops(IF_COP(core, old));
- /* This has to be done after the scheduler is finished with the
- blocked_list pointer so that an IRQ can't kill us by attempting
- a wake but before attempting any core sleep. */
- if (cores[core].switch_to_irq_level != STAY_IRQ_LEVEL)
- {
- int level = cores[core].switch_to_irq_level;
- cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
- set_irq_level(level);
- }
- }
-
/* Go through the list of sleeping task to check if we need to wake up
* any of them due to timeout. Also puts core into sleep state until
* there is at least one running process again. */
- sleep_core();
-
+ thread = sleep_core(IF_COP(core));
+
#ifdef HAVE_PRIORITY_SCHEDULING
/* Select the new task based on priorities and the last time a process
* got CPU time. */
for (;;)
{
- int priority = cores[core].running->priority;
+ int priority = MIN(thread->priority, thread->priority_x);
if (priority < cores[core].highest_priority)
cores[core].highest_priority = priority;
if (priority == cores[core].highest_priority ||
- (current_tick - cores[core].running->last_run >
- priority * 8) ||
- cores[core].running->priority_x != 0)
+ (current_tick - thread->last_run > priority * 8))
{
+ cores[core].running = thread;
break;
}
- cores[core].running = cores[core].running->next;
+ thread = thread->l.next;
}
/* Reset the value of thread's last running time to the current time. */
- cores[core].running->last_run = current_tick;
-#endif
-
-#endif
-
+ thread->last_run = current_tick;
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
/* And finally give control to the next thread. */
- load_context(&cores[core].running->context);
-
+ load_context(&thread->context);
+
#ifdef RB_PROFILE
- profile_thread_started(get_threadnum(cores[core].running));
+ profile_thread_started(thread - threads);
#endif
}
-void sleep_thread(int ticks)
+/*---------------------------------------------------------------------------
+ * Removes the boost flag from a thread and unboosts the CPU if thread count
+ * of boosted threads reaches zero. Requires thread slot to be locked first.
+ *---------------------------------------------------------------------------
+ */
+static inline void unboost_thread(struct thread_entry *thread)
{
- struct thread_entry *current;
-
- current = cores[CURRENT_CORE].running;
-
#ifdef HAVE_SCHEDULER_BOOSTCTRL
- if (STATE_IS_BOOSTED(current->statearg))
+ if (thread->boosted != 0)
{
- boosted_threads--;
- if (!boosted_threads)
+ thread->boosted = 0;
+ if (--boosted_threads == 0)
{
cpu_boost(false);
}
}
#endif
-
- /* Set the thread's new state and timeout and finally force a task switch
- * so that scheduler removes thread from the list of running processes
- * and puts it in list of sleeping tasks. */
- SET_STATE(current->statearg, STATE_SLEEPING, current_tick + ticks + 1);
-
- switch_thread(true, NULL);
+ (void)thread;
}
-void block_thread(struct thread_entry **list)
+/*---------------------------------------------------------------------------
+ * Sleeps a thread for a specified number of ticks and unboosts the thread
+ * if it is boosted. If ticks is zero, it does not delay but instead switches
+ * tasks.
+ *
+ * INTERNAL: Intended for use by kernel and not for programs.
+ *---------------------------------------------------------------------------
+ */
+void sleep_thread(int ticks)
{
- struct thread_entry *current;
-
/* Get the entry for the current running thread. */
- current = cores[CURRENT_CORE].running;
+ struct thread_entry *current = cores[CURRENT_CORE].running;
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
- /* Keep the boosted state over indefinite block calls, because
- * we are waiting until the earliest time that someone else
- * completes an action */
- unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
+#if NUM_CORES > 1
+ /* Lock thread slot */
+ GET_THREAD_STATE(current);
#endif
- /* We are not allowed to mix blocking types in one queue. */
- THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO,
- "Blocking violation B->*T", current);
-
+ /* Remove our boosted status if any */
+ unboost_thread(current);
+
+ /* Set our timeout, change lists, and finally switch threads.
+     * Unlock during switch on multicore. */
+ current->tmo_tick = current_tick + ticks + 1;
+ block_thread_on_l(NULL, current, STATE_SLEEPING);
+ switch_thread(current);
+
+ /* Our status should be STATE_RUNNING */
+ THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
+ "S:R->!*R", current);
+}
+
+/*---------------------------------------------------------------------------
+ * Indefinitely block a thread on a blocking queue for explicit wakeup.
+ * Caller with interrupt-accessible lists should disable interrupts first
+ * and request a TBOP_IRQ_LEVEL blocking operation to reset it.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+IF_SWCL(static inline) void _block_thread(struct thread_queue *list
+ IF_SWCL(, const bool nolock))
+{
+ /* Get the entry for the current running thread. */
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
/* Set the state to blocked and ask the scheduler to switch tasks,
* this takes us off of the run queue until we are explicitly woken */
- SET_STATE(current->statearg, STATE_BLOCKED, 0);
- switch_thread(true, list);
+#if NUM_CORES > 1
+ /* Lock thread slot */
+ GET_THREAD_STATE(current);
+#endif
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
- /* Reset only the boosted flag to indicate we are up and running again. */
- current->statearg = boost_flag;
-#else
- /* Clear all flags to indicate we are up and running again. */
- current->statearg = 0;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ /* One branch optimized away during inlining */
+ if (nolock)
+ {
+ block_thread_on_l_no_listlock((struct thread_entry **)list,
+ current, STATE_BLOCKED);
+ }
+ else
#endif
+ {
+ block_thread_on_l(list, current, STATE_BLOCKED);
+ }
+
+ switch_thread(current);
+
+ /* Our status should be STATE_RUNNING */
+ THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
+ "B:R->!*R", current);
+}
+
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* Inline lock/nolock version of _block_thread into these functions */
+void block_thread(struct thread_queue *tq)
+{
+ _block_thread(tq, false);
}
-void block_thread_w_tmo(struct thread_entry **list, int timeout)
+void block_thread_no_listlock(struct thread_entry **list)
+{
+ _block_thread((struct thread_queue *)list, true);
+}
+#endif /* CONFIG_CORELOCK */
+
+/*---------------------------------------------------------------------------
+ * Block a thread on a blocking queue for a specified time interval or until
+ * explicitly woken - whichever happens first.
+ * Caller with interrupt-accessible lists should disable interrupts first
+ * and request that interrupt level be restored after switching out the
+ * current thread.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+void block_thread_w_tmo(struct thread_queue *list, int timeout)
{
- struct thread_entry *current;
/* Get the entry for the current running thread. */
- current = cores[CURRENT_CORE].running;
-
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
+#if NUM_CORES > 1
+ /* Lock thread slot */
+ GET_THREAD_STATE(current);
+#endif
+
/* A block with a timeout is a sleep situation, whatever we are waiting
* for _may or may not_ happen, regardless of boost state, (user input
* for instance), so this thread no longer needs to boost */
- if (STATE_IS_BOOSTED(current->statearg))
- {
- boosted_threads--;
- if (!boosted_threads)
- {
- cpu_boost(false);
- }
- }
-#endif
-
- /* We can store only one thread to the "list" if thread is used
- * in other list (such as core's list for sleeping tasks). */
- THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);
+ unboost_thread(current);
/* Set the state to blocked with the specified timeout */
- SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);
-
- /* Set the "list" for explicit wakeup */
- *list = current;
+ current->tmo_tick = current_tick + timeout;
+ /* Set the list for explicit wakeup */
+ block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
/* Now force a task switch and block until we have been woken up
- * by another thread or timeout is reached. */
- switch_thread(true, NULL);
+ * by another thread or timeout is reached - whichever happens first */
+ switch_thread(current);
- /* It is now safe for another thread to block on this "list" */
- *list = NULL;
+ /* Our status should be STATE_RUNNING */
+ THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
+ "T:R->!*R", current);
}
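
A hedged usage sketch of the timed wait from a kernel object's point of view; 'struct demo_obj' and its 'waiters' queue are hypothetical, and the IRQ handling follows the deferred-restore pattern sketched after run_blocking_ops() above:

struct demo_obj
{
    struct thread_queue waiters;   /* hypothetical corelocked waiter list */
};

static void demo_wait_w_tmo(struct demo_obj *obj, int ticks)
{
    /* Raise the IRQ level and request its deferred restore here, as in the
     * sketch after run_blocking_ops() above, because interrupts may reach
     * obj->waiters. */
    block_thread_w_tmo(&obj->waiters, ticks);  /* returns on wakeup or timeout */
}
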
-#if !defined(SIMULATOR)
-void set_irq_level_and_block_thread(struct thread_entry **list, int level)
+/*---------------------------------------------------------------------------
+ * Explicitly wakeup a thread on a blocking queue. Has no effect on threads
+ * that called sleep().
+ * Caller with interrupt-accessible lists should disable interrupts first.
+ * This code should be considered a critical section by the caller.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
+ struct thread_queue *list IF_SWCL(, const bool nolock))
{
- cores[CURRENT_CORE].switch_to_irq_level = level;
- block_thread(list);
-}
+ struct thread_entry *t;
+ struct thread_entry *thread;
+ unsigned state;
-void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
- int timeout, int level)
-{
- cores[CURRENT_CORE].switch_to_irq_level = level;
- block_thread_w_tmo(list, timeout);
-}
+ /* Wake up the last thread first. */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ /* One branch optimized away during inlining */
+ if (nolock)
+ {
+ t = list->queue;
+ }
+ else
#endif
+ {
+ t = LOCK_LIST(list);
+ }
-void wakeup_thread(struct thread_entry **list)
-{
- struct thread_entry *thread;
-
/* Check if there is a blocked thread at all. */
- if (*list == NULL)
+ if (t == NULL)
{
- return ;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ if (!nolock)
+#endif
+ {
+ UNLOCK_LIST(list, NULL);
+ }
+ return NULL;
}
-
- /* Wake up the last thread first. */
- thread = *list;
-
+
+ thread = t;
+
+#if NUM_CORES > 1
+#if CONFIG_CORELOCK == SW_CORELOCK
+ if (nolock)
+ {
+ /* Lock thread only, not list */
+ state = GET_THREAD_STATE(thread);
+ }
+ else
+#endif
+ {
+ /* This locks in reverse order from other routines so a retry in the
+ correct order may be needed */
+ state = TRY_GET_THREAD_STATE(thread);
+ if (state == STATE_BUSY)
+ {
+ /* Unlock list and retry slot, then list */
+ UNLOCK_LIST(list, t);
+ state = GET_THREAD_STATE(thread);
+ t = LOCK_LIST(list);
+ /* Be sure thread still exists here - it couldn't have re-added
+ itself if it was woken elsewhere because this function is
+ serialized within the object that owns the list. */
+ if (thread != t)
+ {
+ /* Thread disappeared :( */
+ UNLOCK_LIST(list, t);
+ UNLOCK_THREAD(thread, state);
+ return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
+ }
+ }
+ }
+#else /* NUM_CORES == 1 */
+ state = GET_THREAD_STATE(thread);
+#endif /* NUM_CORES */
+
/* Determine thread's current state. */
- switch (GET_STATE(thread->statearg))
+ switch (state)
{
- case STATE_BLOCKED:
- /* Remove thread from the list of blocked threads and add it
- * to the scheduler's list of running processes. List removal
- * is safe since each object maintains it's own list of
- * sleepers and queues protect against reentrancy. */
- remove_from_list(list, thread);
- add_to_list(cores[IF_COP2(thread->core)].wakeup_list, thread);
-
- case STATE_BLOCKED_W_TMO:
- /* Just remove the timeout to cause scheduler to immediately
- * wake up the thread. */
- thread->statearg = 0;
- break;
-
- default:
- /* Nothing to do. Thread has already been woken up
- * or it's state is not blocked or blocked with timeout. */
- return ;
+ case STATE_BLOCKED:
+ case STATE_BLOCKED_W_TMO:
+ /* Remove thread from object's blocked list - select t or list depending
+ on locking type at compile time */
+ REMOVE_FROM_LIST_L_SELECT(t, list, thread);
+#if CONFIG_CORELOCK == SW_CORELOCK
+        /* Statement optimized away during inlining if nolock != false */
+ if (!nolock)
+#endif
+ {
+ UNLOCK_LIST(list, t); /* Unlock list - removal complete */
+ }
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Give the task a kick to avoid a stall after wakeup.
+ Not really proper treatment - TODO later. */
+ thread->last_run = current_tick - 8*LOWEST_PRIORITY;
+#endif
+ core_schedule_wakeup(thread);
+ UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
+ return thread;
+ default:
+ /* Nothing to do. State is not blocked. */
+#if THREAD_EXTRA_CHECKS
+ THREAD_PANICF("wakeup_thread->block invalid", thread);
+ case STATE_RUNNING:
+ case STATE_KILLED:
+#endif
+#if CONFIG_CORELOCK == SW_CORELOCK
+ /* Statement optimized away during inlining if nolock != false */
+ if (!nolock)
+#endif
+ {
+ UNLOCK_LIST(list, t); /* Unlock the object's list */
+ }
+ UNLOCK_THREAD(thread, state); /* Unlock thread slot */
+ return NULL;
}
}
-inline static int find_empty_thread_slot(void)
+#if CONFIG_CORELOCK == SW_CORELOCK
+/* Inline lock/nolock version of _wakeup_thread into these functions */
+struct thread_entry * wakeup_thread(struct thread_queue *tq)
+{
+ return _wakeup_thread(tq, false);
+}
+
+struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
+{
+ return _wakeup_thread((struct thread_queue *)list, true);
+}
+#endif /* CONFIG_CORELOCK */
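
The matching signal side for the hypothetical object sketched earlier, plus a note on the list-lock-free pairing used for lists with a single owner such as thread->queue (sketch only; 'some_list' is hypothetical):

static void demo_signal(struct demo_obj *obj)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); /* IRQs may use the list */
    wakeup_thread(&obj->waiters);   /* moves one waiter to its core's waking list */
    set_irq_level(oldlevel);
}

/* For an unshared list, the no-listlock flavors pair the same way:
 *   block_thread_no_listlock(&some_list);  ...  wakeup_thread_no_listlock(&some_list);
 */
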
+
+/*---------------------------------------------------------------------------
+ * Find an empty thread slot or MAXTHREADS if none found. The slot returned
+ * will be locked on multicore.
+ *---------------------------------------------------------------------------
+ */
+static int find_empty_thread_slot(void)
{
+#if NUM_CORES > 1
+ /* Any slot could be on an IRQ-accessible list */
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+#endif
+ /* Thread slots are not locked on single core */
+
int n;
-
+
for (n = 0; n < MAXTHREADS; n++)
{
- if (threads[n].name == NULL)
- return n;
+ /* Obtain current slot state - lock it on multicore */
+ unsigned state = GET_THREAD_STATE(&threads[n]);
+
+ if (state == STATE_KILLED
+#if NUM_CORES > 1
+ && threads[n].name != THREAD_DESTRUCT
+#endif
+ )
+ {
+ /* Slot is empty - leave it locked and caller will unlock */
+ break;
+ }
+
+ /* Finished examining slot - no longer busy - unlock on multicore */
+ UNLOCK_THREAD(&threads[n], state);
}
-
- return -1;
+
+#if NUM_CORES > 1
+    set_irq_level(oldlevel); /* Reenable interrupts - this slot is
+                                not accessible to them yet */
+#endif
+
+ return n;
}
-/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
- before calling. */
-void wakeup_thread_irq_safe(struct thread_entry **list)
+
+/*---------------------------------------------------------------------------
+ * Place the current core in idle mode - woken up on interrupt or wake
+ * request from another core.
+ *---------------------------------------------------------------------------
+ */
+void core_idle(void)
{
- struct core_entry *core = &cores[CURRENT_CORE];
- /* Switch wakeup lists and call wakeup_thread */
- core->wakeup_list = &core->waking;
- wakeup_thread(list);
- /* Switch back to normal running list */
- core->wakeup_list = &core->running;
+ const unsigned int core = CURRENT_CORE;
+ core_sleep(IF_COP(core,) &cores[core].waking.queue);
}
/*---------------------------------------------------------------------------
@@ -854,44 +1873,23 @@ void wakeup_thread_irq_safe(struct thread_entry **list)
*/
struct thread_entry*
create_thread(void (*function)(void), void* stack, int stack_size,
- const char *name IF_PRIO(, int priority)
- IF_COP(, unsigned int core, bool fallback))
+ unsigned flags, const char *name
+ IF_PRIO(, int priority)
+ IF_COP(, unsigned int core))
{
unsigned int i;
unsigned int stacklen;
unsigned int *stackptr;
int slot;
struct thread_entry *thread;
-
-/*****
- * Ugly code alert!
- * To prevent ifdef hell while keeping the binary size down, we define
- * core here if it hasn't been passed as a parameter
- *****/
-#if NUM_CORES == 1
-#define core CPU
-#endif
-
-#if NUM_CORES > 1
-/* If the kernel hasn't initialised on the COP (most likely due to an old
- * bootloader) then refuse to start threads on the COP
- */
- if ((core == COP) && !cores[core].kernel_running)
- {
- if (fallback)
- return create_thread(function, stack, stack_size, name
- IF_PRIO(, priority) IF_COP(, CPU, false));
- else
- return NULL;
- }
-#endif
+ unsigned state;
slot = find_empty_thread_slot();
- if (slot < 0)
+ if (slot >= MAXTHREADS)
{
return NULL;
}
-
+
/* Munge the stack to make it easy to spot stack overflows */
stacklen = stack_size / sizeof(int);
stackptr = stack;
@@ -905,11 +1903,19 @@ struct thread_entry*
thread->name = name;
thread->stack = stack;
thread->stack_size = stack_size;
- thread->statearg = 0;
+ thread->bqp = NULL;
+#if CONFIG_CORELOCK == SW_CORELOCK
+ thread->bqnlp = NULL;
+#endif
+ thread->queue = NULL;
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ thread->boosted = 0;
+#endif
#ifdef HAVE_PRIORITY_SCHEDULING
- thread->priority_x = 0;
+ thread->priority_x = LOWEST_PRIORITY;
thread->priority = priority;
- cores[core].highest_priority = 100;
+ thread->last_run = current_tick - priority * 8;
+ cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
#endif
#if NUM_CORES > 1
@@ -921,6 +1927,12 @@ struct thread_entry*
flush_icache();
}
#endif
+
+ /* Thread is not on any timeout list but be a bit paranoid */
+ thread->tmo.prev = NULL;
+
+ state = (flags & CREATE_THREAD_FROZEN) ?
+ STATE_FROZEN : STATE_RUNNING;
/* Align stack to an even 32 bit boundary */
thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
@@ -928,50 +1940,149 @@ struct thread_entry*
/* Load the thread's context structure with needed startup information */
THREAD_STARTUP_INIT(core, thread, function);
- add_to_list(&cores[core].running, thread);
+ if (state == STATE_RUNNING)
+ {
+#if NUM_CORES > 1
+ if (core != CURRENT_CORE)
+ {
+ /* Next task switch on other core moves thread to running list */
+ core_schedule_wakeup(thread);
+ }
+ else
+#endif
+ {
+ /* Place on running list immediately */
+ add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
+ }
+ }
+ /* remove lock and set state */
+ UNLOCK_THREAD_SET_STATE(thread, state);
+
return thread;
-#if NUM_CORES == 1
-#undef core
-#endif
}
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
- if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
+    /* No IRQ disable necessary since the current thread cannot be blocked
+ on an IRQ-accessible list */
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+ unsigned state;
+
+ state = GET_THREAD_STATE(current);
+
+ if (current->boosted == 0)
{
- SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
- if (!boosted_threads)
+ current->boosted = 1;
+ if (++boosted_threads == 1)
{
cpu_boost(true);
}
- boosted_threads++;
}
+
+ UNLOCK_THREAD(current, state);
+ (void)state;
}
-#endif
+#endif /* HAVE_SCHEDULER_BOOSTCTRL */
/*---------------------------------------------------------------------------
- * Remove a thread on the current core from the scheduler.
+ * Remove a thread from the scheduler.
* Parameter is the ID as returned from create_thread().
+ *
+ * Use with care on threads that are not under careful control as this may
+ * leave various objects in an undefined state. When trying to kill a thread
+ * on another processor, be sure you know what it's doing and won't be
+ * switching around itself.
*---------------------------------------------------------------------------
*/
void remove_thread(struct thread_entry *thread)
{
+#if NUM_CORES > 1
+ /* core is not constant here because of core switching */
+ unsigned int core = CURRENT_CORE;
+ unsigned int old_core = NUM_CORES;
+#else
const unsigned int core = CURRENT_CORE;
+#endif
+ unsigned state;
+ int oldlevel;
if (thread == NULL)
thread = cores[core].running;
-
- /* Free the entry by removing thread name. */
- thread->name = NULL;
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ state = GET_THREAD_STATE(thread);
+
+ if (state == STATE_KILLED)
+ {
+ goto thread_killed;
+ }
+
+#if NUM_CORES > 1
+ if (thread->core != core)
+ {
+ /* Switch cores and safely extract the thread there */
+ /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
+ condition if the thread runs away to another processor. */
+ unsigned int new_core = thread->core;
+ const char *old_name = thread->name;
+
+ thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
+ UNLOCK_THREAD(thread, state);
+ set_irq_level(oldlevel);
+
+ old_core = switch_core(new_core);
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ state = GET_THREAD_STATE(thread);
+
+ core = new_core;
+
+ if (state == STATE_KILLED)
+ {
+ /* Thread suicided before we could kill it */
+ goto thread_killed;
+ }
+
+ /* Reopen slot - it's locked again anyway */
+ thread->name = old_name;
+
+ if (thread->core != core)
+ {
+ /* We won't play thread tag - just forget it */
+ UNLOCK_THREAD(thread, state);
+ set_irq_level(oldlevel);
+ goto thread_kill_abort;
+ }
+
+ /* Perform the extraction and switch ourselves back to the original
+ processor */
+ }
+#endif /* NUM_CORES > 1 */
+
#ifdef HAVE_PRIORITY_SCHEDULING
- cores[IF_COP2(thread->core)].highest_priority = 100;
+ cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
#endif
-
- if (thread == cores[IF_COP2(thread->core)].running)
+ if (thread->tmo.prev != NULL)
{
- remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
+ /* Clean thread off the timeout list if a timeout check hasn't
+ * run yet */
+ remove_from_list_tmo(thread);
+ }
+
+ if (thread == cores[core].running)
+ {
+ /* Suicide - thread has unconditional rights to do this */
+ /* Maintain locks until switch-out */
+#if NUM_CORES > 1
+ cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.irq_level = oldlevel;
+#else
+ cores[core].irq_level = oldlevel;
+#endif
+ block_thread_on_l(NULL, thread, STATE_KILLED);
+
#if NUM_CORES > 1
/* Switch to the idle stack if not on the main core (where "main"
* runs) */
@@ -982,55 +2093,347 @@ void remove_thread(struct thread_entry *thread)
flush_icache();
#endif
- switch_thread(false, NULL);
+ /* Signal this thread */
+ thread_queue_wake_no_listlock(&thread->queue);
+ /* Switch tasks and never return */
+ switch_thread(thread);
/* This should never and must never be reached - if it is, the
* state is corrupted */
THREAD_PANICF("remove_thread->K:*R", thread);
}
-
- if (thread == cores[IF_COP2(thread->core)].sleeping)
- remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
+
+#if NUM_CORES > 1
+ if (thread->name == THREAD_DESTRUCT)
+ {
+ /* Another core is doing this operation already */
+ UNLOCK_THREAD(thread, state);
+ set_irq_level(oldlevel);
+ return;
+ }
+#endif
+ if (cores[core].waking.queue != NULL)
+ {
+ /* Get any threads off the waking list and onto the running
+ * list first - waking and running cannot be distinguished by
+ * state */
+ core_perform_wakeup(IF_COP(core));
+ }
+
+ switch (state)
+ {
+ case STATE_RUNNING:
+ /* Remove thread from ready to run tasks */
+ remove_from_list_l(&cores[core].running, thread);
+ break;
+ case STATE_BLOCKED:
+ case STATE_BLOCKED_W_TMO:
+ /* Remove thread from the queue it's blocked on - including its
+ * own if waiting there */
+#if CONFIG_CORELOCK == SW_CORELOCK
+ /* One or the other will be valid */
+ if (thread->bqp == NULL)
+ {
+ remove_from_list_l(thread->bqnlp, thread);
+ }
+ else
+#endif /* CONFIG_CORELOCK */
+ {
+ remove_from_list_l_locked(thread->bqp, thread);
+ }
+ break;
+ /* Otherwise thread is killed or is frozen and hasn't run yet */
+ }
+
+ /* If thread was waiting on itself, it will have been removed above.
+ * The wrong order would result in waking the thread first and deadlocking
+ * since the slot is already locked. */
+ thread_queue_wake_no_listlock(&thread->queue);
+
+thread_killed: /* Thread was already killed */
+ /* Removal complete - safe to unlock state and reenable interrupts */
+ UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
+ set_irq_level(oldlevel);
+
+#if NUM_CORES > 1
+thread_kill_abort: /* Something stopped us from killing the thread */
+ if (old_core < NUM_CORES)
+ {
+ /* Did a removal on another processor's thread - switch back to
+ native core */
+ switch_core(old_core);
+ }
+#endif
+}
+
+/*---------------------------------------------------------------------------
+ * Block the current thread until another thread terminates. A thread may
+ * wait on itself to terminate which prevents it from running again and it
+ * will need to be killed externally.
+ * Parameter is the ID as returned from create_thread().
+ *---------------------------------------------------------------------------
+ */
+void thread_wait(struct thread_entry *thread)
+{
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *current = cores[core].running;
+ unsigned thread_state;
+#if NUM_CORES > 1
+ int oldlevel;
+ unsigned current_state;
+#endif
+
+ if (thread == NULL)
+ thread = current;
+
+#if NUM_CORES > 1
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+#endif
+
+ thread_state = GET_THREAD_STATE(thread);
+
+#if NUM_CORES > 1
+    /* We can't lock the same slot twice. The waitee will also lock itself
+       first, then each waiter's slot in turn as it wakes them; the same
+       order must be observed here as well. */
+ if (thread == current)
+ {
+ current_state = thread_state;
+ }
else
- remove_from_list(NULL, thread);
+ {
+ current_state = GET_THREAD_STATE(current);
+ }
+#endif
+
+ if (thread_state != STATE_KILLED)
+ {
+#if NUM_CORES > 1
+ cores[core].blk_ops.flags = TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.irq_level = oldlevel;
+#endif
+ /* Unlock the waitee state at task switch - not done for self-wait
+           because that would double-unlock the state and potentially
+ corrupt another's busy assert on the slot */
+ if (thread != current)
+ {
+#if CONFIG_CORELOCK == SW_CORELOCK
+ cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
+ cores[core].blk_ops.thread = thread;
+#elif CONFIG_CORELOCK == CORELOCK_SWAP
+ cores[core].blk_ops.flags |= TBOP_SET_VARu8;
+ cores[core].blk_ops.var_u8p = &thread->state;
+ cores[core].blk_ops.var_u8v = thread_state;
+#endif
+ }
+ block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
+ switch_thread(current);
+ return;
+ }
+
+ /* Unlock both slots - obviously the current thread can't have
+ STATE_KILLED so the above if clause will always catch a thread
+ waiting on itself */
+#if NUM_CORES > 1
+ UNLOCK_THREAD(current, current_state);
+ UNLOCK_THREAD(thread, thread_state);
+ set_irq_level(oldlevel);
+#endif
}
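
A hedged usage sketch of thread_wait(): spawn a worker with the new create_thread() signature and block until it terminates (the worker function, its stack and its name are hypothetical examples):

    struct thread_entry *t =
        create_thread(demo_worker, demo_worker_stack, sizeof(demo_worker_stack),
                      0, "demo worker"
                      IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));
    if (t != NULL)
        thread_wait(t);   /* woken via t->queue when the worker is removed */
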
#ifdef HAVE_PRIORITY_SCHEDULING
+/*---------------------------------------------------------------------------
+ * Sets the thread's relative priority for the core it runs on.
+ *---------------------------------------------------------------------------
+ */
int thread_set_priority(struct thread_entry *thread, int priority)
{
- int old_priority;
+ unsigned old_priority = (unsigned)-1;
if (thread == NULL)
thread = cores[CURRENT_CORE].running;
- old_priority = thread->priority;
- thread->priority = priority;
- cores[IF_COP2(thread->core)].highest_priority = 100;
-
+#if NUM_CORES > 1
+ /* Thread could be on any list and therefore on an interrupt accessible
+ one - disable interrupts */
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+#endif
+ unsigned state = GET_THREAD_STATE(thread);
+
+ /* Make sure it's not killed */
+ if (state != STATE_KILLED)
+ {
+ old_priority = thread->priority;
+ thread->priority = priority;
+ cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
+ }
+
+#if NUM_CORES > 1
+ UNLOCK_THREAD(thread, state);
+ set_irq_level(oldlevel);
+#endif
return old_priority;
}
+/*---------------------------------------------------------------------------
+ * Returns the current priority for a thread.
+ *---------------------------------------------------------------------------
+ */
int thread_get_priority(struct thread_entry *thread)
{
+ /* Simple, quick probe. */
if (thread == NULL)
thread = cores[CURRENT_CORE].running;
- return thread->priority;
+ return (unsigned)thread->priority;
}
+/*---------------------------------------------------------------------------
+ * Yield that guarantees thread execution once per round regardless of
+ * thread's scheduler priority - basically a transient realtime boost
+ * without altering the scheduler's thread precedence.
+ *
+ * HACK ALERT! Search for "priority inheritance" for proper treatment.
+ *---------------------------------------------------------------------------
+ */
void priority_yield(void)
{
- struct thread_entry *thread = cores[CURRENT_CORE].running;
- thread->priority_x = 1;
- switch_thread(true, NULL);
- thread->priority_x = 0;
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *thread = cores[core].running;
+ thread->priority_x = HIGHEST_PRIORITY;
+ switch_thread(NULL);
+ thread->priority_x = LOWEST_PRIORITY;
+ cores[core].highest_priority = LOWEST_PRIORITY;
}
#endif /* HAVE_PRIORITY_SCHEDULING */
+/* Resumes a frozen thread - similar logic to wakeup_thread except that
+ the thread is on no scheduler list at all. It exists simply by virtue of
+ the slot having a state of STATE_FROZEN. */
+void thread_thaw(struct thread_entry *thread)
+{
+#if NUM_CORES > 1
+ /* Thread could be on any list and therefore on an interrupt accessible
+ one - disable interrupts */
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+#endif
+ unsigned state = GET_THREAD_STATE(thread);
+
+ if (state == STATE_FROZEN)
+ {
+ const unsigned int core = CURRENT_CORE;
+#if NUM_CORES > 1
+ if (thread->core != core)
+ {
+ core_schedule_wakeup(thread);
+ }
+ else
+#endif
+ {
+ add_to_list_l(&cores[core].running, thread);
+ }
+
+ UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
+ return;
+ }
+
+#if NUM_CORES > 1
+ UNLOCK_THREAD(thread, state);
+ set_irq_level(oldlevel);
+#endif
+}
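
CREATE_THREAD_FROZEN and thread_thaw() pair up: the slot is fully initialized but stays off every run list until explicitly released. A hedged sketch (thread function, stack and name are hypothetical):

    struct thread_entry *t =
        create_thread(demo_thread, demo_stack, sizeof(demo_stack),
                      CREATE_THREAD_FROZEN, "demo"
                      IF_PRIO(, PRIORITY_USER_INTERFACE) IF_COP(, CPU));

    /* ... finish any setup the thread must not observe half-done ... */

    thread_thaw(t);   /* STATE_FROZEN -> STATE_RUNNING, now on a run list */
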
+
+/*---------------------------------------------------------------------------
+ * Return the ID of the currently executing thread.
+ *---------------------------------------------------------------------------
+ */
struct thread_entry * thread_get_current(void)
{
return cores[CURRENT_CORE].running;
}
+#if NUM_CORES > 1
+/*---------------------------------------------------------------------------
+ * Switch the processor that the currently executing thread runs on.
+ *---------------------------------------------------------------------------
+ */
+unsigned int switch_core(unsigned int new_core)
+{
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *current = cores[core].running;
+ struct thread_entry *w;
+ int oldlevel;
+
+ /* Interrupts can access the lists that will be used - disable them */
+ unsigned state = GET_THREAD_STATE(current);
+
+ if (core == new_core)
+ {
+ /* No change - just unlock everything and return same core */
+ UNLOCK_THREAD(current, state);
+ return core;
+ }
+
+ /* Get us off the running list for the current core */
+ remove_from_list_l(&cores[core].running, current);
+
+ /* Stash return value (old core) in a safe place */
+ current->retval = core;
+
+ /* If a timeout hadn't yet been cleaned-up it must be removed now or
+ * the other core will likely attempt a removal from the wrong list! */
+ if (current->tmo.prev != NULL)
+ {
+ remove_from_list_tmo(current);
+ }
+
+ /* Change the core number for this thread slot */
+ current->core = new_core;
+
+ /* Do not use core_schedule_wakeup here since this will result in
+ * the thread starting to run on the other core before being finished on
+ * this one. Delay the wakeup list unlock to keep the other core stuck
+ * until this thread is ready. */
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ w = LOCK_LIST(&cores[new_core].waking);
+ ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
+
+ /* Make a callback into device-specific code, unlock the wakeup list so
+ * that execution may resume on the new core, unlock our slot and finally
+ * restore the interrupt level */
+ cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
+ TBOP_UNLOCK_LIST | TBOP_IRQ_LEVEL;
+ cores[core].blk_ops.irq_level = oldlevel;
+ cores[core].blk_ops.list_p = &cores[new_core].waking;
+#if CONFIG_CORELOCK == CORELOCK_SWAP
+ cores[core].blk_ops.state = STATE_RUNNING;
+ cores[core].blk_ops.list_v = w;
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ current->priority_x = HIGHEST_PRIORITY;
+ cores[core].highest_priority = LOWEST_PRIORITY;
+#endif
+    /* Do the stack switching, cache maintenance and switch_thread call -
+ requires native code */
+ switch_thread_core(core, current);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ current->priority_x = LOWEST_PRIORITY;
+ cores[current->core].highest_priority = LOWEST_PRIORITY;
+#endif
+
+ /* Finally return the old core to caller */
+ return current->retval;
+ (void)state;
+}
+#endif /* NUM_CORES > 1 */
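+
A hedged usage sketch (not part of this patch): with switch_core() a thread can migrate itself to the coprocessor for a heavy job and hop back afterwards. do_heavy_work() is a hypothetical placeholder; COP/CPU are the PP core IDs already used in this file.

    /* Sketch only - called from the thread that wants to migrate. */
    static void do_heavy_work(void); /* hypothetical workload */

    void run_on_cop(void)
    {
    #if NUM_CORES > 1
        unsigned int old_core = switch_core(COP); /* now executing on the COP */
        do_heavy_work();
        switch_core(old_core);                    /* migrate back */
    #else
        do_heavy_work();
    #endif
    }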
+
+/*---------------------------------------------------------------------------
+ * Initialize threading API. This assumes interrupts are not yet enabled. On
+ * multicore setups, no core is allowed to proceed until create_thread calls
+ * are safe to perform.
+ *---------------------------------------------------------------------------
+ */
void init_threads(void)
{
const unsigned int core = CURRENT_CORE;
@@ -1038,36 +2441,43 @@ void init_threads(void)
/* CPU will initialize first and then sleep */
slot = find_empty_thread_slot();
-#if THREAD_EXTRA_CHECKS
- /* This can fail if, for example, .bss isn't zero'ed out by the loader
- or threads is in the wrong section. */
- if (slot < 0) {
- panicf("uninitialized threads[]");
+
+ if (slot >= MAXTHREADS)
+ {
+ /* WTF? There really must be a slot available at this stage.
+         * This can fail if, for example, .bss isn't zeroed out by the loader
+         * or the threads array is placed in the wrong section. */
+ THREAD_PANICF("init_threads->no slot", NULL);
}
-#endif
- cores[core].sleeping = NULL;
cores[core].running = NULL;
- cores[core].waking = NULL;
- cores[core].wakeup_list = &cores[core].running;
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
+ cores[core].timeout = NULL;
+ thread_queue_init(&cores[core].waking);
+ cores[core].next_tmo_check = current_tick; /* Something not in the past */
+#if NUM_CORES > 1
+ cores[core].blk_ops.flags = 0;
+#else
+ cores[core].irq_level = STAY_IRQ_LEVEL;
#endif
threads[slot].name = main_thread_name;
- threads[slot].statearg = 0;
- threads[slot].context.start = 0; /* core's main thread already running */
+ UNLOCK_THREAD_SET_STATE(&threads[slot], STATE_RUNNING); /* No sync worries yet */
+ threads[slot].context.start = NULL; /* core's main thread already running */
+ threads[slot].tmo.prev = NULL;
+ threads[slot].queue = NULL;
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ threads[slot].boosted = 0;
+#endif
#if NUM_CORES > 1
threads[slot].core = core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
threads[slot].priority = PRIORITY_USER_INTERFACE;
- threads[slot].priority_x = 0;
- cores[core].highest_priority = 100;
+ threads[slot].priority_x = LOWEST_PRIORITY;
+ cores[core].highest_priority = LOWEST_PRIORITY;
#endif
- add_to_list(&cores[core].running, &threads[slot]);
-
- /* In multiple core setups, each core has a different stack. There is
- * probably a much better way to do this. */
+
+ add_to_list_l(&cores[core].running, &threads[slot]);
+
if (core == CPU)
{
#ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -1076,22 +2486,19 @@ void init_threads(void)
threads[slot].stack = stackbegin;
threads[slot].stack_size = (int)stackend - (int)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
- /* Mark CPU initialized */
- cores[CPU].kernel_running = true;
- /* Do _not_ wait for the COP to init in the bootloader because it doesn't */
/* TODO: HAL interface for this */
/* Wake up coprocessor and let it initialize kernel and threads */
COP_CTL = PROC_WAKE;
/* Sleep until finished */
CPU_CTL = PROC_SLEEP;
}
- else
+ else
{
/* Initial stack is the COP idle stack */
threads[slot].stack = cop_idlestackbegin;
threads[slot].stack_size = IDLE_STACK_SIZE;
/* Mark COP initialized */
- cores[COP].kernel_running = true;
+ cores[COP].blk_ops.flags = 0;
/* Get COP safely primed inside switch_thread where it will remain
* until a thread actually exists on it */
CPU_CTL = PROC_WAKE;
@@ -1100,19 +2507,28 @@ void init_threads(void)
}
}
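+
For orientation (not part of this patch), a rough sketch of the boot ordering the comment above implies; only init_threads() and set_irq_level() are calls from this tree, the surrounding function is illustrative.

    /* Sketch only - illustrative boot ordering, not verbatim kernel code. */
    void example_boot(void)
    {
        /* Still single-threaded, interrupts masked; on multicore targets
         * both cores pass through init_threads() before proceeding. */
        init_threads();

        /* Thread slots and the per-core run queues now exist, so further
         * create_thread() calls and enabling interrupts are safe. */
        set_irq_level(0);
    }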
+/*---------------------------------------------------------------------------
+ * Returns the maximum percentage of stack a thread ever used while running.
+ * NOTE: Some large buffer allocations that don't use enough of the buffer to
+ * overwrite stackptr[0] will not be seen.
+ *---------------------------------------------------------------------------
+ */
int thread_stack_usage(const struct thread_entry *thread)
{
- unsigned int i;
unsigned int *stackptr = thread->stack;
+ int stack_words = thread->stack_size / sizeof (int);
+ int i, usage = 0;
- for (i = 0;i < thread->stack_size/sizeof(int);i++)
+ for (i = 0; i < stack_words; i++)
{
if (stackptr[i] != DEADBEEF)
+ {
+ usage = ((stack_words - i) * 100) / stack_words;
break;
+ }
}
- return ((thread->stack_size - i * sizeof(int)) * 100) /
- thread->stack_size;
+ return usage;
}
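
As a worked example with illustrative numbers: a 1024-byte stack gives stack_words == 256; if the first word that no longer holds DEADBEEF is found at i == 200, the reported usage is ((256 - 200) * 100) / 256 == 21 percent.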
#if NUM_CORES > 1
@@ -1139,9 +2555,14 @@ int idle_stack_usage(unsigned int core)
}
#endif
-int thread_get_status(const struct thread_entry *thread)
+/*---------------------------------------------------------------------------
+ * Returns the current thread status. This is a snapshot for debugging and
+ * does not do any slot synchronization, so it could return STATE_BUSY.
+ *---------------------------------------------------------------------------
+ */
+unsigned thread_get_status(const struct thread_entry *thread)
{
- return GET_STATE(thread->statearg);
+ return thread->state;
}
/*---------------------------------------------------------------------------
@@ -1163,7 +2584,7 @@ void thread_get_name(char *buffer, int size,
/* Display thread name if one or ID if none */
const char *name = thread->name;
const char *fmt = "%s";
- if (name == NULL || *name == '\0')
+ if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
{
name = (const char *)thread;
fmt = "%08lX";
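
A hedged usage sketch for thread_get_name() (not part of this patch); the buffer size is an arbitrary choice.

    /* Sketch only - roughly how a debug/threads screen would use it. */
    char namebuf[32];
    thread_get_name(namebuf, sizeof(namebuf), thread_get_current());
    /* namebuf now holds the thread's name, or the slot address formatted
     * with "%08lX" when the name is NULL/empty (or THREAD_DESTRUCT on
     * multicore builds). */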
diff --git a/firmware/usb.c b/firmware/usb.c
index af09aecff9..f79af98518 100644
--- a/firmware/usb.c
+++ b/firmware/usb.c
@@ -66,7 +66,7 @@ static int usb_mmc_countdown = 0;
static long usb_stack[(DEFAULT_STACK_SIZE + 0x800)/sizeof(long)];
static const char usb_thread_name[] = "usb";
#endif
-static struct event_queue usb_queue;
+static struct event_queue usb_queue NOCACHEBSS_ATTR;
static int last_usb_status;
static bool usb_monitor_enabled;
@@ -119,7 +119,7 @@ static void usb_thread(void)
{
int num_acks_to_expect = -1;
bool waiting_for_ack;
- struct event ev;
+ struct queue_event ev;
waiting_for_ack = false;
@@ -307,9 +307,9 @@ void usb_init(void)
#ifndef BOOTLOADER
queue_init(&usb_queue, true);
- create_thread(usb_thread, usb_stack, sizeof(usb_stack),
+ create_thread(usb_thread, usb_stack, sizeof(usb_stack), 0,
usb_thread_name IF_PRIO(, PRIORITY_SYSTEM)
- IF_COP(, CPU, false));
+ IF_COP(, CPU));
tick_add_task(usb_tick);
#endif
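
For context on the struct event -> struct queue_event rename seen in these hunks (not part of this patch), a minimal sketch of the receive loop such threads use; the event handling is abridged.

    /* Sketch only - the generic shape of a queue-driven thread. */
    static void example_thread(void)
    {
        struct queue_event ev;

        while (1)
        {
            queue_wait(&usb_queue, &ev);

            switch (ev.id)
            {
            case SYS_USB_CONNECTED:
                /* ev.data carries the event's parameter, if any */
                break;
            default:
                break;
            }
        }
    }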
@@ -318,7 +318,7 @@ void usb_init(void)
void usb_wait_for_disconnect(struct event_queue *q)
{
- struct event ev;
+ struct queue_event ev;
/* Don't return until we get SYS_USB_DISCONNECTED */
while(1)
@@ -334,7 +334,7 @@ void usb_wait_for_disconnect(struct event_queue *q)
int usb_wait_for_disconnect_w_tmo(struct event_queue *q, int ticks)
{
- struct event ev;
+ struct queue_event ev;
/* Don't return until we get SYS_USB_DISCONNECTED or SYS_TIMEOUT */
while(1)