summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2007-03-09 08:03:18 +0000
committerMichael Sevakis <jethead71@rockbox.org>2007-03-09 08:03:18 +0000
commitdee43ece2086e15894934b754e47cb7ce5882cda (patch)
tree9238a5c80fe4382975387ec90eca61bda8d4ed8f
parentf4b83e68594f076cdfe510cd2e23b7f57fd158b5 (diff)
downloadrockbox-dee43ece2086e15894934b754e47cb7ce5882cda.tar.gz
rockbox-dee43ece2086e15894934b754e47cb7ce5882cda.zip
Put an end to priority inversion in the ata driver. Gave up trying to have fully atomic dual use mutexes so just replaced the ata driver locking with spins. Maybe I'll have better luck later. Things should run smoothly with database updates and such happening in the background.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@12688 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--firmware/drivers/ata.c44
-rw-r--r--firmware/export/kernel.h10
-rw-r--r--firmware/export/thread.h14
-rw-r--r--firmware/kernel.c79
-rw-r--r--firmware/thread.c43
-rw-r--r--uisimulator/sdl/kernel.c12
6 files changed, 134 insertions, 68 deletions
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c
index 76c0090a12..d2c80e0ae0 100644
--- a/firmware/drivers/ata.c
+++ b/firmware/drivers/ata.c
@@ -104,7 +104,7 @@ STATICIRAM int wait_for_bsy(void)
long timeout = current_tick + HZ*30;
while (TIME_BEFORE(current_tick, timeout) && (ATA_STATUS & STATUS_BSY)) {
last_disk_activity = current_tick;
- yield();
+ priority_yield();
}
if (TIME_BEFORE(current_tick, timeout))
@@ -126,7 +126,7 @@ STATICIRAM int wait_for_rdy(void)
while (TIME_BEFORE(current_tick, timeout) &&
!(ATA_ALT_STATUS & STATUS_RDY)) {
last_disk_activity = current_tick;
- yield();
+ priority_yield();
}
if (TIME_BEFORE(current_tick, timeout))
@@ -216,7 +216,7 @@ int ata_read_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
- mutex_lock(&ata_mtx);
+ spinlock_lock(&ata_mtx);
last_disk_activity = current_tick;
spinup_start = current_tick;
@@ -227,14 +227,14 @@ int ata_read_sectors(IF_MV2(int drive,)
spinup = true;
if (poweroff) {
if (ata_power_on()) {
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
ata_led(false);
return -1;
}
}
else {
if (perform_soft_reset()) {
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
ata_led(false);
return -1;
}
@@ -246,7 +246,7 @@ int ata_read_sectors(IF_MV2(int drive,)
SET_REG(ATA_SELECT, ata_device);
if (!wait_for_rdy())
{
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
ata_led(false);
return -2;
}
@@ -359,7 +359,7 @@ int ata_read_sectors(IF_MV2(int drive,)
}
ata_led(false);
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
return ret;
}
@@ -417,7 +417,7 @@ int ata_write_sectors(IF_MV2(int drive,)
if (start == 0)
panicf("Writing on sector 0\n");
- mutex_lock(&ata_mtx);
+ spinlock_lock(&ata_mtx);
last_disk_activity = current_tick;
spinup_start = current_tick;
@@ -428,14 +428,14 @@ int ata_write_sectors(IF_MV2(int drive,)
spinup = true;
if (poweroff) {
if (ata_power_on()) {
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
ata_led(false);
return -1;
}
}
else {
if (perform_soft_reset()) {
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
ata_led(false);
return -1;
}
@@ -445,7 +445,7 @@ int ata_write_sectors(IF_MV2(int drive,)
SET_REG(ATA_SELECT, ata_device);
if (!wait_for_rdy())
{
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
ata_led(false);
return -2;
}
@@ -507,7 +507,7 @@ int ata_write_sectors(IF_MV2(int drive,)
ata_led(false);
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
return ret;
}
@@ -572,13 +572,13 @@ static int ata_perform_sleep(void)
{
int ret = 0;
- mutex_lock(&ata_mtx);
+ spinlock_lock(&ata_mtx);
SET_REG(ATA_SELECT, ata_device);
if(!wait_for_rdy()) {
DEBUGF("ata_perform_sleep() - not RDY\n");
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
return -1;
}
@@ -591,7 +591,7 @@ static int ata_perform_sleep(void)
}
sleeping = true;
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
return ret;
}
@@ -649,9 +649,9 @@ static void ata_thread(void)
if ( !spinup && sleeping && !poweroff &&
TIME_AFTER( current_tick, last_sleep + ATA_POWER_OFF_TIMEOUT ))
{
- mutex_lock(&ata_mtx);
+ spinlock_lock(&ata_mtx);
ide_power_enable(false);
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
poweroff = true;
}
#endif
@@ -663,11 +663,11 @@ static void ata_thread(void)
#ifndef USB_NONE
case SYS_USB_CONNECTED:
if (poweroff) {
- mutex_lock(&ata_mtx);
+ spinlock_lock(&ata_mtx);
ata_led(true);
ata_power_on();
ata_led(false);
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
}
/* Tell the USB thread that we are safe */
@@ -741,11 +741,11 @@ int ata_soft_reset(void)
{
int ret;
- mutex_lock(&ata_mtx);
+ spinlock_lock(&ata_mtx);
ret = perform_soft_reset();
- mutex_unlock(&ata_mtx);
+ spinlock_unlock(&ata_mtx);
return ret;
}
@@ -936,7 +936,7 @@ int ata_init(void)
bool coldstart = ata_is_coldstart();
/* must be called before ata_device_init() */
- mutex_init(&ata_mtx);
+ spinlock_init(&ata_mtx);
ata_led(false);
ata_device_init();
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index ec8aa28a08..495e558175 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -83,10 +83,14 @@ struct event_queue
#endif
};
+#define MTX_UNOWNED 0x00000000
+#define MTX_BLOCKED_WAITER 0x00000001
+#define MTX_SPIN_WAITER 0x00010001
+#define MTX_SPIN_OWNER 0x00020001
struct mutex
{
+ uint32_t locked;
struct thread_entry *thread;
- bool locked;
};
/* global tick variable */
@@ -126,8 +130,12 @@ extern void queue_remove_from_head(struct event_queue *q, long id);
extern int queue_broadcast(long id, intptr_t data);
extern void mutex_init(struct mutex *m);
+static inline void spinlock_init(struct mutex *m)
+{ mutex_init(m); } /* Same thing for now */
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
+extern void spinlock_lock(struct mutex *m);
+extern void spinlock_unlock(struct mutex *m);
extern void tick_start(unsigned int interval_in_ms);
#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 3a979722b9..7a9414c79f 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -105,6 +105,7 @@ struct thread_entry {
unsigned short stack_size;
#ifdef HAVE_PRIORITY_SCHEDULING
unsigned short priority;
+ unsigned long priority_x;
long last_run;
#endif
struct thread_entry *next, *prev;
@@ -114,6 +115,10 @@ struct core_entry {
struct thread_entry threads[MAXTHREADS];
struct thread_entry *running;
struct thread_entry *sleeping;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ int switch_to_irq_level;
+ #define STAY_IRQ_LEVEL -1
+#endif
};
#ifdef HAVE_PRIORITY_SCHEDULING
@@ -149,7 +154,14 @@ void wakeup_thread(struct thread_entry **thread);
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
-#endif
+/* Yield that guarantees thread execution once per round regardless of
+ thread's scheduler priority - basically a transient realtime boost
+ without altering the scheduler's thread precedence. */
+void priority_yield(void);
+#else
+static inline void priority_yield(void)
+ { yield(); }
+#endif /* HAVE_PRIORITY_SCHEDULING */
struct thread_entry * thread_get_current(void);
void init_threads(void);
int thread_stack_usage(const struct thread_entry *thread);
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 313530ffba..db7249fdee 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -656,48 +656,73 @@ void mutex_init(struct mutex *m)
m->thread = NULL;
}
-#ifdef CPU_PP
-/* PortalPlayer chips have 2 cores, therefore need atomic mutexes */
+/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
+ * Just use it for ARM, Coldfire and whatever else well...why not?
+ */
-static inline bool test_and_set(bool *x, bool v)
-{
- asm volatile (
- "swpb %0, %0, [%1]\n"
- : "+r"(v)
- : "r"(x)
- );
- return v;
-}
+/* Macros generate better code than an inline function in this case */
+#if defined (CPU_PP) || defined (CPU_ARM)
+#define test_and_set(x_, v_) \
+({ \
+ uint32_t old; \
+ asm volatile ( \
+ "swpb %[old], %[v], [%[x]] \r\n" \
+ : [old]"=r"(old) \
+ : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
+ ); \
+ old; \
+ })
+#elif defined (CPU_COLDFIRE)
+#define test_and_set(x_, v_) \
+({ \
+ uint8_t old; \
+ asm volatile ( \
+ "bset.l %[v], (%[x]) \r\n" \
+ "sne.b %[old] \r\n" \
+ : [old]"=d,d"(old) \
+ : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
+ ); \
+ old; \
+ })
+#else
+/* default for no asm version */
+#define test_and_set(x_, v_) \
+({ \
+ uint32_t old = *(uint32_t *)x_; \
+ *(uint32_t *)x_ = v_; \
+ old; \
+ })
+#endif
void mutex_lock(struct mutex *m)
{
- if (test_and_set(&m->locked,true))
+ if (test_and_set(&m->locked, 1))
{
/* Wait until the lock is open... */
block_thread(&m->thread);
}
}
-#else
-void mutex_lock(struct mutex *m)
+void mutex_unlock(struct mutex *m)
{
- if (m->locked)
+ if (m->thread == NULL)
+ m->locked = 0;
+ else
+ wakeup_thread(&m->thread);
+}
+
+void spinlock_lock(struct mutex *m)
+{
+ while (test_and_set(&m->locked, 1))
{
- /* Wait until the lock is open... */
- block_thread(&m->thread);
+ /* wait until the lock is open... */
+ switch_thread(true, NULL);
}
-
- /* ...and lock it */
- m->locked = true;
}
-#endif
-void mutex_unlock(struct mutex *m)
+void spinlock_unlock(struct mutex *m)
{
- if (m->thread == NULL)
- m->locked = false;
- else
- wakeup_thread(&m->thread);
+ m->locked = 0;
}
-#endif
+#endif /* ndef SIMULATOR */
diff --git a/firmware/thread.c b/firmware/thread.c
index 614286c422..8022d94862 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -39,11 +39,6 @@ static unsigned short highest_priority IBSS_ATTR;
static int boosted_threads IBSS_ATTR;
#endif
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-#define STAY_IRQ_LEVEL -1
-static int switch_to_irq_level = STAY_IRQ_LEVEL;
-#endif
-
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS
@@ -136,11 +131,11 @@ static inline void load_context(const void* addr)
"movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
"move.l %%d0,%%macsr \n"
"move.l (52,%0),%%d0 \n" /* Get start address */
- "beq.b .running \n" /* NULL -> already running */
+ "beq.b 1f \n" /* NULL -> already running */
"clr.l (52,%0) \n" /* Clear start address.. */
"move.l %%d0,%0 \n"
"jmp (%0) \n" /* ..and start the thread */
- ".running: \n"
+ "1: \n"
: : "a" (addr) : "d0" /* only! */
);
}
@@ -422,10 +417,10 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* This has to be done after the scheduler is finished with the
blocked_list pointer so that an IRQ can't kill us by attempting
a wake but before attempting any core sleep. */
- if (switch_to_irq_level != STAY_IRQ_LEVEL)
+ if (cores[CURRENT_CORE].switch_to_irq_level != STAY_IRQ_LEVEL)
{
- int level = switch_to_irq_level;
- switch_to_irq_level = STAY_IRQ_LEVEL;
+ int level = cores[CURRENT_CORE].switch_to_irq_level;
+ cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
set_irq_level(level);
}
#endif
@@ -442,13 +437,14 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
for (;;)
{
int priority = cores[CURRENT_CORE].running->priority;
-
+
if (priority < highest_priority)
highest_priority = priority;
-
+
if (priority == highest_priority ||
(current_tick - cores[CURRENT_CORE].running->last_run >
- priority * 8))
+ priority * 8) ||
+ cores[CURRENT_CORE].running->priority_x != 0)
break;
cores[CURRENT_CORE].running = cores[CURRENT_CORE].running->next;
@@ -567,7 +563,7 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
- switch_to_irq_level = level;
+ cores[CURRENT_CORE].switch_to_irq_level = level;
block_thread(list);
}
@@ -575,7 +571,7 @@ void set_irq_level_and_block_thread(struct thread_entry **list, int level)
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
int timeout, int level)
{
- switch_to_irq_level = level;
+ cores[CURRENT_CORE].switch_to_irq_level = level;
block_thread_w_tmo(list, timeout);
}
#endif
@@ -688,6 +684,7 @@ struct thread_entry*
thread->stack_size = stack_size;
thread->statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
+ thread->priority_x = 0;
thread->priority = priority;
highest_priority = 100;
#endif
@@ -759,7 +756,7 @@ int thread_set_priority(struct thread_entry *thread, int priority)
if (thread == NULL)
thread = cores[CURRENT_CORE].running;
-
+
old_priority = thread->priority;
thread->priority = priority;
highest_priority = 100;
@@ -774,7 +771,15 @@ int thread_get_priority(struct thread_entry *thread)
return thread->priority;
}
-#endif
+
+void priority_yield(void)
+{
+ struct thread_entry *thread = cores[CURRENT_CORE].running;
+ thread->priority_x = 1;
+ switch_thread(true, NULL);
+ thread->priority_x = 0;
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
struct thread_entry * thread_get_current(void)
{
@@ -789,10 +794,14 @@ void init_threads(void)
memset(cores, 0, sizeof cores);
cores[core].sleeping = NULL;
cores[core].running = NULL;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
+#endif
cores[core].threads[0].name = main_thread_name;
cores[core].threads[0].statearg = 0;
#ifdef HAVE_PRIORITY_SCHEDULING
cores[core].threads[0].priority = PRIORITY_USER_INTERFACE;
+ cores[core].threads[0].priority_x = 0;
highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
index dddfa70066..e01fbe65b9 100644
--- a/uisimulator/sdl/kernel.c
+++ b/uisimulator/sdl/kernel.c
@@ -355,3 +355,15 @@ void mutex_unlock(struct mutex *m)
{
m->locked = false;
}
+
+void spinlock_lock(struct mutex *m)
+{
+ while(m->locked)
+ switch_thread(true, NULL);
+ m->locked = true;
+}
+
+void spinlock_unlock(struct mutex *m)
+{
+ m->locked = false;
+}