Diffstat (limited to 'firmware/kernel.c')
-rw-r--r--  firmware/kernel.c  215
1 file changed, 173 insertions, 42 deletions
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 4fcfcb9d30..288ebbbede 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -516,8 +516,10 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
oldlevel = disable_irq_save();
corelock_lock(&q->cl);
- /* auto-reply */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
queue_do_auto_reply(q->send);
+#endif
while(1)
{
@@ -541,12 +543,18 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
corelock_lock(&q->cl);
}
- q->read = rd + 1;
- rd &= QUEUE_LENGTH_MASK;
- *ev = q->events[rd];
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ if(ev)
+#endif
+ {
+ q->read = rd + 1;
+ rd &= QUEUE_LENGTH_MASK;
+ *ev = q->events[rd];
- /* Get data for a waiting thread if one */
- queue_do_fetch_sender(q->send, rd);
+ /* Get data for a waiting thread if one */
+ queue_do_fetch_sender(q->send, rd);
+ }
+ /* else just waiting on non-empty */
corelock_unlock(&q->cl);
restore_irq(oldlevel);
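
With this hunk, extended-messaging builds let a caller pass a NULL event pointer to queue_wait(): the thread still auto-replies to any pending synchronous sender, then simply blocks until the queue is non-empty without dequeuing anything. A minimal caller sketch, using only routines shown in this file and a hypothetical queue name:

static struct event_queue my_queue;  /* hypothetical queue owned by this thread */

static void wait_for_activity(void)
{
    struct queue_event ev;

    /* NULL ev: block until the queue is non-empty; nothing is dequeued. */
    queue_wait(&my_queue, NULL);

    /* The message is still at the head; inspect it non-destructively. */
    if(queue_peek(&my_queue, &ev))
    {
        /* ...dispatch on ev.id... */
    }
}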
@@ -566,8 +574,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
oldlevel = disable_irq_save();
corelock_lock(&q->cl);
- /* Auto-reply */
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ /* Auto-reply (even if ev is NULL to avoid stalling a waiting thread) */
queue_do_auto_reply(q->send);
+#endif
rd = q->read;
wr = q->write;
@@ -590,20 +600,26 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
wr = q->write;
}
- /* no worry about a removed message here - status is checked inside
- locks - perhaps verify if timeout or false alarm */
- if (rd != wr)
- {
- q->read = rd + 1;
- rd &= QUEUE_LENGTH_MASK;
- *ev = q->events[rd];
- /* Get data for a waiting thread if one */
- queue_do_fetch_sender(q->send, rd);
- }
- else
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ if(ev)
+#endif
{
- ev->id = SYS_TIMEOUT;
+ /* no worry about a removed message here - status is checked inside
+ locks - perhaps verify if timeout or false alarm */
+ if (rd != wr)
+ {
+ q->read = rd + 1;
+ rd &= QUEUE_LENGTH_MASK;
+ *ev = q->events[rd];
+ /* Get data for a waiting thread if one */
+ queue_do_fetch_sender(q->send, rd);
+ }
+ else
+ {
+ ev->id = SYS_TIMEOUT;
+ }
}
+ /* else just waiting on non-empty */
corelock_unlock(&q->cl);
restore_irq(oldlevel);
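
queue_wait_w_tmo() gains the same NULL-ev form: wait up to the given number of ticks for the queue to become non-empty, leaving its contents untouched (with a real ev, ev->id is still set to SYS_TIMEOUT on expiry). An illustrative sketch, assuming the usual HZ tick constant and a hypothetical helper name:

static bool wait_for_message(struct event_queue *q)
{
    /* NULL ev: return either when a message is queued or when one second
       (HZ ticks) has elapsed; the queue itself is left untouched. */
    queue_wait_w_tmo(q, NULL, HZ);

    /* Only the owning thread dequeues, so a non-empty queue here means a
       message really did arrive before the timeout. */
    return !queue_empty(q);
}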
@@ -740,23 +756,99 @@ void queue_reply(struct event_queue *q, intptr_t retval)
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
-bool queue_peek(struct event_queue *q, struct queue_event *ev)
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+/* Scan the event queue from head to tail, returning the first event that
+   matches the filter list, optionally removing the event. If an
+ event is returned, synchronous events are handled in the same manner as
+ with queue_wait(_w_tmo); if discarded, then as queue_clear.
+ If filters are NULL, any event matches. If filters exist, the default
+ is to search the full queue depth.
+ Earlier filters take precedence.
+
+ Return true if an event was found, false otherwise. */
+bool queue_peek_ex(struct event_queue *q, struct queue_event *ev,
+ unsigned int flags, const long (*filters)[2])
{
- unsigned int rd;
+ bool have_msg;
+ unsigned int rd, wr;
+ int oldlevel;
- if(q->read == q->write)
- return false;
+ if(LIKELY(q->read == q->write))
+ return false; /* Empty: do nothing further */
- bool have_msg = false;
+ have_msg = false;
- int oldlevel = disable_irq_save();
+ oldlevel = disable_irq_save();
corelock_lock(&q->cl);
- rd = q->read;
- if(rd != q->write)
+ /* Starting at the head, find first match */
+ for(rd = q->read, wr = q->write; rd != wr; rd++)
{
- *ev = q->events[rd & QUEUE_LENGTH_MASK];
+ struct queue_event *e = &q->events[rd & QUEUE_LENGTH_MASK];
+
+ if(filters)
+ {
+ /* Have filters - find the first thing that passes */
+ const long (* f)[2] = filters;
+ const long (* const f_last)[2] =
+ &filters[flags & QPEEK_FILTER_COUNT_MASK];
+ long id = e->id;
+
+ do
+ {
+ if(UNLIKELY(id >= (*f)[0] && id <= (*f)[1]))
+ goto passed_filter;
+ }
+ while(++f <= f_last);
+
+ if(LIKELY(!(flags & QPEEK_FILTER_HEAD_ONLY)))
+ continue; /* No match; test next event */
+ else
+ break; /* Only check the head */
+ }
+ /* else - anything passes */
+
+ passed_filter:
+
+ /* Found a matching event */
have_msg = true;
+
+ if(ev)
+ *ev = *e; /* Caller wants the event */
+
+ if(flags & QPEEK_REMOVE_EVENTS)
+ {
+ /* Do event removal */
+ unsigned int r = q->read;
+ q->read = r + 1; /* Advance head */
+
+ if(ev)
+ {
+ /* Auto-reply */
+ queue_do_auto_reply(q->send);
+ /* Get the thread waiting for reply, if any */
+ queue_do_fetch_sender(q->send, rd & QUEUE_LENGTH_MASK);
+ }
+ else
+ {
+ /* Release any thread waiting on this message */
+ queue_do_unblock_sender(q->send, rd & QUEUE_LENGTH_MASK);
+ }
+
+ /* Slide messages forward into the gap if not at the head */
+ while(rd != r)
+ {
+ unsigned int dst = rd & QUEUE_LENGTH_MASK;
+ unsigned int src = --rd & QUEUE_LENGTH_MASK;
+
+ q->events[dst] = q->events[src];
+ /* Keep sender wait list in sync */
+ if(q->send)
+ q->send->senders[dst] = q->send->senders[src];
+ }
+ }
+
+ break;
}
corelock_unlock(&q->cl);
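
queue_peek_ex() takes an array of inclusive [first, last] id ranges. Judging by the loop above (f_last is &filters[flags & QPEEK_FILTER_COUNT_MASK] and the do/while is inclusive), the count bits in flags appear to encode the number of ranges minus one, so a single range can leave them zero, exactly as queue_remove_from_head() does in the next hunk. A hedged sketch of a non-destructive filtered peek, with made-up message ids:

static bool peek_for_my_messages(struct event_queue *q, struct queue_event *ev)
{
    /* One inclusive id range; MY_MSG_FIRST/MY_MSG_LAST are hypothetical. */
    const long filters[1][2] = { { MY_MSG_FIRST, MY_MSG_LAST } };

    /* No QPEEK_REMOVE_EVENTS: a matching event (if any) is copied into *ev
       but left queued for the normal queue_wait() loop to consume later. */
    return queue_peek_ex(q, ev, 0, filters);
}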
@@ -765,30 +857,42 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
return have_msg;
}
-/* Poll queue to see if a message exists - careful in using the result if
- * queue_remove_from_head is called when messages are posted - possibly use
- * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
- * unsignals the queue may cause an unwanted block */
-bool queue_empty(const struct event_queue* q)
+bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
- return ( q->read == q->write );
+ return queue_peek_ex(q, ev, 0, NULL);
}
-void queue_clear(struct event_queue* q)
+void queue_remove_from_head(struct event_queue *q, long id)
{
- int oldlevel;
+ const long f[2] = { id, id };
+ while (queue_peek_ex(q, NULL,
+ QPEEK_FILTER_HEAD_ONLY | QPEEK_REMOVE_EVENTS, &f));
+}
+#else /* !HAVE_EXTENDED_MESSAGING_AND_NAME */
+/* The more powerful routines aren't required */
+bool queue_peek(struct event_queue *q, struct queue_event *ev)
+{
+ unsigned int rd;
- oldlevel = disable_irq_save();
- corelock_lock(&q->cl);
+ if(q->read == q->write)
+ return false;
- /* Release all threads waiting in the queue for a reply -
- dequeued sent message will be handled by owning thread */
- queue_release_all_senders(q);
+ bool have_msg = false;
- q->read = q->write;
+ int oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ rd = q->read;
+ if(rd != q->write)
+ {
+ *ev = q->events[rd & QUEUE_LENGTH_MASK];
+ have_msg = true;
+ }
corelock_unlock(&q->cl);
restore_irq(oldlevel);
+
+ return have_msg;
}
void queue_remove_from_head(struct event_queue *q, long id)
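
On extended-messaging builds, the queue_remove_from_head() above is now just a head-only, removing peek repeated while the head still matches, which makes it handy for coalescing duplicate posts. A hypothetical use (MY_REDRAW and the target queue are made-up names; queue_post(q, id, data) is assumed to be the ordinary post routine from this same file):

static void request_redraw(struct event_queue *q)
{
    /* Drop any MY_REDRAW events already waiting at the head, then post a
       fresh one, so consecutive duplicates do not pile up. */
    queue_remove_from_head(q, MY_REDRAW);
    queue_post(q, MY_REDRAW, 0);
}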
@@ -816,6 +920,33 @@ void queue_remove_from_head(struct event_queue *q, long id)
corelock_unlock(&q->cl);
restore_irq(oldlevel);
}
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+/* Poll queue to see if a message exists - careful in using the result if
+ * queue_remove_from_head is called when messages are posted - possibly use
+ * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
+ * unsignals the queue may cause an unwanted block */
+bool queue_empty(const struct event_queue* q)
+{
+ return ( q->read == q->write );
+}
+
+void queue_clear(struct event_queue* q)
+{
+ int oldlevel;
+
+ oldlevel = disable_irq_save();
+ corelock_lock(&q->cl);
+
+ /* Release all threads waiting in the queue for a reply -
+ dequeued sent message will be handled by owning thread */
+ queue_release_all_senders(q);
+
+ q->read = q->write;
+
+ corelock_unlock(&q->cl);
+ restore_irq(oldlevel);
+}
/**
* The number of events waiting in the queue.