summaryrefslogtreecommitdiffstats
path: root/apps/buffering.c
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2011-03-02 04:41:29 +0000
committerMichael Sevakis <jethead71@rockbox.org>2011-03-02 04:41:29 +0000
commit64647f34036684d2d69c525460ee6a6409f2a31f (patch)
tree9fe6595dea8581e760e91b46988545c95d070158 /apps/buffering.c
parentb3bfc09852007fed60bf71451a6c71df8c28b3ed (diff)
downloadrockbox-64647f34036684d2d69c525460ee6a6409f2a31f.tar.gz
rockbox-64647f34036684d2d69c525460ee6a6409f2a31f.tar.bz2
rockbox-64647f34036684d2d69c525460ee6a6409f2a31f.zip
buffering: Unusual cases when a handle ridx is briefly seeked ahead of widx need to be handled properly. In the best case, buffer useful would be wrong and in the worst, a packet audio move_handle delta would be quite incorrect, causing the handle to be moved too far.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29490 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps/buffering.c')
-rw-r--r--apps/buffering.c33
1 file changed, 19 insertions, 14 deletions
diff --git a/apps/buffering.c b/apps/buffering.c
index 54c6c05baa..2f6623a936 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -560,6 +560,17 @@ fill_buffer : Call buffer_handle for all handles that have data to buffer
These functions are used by the buffering thread to manage buffer space.
*/
+static size_t handle_size_available(const struct memory_handle *h)
+{
+ /* Obtain proper distances from data start */
+ size_t rd = ringbuf_sub(h->ridx, h->data);
+ size_t wr = ringbuf_sub(h->widx, h->data);
+
+ if (LIKELY(wr > rd))
+ return wr - rd;
+
+ return 0; /* ridx is ahead of or equal to widx at this time */
+}
static void update_data_counters(struct data_counters *dc)
{
@@ -582,6 +593,8 @@ static void update_data_counters(struct data_counters *dc)
m = first_handle;
while (m) {
buffered += m->available;
+ /* wasted could come out larger than the buffer size if ridx's are
+ overlapping data ahead of their handles' buffered data */
wasted += ringbuf_sub(m->ridx, m->data);
remaining += m->filerem;
@@ -589,7 +602,7 @@ static void update_data_counters(struct data_counters *dc)
is_useful = true;
if (is_useful)
- useful += ringbuf_sub(m->widx, m->ridx);
+ useful += handle_size_available(m);
m = m->next;
}
@@ -795,7 +808,11 @@ static void shrink_handle(struct memory_handle *h)
}
} else {
/* only move the handle struct */
- delta = ringbuf_sub(h->ridx, h->data);
+ size_t rd = ringbuf_sub(h->ridx, h->data);
+ size_t wr = ringbuf_sub(h->widx, h->data);
+
+ /* ridx could be ahead of widx on a mini rebuffer */
+ delta = MIN(rd, wr);
if (!move_handle(&h, &delta, 0, true))
return;
@@ -1245,18 +1262,6 @@ int bufadvance(int handle_id, off_t offset)
* actual amount of data available for reading. This function explicitly
* does not check the validity of the input handle. It does do range checks
* on size and returns a valid (and explicit) amount of data for reading */
-static size_t handle_size_available(const struct memory_handle *h)
-{
- /* Obtain proper distances from data start */
- size_t rd = ringbuf_sub(h->ridx, h->data);
- size_t wr = ringbuf_sub(h->widx, h->data);
-
- if (LIKELY(wr > rd))
- return wr - rd;
-
- return 0; /* ridx is ahead of or equal to widx at this time */
-}
-
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
bool guardbuf_limit)
{