author      Nicolas Pennequin <nicolas.pennequin@free.fr>    2007-10-30 17:24:31 +0000
committer   Nicolas Pennequin <nicolas.pennequin@free.fr>    2007-10-30 17:24:31 +0000
commit      09bce70f17614563df09dedd82cff31298fb1a09 (patch)
tree        a7cca91074218d2e140b528dd86e657c556aa3ec /apps
parent      151b7c9038ba796cd87b6ff2904253e6a3962304 (diff)
download    rockbox-09bce70f17614563df09dedd82cff31298fb1a09.tar.gz
            rockbox-09bce70f17614563df09dedd82cff31298fb1a09.zip
Slight rework of the buffering logic:
* Don't rely only on ata_disk_is_active, and also do buffer filling after buffer handle requests. Should fix FS#8049.
* Shrink the handles at the last possible moment. This allows more seeking without rebuffering for long tracks and minimises buffer waste.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15377 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps')
-rw-r--r--  apps/buffering.c  31
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/apps/buffering.c b/apps/buffering.c
index 0325d4e4f3..65070a3556 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -1209,37 +1209,34 @@ void buffering_thread(void)
#if MEM > 8
/* If the disk is spinning, take advantage by filling the buffer */
- if (ata_disk_is_active() && queue_empty(&buffering_queue) &&
- data_counters.remaining > 0 &&
- data_counters.buffered < high_watermark)
+ if ((ata_disk_is_active() || ev.id == Q_BUFFER_HANDLE) &&
+ queue_empty(&buffering_queue))
{
- fill_buffer();
- update_data_counters();
- }
+ if (data_counters.remaining > 0 &&
+ data_counters.buffered < high_watermark)
+ {
+ fill_buffer();
+ update_data_counters();
+ }
- if (ata_disk_is_active() && queue_empty(&buffering_queue) &&
- num_handles > 0 && data_counters.useful < high_watermark)
- {
- call_buffer_low_callbacks();
+ if (num_handles > 0 && data_counters.useful < high_watermark)
+ {
+ call_buffer_low_callbacks();
+ }
}
#endif
if (ev.id == SYS_TIMEOUT && queue_empty(&buffering_queue))
{
if (data_counters.remaining > 0 &&
- data_counters.wasted > data_counters.buffered/2)
+ data_counters.useful < conf_watermark)
{
/* First work forward, shrinking any unmoveable handles */
shrink_buffer(true,false);
/* Then work forward following those up with moveable handles */
shrink_buffer(false,true);
- update_data_counters();
- }
-
- if (data_counters.remaining > 0 &&
- data_counters.buffered < conf_watermark)
- {
fill_buffer();
+ update_data_counters();
}
}
}
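
For reference, the relevant part of buffering_thread() after this change reads roughly as follows. This is a reconstruction from the hunk above, not a verbatim copy of the file: the enclosing event loop and the rest of the function are omitted, and indentation is approximate.

#if MEM > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        if ((ata_disk_is_active() || ev.id == Q_BUFFER_HANDLE) &&
            queue_empty(&buffering_queue))
        {
            if (data_counters.remaining > 0 &&
                data_counters.buffered < high_watermark)
            {
                fill_buffer();
                update_data_counters();
            }

            if (num_handles > 0 && data_counters.useful < high_watermark)
            {
                call_buffer_low_callbacks();
            }
        }
#endif

        if (ev.id == SYS_TIMEOUT && queue_empty(&buffering_queue))
        {
            if (data_counters.remaining > 0 &&
                data_counters.useful < conf_watermark)
            {
                /* First work forward, shrinking any unmoveable handles */
                shrink_buffer(true, false);
                /* Then work forward following those up with moveable handles */
                shrink_buffer(false, true);
                fill_buffer();
                update_data_counters();
            }
        }

Note that shrink_buffer() is now called only once the useful data in the buffer has dropped below conf_watermark, and only immediately before refilling, which is what defers handle shrinking to the last possible moment as described in the commit message.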