summary | refs | log | tree | commit | diff | stats
path: root/firmware/target/mips/mmu-mips.c
diff options
context:
space:
mode:
author: Aidan MacDonald <amachronic@protonmail.com> 2021-03-03 17:54:38 +0000
committer: Solomon Peachy <pizza@shaftnet.org> 2021-03-03 20:50:28 +0000
commit74a3d1f5be2d364a33f37e0ad621538df1bfba4b (patch)
tree8989db6f499d53384645a7a6c6ee84933764f7fd /firmware/target/mips/mmu-mips.c
parentf906df017dd7e82f8452cc479373a1b341a02bd9 (diff)
download: rockbox-74a3d1f5be2d364a33f37e0ad621538df1bfba4b.tar.gz
          rockbox-74a3d1f5be2d364a33f37e0ad621538df1bfba4b.zip
Fix MIPS cache operations and enable HAVE_CPU_CACHE_ALIGN on MIPS
- The range-based cache operations on MIPS were broken and only worked properly when BOTH the address and size were multiples of the cache line size. If this was not the case, the last cache line of the range would not be touched! The fix is to align the start/end pointers to cache lines before iterating.
- To my knowledge all MIPS processors have a cache, so HAVE_CPU_CACHE_ALIGN is now enabled by default. This also allows mmu-mips.c to use the CACHEALIGN_UP/DOWN macros.
- Make jz4760/system-target.h define its cache line size properly.

Change-Id: I1fcd04a59791daa233b9699f04d5ac1cc6bacee7
Diffstat (limited to 'firmware/target/mips/mmu-mips.c')
-rw-r--r--  firmware/target/mips/mmu-mips.c | 39
1 file changed, 24 insertions, 15 deletions
diff --git a/firmware/target/mips/mmu-mips.c b/firmware/target/mips/mmu-mips.c
index eb7004952e..f4ffbfa6ee 100644
--- a/firmware/target/mips/mmu-mips.c
+++ b/firmware/target/mips/mmu-mips.c
@@ -192,10 +192,11 @@ void commit_discard_dcache(void)
*/
void commit_discard_dcache_range(const void *base, unsigned int size)
{
- register char *s;
+ char *ptr = CACHEALIGN_DOWN((char*)base);
+ char *end = CACHEALIGN_UP((char*)base + size);
- for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
- __CACHE_OP(DCHitWBInv, s);
+ for(; ptr != end; ptr += CACHEALIGN_SIZE)
+ __CACHE_OP(DCHitWBInv, ptr);
SYNC_WB();
}
@@ -204,10 +205,11 @@ void commit_discard_dcache_range(const void *base, unsigned int size)
*/
void commit_dcache_range(const void *base, unsigned int size)
{
- register char *s;
+ char *ptr = CACHEALIGN_DOWN((char*)base);
+ char *end = CACHEALIGN_UP((char*)base + size);
- for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
- __CACHE_OP(DCHitWB, s);
+ for(; ptr != end; ptr += CACHEALIGN_SIZE)
+ __CACHE_OP(DCHitWB, ptr);
SYNC_WB();
}
@@ -217,17 +219,24 @@ void commit_dcache_range(const void *base, unsigned int size)
*/
void discard_dcache_range(const void *base, unsigned int size)
{
- register char *s;
+ char *ptr = CACHEALIGN_DOWN((char*)base);
+ char *end = CACHEALIGN_UP((char*)base + size);
- if (((int)base & CACHEALIGN_SIZE - 1) ||
- (((int)base + size) & CACHEALIGN_SIZE - 1)) {
- /* Overlapping sections, so we need to write back instead */
- commit_discard_dcache_range(base, size);
- return;
- };
+ if(ptr != base) {
+ /* Start of region not cache aligned */
+ __CACHE_OP(DCHitWBInv, ptr);
+ ptr += CACHEALIGN_SIZE;
+ }
+
+ if(base+size != end) {
+ /* End of region not cache aligned */
+ end -= CACHEALIGN_SIZE;
+ __CACHE_OP(DCHitWBInv, end);
+ }
- for (s=(char *)base; s<(char *)base+size; s+=CACHEALIGN_SIZE)
- __CACHE_OP(DCHitInv, s);
+ /* Interior of region is safe to discard */
+ for(; ptr != end; ptr += CACHEALIGN_SIZE)
+ __CACHE_OP(DCHitInv, ptr);
SYNC_WB();
}