-rw-r--r--  firmware/asm/arm/corelock.c  71
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c
index 713164e49b..b36a40b45b 100644
--- a/firmware/asm/arm/corelock.c
+++ b/firmware/asm/arm/corelock.c
@@ -28,69 +28,72 @@
* Wait for the corelock to become free and acquire it when it does.
*---------------------------------------------------------------------------
*/
-void __attribute__((naked)) corelock_lock(struct corelock *cl)
+void corelock_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
- "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+ "strb r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
- "strb r2, [r0, #2] \n" /* cl->turn = othercore */
+ "strb r2, [%[cl], #2] \n" /* cl->turn = othercore */
"1: \n"
- "ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
- "cmp r3, #0 \n" /* yes? lock acquired */
- "bxeq lr \n"
- "ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
- "cmp r3, r1 \n"
- "bxeq lr \n" /* yes? lock acquired */
- "b 1b \n" /* keep trying */
- : : "i"(&PROCESSOR_ID)
+ "ldrb r2, [%[cl], r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+ "cmp r2, #0 \n" /* yes? lock acquired */
+ "beq 2f \n"
+ "ldrb r2, [%[cl], #2] \n" /* || cl->turn == core ? */
+ "cmp r2, r1 \n"
+ "bne 1b \n" /* no? try again */
+ "2: \n" /* Done */
+ :
+ : [id] "i"(&PROCESSOR_ID), [cl] "r" (cl)
+ : "r1","r2","cc"
);
- (void)cl;
}
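
For reference, here is a rough C sketch of the locking loop this asm implements: a two-core Peterson's lock, where the complementary core IDs 0x55/0xaa shifted right by 7 give array indices 0/1. The names below (struct corelock_sketch, corelock_lock_sketch) and the struct definition are illustrative assumptions inferred from the asm's comments and byte offsets, not the real Rockbox declarations; PROCESSOR_ID is read as a byte through its address, exactly as the asm's "ldrb r1, [r1]" does.

/* Hypothetical layout matching the byte offsets used by the asm:
 * myl[0]/myl[1] at offsets 0/1, turn at offset 2. */
struct corelock_sketch {
    volatile unsigned char myl[2];
    volatile unsigned char turn;
};

static inline void corelock_lock_sketch(struct corelock_sketch *cl)
{
    unsigned char core  = *(volatile unsigned char *)&PROCESSOR_ID; /* 0x55 or 0xaa */
    unsigned char other = core ^ 0xff;                              /* the other core's ID */

    cl->myl[core >> 7] = core;   /* announce interest */
    cl->turn = other;            /* give the other core priority on a tie */

    /* Spin until the other core is not contending, or it is our turn. */
    while (cl->myl[other >> 7] != 0 && cl->turn != core)
        ;
}
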
/*---------------------------------------------------------------------------
* Try to acquire the corelock. If free, caller gets it, otherwise return 0.
*---------------------------------------------------------------------------
*/
-int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
+int corelock_try_lock(struct corelock *cl)
{
+ int rval = 0;
+
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
- "mov r3, r0 \n"
- "strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
+ "strb r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
- "strb r2, [r0, #2] \n" /* cl->turn = othercore */
- "ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
- "eors r0, r0, r2 \n" /* yes? lock acquired */
- "bxne lr \n"
- "ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
- "ands r0, r0, r1 \n"
- "streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
- "bx lr \n" /* return result */
- : : "i"(&PROCESSOR_ID)
+ "strb r2, [%[cl], #2] \n" /* cl->turn = othercore */
+ "ldrb %[rv], [%[cl], r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
+ "eors %[rv], %[rv], r2 \n"
+ "bne 1f \n" /* yes? lock acquired */
+ "ldrb %[rv], [%[cl], #2] \n" /* || cl->turn == core? */
+ "ands %[rv], %[rv], r1 \n"
+ "streqb %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
+ "1: \n" /* Done */
+ : [rv] "=r"(rval)
+ : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
+ : "r1","r2","cc"
);
- return 0;
- (void)cl;
+ return rval;
}
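
The try-lock path in rough C, using the same illustrative types as the sketch above: announce interest, yield the tie-break, then succeed only if the other core is not contending or the turn already points back at us; otherwise withdraw the interest flag. The asm returns whatever nonzero value lands in [rv] on success rather than exactly 1; the sketch normalizes that to 1.

static inline int corelock_try_lock_sketch(struct corelock_sketch *cl)
{
    unsigned char core  = *(volatile unsigned char *)&PROCESSOR_ID;
    unsigned char other = core ^ 0xff;

    cl->myl[core >> 7] = core;                /* announce interest */
    cl->turn = other;                         /* yield the tie-break */

    if (cl->myl[other >> 7] == 0 || cl->turn == core)
        return 1;                             /* lock acquired */

    cl->myl[core >> 7] = 0;                   /* contended and not our turn: back off */
    return 0;
}
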
/*---------------------------------------------------------------------------
* Release ownership of the corelock
*---------------------------------------------------------------------------
*/
-void __attribute__((naked)) corelock_unlock(struct corelock *cl)
+void corelock_unlock(struct corelock *cl)
{
asm volatile (
- "mov r1, %0 \n" /* r1 = PROCESSOR_ID */
+ "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"mov r2, #0 \n" /* cl->myl[core] = 0 */
- "strb r2, [r0, r1, lsr #7] \n"
- "bx lr \n"
- : : "i"(&PROCESSOR_ID)
+ "strb r2, [%[cl], r1, lsr #7] \n"
+ :
+ : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
+ : "r1","r2"
);
- (void)cl;
}
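
Unlock reduces to clearing this core's interest flag, which lets the other core's spin in corelock_lock fall through. A sketch under the same illustrative types, followed by the usual usage pattern around the real functions from this file (shared_update_example is a hypothetical caller):

static inline void corelock_unlock_sketch(struct corelock_sketch *cl)
{
    unsigned char core = *(volatile unsigned char *)&PROCESSOR_ID;

    cl->myl[core >> 7] = 0;   /* clear our interest flag */
}

void shared_update_example(struct corelock *cl)
{
    corelock_lock(cl);
    /* ... touch data shared between the two cores ... */
    corelock_unlock(cl);
}
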