author     Jens Arnold <amiconn@rockbox.org>  2010-02-08 21:59:24 +0000
committer  Jens Arnold <amiconn@rockbox.org>  2010-02-08 21:59:24 +0000
commit     1cc4bd8f86cf013813d52aeb2c8aa37989026dfc (patch)
tree       41fe60252fd1c90971bd6e7ed84b55d557a3b0cf
parent     9955e9a7df32418f20a8de27a3787d35bb9436f4 (diff)
APE: Fused vector math for the filters on ARMv6. Speedup is ~2.5% for -c2000, ~7% for -c3000 and higher.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@24569 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  apps/codecs/demac/libdemac/vector_math16_armv6.h | 495
1 file changed, 348 insertions(+), 147 deletions(-)
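For orientation before the diff: a plain-C model of what the new fused
routines compute. This sketch is not part of the commit; ORDER is the
filter order used throughout libdemac, and the in-place 16-bit add wraps
around just like the non-saturating SADD16 used by the assembly.

#include <stdint.h>

/* Reference model of vector_sp_add(): one pass over v1 both computes the
 * scalar product against f2 and adds s2 into v1, where the old code took
 * two passes (scalarproduct() followed by vector_add()). */
static inline int32_t vector_sp_add_ref(int16_t* v1, int16_t* f2,
                                        int16_t* s2)
{
    int32_t res = 0;
    for (int i = 0; i < ORDER; i++) {
        res += (int32_t)v1[i] * f2[i]; /* scalar product term: v1 . f2 */
        v1[i] += s2[i];                /* fused in-place add:  v1 += s2 */
    }
    return res; /* vector_sp_sub() is the same with v1[i] -= s2[i] */
}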
diff --git a/apps/codecs/demac/libdemac/vector_math16_armv6.h b/apps/codecs/demac/libdemac/vector_math16_armv6.h
index 61471103bd..0ace6c5811 100644
--- a/apps/codecs/demac/libdemac/vector_math16_armv6.h
+++ b/apps/codecs/demac/libdemac/vector_math16_armv6.h
@@ -24,148 +24,350 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110, USA
*/
-/* This version fetches data as 32 bit words, and *requires* v1 to be
- * 32 bit aligned, otherwise it will result either in a data abort, or
- * incorrect results (if ARM aligncheck is disabled). */
-static inline void vector_add(int16_t* v1, int16_t* v2)
+#define FUSED_VECTOR_MATH
+
+#if ORDER > 16
+#define BLOCK_REPEAT "3"
+#else
+#define BLOCK_REPEAT "1"
+#endif
+
+/* Calculate scalarproduct, then add a 2nd vector (fused for performance)
+ * This version fetches data as 32 bit words, and *requires* v1 to be
+ * 32 bit aligned. It also requires that f2 and s2 are either both 32 bit
+ * aligned or both unaligned. If either condition isn't met, it will either
+ * result in a data abort or incorrect results. */
+static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
{
+ int res;
#if ORDER > 32
int cnt = ORDER>>5;
#endif
-#if ORDER > 16
-#define ADD_SUB_BLOCKS "4"
+ asm volatile (
+#if ORDER > 32
+ "mov %[res], #0 \n"
+#endif
+ "tst %[f2], #2 \n"
+ "beq 20f \n"
+
+ "10: \n"
+ "ldrh r3, [%[f2]], #2 \n"
+ "ldrh r6, [%[s2]], #2 \n"
+ "ldmia %[f2]!, {r2,r4} \n"
+ "mov r3, r3, lsl #16 \n"
+ "mov r6, r6, lsl #16 \n"
+
+ "1: \n"
+ "ldmia %[s2]!, {r5,r7} \n"
+ "pkhtb r3, r3, r2 \n"
+ "pkhtb r2, r2, r4 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r6, r5, r6, asr #16 \n"
+ "pkhbt r5, r5, r7, lsl #16 \n"
+#if ORDER > 32
+ "smladx %[res], r0, r3, %[res] \n"
#else
-#define ADD_SUB_BLOCKS "2"
+ "smuadx %[res], r0, r3 \n"
#endif
+ "smladx %[res], r1, r2, %[res] \n"
+ "ldmia %[f2]!, {r2,r3} \n"
+ "sadd16 r0, r0, r6 \n"
+ "sadd16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+
+ ".rept " BLOCK_REPEAT "\n"
+ "ldmia %[s2]!, {r5,r6} \n"
+ "pkhtb r4, r4, r2 \n"
+ "pkhtb r2, r2, r3 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r7, r5, r7, asr #16 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
+ "smladx %[res], r0, r4, %[res] \n"
+ "smladx %[res], r1, r2, %[res] \n"
+ "ldmia %[f2]!, {r2,r4} \n"
+ "sadd16 r0, r0, r7 \n"
+ "sadd16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+ "ldmia %[s2]!, {r5,r7} \n"
+ "pkhtb r3, r3, r2 \n"
+ "pkhtb r2, r2, r4 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r6, r5, r6, asr #16 \n"
+ "pkhbt r5, r5, r7, lsl #16 \n"
+ "smladx %[res], r0, r3, %[res] \n"
+ "smladx %[res], r1, r2, %[res] \n"
+ "ldmia %[f2]!, {r2,r3} \n"
+ "sadd16 r0, r0, r6 \n"
+ "sadd16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+ ".endr \n"
+
+ "ldmia %[s2]!, {r5,r6} \n"
+ "pkhtb r4, r4, r2 \n"
+ "pkhtb r2, r2, r3 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r7, r5, r7, asr #16 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
+ "smladx %[res], r0, r4, %[res] \n"
+ "smladx %[res], r1, r2, %[res] \n"
+#if ORDER > 32
+ "subs %[cnt], %[cnt], #1 \n"
+ "ldmneia %[f2]!, {r2,r4} \n"
+ "sadd16 r0, r0, r7 \n"
+ "sadd16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+ "bne 1b \n"
+#else
+ "sadd16 r0, r0, r7 \n"
+ "sadd16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+#endif
+
+ "b 99f \n"
+
+ "20: \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r0, [%[v1]] \n"
- asm volatile (
- "tst %[v2], #2 \n"
- "beq 20f \n"
-
- "10: \n"
- "bic %[v2], %[v2], #2 \n"
- "ldmia %[v2]!, {r4-r5} \n"
- "1: \n"
- ".rept " ADD_SUB_BLOCKS "\n"
- "ldmia %[v2]!, {r6-r7} \n"
- "ldmia %[v1], {r0-r3} \n"
- "mov r5, r5, ror #16 \n"
- "pkhtb r4, r5, r4, asr #16 \n"
- "sadd16 r0, r0, r4 \n"
- "pkhbt r5, r5, r6, lsl #16 \n"
- "sadd16 r1, r1, r5 \n"
- "ldmia %[v2]!, {r4-r5} \n"
- "mov r7, r7, ror #16 \n"
- "pkhtb r6, r7, r6, asr #16 \n"
- "sadd16 r2, r2, r6 \n"
- "pkhbt r7, r7, r4, lsl #16 \n"
- "sadd16 r3, r3, r7 \n"
- "stmia %[v1]!, {r0-r3} \n"
- ".endr \n"
#if ORDER > 32
- "subs %[cnt], %[cnt], #1 \n"
- "bne 1b \n"
+ "1: \n"
+ "smlad %[res], r0, r4, %[res] \n"
+#else
+ "smuad %[res], r0, r4 \n"
#endif
- "b 99f \n"
-
- "20: \n"
- "1: \n"
- ".rept " ADD_SUB_BLOCKS "\n"
- "ldmia %[v2]!, {r4-r7} \n"
- "ldmia %[v1], {r0-r3} \n"
- "sadd16 r0, r0, r4 \n"
- "sadd16 r1, r1, r5 \n"
- "sadd16 r2, r2, r6 \n"
- "sadd16 r3, r3, r7 \n"
- "stmia %[v1]!, {r0-r3} \n"
- ".endr \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r1, r5, %[res] \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r2, [%[v1], #8] \n"
+ "sadd16 r0, r0, r6 \n"
+ "sadd16 r1, r1, r7 \n"
+ "strd r0, [%[v1]], #8 \n"
+
+ ".rept " BLOCK_REPEAT "\n"
+ "smlad %[res], r2, r4, %[res] \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r3, r5, %[res] \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r0, [%[v1], #8] \n"
+ "sadd16 r2, r2, r6 \n"
+ "sadd16 r3, r3, r7 \n"
+ "strd r2, [%[v1]], #8 \n"
+ "smlad %[res], r0, r4, %[res] \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r1, r5, %[res] \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r2, [%[v1], #8] \n"
+ "sadd16 r0, r0, r6 \n"
+ "sadd16 r1, r1, r7 \n"
+ "strd r0, [%[v1]], #8 \n"
+ ".endr \n"
+
+ "smlad %[res], r2, r4, %[res] \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r3, r5, %[res] \n"
#if ORDER > 32
- "subs %[cnt], %[cnt], #1 \n"
- "bne 1b \n"
+ "subs %[cnt], %[cnt], #1 \n"
+ "ldrned r4, [%[f2]], #8 \n"
+ "ldrned r0, [%[v1], #8] \n"
+ "sadd16 r2, r2, r6 \n"
+ "sadd16 r3, r3, r7 \n"
+ "strd r2, [%[v1]], #8 \n"
+ "bne 1b \n"
+#else
+ "sadd16 r2, r2, r6 \n"
+ "sadd16 r3, r3, r7 \n"
+ "strd r2, [%[v1]], #8 \n"
#endif
- "99: \n"
+ "99: \n"
: /* outputs */
#if ORDER > 32
[cnt]"+r"(cnt),
#endif
[v1] "+r"(v1),
- [v2] "+r"(v2)
+ [f2] "+r"(f2),
+ [s2] "+r"(s2),
+ [res]"=r"(res)
: /* inputs */
: /* clobbers */
"r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "memory"
);
+ return res;
}
-/* This version fetches data as 32 bit words, and *requires* v1 to be
- * 32 bit aligned, otherwise it will result either in a data abort, or
- * incorrect results (if ARM aligncheck is disabled). */
-static inline void vector_sub(int16_t* v1, int16_t* v2)
+/* Calculate scalarproduct, then subtract a 2nd vector (fused for performance)
+ * This version fetches data as 32 bit words, and *requires* v1 to be
+ * 32 bit aligned. It also requires that f2 and s2 are either both 32 bit
+ * aligned or both unaligned. If either condition isn't met, it will either
+ * result in a data abort or incorrect results. */
+static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
{
+ int res;
#if ORDER > 32
int cnt = ORDER>>5;
#endif
asm volatile (
- "tst %[v2], #2 \n"
- "beq 20f \n"
-
- "10: \n"
- "bic %[v2], %[v2], #2 \n"
- "ldmia %[v2]!, {r4-r5} \n"
- "1: \n"
- ".rept " ADD_SUB_BLOCKS "\n"
- "ldmia %[v2]!, {r6-r7} \n"
- "ldmia %[v1], {r0-r3} \n"
- "mov r5, r5, ror #16 \n"
- "pkhtb r4, r5, r4, asr #16 \n"
- "ssub16 r0, r0, r4 \n"
- "pkhbt r5, r5, r6, lsl #16 \n"
- "ssub16 r1, r1, r5 \n"
- "ldmia %[v2]!, {r4-r5} \n"
- "mov r7, r7, ror #16 \n"
- "pkhtb r6, r7, r6, asr #16 \n"
- "ssub16 r2, r2, r6 \n"
- "pkhbt r7, r7, r4, lsl #16 \n"
- "ssub16 r3, r3, r7 \n"
- "stmia %[v1]!, {r0-r3} \n"
- ".endr \n"
#if ORDER > 32
- "subs %[cnt], %[cnt], #1 \n"
- "bne 1b \n"
+ "mov %[res], #0 \n"
+#endif
+ "tst %[f2], #2 \n"
+ "beq 20f \n"
+
+ "10: \n"
+ "ldrh r3, [%[f2]], #2 \n"
+ "ldrh r6, [%[s2]], #2 \n"
+ "ldmia %[f2]!, {r2,r4} \n"
+ "mov r3, r3, lsl #16 \n"
+ "mov r6, r6, lsl #16 \n"
+
+ "1: \n"
+ "ldmia %[s2]!, {r5,r7} \n"
+ "pkhtb r3, r3, r2 \n"
+ "pkhtb r2, r2, r4 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r6, r5, r6, asr #16 \n"
+ "pkhbt r5, r5, r7, lsl #16 \n"
+#if ORDER > 32
+ "smladx %[res], r0, r3, %[res] \n"
+#else
+ "smuadx %[res], r0, r3 \n"
+#endif
+ "smladx %[res], r1, r2, %[res] \n"
+ "ldmia %[f2]!, {r2,r3} \n"
+ "ssub16 r0, r0, r6 \n"
+ "ssub16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+
+ ".rept " BLOCK_REPEAT "\n"
+ "ldmia %[s2]!, {r5,r6} \n"
+ "pkhtb r4, r4, r2 \n"
+ "pkhtb r2, r2, r3 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r7, r5, r7, asr #16 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
+ "smladx %[res], r0, r4, %[res] \n"
+ "smladx %[res], r1, r2, %[res] \n"
+ "ldmia %[f2]!, {r2,r4} \n"
+ "ssub16 r0, r0, r7 \n"
+ "ssub16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+ "ldmia %[s2]!, {r5,r7} \n"
+ "pkhtb r3, r3, r2 \n"
+ "pkhtb r2, r2, r4 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r6, r5, r6, asr #16 \n"
+ "pkhbt r5, r5, r7, lsl #16 \n"
+ "smladx %[res], r0, r3, %[res] \n"
+ "smladx %[res], r1, r2, %[res] \n"
+ "ldmia %[f2]!, {r2,r3} \n"
+ "ssub16 r0, r0, r6 \n"
+ "ssub16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+ ".endr \n"
+
+ "ldmia %[s2]!, {r5,r6} \n"
+ "pkhtb r4, r4, r2 \n"
+ "pkhtb r2, r2, r3 \n"
+ "ldrd r0, [%[v1]] \n"
+ "mov r5, r5, ror #16 \n"
+ "pkhtb r7, r5, r7, asr #16 \n"
+ "pkhbt r5, r5, r6, lsl #16 \n"
+ "smladx %[res], r0, r4, %[res] \n"
+ "smladx %[res], r1, r2, %[res] \n"
+#if ORDER > 32
+ "subs %[cnt], %[cnt], #1 \n"
+ "ldmneia %[f2]!, {r2,r4} \n"
+ "ssub16 r0, r0, r7 \n"
+ "ssub16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+ "bne 1b \n"
+#else
+ "ssub16 r0, r0, r7 \n"
+ "ssub16 r1, r1, r5 \n"
+ "strd r0, [%[v1]], #8 \n"
+#endif
+
+ "b 99f \n"
+
+ "20: \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r0, [%[v1]] \n"
+
+#if ORDER > 32
+ "1: \n"
+ "smlad %[res], r0, r4, %[res] \n"
+#else
+ "smuad %[res], r0, r4 \n"
#endif
- "b 99f \n"
-
- "20: \n"
- "1: \n"
- ".rept " ADD_SUB_BLOCKS "\n"
- "ldmia %[v2]!, {r4-r7} \n"
- "ldmia %[v1], {r0-r3} \n"
- "ssub16 r0, r0, r4 \n"
- "ssub16 r1, r1, r5 \n"
- "ssub16 r2, r2, r6 \n"
- "ssub16 r3, r3, r7 \n"
- "stmia %[v1]!, {r0-r3} \n"
- ".endr \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r1, r5, %[res] \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r2, [%[v1], #8] \n"
+ "ssub16 r0, r0, r6 \n"
+ "ssub16 r1, r1, r7 \n"
+ "strd r0, [%[v1]], #8 \n"
+
+ ".rept " BLOCK_REPEAT "\n"
+ "smlad %[res], r2, r4, %[res] \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r3, r5, %[res] \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r0, [%[v1], #8] \n"
+ "ssub16 r2, r2, r6 \n"
+ "ssub16 r3, r3, r7 \n"
+ "strd r2, [%[v1]], #8 \n"
+ "smlad %[res], r0, r4, %[res] \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r1, r5, %[res] \n"
+ "ldrd r4, [%[f2]], #8 \n"
+ "ldrd r2, [%[v1], #8] \n"
+ "ssub16 r0, r0, r6 \n"
+ "ssub16 r1, r1, r7 \n"
+ "strd r0, [%[v1]], #8 \n"
+ ".endr \n"
+
+ "smlad %[res], r2, r4, %[res] \n"
+ "ldrd r6, [%[s2]], #8 \n"
+ "smlad %[res], r3, r5, %[res] \n"
#if ORDER > 32
- "subs %[cnt], %[cnt], #1 \n"
- "bne 1b \n"
+ "subs %[cnt], %[cnt], #1 \n"
+ "ldrned r4, [%[f2]], #8 \n"
+ "ldrned r0, [%[v1], #8] \n"
+ "ssub16 r2, r2, r6 \n"
+ "ssub16 r3, r3, r7 \n"
+ "strd r2, [%[v1]], #8 \n"
+ "bne 1b \n"
+#else
+ "ssub16 r2, r2, r6 \n"
+ "ssub16 r3, r3, r7 \n"
+ "strd r2, [%[v1]], #8 \n"
#endif
- "99: \n"
+ "99: \n"
: /* outputs */
#if ORDER > 32
[cnt]"+r"(cnt),
#endif
[v1] "+r"(v1),
- [v2] "+r"(v2)
+ [f2] "+r"(f2),
+ [s2] "+r"(s2),
+ [res]"=r"(res)
: /* inputs */
: /* clobbers */
"r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "memory"
);
+ return res;
}
/* This version fetches data as 32 bit words, and *requires* v1 to be
@@ -178,12 +380,6 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
int cnt = ORDER>>5;
#endif
-#if ORDER > 16
-#define MLA_BLOCKS "3"
-#else
-#define MLA_BLOCKS "1"
-#endif
-
asm volatile (
#if ORDER > 32
"mov %[res], #0 \n"
@@ -194,80 +390,85 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
"10: \n"
"bic %[v2], %[v2], #2 \n"
"ldmia %[v2]!, {r5-r7} \n"
- "ldmia %[v1]!, {r0-r1} \n"
+ "ldrd r0, [%[v1]], #8 \n"
+
"1: \n"
- "pkhbt r8, r6, r5 \n"
- "ldmia %[v2]!, {r4-r5} \n"
+ "pkhtb r3, r5, r6 \n"
+ "ldrd r4, [%[v2]], #8 \n"
#if ORDER > 32
- "smladx %[res], r0, r8, %[res] \n"
+ "smladx %[res], r0, r3, %[res] \n"
#else
- "smuadx %[res], r0, r8 \n"
+ "smuadx %[res], r0, r3 \n"
#endif
- ".rept " MLA_BLOCKS "\n"
- "pkhbt r8, r7, r6 \n"
- "ldmia %[v1]!, {r2-r3} \n"
- "smladx %[res], r1, r8, %[res] \n"
- "pkhbt r8, r4, r7 \n"
- "ldmia %[v2]!, {r6-r7} \n"
- "smladx %[res], r2, r8, %[res] \n"
- "pkhbt r8, r5, r4 \n"
- "ldmia %[v1]!, {r0-r1} \n"
- "smladx %[res], r3, r8, %[res] \n"
- "pkhbt r8, r6, r5 \n"
- "ldmia %[v2]!, {r4-r5} \n"
- "smladx %[res], r0, r8, %[res] \n"
+ ".rept " BLOCK_REPEAT "\n"
+ "pkhtb r0, r6, r7 \n"
+ "ldrd r2, [%[v1]], #8 \n"
+ "smladx %[res], r1, r0, %[res] \n"
+ "pkhtb r1, r7, r4 \n"
+ "ldrd r6, [%[v2]], #8 \n"
+ "smladx %[res], r2, r1, %[res] \n"
+ "pkhtb r2, r4, r5 \n"
+ "ldrd r0, [%[v1]], #8 \n"
+ "smladx %[res], r3, r2, %[res] \n"
+ "pkhtb r3, r5, r6 \n"
+ "ldrd r4, [%[v2]], #8 \n"
+ "smladx %[res], r0, r3, %[res] \n"
".endr \n"
-
- "pkhbt r8, r7, r6 \n"
- "ldmia %[v1]!, {r2-r3} \n"
- "smladx %[res], r1, r8, %[res] \n"
- "pkhbt r8, r4, r7 \n"
+
+ "pkhtb r0, r6, r7 \n"
+ "ldrd r2, [%[v1]], #8 \n"
+ "smladx %[res], r1, r0, %[res] \n"
+ "pkhtb r1, r7, r4 \n"
#if ORDER > 32
"subs %[cnt], %[cnt], #1 \n"
- "ldmneia %[v2]!, {r6-r7} \n"
- "smladx %[res], r2, r8, %[res] \n"
- "pkhbt r8, r5, r4 \n"
- "ldmneia %[v1]!, {r0-r1} \n"
- "smladx %[res], r3, r8, %[res] \n"
+ "ldrned r6, [%[v2]], #8 \n"
+ "smladx %[res], r2, r1, %[res] \n"
+ "pkhtb r2, r4, r5 \n"
+ "ldrned r0, [%[v1]], #8 \n"
+ "smladx %[res], r3, r2, %[res] \n"
"bne 1b \n"
#else
- "pkhbt r5, r5, r4 \n"
- "smladx %[res], r2, r8, %[res] \n"
- "smladx %[res], r3, r5, %[res] \n"
+ "pkhtb r4, r4, r5 \n"
+ "smladx %[res], r2, r1, %[res] \n"
+ "smladx %[res], r3, r4, %[res] \n"
#endif
- "b 99f \n"
+ "b 99f \n"
+
"20: \n"
- "ldmia %[v1]!, {r0-r1} \n"
+ "ldrd r0, [%[v1]], #8 \n"
"ldmia %[v2]!, {r5-r7} \n"
+
"1: \n"
- "ldmia %[v1]!, {r2-r3} \n"
+ "ldrd r2, [%[v1]], #8 \n"
#if ORDER > 32
"smlad %[res], r0, r5, %[res] \n"
#else
"smuad %[res], r0, r5 \n"
#endif
- ".rept " MLA_BLOCKS "\n"
- "ldmia %[v2]!, {r4-r5} \n"
+ ".rept " BLOCK_REPEAT "\n"
+ "ldrd r4, [%[v2]], #8 \n"
"smlad %[res], r1, r6, %[res] \n"
- "ldmia %[v1]!, {r0-r1} \n"
+ "ldrd r0, [%[v1]], #8 \n"
"smlad %[res], r2, r7, %[res] \n"
- "ldmia %[v2]!, {r6-r7} \n"
+ "ldrd r6, [%[v2]], #8 \n"
"smlad %[res], r3, r4, %[res] \n"
- "ldmia %[v1]!, {r2-r3} \n"
+ "ldrd r2, [%[v1]], #8 \n"
"smlad %[res], r0, r5, %[res] \n"
".endr \n"
- "ldmia %[v2]!, {r4-r5} \n"
- "smlad %[res], r1, r6, %[res] \n"
#if ORDER > 32
+ "ldrd r4, [%[v2]], #8 \n"
+ "smlad %[res], r1, r6, %[res] \n"
"subs %[cnt], %[cnt], #1 \n"
- "ldmneia %[v1]!, {r0-r1} \n"
+ "ldrned r0, [%[v1]], #8 \n"
"smlad %[res], r2, r7, %[res] \n"
- "ldmneia %[v2]!, {r6-r7} \n"
+ "ldrned r6, [%[v2]], #8 \n"
"smlad %[res], r3, r4, %[res] \n"
"bne 1b \n"
#else
+ "ldr r4, [%[v2]], #4 \n"
+ "smlad %[res], r1, r6, %[res] \n"
"smlad %[res], r2, r7, %[res] \n"
"smlad %[res], r3, r4, %[res] \n"
#endif
@@ -282,8 +483,8 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
[res]"=r"(res)
: /* inputs */
: /* clobbers */
- "r0", "r1", "r2", "r3", "r4",
- "r5", "r6", "r7", "r8"
+ "r0", "r1", "r2", "r3",
+ "r4", "r5", "r6", "r7"
);
return res;
}
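Why FUSED_VECTOR_MATH is defined at the top of the header: the filter code
can test for it and call the fused routines in place of a scalarproduct()
followed by a separate vector_add()/vector_sub() pass over the coefficients.
The sketch below illustrates that calling pattern under stated assumptions
(the function name, error-sign convention and variable names are guesses
for illustration, not quoted from this commit or from filter.c):

#include <stdint.h>

/* Hypothetical caller (assumed names): folds the adaptation step into the
 * scalar product when the fused routines are available, saving one full
 * pass over the coefficient vector per sample. */
static int32_t filter_step_sketch(int16_t* coeffs, int16_t* delay,
                                  int16_t* adaptcoeffs, int32_t err)
{
    int32_t res;
#ifdef FUSED_VECTOR_MATH
    if (err < 0)
        res = vector_sp_add(coeffs, delay, adaptcoeffs);
    else if (err > 0)
        res = vector_sp_sub(coeffs, delay, adaptcoeffs);
    else
        res = scalarproduct(coeffs, delay);
#else
    res = scalarproduct(coeffs, delay);
    if (err < 0)
        vector_add(coeffs, adaptcoeffs);
    else if (err > 0)
        vector_sub(coeffs, adaptcoeffs);
#endif
    return res;
}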