author     Dave Hooper <dave@beermex.com>    2010-02-17 00:49:53 +0000
committer  Dave Hooper <dave@beermex.com>    2010-02-17 00:49:53 +0000
commit     42774d3128b91d5a37344cb40d56d3c4d147e5f2 (patch)
tree       bf336b407992ec9a5e454556f3351e3f8a0d10de
parent     62257ebc38bc0a3095b25dd0f58c4c8215edf602 (diff)
Merge from branches/mdctexp - faster ifft+imdct in codec lib
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@24712 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  apps/codecs/lib/SOURCES                 |   6
-rw-r--r--  apps/codecs/lib/asm_arm.h               |  60
-rw-r--r--  apps/codecs/lib/asm_mcf5249.h           |  24
-rw-r--r--  apps/codecs/lib/codeclib.h              |   9
-rw-r--r--  apps/codecs/lib/codeclib_misc.h         |  25
-rw-r--r--  apps/codecs/lib/fft-ffmpeg.c            | 467
-rw-r--r--  apps/codecs/lib/fft-ffmpeg_arm.h        | 342
-rw-r--r--  apps/codecs/lib/fft.h                   |  64
-rw-r--r--  apps/codecs/lib/mdct.c                  | 414
-rw-r--r--  apps/codecs/lib/mdct.h                  | 141
-rw-r--r--  apps/codecs/lib/mdct_lookup.c           | 321
-rw-r--r--  apps/codecs/lib/mdct_lookup.h           |   1
-rw-r--r--  apps/codecs/liba52/a52_internal.h       |   8
-rw-r--r--  apps/codecs/liba52/downmix.c            |  76
-rw-r--r--  apps/codecs/liba52/imdct.c              | 133
-rw-r--r--  apps/codecs/liba52/parse.c              |  20
-rw-r--r--  apps/codecs/libatrac/atrac3.c           |   7
-rw-r--r--  apps/codecs/libatrac/atrac3.h           |   2
-rw-r--r--  apps/codecs/libcook/cook.c              |   1
-rw-r--r--  apps/codecs/libcook/cook.h              |   1
-rw-r--r--  apps/codecs/libcook/cook_fixpoint.h     |   5
-rw-r--r--  apps/codecs/libtremor/block.c           |   2
-rw-r--r--  apps/codecs/libtremor/codec_internal.h  |   3
-rw-r--r--  apps/codecs/libtremor/info.c            |   6
-rw-r--r--  apps/codecs/libtremor/ivorbiscodec.h    |   2
-rw-r--r--  apps/codecs/libtremor/mapping0.c        |   8
-rw-r--r--  apps/codecs/libtremor/synthesis.c       |   5
-rw-r--r--  apps/codecs/libwma/wmadec.h             |   2
-rw-r--r--  apps/codecs/libwma/wmadeci.c            |  22
-rw-r--r--  apps/codecs/libwma/wmafixed.c           | 110
-rw-r--r--  apps/codecs/libwma/wmafixed.h           |  45
31 files changed, 2006 insertions, 326 deletions
diff --git a/apps/codecs/lib/SOURCES b/apps/codecs/lib/SOURCES
index 42bb1138d1..da77f97d30 100644
--- a/apps/codecs/lib/SOURCES
+++ b/apps/codecs/lib/SOURCES
@@ -2,8 +2,14 @@
codeclib.c
fixedpoint.c
+/* OLD MDCT */
+/* (when all other codecs are remediated this can be removed) */
mdct2.c
mdct_lookup.c
+
+fft-ffmpeg.c
+mdct.c
+
#ifdef CPU_ARM
mdct_arm.S
setjmp_arm.S
diff --git a/apps/codecs/lib/asm_arm.h b/apps/codecs/lib/asm_arm.h
index 89606184da..4f31f80c3e 100644
--- a/apps/codecs/lib/asm_arm.h
+++ b/apps/codecs/lib/asm_arm.h
@@ -23,7 +23,7 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
int lo,hi;
asm volatile("smull\t%0, %1, %2, %3"
: "=&r"(lo),"=&r"(hi)
- : "%r"(x),"r"(y) );
+ : "r"(x),"r"(y) );
return(hi);
}
@@ -37,7 +37,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
"movs %0, %0, lsr #15\n\t"
"adc %1, %0, %1, lsl #17\n\t"
: "=&r"(lo),"=&r"(hi)
- : "%r"(x),"r"(y)
+ : "r"(x),"r"(y)
: "cc" );
return(hi);
}
@@ -45,13 +45,13 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
#define XPROD32(a, b, t, v, x, y) \
{ \
long l; \
- asm( "smull %0, %1, %4, %6\n\t" \
- "rsb %3, %4, #0\n\t" \
- "smlal %0, %1, %5, %7\n\t" \
- "smull %0, %2, %5, %6\n\t" \
- "smlal %0, %2, %3, %7" \
- : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
- : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
+ asm( "smull %0, %1, %3, %5\n\t" \
+ "rsb %2, %6, #0\n\t" \
+ "smlal %0, %1, %4, %6\n\t" \
+ "smull %0, %2, %3, %2\n\t" \
+ "smlal %0, %2, %4, %5" \
+ : "=&r" (l), "=&r" (x), "=&r" (y) \
+ : "r" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
}
static inline void XPROD31(int32_t a, int32_t b,
@@ -59,13 +59,13 @@ static inline void XPROD31(int32_t a, int32_t b,
int32_t *x, int32_t *y)
{
int x1, y1, l;
- asm( "smull %0, %1, %4, %6\n\t"
- "rsb %3, %4, #0\n\t"
- "smlal %0, %1, %5, %7\n\t"
- "smull %0, %2, %5, %6\n\t"
- "smlal %0, %2, %3, %7"
- : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
- : "3" (a), "r" (b), "r" (t), "r" (v) );
+ asm( "smull %0, %1, %3, %5\n\t"
+ "rsb %2, %6, #0\n\t"
+ "smlal %0, %1, %4, %6\n\t"
+ "smull %0, %2, %3, %2\n\t"
+ "smlal %0, %2, %4, %5"
+ : "=&r" (l), "=&r" (x1), "=&r" (y1)
+ : "r" (a), "r" (b), "r" (t), "r" (v) );
*x = x1 << 1;
*y = y1 << 1;
}
@@ -86,6 +86,34 @@ static inline void XNPROD31(int32_t a, int32_t b,
*y = y1 << 1;
}
+#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
+{\
+ int x1, y1, l;\
+ asm( "smull %0, %1, %5, %3\n\t"\
+ "rsb %2, %3, #0\n\t"\
+ "smlal %0, %1, %6, %4\n\t"\
+ "smull %0, %2, %6, %2\n\t"\
+ "smlal %0, %2, %5, %4"\
+ : "=&r" (l), "=&r" (x1), "=&r" (y1)\
+ : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
+ _x = x1 << 1;\
+ _y = y1 << 1;\
+}
+
+#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
+{\
+ int x1, y1, l;\
+ asm( "smull %0, %1, %5, %3\n\t"\
+ "rsb %2, %4, #0\n\t"\
+ "smlal %0, %1, %6, %2\n\t"\
+ "smull %0, %2, %5, %4\n\t"\
+ "smlal %0, %2, %6, %3"\
+ : "=&r" (l), "=&r" (x1), "=&r" (y1)\
+ : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
+ _x = x1 << 1;\
+ _y = y1 << 1;\
+}
+
#ifndef _V_VECT_OPS
#define _V_VECT_OPS
diff --git a/apps/codecs/lib/asm_mcf5249.h b/apps/codecs/lib/asm_mcf5249.h
index 8378accb2a..33b2c9aa9d 100644
--- a/apps/codecs/lib/asm_mcf5249.h
+++ b/apps/codecs/lib/asm_mcf5249.h
@@ -125,6 +125,30 @@ void XNPROD31(int32_t a, int32_t b,
[t] "r" (_t), [v] "r" (_v) \
: "cc");
+#define XPROD31_R(_a, _b, _t, _v, _x, _y) \
+ asm volatile ("mac.l %[a], %[t], %%acc0;" \
+ "mac.l %[b], %[v], %%acc0;" \
+ "mac.l %[b], %[t], %%acc1;" \
+ "msac.l %[a], %[v], %%acc1;" \
+ "movclr.l %%acc0, %[x];" \
+ "movclr.l %%acc1, %[y];" \
+ : [x] "+&d" (_x), [y] "=&d" (_y) \
+ : [a] "r" (_a), [b] "r" (_b), \
+ [t] "r" (_t), [v] "r" (_v) \
+ : "cc");
+
+#define XNPROD31_R(_a, _b, _t, _v, _x, _y) \
+ asm volatile ("mac.l %[a], %[t], %%acc0;" \
+ "msac.l %[b], %[v], %%acc0;" \
+ "mac.l %[b], %[t], %%acc1;" \
+ "mac.l %[a], %[v], %%acc1;" \
+ "movclr.l %%acc0, %[x];" \
+ "movclr.l %%acc1, %[y];" \
+ : [x] "+&d" (_x), [y] "=&d" (_y) \
+ : [a] "r" (_a), [b] "r" (_b), \
+ [t] "r" (_t), [v] "r" (_v) \
+ : "cc");
+
#ifndef _V_VECT_OPS
#define _V_VECT_OPS
diff --git a/apps/codecs/lib/codeclib.h b/apps/codecs/lib/codeclib.h
index 6dda3e794c..817d86a6a3 100644
--- a/apps/codecs/lib/codeclib.h
+++ b/apps/codecs/lib/codeclib.h
@@ -25,6 +25,8 @@
#include "config.h"
#include "codecs.h"
#include <sys/types.h>
+#include "mdct.h"
+#include "fft.h"
extern struct codec_api *ci;
extern size_t mem_ptr;
@@ -62,8 +64,13 @@ int strcmp(const char *, const char *);
void qsort(void *base, size_t nmemb, size_t size, int(*compar)(const void *, const void *));
/*MDCT library functions*/
-
+/* -1- Tremor mdct */
extern void mdct_backward(int n, int32_t *in, int32_t *out);
+/* -2- ffmpeg fft-based mdct */
+extern void ff_imdct_half(unsigned int nbits, int32_t *output, const int32_t *input);
+extern void ff_imdct_calc(unsigned int nbits, int32_t *output, const int32_t *input);
+/*ffmpeg fft (can be used without mdct)*/
+extern void ff_fft_calc_c(int nbits, FFTComplex *z);
#if !defined(CPU_ARM) || ARM_ARCH < 5
/* From libavutil/common.h */
diff --git a/apps/codecs/lib/codeclib_misc.h b/apps/codecs/lib/codeclib_misc.h
index 015a15ece3..6749231ebb 100644
--- a/apps/codecs/lib/codeclib_misc.h
+++ b/apps/codecs/lib/codeclib_misc.h
@@ -132,23 +132,36 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
#else
-static inline void XPROD31(int32_t a, int32_t b,
- int32_t t, int32_t v,
- int32_t *x, int32_t *y)
+static inline void XPROD31(int32_t a, int32_t b,
+ int32_t t, int32_t v,
+ int32_t *x, int32_t *y)
{
*x = MULT31(a, t) + MULT31(b, v);
*y = MULT31(b, t) - MULT31(a, v);
}
-static inline void XNPROD31(int32_t a, int32_t b,
- int32_t t, int32_t v,
- int32_t *x, int32_t *y)
+static inline void XNPROD31(int32_t a, int32_t b,
+ int32_t t, int32_t v,
+ int32_t *x, int32_t *y)
{
*x = MULT31(a, t) - MULT31(b, v);
*y = MULT31(b, t) + MULT31(a, v);
}
#endif
+#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
+{\
+ _x = MULT31(_a, _t) + MULT31(_b, _v);\
+ _y = MULT31(_b, _t) - MULT31(_a, _v);\
+}
+
+#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
+{\
+ _x = MULT31(_a, _t) - MULT31(_b, _v);\
+ _y = MULT31(_b, _t) + MULT31(_a, _v);\
+}
+
+
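[Aside, a minimal sketch that is not part of the patch: the generic C macros above make the fixed-point semantics easy to check. XNPROD31_R computes the complex product (a + ib)(t + iv); XPROD31_R multiplies by the conjugate (t - iv). A quick numeric check with Q31 operands, reusing MULT31 from this header:]

    int32_t x, y;
    int32_t a = 0x40000000, b = 0;           /* 0.5 + 0.0i in Q31          */
    int32_t t = 0x5A82799A, v = 0x5A82799A;  /* cos(pi/4), sin(pi/4) in Q31 */
    XPROD31_R(a, b, t, v, x, y);
    /* x = MULT31(a,t) + MULT31(b,v) = 0x2D413CCD  (~0.3536)  */
    /* y = MULT31(b,t) - MULT31(a,v) = -0x2D413CCD (~-0.3536) */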
#ifndef _V_VECT_OPS
#define _V_VECT_OPS
diff --git a/apps/codecs/lib/fft-ffmpeg.c b/apps/codecs/lib/fft-ffmpeg.c
new file mode 100644
index 0000000000..f08b7fa2eb
--- /dev/null
+++ b/apps/codecs/lib/fft-ffmpeg.c
@@ -0,0 +1,467 @@
+/*
+ * FFT/IFFT transforms converted to integer precision
+ * Copyright (c) 2010 Dave Hooper, Mohamed Tarek, Michael Giacomelli
+ * Copyright (c) 2008 Loren Merritt
+ * Copyright (c) 2002 Fabrice Bellard
+ * Partly based on libdjbfft by D. J. Bernstein
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libavcodec/fft.c
+ * FFT/IFFT transforms.
+ */
+
+
+#ifdef CPU_ARM
+// we definitely want CONFIG_SMALL undefined for ipod
+// so we get the inlined version of fft16 (which is measurably faster)
+#undef CONFIG_SMALL
+#else
+#undef CONFIG_SMALL
+#endif
+
+#include "fft.h"
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include <inttypes.h>
+#include <time.h>
+#include <codecs/lib/codeclib.h>
+
+#include "asm_arm.h"
+#include "asm_mcf5249.h"
+#include "codeclib_misc.h"
+#include "mdct_lookup.h"
+
+static void ff_fft_permute_c(FFTContext *s, FFTComplex *z);
+
+/* constants for fft_16 (same constants as in mdct_arm.S ... ) */
+#define cPI1_8 (0x7641af3d) /* cos(pi/8) s.31 */
+#define cPI2_8 (0x5a82799a) /* cos(2pi/8) = 1/sqrt(2) s.31 */
+#define cPI3_8 (0x30fbc54d) /* cos(3pi/8) s.31 */
+
+/* asm-optimised functions and/or macros */
+#include "fft-ffmpeg_arm.h"
+
+static int split_radix_permutation(int i, int n, int inverse)
+{
+ int m;
+ if(n <= 2) return i&1;
+ m = n >> 1;
+ if(!(i&m)) return split_radix_permutation(i, m, inverse)*2;
+ m >>= 1;
+ if(inverse == !(i&m)) return split_radix_permutation(i, m, inverse)*4 + 1;
+ else return split_radix_permutation(i, m, inverse)*4 - 1;
+}
+
+static void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
+{
+ int j, k, np;
+ FFTComplex tmp;
+ //const uint16_t *revtab = s->revtab;
+ np = 1 << s->nbits;
+
+ const int revtab_shift = (12 - s->nbits);
+
+ /* reverse */
+ for(j=0;j<np;j++) {
+ k = revtab[j]>>revtab_shift;
+ if (k < j) {
+ tmp = z[k];
+ z[k] = z[j];
+ z[j] = tmp;
+ }
+ }
+}
+
+#define BF(x,y,a,b) {\
+ x = a - b;\
+ y = a + b;\
+}
+
+#define BF_REV(x,y,a,b) {\
+ x = a + b;\
+ y = a - b;\
+}
+
+#ifndef FFT_FFMPEG_INCL_OPTIMISED_BUTTERFLIES
+#define BUTTERFLIES(a0,a1,a2,a3) {\
+ {\
+ FFTSample temp1,temp2;\
+ BF(temp1, temp2, t5, t1);\
+ BF(a2.re, a0.re, a0.re, temp2);\
+ BF(a3.im, a1.im, a1.im, temp1);\
+ }\
+ {\
+ FFTSample temp1,temp2;\
+ BF(temp1, temp2, t2, t6);\
+ BF(a3.re, a1.re, a1.re, temp1);\
+ BF(a2.im, a0.im, a0.im, temp2);\
+ }\
+}
+
+// force loading all the inputs before storing any.
+// this is slightly slower for small data, but avoids store->load aliasing
+// for addresses separated by large powers of 2.
+#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
+ FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
+ {\
+ FFTSample temp1, temp2;\
+ BF(temp1, temp2, t5, t1);\
+ BF(a2.re, a0.re, r0, temp2);\
+ BF(a3.im, a1.im, i1, temp1);\
+ }\
+ {\
+ FFTSample temp1, temp2;\
+ BF(temp1, temp2, t2, t6);\
+ BF(a3.re, a1.re, r1, temp1);\
+ BF(a2.im, a0.im, i0, temp2);\
+ }\
+}
+#endif
+
+/*
+ see conjugate pair description in
+ http://www.fftw.org/newsplit.pdf
+
+ a0 = z[k]
+ a1 = z[k+N/4]
+ a2 = z[k+2N/4]
+ a3 = z[k+3N/4]
+
+ result:
+ y[k] = z[k]+w(z[k+2N/4])+w'(z[k+3N/4])
+ y[k+N/4] = z[k+N/4]-iw(z[k+2N/4])+iw'(z[k+3N/4])
+ y[k+2N/4] = z[k]-w(z[k+2N/4])-w'(z[k+3N/4])
+ y[k+3N/4] = z[k+N/4]+iw(z[k+2N/4])-iw'(z[k+3N/4])
+
+ i.e.
+
+ a0 = a0 + (w.a2 + w'.a3)
+ a1 = a1 - i(w.a2 - w'.a3)
+ a2 = a0 - (w.a2 + w'.a3)
+ a3 = a1 + i(w.a2 - w'.a3)
+
+ note re(w') = re(w) and im(w') = -im(w)
+
+ so therefore
+
+ re(a0) = re(a0) + re(w.a2) + re(w'.a3)
+ im(a0) = im(a0) + im(w.a2) + im(w'.a3)  etc
+
+ and remember also that
+ Re([s+it][u+iv]) = su-tv
+ Im([s+it][u+iv]) = sv+tu
+
+ so
+ Re(w'.(s+it)) = Re(w').s - Im(w').t = Re(w).s + Im(w).t
+ Im(w'.(s+it)) = Re(w').t + Im(w').s = Re(w).t - Im(w).s
+
+ For inverse dft we take the complex conjugate of all twiddle factors.
+ Hence
+
+ a0 = a0 + (w'.a2 + w.a3)
+ a1 = a1 - i(w'.a2 - w.a3)
+ a2 = a0 - (w'.a2 + w.a3)
+ a3 = a1 + i(w'.a2 - w.a3)
+
+ Define t1 = Re(w'.a2) = Re(w)*Re(a2) + Im(w)*Im(a2)
+ t2 = Im(w'.a2) = Re(w)*Im(a2) - Im(w)*Re(a2)
+ t5 = Re(w.a3) = Re(w)*Re(a3) - Im(w)*Im(a3)
+ t6 = Im(w.a3) = Re(w)*Im(a3) + Im(w)*Re(a3)
+
+ Then we just output:
+ a0.re = a0.re + ( t1 + t5 )
+ a0.im = a0.im + ( t2 + t6 )
+ a1.re = a1.re + ( t2 - t6 ) // since we multiply by -i and i(-i) = 1
+ a1.im = a1.im - ( t1 - t5 ) // since we multiply by -i and 1(-i) = -i
+ a2.re = a0.re - ( t1 + t5 )
+ a2.im = a0.im - ( t2 + t6 )
+ a3.re = a1.re - ( t2 - t6 ) // since we multiply by +i and i(+i) = -1
+ a3.im = a1.im + ( t1 - t5 ) // since we multiply by +i and 1(+i) = i
+
+
+*/
+
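[Aside, not part of the patch: the eight output equations above, written out directly in floating point, make a handy reference when checking the fixed-point macros below. A minimal sketch:]

    typedef struct { double re, im; } cpx;

    /* inverse-dft butterfly: t1,t2 = w'.a2 ; t5,t6 = w.a3 */
    static void transform_ref(cpx *a0, cpx *a1, cpx *a2, cpx *a3,
                              double wre, double wim)
    {
        double t1 = wre * a2->re + wim * a2->im;
        double t2 = wre * a2->im - wim * a2->re;
        double t5 = wre * a3->re - wim * a3->im;
        double t6 = wre * a3->im + wim * a3->re;

        cpx r0 = *a0, r1 = *a1;
        a0->re = r0.re + (t1 + t5);  a0->im = r0.im + (t2 + t6);
        a1->re = r1.re + (t2 - t6);  a1->im = r1.im - (t1 - t5);
        a2->re = r0.re - (t1 + t5);  a2->im = r0.im - (t2 + t6);
        a3->re = r1.re - (t2 - t6);  a3->im = r1.im + (t1 - t5);
    }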
+#ifndef FFT_FFMPEG_INCL_OPTIMISED_TRANSFORM
+static inline void TRANSFORM(FFTComplex * z, unsigned int n, FFTSample wre, FFTSample wim)
+{
+ register FFTSample t1,t2,t5,t6,r_re,r_im;
+ r_re = z[n*2].re;
+ r_im = z[n*2].im;
+ XPROD31_R(r_re, r_im, wre, wim, t1,t2);
+ r_re = z[n*3].re;
+ r_im = z[n*3].im;
+ XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
+ BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
+}
+
+static inline void TRANSFORM_W01(FFTComplex * z, unsigned int n, const FFTSample * w)
+{
+ register const FFTSample wre=w[0],wim=w[1];
+ register FFTSample t1,t2,t5,t6,r_re,r_im;
+ r_re = z[n*2].re;
+ r_im = z[n*2].im;
+ XPROD31_R(r_re, r_im, wre, wim, t1,t2);
+ r_re = z[n*3].re;
+ r_im = z[n*3].im;
+ XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
+ BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
+}
+
+static inline void TRANSFORM_W10(FFTComplex * z, unsigned int n, const FFTSample * w)
+{
+ register const FFTSample wim=w[0],wre=w[1];
+ register FFTSample t1,t2,t5,t6,r_re,r_im;
+ r_re = z[n*2].re;
+ r_im = z[n*2].im;
+ XPROD31_R(r_re, r_im, wre, wim, t1,t2);
+ r_re = z[n*3].re;
+ r_im = z[n*3].im;
+ XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
+ BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
+}
+
+static inline void TRANSFORM_EQUAL(FFTComplex * z, unsigned int n)
+{
+ register FFTSample t1,t2,t5,t6,temp1,temp2;
+ register FFTSample * my_z = (FFTSample *)(z);
+ my_z += n*4;
+ t2 = MULT31(my_z[0], cPI2_8);
+ temp1 = MULT31(my_z[1], cPI2_8);
+ my_z += n*2;
+ temp2 = MULT31(my_z[0], cPI2_8);
+ t5 = MULT31(my_z[1], cPI2_8);
+ t1 = ( temp1 + t2 );
+ t2 = ( temp1 - t2 );
+ t6 = ( temp2 + t5 );
+ t5 = ( temp2 - t5 );
+ my_z -= n*6;
+ BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
+}
+
+static inline void TRANSFORM_ZERO(FFTComplex * z, unsigned int n)
+{
+ FFTSample t1,t2,t5,t6;
+ t1 = z[n*2].re;
+ t2 = z[n*2].im;
+ t5 = z[n*3].re;
+ t6 = z[n*3].im;
+ BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
+}
+#endif
+
+/* z[0...8n-1], w[1...2n-1] */
+static void pass(FFTComplex *z_arg, unsigned int STEP_arg, unsigned int n_arg)
+{
+ register FFTComplex * z = z_arg;
+ register unsigned int STEP = STEP_arg;
+ register unsigned int n = n_arg;
+
+ register const FFTSample *w = sincos_lookup0+STEP;
+ /* wre = *(wim+1) . ordering is sin,cos */
+ register const FFTSample *w_end = sincos_lookup0+1024;
+
+ /* first two are special (well, first one is special, but we need to do pairs) */
+ TRANSFORM_ZERO(z,n);
+ z++;
+ TRANSFORM_W10(z,n,w);
+ w += STEP;
+ /* first pass forwards through sincos_lookup0*/
+ do {
+ z++;
+ TRANSFORM_W10(z,n,w);
+ w += STEP;
+ z++;
+ TRANSFORM_W10(z,n,w);
+ w += STEP;
+ } while(LIKELY(w < w_end));
+ /* second half: pass backwards through sincos_lookup0*/
+ /* wim and wre are now in opposite places so ordering now [0],[1] */
+ w_end=sincos_lookup0;
+ while(LIKELY(w>w_end))
+ {
+ z++;
+ TRANSFORM_W01(z,n,w);
+ w -= STEP;
+ z++;
+ TRANSFORM_W01(z,n,w);
+ w -= STEP;
+ }
+}
+
+/* what is STEP?
+   sincos_lookup0 stores sin,cos pairs for a 1/4 cycle in 1024 entries,
+   so a half cycle would take 2048 entries.
+   ffmpeg's ff_cos_16 has 8 elements: 4 cos values interleaved with 4 sin
+   values, so each of its 4 pairs corresponds to a jump of 8192/16 = 512
+   entries (256 sin,cos pairs) in sincos_lookup0.
+   i.e. for fft16, STEP = 8192/16; in general fft##n uses STEP = 8192/n */
+#define DECL_FFT(n,n2,n4)\
+static void fft##n(FFTComplex *z)\
+{\
+ fft##n2(z);\
+ fft##n4(z+n4*2);\
+ fft##n4(z+n4*3);\
+ pass(z,8192/n,n4);\
+}
+
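[For instance, DECL_FFT(32,16,8) below expands to:]

    static void fft32(FFTComplex *z)
    {
        fft16(z);
        fft8(z+16);
        fft8(z+24);
        pass(z, 8192/32, 8);
    }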
+#ifndef FFT_FFMPEG_INCL_OPTIMISED_FFT4
+static inline void fft4(FFTComplex *z)
+{
+ FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+ BF(t3, t1, z[0].re, z[1].re); // t3=r1-r3 ; t1 = r1+r3
+ BF(t8, t6, z[3].re, z[2].re); // t8=r7-r5 ; t6 = r7+r5
+
+ BF(z[2].re, z[0].re, t1, t6); // r5=t1-t6 ; r1 = t1+t6
+
+ BF(t4, t2, z[0].im, z[1].im); // t4=r2-r4 ; t2 = r2+r4
+ BF(t7, t5, z[2].im, z[3].im); // t7=r6-r8 ; t5 = r6+r8
+
+ BF(z[3].im, z[1].im, t4, t8); // r8=t4-t8 ; r4 = t4+t8
+ BF(z[3].re, z[1].re, t3, t7); // r7=t3-t7 ; r3 = t3+t7
+ BF(z[2].im, z[0].im, t2, t5); // r6=t2-t5 ; r2 = t2+t5
+}
+#endif
+
+static void fft4_dispatch(FFTComplex *z)
+{
+ fft4(z);
+}
+
+#ifndef FFT_FFMPEG_INCL_OPTIMISED_FFT8
+static inline void fft8(FFTComplex *z)
+{
+ fft4(z);
+ FFTSample t1,t2,t3,t4,t7,t8;
+
+ BF(t1, z[5].re, z[4].re, -z[5].re);
+ BF(t2, z[5].im, z[4].im, -z[5].im);
+ BF(t3, z[7].re, z[6].re, -z[7].re);
+ BF(t4, z[7].im, z[6].im, -z[7].im);
+ BF(t8, t1, t3, t1);
+ BF(t7, t2, t2, t4);
+ BF(z[4].re, z[0].re, z[0].re, t1);
+ BF(z[4].im, z[0].im, z[0].im, t2);
+ BF(z[6].re, z[2].re, z[2].re, t7);
+ BF(z[6].im, z[2].im, z[2].im, t8);
+
+ z++;
+ TRANSFORM_EQUAL(z,2);
+}
+#endif
+
+static void fft8_dispatch(FFTComplex *z)
+{
+ fft8(z);
+}
+
+#ifndef CONFIG_SMALL
+static void fft16(FFTComplex *z)
+{
+ fft8(z);
+ fft4(z+8);
+ fft4(z+12);
+
+ TRANSFORM_ZERO(z,4);
+ z+=2;
+ TRANSFORM_EQUAL(z,4);
+ z-=1;
+ TRANSFORM(z,4,cPI1_8,cPI3_8);
+ z+=2;
+ TRANSFORM(z,4,cPI3_8,cPI1_8);
+}
+#else
+DECL_FFT(16,8,4)
+#endif
+DECL_FFT(32,16,8)
+DECL_FFT(64,32,16)
+DECL_FFT(128,64,32)
+DECL_FFT(256,128,64)
+DECL_FFT(512,256,128)
+DECL_FFT(1024,512,256)
+DECL_FFT(2048,1024,512)
+DECL_FFT(4096,2048,1024)
+
+static void (*fft_dispatch[])(FFTComplex*) = {
+ fft4_dispatch, fft8_dispatch, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
+ fft2048, fft4096
+};
+
+void ff_fft_calc_c(int nbits, FFTComplex *z)
+{
+ fft_dispatch[nbits-2](z);
+}
+
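[Usage sketch, not part of the patch: ff_fft_calc_c() takes the log2 of the transform length, and the dispatch table is indexed with nbits-2, so sizes 4..4096 map to indices 0..10. The input must already be in split-radix bit-reversed order (see ff_fft_permute_c above, or the prerotation in mdct.c, which writes directly in that order):]

    FFTComplex buf[512];
    /* ... fill buf[] with Q31 samples, bit-reverse ordered ... */
    ff_fft_calc_c(9, buf);   /* 2^9 = 512-point in-place FFT */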
+#if 0
+int main (void)
+{
+#define PRECISION 16
+#define FFT_SIZE 1024
+#define ftofix32(x) ((fixed32)((x) * (float)(1 << PRECISION) + ((x) < 0 ? -0.5 : 0.5)))
+#define itofix32(x) ((x) << PRECISION)
+#define fixtoi32(x) ((x) >> PRECISION)
+#define fixtof32(x) ((float)(x) / (float)(1 << PRECISION))
+
+ int j;
+ const long N = FFT_SIZE;
+ double r[FFT_SIZE] = {0.0}, i[FFT_SIZE] = {0.0};
+ long n;
+ double t;
+ double amp, phase;
+ clock_t start, end;
+ double exec_time = 0;
+ FFTContext s;
+ FFTComplex z[FFT_SIZE];
+ memset(z, 0, FFT_SIZE*sizeof(FFTComplex));
+
+ /* Generate saw-tooth test data */
+ for (n = 0; n < FFT_SIZE; n++)
+ {
+ t = (2 * M_PI * n)/N;
+ /*z[n].re = 1.1 + sin( t) +
+ 0.5 * sin(2.0 * t) +
+ (1.0/3.0) * sin(3.0 * t) +
+ 0.25 * sin(4.0 * t) +
+ 0.2 * sin(5.0 * t) +
+ (1.0/6.0) * sin(6.0 * t) +
+ (1.0/7.0) * sin(7.0 * t) ;*/
+ z[n].re = ftofix32(cos(2*M_PI*n/64));
+ //printf("z[%d] = %f\n", n, z[n].re);
+ //getchar();
+ }
+
+ s.nbits = 10; /* no ff_fft_init() in this port; ff_fft_permute_c only needs nbits */
+//start = clock();
+//for(n = 0; n < 1000000; n++)
+ ff_fft_permute_c(&s, z);
+ ff_fft_calc_c(s.nbits, z);
+//end = clock();
+//exec_time = (((double)end-(double)start)/CLOCKS_PER_SEC);
+ for(j = 0; j < FFT_SIZE; j++)
+ {
+ printf("%8.4f\n", sqrt(pow(fixtof32(z[j].re),2)+ pow(fixtof32(z[j].im), 2)));
+ //getchar();
+ }
+ //printf("muls = %d, adds = %d\n", muls, adds); /* op counters not defined here */
+//printf(" Time elapsed = %f\n", exec_time);
+ //ff_fft_end(&s);
+
+}
+#endif
diff --git a/apps/codecs/lib/fft-ffmpeg_arm.h b/apps/codecs/lib/fft-ffmpeg_arm.h
new file mode 100644
index 0000000000..94969b4b3d
--- /dev/null
+++ b/apps/codecs/lib/fft-ffmpeg_arm.h
@@ -0,0 +1,342 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id: $
+ *
+ * Copyright (C) 2010 Dave Hooper
+ *
+ * ARM optimisations for ffmpeg's fft (used in fft-ffmpeg.c)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#ifdef CPU_ARM
+
+/* Start off with optimised variants of the butterflies that work
+ nicely on arm */
+/* 1. where y and a share the same variable/register */
+#define BF_OPT(x,y,a,b) {\
+ y = a + b;\
+ x = y - (b<<1);\
+}
+
+/* 2. where y and b share the same variable/register */
+#define BF_OPT2(x,y,a,b) {\
+ x = a - b;\
+ y = x + (b<<1);\
+}
+
+/* 3. where y and b share the same variable/register (but y=(-b)) */
+#define BF_OPT2_REV(x,y,a,b) {\
+ x = a + b;\
+ y = x - (b<<1);\
+}
+
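[All three variants compute the same two-point butterfly as the plain BF/BF_REV macros in fft-ffmpeg.c, just re-sequenced so one output can reuse an input's register. A worked check, illustration only, with a=5, b=3:]

    BF_OPT(x,y,5,3):      y = 5+3 = 8;  x = 8 - (3<<1) = 2   /* x=a-b, y=a+b */
    BF_OPT2(x,y,5,3):     x = 5-3 = 2;  y = 2 + (3<<1) = 8   /* x=a-b, y=a+b */
    BF_OPT2_REV(x,y,5,3): x = 5+3 = 8;  y = 8 - (3<<1) = 2   /* x=a+b, y=a-b */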
+/* standard BUTTERFLIES package. Note, we actually manually inline this
+ in all the TRANSFORM macros below anyway */
+#define FFT_FFMPEG_INCL_OPTIMISED_BUTTERFLIES
+#define BUTTERFLIES(a0,a1,a2,a3) {\
+ {\
+ BF_OPT(t1, t5, t5, t1);\
+ BF_OPT(t6, t2, t2, t6);\
+ BF_OPT(a2.re, a0.re, a0.re, t5);\
+ BF_OPT(a2.im, a0.im, a0.im, t2);\
+ BF_OPT(a3.re, a1.re, a1.re, t6);\
+ BF_OPT(a3.im, a1.im, a1.im, t1);\
+ }\
+}
+
+#define FFT_FFMPEG_INCL_OPTIMISED_TRANSFORM
+
+/* on ARM, all the TRANSFORM_etc inlines use the following registers:
+ r5,r6,r7,r8,r9,r10,r4,r12
+
+ inputs are: z, n, STEP
+
   NOTE THAT THESE MACROS ACTUALLY ADVANCE z IN PLACE,
   so the sequence of pointer updates (z += n*3, z -= n*2 etc) matters
+*/
+
+
+#define TRANSFORM_POST_STORE( z, n ) {\
+ /*{*/\
+ /* BF_OPT(t1, t5, t5, t1);*/\
+ /* BF_OPT(t6, t2, t2, t6);*/\
+ /* BF_OPT(a2.re, a0.re, a0.re, t5);*/\
+ /* BF_OPT(a2.im, a0.im, a0.im, t2);*/\
+ /* BF_OPT(a3.re, a1.re, a1.re, t6);*/\
+ /* BF_OPT(a3.im, a1.im, a1.im, t1);*/\
+ /*}*/\
+ z -= n*3;\
+ /* r_re = my_z[0]; r_im = my_z[1]; */\
+ {\
+ register FFTSample rt0temp asm("r4");\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ BF_OPT(rt0temp, r_re, r_re, t5);\
+ BF_OPT(t2, r_im, r_im, t2);\
+ /* my_z[0] = r_re; my_z[1] = r_im; */\
+ asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im));\
+ z += n;\
+ /* r_re = my_z[0]; r_im = my_z[1]; */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ BF_OPT(t5, r_re, r_re, t6);\
+ BF_OPT(t6, r_im, r_im, t1);\
+ /* my_z[0] = r_re; my_z[1] = r_im; */\
+ asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im));\
+ z += n;\
+ /* my_z[0] = rt0temp; my_z[1] = t2; */\
+ asm volatile( "stmia %[my_z], {%[rt0temp],%[t2]}\n\t"::[my_z] "r" (z), [rt0temp] "r" (rt0temp), [t2] "r" (t2));\
+ z += n;\
+ }\
+ /* my_z[0] = t5; my_z[1] = t6; */\
+ asm volatile( "stmia %[my_z], {%[t5],%[t6]}\n\t"::[my_z] "r" (z), [t5] "r" (t5), [t6] "r" (t6));\
+ z -= n*3;\
+}
+
+#define TRANSFORM( z, n, wre_arg, wim_arg )\
+{\
+ FFTSample wre = wre_arg, wim = wim_arg;\
+ register FFTSample t1 asm("r5"),t2 asm("r6"),t5 asm("r7"),t6 asm("r8"),r_re asm("r9"),r_im asm("r10");\
+ z += n*2; /* z[o2] */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ XPROD31_R(r_re, r_im, wre, wim, t1,t2);\
+ \
+ z += n; /* z[o3] */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ XNPROD31_R(r_re, r_im, wre, wim, t5,t6);\
+ \
+ BF_OPT(t1, t5, t5, t1);\
+ BF_OPT(t6, t2, t2, t6);\
+ TRANSFORM_POST_STORE( z, n );\
+}
+
+#define TRANSFORM_W01( z, n, w )\
+{\
+ register FFTSample t1 asm("r5"),t2 asm("r6"),t5 asm("r7"),t6 asm("r8"),r_re asm("r9"),r_im asm("r10");\
+ \
+ {\
+ register FFTSample wre asm("r4"),wim asm("r12");\
+ asm volatile( "ldmia %[w], {%[wre], %[wim]}\n\t":[wre] "=r" (wre), [wim] "=r" (wim):[w] "r" (w));\
+ z += n*2; /* z[o2] -- 2n * 2 since complex numbers */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ XPROD31_R(r_re, r_im, wre, wim, t1,t2);\
+\
+ z += n; /* z[o3] */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ XNPROD31_R(r_re, r_im, wre, wim, t5,t6);\
+ }\
+ \
+ BF_OPT(t1, t5, t5, t1);\
+ BF_OPT(t6, t2, t2, t6);\
+ TRANSFORM_POST_STORE( z, n );\
+}
+
+//static inline void TRANSFORM_W10(int32_t * z, unsigned int n, const int32_t * w)
+#define TRANSFORM_W10( z, n, w )\
+{\
+ register FFTSample t1 asm("r5"),t2 asm("r6"),t5 asm("r7"),t6 asm("r8"),r_re asm("r9"),r_im asm("r10");\
+ \
+ {\
+ register FFTSample wim asm("r4"),wre asm("r12");\
+ asm volatile( "ldmia %[w], {%[wim], %[wre]}\n\t":[wim] "=r" (wim), [wre] "=r" (wre):[w] "r" (w));\
+ z += n*2; /* z[o2] -- 2n * 2 since complex numbers */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ XPROD31_R(r_re, r_im, wre, wim, t1,t2);\
+\
+ z += n; /* z[o3] */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+ XNPROD31_R(r_re, r_im, wre, wim, t5,t6);\
+ }\
+ \
+ BF_OPT(t1, t5, t5, t1);\
+ BF_OPT(t6, t2, t2, t6);\
+ TRANSFORM_POST_STORE( z, n );\
+}
+
+#define TRANSFORM_EQUAL( z, n )\
+{\
+ register FFTSample t1 asm("r5"),t2 asm("r6"),t5 asm("r7"),t6 asm("r8"),r_re asm("r9"),r_im asm("r10");\
+\
+ z += n*2; /* z[o2] -- 2n * 2 since complex numbers */\
+ asm volatile( "ldmia %[my_z], {%[t5],%[t6]}\n\t":[t5] "=r" (t5), [t6] "=r" (t6):[my_z] "r" (z));\
+ z += n; /* z[o3] */\
+ asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));\
+\
+/**/\
+/*t2 = MULT32(cPI2_8, t5);*/\
+/*t1 = MULT31(cPI2_8, t6);*/\
+/*t6 = MULT31(cPI2_8, r_re);*/\
+/*t5 = MULT32(cPI2_8, r_im);*/\
+\
+/*t1 = ( t1 + (t2<<1) );*/\
+/*t2 = ( t1 - (t2<<2) );*/\
+/*t6 = ( t6 + (t5<<1) );*/\
+/*t5 = ( t6 - (t5<<2) );*/\
+/**/\
+ t2 = MULT31(cPI2_8, t5);\
+ t6 = MULT31(cPI2_8, t6);\
+ r_re = MULT31(cPI2_8, r_re);\
+ t5 = MULT31(cPI2_8, r_im);\
+ \
+ t1 = ( t6 + t2 );\
+ t2 = ( t6 - t2 );\
+ t6 = ( r_re + t5 );\
+ t5 = ( r_re - t5 );\
+ \
+ BF_OPT(t1, t5, t5, t1);\
+ BF_OPT(t6, t2, t2, t6);\
+ TRANSFORM_POST_STORE( z, n );\
+}
+
+#define TRANSFORM_ZERO( z,n )\
+{\
+ register FFTSample t1 asm("r5"),t2 asm("r6"),t5 asm("r7"),t6 asm("r8"),r_re asm("r9"),r_im asm("r10");\
+\
+ z += n*2; /* z[o2] -- 2n * 2 since complex numbers */\
+ asm volatile( "ldmia %[my_z], {%[t1],%[t2]}\n\t":[t1] "=r" (t1), [t2] "=r" (t2):[my_z] "r" (z));\
+ z += n; /* z[o3] */\
+ asm volatile( "ldmia %[my_z], {%[t5],%[t6]}\n\t":[t5] "=r" (t5), [t6] "=r" (t6):[my_z] "r" (z));\
+\
+ BF_OPT(t1, t5, t5, t1);\
+ BF_OPT(t6, t2, t2, t6);\
+ TRANSFORM_POST_STORE( z, n );\
+}
+
+#define FFT_FFMPEG_INCL_OPTIMISED_FFT4
+#define fft4(z_arg)\
+{\
+ /* input[0..7] -> output[0..7] */\
+ fixed32 * m = (fixed32 *) ( ( z_arg ) );\
+ /* load r1=z[0],r2=z[1],...,r8=z[7] */\
+ asm volatile(\
+ "ldmia %[z], {r1-r8}\n\t"\
+ "add r1,r1,r3\n\t" /* r1 :=t1 */\
+ "sub r3,r1,r3, lsl #1\n\t" /* r3 :=t3 */\
+ "sub r7,r7,r5\n\t" /* r10:=t8 */\
+ "add r5,r7,r5, lsl #1\n\t" /* r5 :=t6 */\
+ \
+ "add r1,r1,r5\n\t" /* r1 = o[0] */\
+ "sub r5,r1,r5, lsl #1\n\t" /* r5 = o[4] */\
+ \
+ "add r2,r2,r4\n\t" /* r2 :=t2 */\
+ "sub r4,r2,r4, lsl #1\n\t" /* r9 :=t4 */\
+ \
+ "add r12,r6,r8\n\t" /* r10:=t5 */\
+ "sub r6,r6,r8\n\t" /* r6 :=t7 */\
+ \
+ "sub r8,r4,r7\n\t" /* r8 = o[7]*/ \
+ "add r4,r4,r7\n\t" /* r4 = o[3]*/ \
+ "sub r7,r3,r6\n\t" /* r7 = o[6]*/ \
+ "add r3,r3,r6\n\t" /* r3 = o[2]*/ \
+ "sub r6,r2,r12\n\t" /* r6 = o[5]*/ \
+ "add r2,r2,r12\n\t" /* r2 = o[1]*/ \
+ \
+ "stmia %[z], {r1-r8}\n\t"\
+ : /* outputs */\
+ : /* inputs */ [z] "r" (m)\
+ : /* clobbers */\
+ "r1","r2","r3","r4","r5","r6","r7","r8","r12","memory"\
+ );\
+}
+
+
+#define FFT_FFMPEG_INCL_OPTIMISED_FFT8
+ /* The chunk of asm below is equivalent to the following:
+
+ // first load in z[4].re thru z[7].im into local registers
+ // ...
+ BF_OPT2_REV(z[4].re, z[5].re, z[4].re, z[5].re); // x=a+b; y=x-(b<<1)
+ BF_OPT2_REV(z[4].im, z[5].im, z[4].im, z[5].im);
+ BF_REV (temp, z[7].re, z[6].re, z[7].re); // x=a+b; y=a-b;
+ BF_REV (z[6].re, z[7].im, z[6].im, z[7].im);
+ // save z[7].re and z[7].im as those are complete now
+ // z[5].re and z[5].im are also complete now but save these later on
+
+ BF(z[6].im, z[4].re, temp, z[4].re); // x=a-b; y=a+b
+ BF_OPT(z[6].re, z[4].im, z[4].im, z[6].re); // y=a+b; x=y-(b<<1)
+ // now load z[2].re and z[2].im
+ // ...
+ BF_OPT(z[6].re, z[2].re, z[2].re, z[6].re); // y=a+b; x=y-(b<<1)
+ BF_OPT(z[6].im, z[2].im, z[2].im, z[6].im); // y=a+b; x=y-(b<<1)
+ // Now save z[6].re and z[6].im, along with z[5].re and z[5].im
+ // for efficiency. Also save z[2].re and z[2].im.
+ // Now load z[0].re and z[0].im
+ // ...
+
+ BF_OPT(z[4].re, z[0].re, z[0].re, z[4].re); // y=a+b; x=y-(b<<1)
+ BF_OPT(z[4].im, z[0].im, z[0].im, z[4].im); // y=a+b; x=y-(b<<1)
+ // Finally save out z[4].re, z[4].im, z[0].re and z[0].im
+ // ...
+ */
+static inline void fft8( FFTComplex * z )
+{
+ fft4(z);
+ {
+ FFTSample temp;
+ fixed32 * m4 = (fixed32 *)(&(z[4].re));
+
+ asm volatile(
+ "add %[z_ptr], %[z_ptr], #16\n\t" /* point to &z[2].re */
+ /* read in z[4].re thru z[7].im */
+ "ldmia %[z4_ptr]!, {r1,r2,r3,r4,r5,r6,r7,r8}\n\t"
+ /* (now points one word past &z[7].im) */
+ "add r1,r1,r3\n\t"
+ "sub r3,r1,r3,lsl #1\n\t"
+ "add r2,r2,r4\n\t"
+ "sub r4,r2,r4,lsl #1\n\t"
+ "add %[temp],r5,r7\n\t"
+ "sub r7,r5,r7\n\t"
+ "add r5,r6,r8\n\t"
+ "sub r8,r6,r8\n\t"
+
+ "stmdb %[z4_ptr]!, {r7,r8}\n\t" /* write z[7].re,z[7].im straight away */
+ /* Note, registers r7 & r8 now free */
+
+ "sub r6,%[temp],r1\n\t"
+ "add r1,%[temp],r1\n\t"
+ "add r2,r2,r5\n\t"
+ "sub r5,r2,r5,lsl #1\n\t"
+
+ "ldmia %[z_ptr],{r7,r8}\n\t" /* load z[2].re and z[2].im */
+ "add r7,r7,r5\n\t"
+ "sub r5,r7,r5,lsl #1\n\t"
+ "add r8,r8,r6\n\t"
+ "sub r6,r8,r6,lsl #1\n\t"
+
+ /* write out z[5].re, z[5].im, z[6].re, z[6].im in one go*/
+ "stmdb %[z4_ptr]!, {r3,r4,r5,r6}\n\t"
+ "stmia %[z_ptr],{r7,r8}\n\t" /* write out z[2].re, z[2].im */
+ "sub %[z_ptr],%[z_ptr], #16\n\t" /* point z_ptr back to &z[0].re */
+ "ldmia %[z_ptr],{r7,r8}\n\t" /* load r[0].re, r[0].im */
+
+ "add r7,r7,r1\n\t"
+ "sub r1,r7,r1,lsl #1\n\t"
+ "add r8,r8,r2\n\t"
+ "sub r2,r8,r2,lsl #1\n\t"
+
+ "stmia %[z_ptr],{r7,r8}\n\t" /* write out z[0].re, z[0].im */
+ "stmdb %[z4_ptr], {r1,r2}\n\t" /* write out z[4].re, z[4].im */
+ : [z4_ptr] "+r" (m4), [z_ptr] "+r" (z), [temp] "=r" (temp)
+ :
+ : "r1","r2","r3","r4","r5","r6","r7","r8","memory"
+ );
+ }
+
+ z++;
+ TRANSFORM_EQUAL(z,2);
+}
+
+
+#endif // CPU_ARM
+
diff --git a/apps/codecs/lib/fft.h b/apps/codecs/lib/fft.h
new file mode 100644
index 0000000000..302a3b3996
--- /dev/null
+++ b/apps/codecs/lib/fft.h
@@ -0,0 +1,64 @@
+/*
+ * WMA compatible decoder
+ * Copyright (c) 2002 The FFmpeg Project.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef CODECLIB_FFT_H_INCLUDED
+#define CODECLIB_FFT_H_INCLUDED
+
+#include <inttypes.h>
+typedef int32_t fixed32;
+typedef int64_t fixed64;
+
+#define FFT_FIXED
+
+#ifdef FFT_FIXED
+typedef fixed32 FFTSample;
+#else /* FFT_FIXED */
+typedef float FFTSample;
+#endif /* FFT_FIXED */
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext {
+ int nbits;
+ int inverse;
+ uint16_t *revtab;
+ int mdct_size; /* size of MDCT (i.e. number of input data * 2) */
+ int mdct_bits; /* log2 of the MDCT size: mdct_size = 2^mdct_bits */
+ /* pre/post rotation tables */
+ FFTSample *tcos;
+ FFTSample *tsin;
+ void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
+ void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
+ void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
+ int split_radix;
+ int permutation;
+#define FF_MDCT_PERM_NONE 0
+#define FF_MDCT_PERM_INTERLEAVE 1
+} FFTContext;
+
+// internal api (fft<->mdct)
+//int fft_calc_unscaled(FFTContext *s, FFTComplex *z);
+//void ff_fft_permute_c(FFTContext *s, FFTComplex *z); // internal only?
+void ff_fft_calc_c(int nbits, FFTComplex *z);
+
+#endif // CODECLIB_FFT_H_INCLUDED
+
diff --git a/apps/codecs/lib/mdct.c b/apps/codecs/lib/mdct.c
new file mode 100644
index 0000000000..03baa4db4a
--- /dev/null
+++ b/apps/codecs/lib/mdct.c
@@ -0,0 +1,414 @@
+/*
+ * Fixed Point IMDCT
+ * Copyright (c) 2002 The FFmpeg Project.
+ * Copyright (c) 2010 Dave Hooper, Mohamed Tarek, Michael Giacomelli
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "codeclib.h"
+#include "mdct.h"
+#include "asm_arm.h"
+#include "asm_mcf5249.h"
+#include "codeclib_misc.h"
+#include "mdct_lookup.h"
+
+/**
+ * Compute the middle half of the inverse MDCT of size N = 2^nbits
+ * thus excluding the parts that can be derived by symmetry
+ * @param output N/2 samples
+ * @param input N/2 samples
+ *
+ * NOTE - CANNOT CURRENTLY OPERATE IN PLACE (input and output must
+ * not overlap or intersect at all)
+ */
+void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input)
+{
+ int n8, n4, n2, n, j;
+ const fixed32 *in1, *in2;
+
+ n = 1 << nbits;
+
+ n2 = n >> 1;
+ n4 = n >> 2;
+ n8 = n >> 3;
+
+ FFTComplex *z = (FFTComplex *)output;
+
+ /* pre rotation */
+ in1 = input;
+ in2 = input + n2 - 1;
+
+ /* revtab comes from the fft; the revtab table is sized for an fft of
+ N=4096 = 2^12. The fft here is size N/4, i.e. nbits-2 bits, so our
+ shift needs to be (12-(nbits-2)) = 14-nbits */
+ const int revtab_shift = (14 - nbits);
+
+ /* bitreverse reorder the input and rotate; result here is in OUTPUT ... */
+ /* (note that with the current split-radix fft, the bitreverse ordering is
+ complicated enough that this reordering cannot easily be done in-place) */
+ /* Using the following pdf, you can see that it is possible to rearrange
+ the 'classic' pre/post rotate with an alternative one that enables
+ us to use fewer distinct twiddle factors.
+ http://www.eurasip.org/Proceedings/Eusipco/Eusipco2006/papers/1568980508.pdf
+
+ For prerotation, the factors are just sin,cos(2PI*i/N)
+ For postrotation, the factors are sin,cos(2PI*(i+1/4)/N)
+
+ Therefore, prerotation can immediately reuse the same twiddles as fft
+ (for postrotation it's still a bit complex, so this is still using
+ an mdct-local set of twiddles to do that part)
+ */
+ const int32_t *T = sincos_lookup0;
+ const int step = 2<<(12-nbits);
+ const uint16_t * p_revtab=revtab;
+ {
+ const uint16_t * const p_revtab_end = p_revtab + n8;
+ while(LIKELY(p_revtab < p_revtab_end))
+ {
+ j = (*p_revtab)>>revtab_shift;
+ XNPROD31(*in2, *in1, T[1], T[0], &z[j].re, &z[j].im );
+ T += step;
+ in1 += 2;
+ in2 -= 2;
+ p_revtab++;
+ j = (*p_revtab)>>revtab_shift;
+ XNPROD31(*in2, *in1, T[1], T[0], &z[j].re, &z[j].im );
+ T += step;
+ in1 += 2;
+ in2 -= 2;
+ p_revtab++;
+ }
+ }
+ {
+ const uint16_t * const p_revtab_end = p_revtab + n8;
+ while(LIKELY(p_revtab < p_revtab_end))
+ {
+ j = (*p_revtab)>>revtab_shift;
+ XNPROD31(*in2, *in1, T[0], T[1], &z[j].re, &z[j].im);
+ T -= step;
+ in1 += 2;
+ in2 -= 2;
+ p_revtab++;
+ j = (*p_revtab)>>revtab_shift;
+ XNPROD31(*in2, *in1, T[0], T[1], &z[j].re, &z[j].im);
+ T -= step;
+ in1 += 2;
+ in2 -= 2;
+ p_revtab++;
+ }
+ }
+
+
+ /* ... and so fft runs in OUTPUT buffer */
+ ff_fft_calc_c(nbits-2, z);
+
+ /* post rotation + reordering. now keeps the result within the OUTPUT buffer */
+ switch( nbits )
+ {
+ default:
+ {
+ fixed32 * z1 = (fixed32 *)(&z[0]);
+ fixed32 * z2 = (fixed32 *)(&z[n4-1]);
+ int magic_step = step>>2;
+ int newstep;
+ if(n<=1024)
+ {
+ T = sincos_lookup0 + magic_step;
+ newstep = step>>1;
+ }
+ else
+ {
+ T = sincos_lookup1;
+ newstep = 2;
+ }
+
+ while(z1<z2)
+ {
+ fixed32 r0,i0,r1,i1;
+ XNPROD31_R(z1[1], z1[0], T[0], T[1], r0, i1 ); T+=newstep;
+ XNPROD31_R(z2[1], z2[0], T[1], T[0], r1, i0 ); T+=newstep;
+ z1[0] = r0;
+ z1[1] = i0;
+ z2[0] = r1;
+ z2[1] = i1;
+ z1+=2;
+ z2-=2;
+ }
+
+ break;
+ }
+
+ case 12: /* n=4096 */
+ {
+ /* linear interpolation (50:50) between sincos_lookup0 and sincos_lookup1 */
+ const int32_t * V = sincos_lookup1;
+ T = sincos_lookup0;
+ int32_t t0,t1,v0,v1;
+ fixed32 * z1 = (fixed32 *)(&z[0]);
+ fixed32 * z2 = (fixed32 *)(&z[n4-1]);
+
+ t0 = T[0]>>1; t1=T[1]>>1;
+
+ while(z1<z2)
+ {
+ fixed32 r0,i0,r1,i1;
+ t0 += (v0 = (V[0]>>1));
+ t1 += (v1 = (V[1]>>1));
+ XNPROD31_R(z1[1], z1[0], t0, t1, r0, i1 );
+ T+=2;
+ v0 += (t0 = (T[0]>>1));
+ v1 += (t1 = (T[1]>>1));
+ XNPROD31_R(z2[1], z2[0], v1, v0, r1, i0 );
+ z1[0] = r0;
+ z1[1] = i0;
+ z2[0] = r1;
+ z2[1] = i1;
+ z1+=2;
+ z2-=2;
+ V+=2;
+ }
+
+ break;
+ }
+
+ case 13: /* n = 8192 */
+ {
+ /* weighted linear interpolation between sincos_lookup0 and sincos_lookup1
+ specifically: 25:75 for first twiddle and 75:25 for second twiddle */
+ const int32_t * V = sincos_lookup1;
+ T = sincos_lookup0;
+ int32_t t0,t1,v0,v1,q0,q1;
+ fixed32 * z1 = (fixed32 *)(&z[0]);
+ fixed32 * z2 = (fixed32 *)(&z[n4-1]);
+
+ t0 = T[0]; t1=T[1];
+
+ while(z1<z2)
+ {
+ fixed32 r0,i0,r1,i1;
+ v0 = V[0]; v1 = V[1];
+ t0 += (q0 = (v0-t0)>>1);
+ t1 += (q1 = (v1-t1)>>1);
+ XNPROD31_R(z1[1], z1[0], t0, t1, r0, i1 );
+ t0 = v0-q0;
+ t1 = v1-q1;
+ XNPROD31_R(z2[1], z2[0], t1, t0, r1, i0 );
+ z1[0] = r0;
+ z1[1] = i0;
+ z2[0] = r1;
+ z2[1] = i1;
+ z1+=2;
+ z2-=2;
+ T+=2;
+
+ t0 = T[0]; t1 = T[1];
+ v0 += (q0 = (t0-v0)>>1);
+ v1 += (q1 = (t1-v1)>>1);
+ XNPROD31_R(z1[1], z1[0], v0, v1, r0, i1 );
+ v0 = t0-q0;
+ v1 = t1-q1;
+ XNPROD31_R(z2[1], z2[0], v1, v0, r1, i0 );
+ z1[0] = r0;
+ z1[1] = i0;
+ z2[0] = r1;
+ z2[1] = i1;
+ z1+=2;
+ z2-=2;
+ V+=2;
+ }
+
+ break;
+ }
+ }
+}
+
+/**
+ * Compute inverse MDCT of size N = 2^nbits
+ * @param output N samples
+ * @param input N/2 samples
+ * "In-place" processing can be achieved provided that:
+ * [0 .. N/2-1 | N/2 .. N-1 ]
+ * <----input---->
+ * <-----------output----------->
+ *
+ */
+void ff_imdct_calc(unsigned int nbits, fixed32 *output, const fixed32 *input)
+{
+ const int n = (1<<nbits);
+ const int n2 = (n>>1);
+ const int n4 = (n>>2);
+
+ ff_imdct_half(nbits,output+n2,input);
+
+ /* reflect the half imdct into the full N samples */
+ /* TODO: this could easily be optimised more! */
+ fixed32 * in_r, * in_r2, * out_r, * out_r2;
+
+ out_r = output;
+ out_r2 = output+n2-8;
+ in_r = output+n2+n4-8;
+ while(out_r<out_r2)
+ {
+ out_r[0] = -(out_r2[7] = in_r[7]);
+ out_r[1] = -(out_r2[6] = in_r[6]);
+ out_r[2] = -(out_r2[5] = in_r[5]);
+ out_r[3] = -(out_r2[4] = in_r[4]);
+ out_r[4] = -(out_r2[3] = in_r[3]);
+ out_r[5] = -(out_r2[2] = in_r[2]);
+ out_r[6] = -(out_r2[1] = in_r[1]);
+ out_r[7] = -(out_r2[0] = in_r[0]);
+ in_r -= 8;
+ out_r += 8;
+ out_r2 -= 8;
+ }
+
+ in_r = output + n2+n4;
+ in_r2 = output + n-4;
+ out_r = output + n2;
+ out_r2 = output + n2 + n4 - 4;
+ while(in_r<in_r2)
+ {
+ register fixed32 t0,t1,t2,t3;
+ register fixed32 s0,s1,s2,s3;
+
+ //simultaneously do the following things:
+ // 1. copy range from [n2+n4 .. n-1] to range[n2 .. n2+n4-1]
+ // 2. reflect range from [n2+n4 .. n-1] inplace
+ //
+ // [ | ]
+ // ^a -> <- ^b ^c -> <- ^d
+ //
+ // #1: copy from ^c to ^a
+ // #2: copy from ^d to ^b
+ // #3: swap ^c and ^d in place
+ //
+ // #1 pt1 : load 4 words from ^c.
+ t0=in_r[0]; t1=in_r[1]; t2=in_r[2]; t3=in_r[3];
+ // #1 pt2 : write to ^a
+ out_r[0]=t0;out_r[1]=t1;out_r[2]=t2;out_r[3]=t3;
+ // #2 pt1 : load 4 words from ^d
+ s0=in_r2[0];s1=in_r2[1];s2=in_r2[2];s3=in_r2[3];
+ // #2 pt2 : write to ^b
+ out_r2[0]=s0;out_r2[1]=s1;out_r2[2]=s2;out_r2[3]=s3;
+ // #3 pt1 : write words from #2 to ^c
+ in_r[0]=s3;in_r[1]=s2;in_r[2]=s1;in_r[3]=s0;
+ // #3 pt2 : write words from #1 to ^d
+ in_r2[0]=t3;in_r2[1]=t2;in_r2[2]=t1;in_r2[3]=t0;
+
+ in_r += 4;
+ in_r2 -= 4;
+ out_r += 4;
+ out_r2 -= 4;
+ }
+}
+
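[Usage sketch, not part of the patch: inverse-transforming one N = 512 block. Per the layout note above, the input may even alias the first N/2 slots of the output buffer; separate buffers are shown here for clarity:]

    #define NBITS 9
    #define N (1 << NBITS)        /* 512 */
    fixed32 coeffs[N/2];          /* N/2 spectral coefficients, Q31 */
    fixed32 pcm[N];               /* N time-domain samples */
    /* ... fill coeffs[] ... */
    ff_imdct_calc(NBITS, pcm, coeffs);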
+static const long cordic_circular_gain = 0xb2458939; /* 0.607252929 */
+
+/* Table of values of atan(2^-i) in 0.32 format fractions of pi where pi = 0xffffffff / 2 */
+static const unsigned long atan_table[] = {
+ 0x1fffffff, /* +0.785398163 (or pi/4) */
+ 0x12e4051d, /* +0.463647609 */
+ 0x09fb385b, /* +0.244978663 */
+ 0x051111d4, /* +0.124354995 */
+ 0x028b0d43, /* +0.062418810 */
+ 0x0145d7e1, /* +0.031239833 */
+ 0x00a2f61e, /* +0.015623729 */
+ 0x00517c55, /* +0.007812341 */
+ 0x0028be53, /* +0.003906230 */
+ 0x00145f2e, /* +0.001953123 */
+ 0x000a2f98, /* +0.000976562 */
+ 0x000517cc, /* +0.000488281 */
+ 0x00028be6, /* +0.000244141 */
+ 0x000145f3, /* +0.000122070 */
+ 0x0000a2f9, /* +0.000061035 */
+ 0x0000517c, /* +0.000030518 */
+ 0x000028be, /* +0.000015259 */
+ 0x0000145f, /* +0.000007629 */
+ 0x00000a2f, /* +0.000003815 */
+ 0x00000517, /* +0.000001907 */
+ 0x0000028b, /* +0.000000954 */
+ 0x00000145, /* +0.000000477 */
+ 0x000000a2, /* +0.000000238 */
+ 0x00000051, /* +0.000000119 */
+ 0x00000028, /* +0.000000060 */
+ 0x00000014, /* +0.000000030 */
+ 0x0000000a, /* +0.000000015 */
+ 0x00000005, /* +0.000000007 */
+ 0x00000002, /* +0.000000004 */
+ 0x00000001, /* +0.000000002 */
+ 0x00000000, /* +0.000000001 */
+ 0x00000000, /* +0.000000000 */
+};
+
+/**
+ * Implements sin and cos using CORDIC rotation.
+ *
+ * @param phase has range from 0 to 0xffffffff, representing 0 and
+ * 2*pi respectively.
+ * @param cos return address for cos
+ * @return sin of phase, value is a signed value from LONG_MIN to LONG_MAX,
+ * representing -1 and 1 respectively.
+ *
+ * Gives at least 24 bits precision (last 2-8 bits or so are probably off)
+ */
+
+long fsincos(unsigned long phase, fixed32 *cos)
+{
+ int32_t x, x1, y, y1;
+ unsigned long z, z1;
+ int i;
+
+ /* Setup initial vector */
+ x = cordic_circular_gain;
+ y = 0;
+ z = phase;
+
+ /* The phase has to be somewhere between 0..pi for this to work right */
+ if (z < 0xffffffff / 4) {
+ /* z in first quadrant, z += pi/2 to correct */
+ x = -x;
+ z += 0xffffffff / 4;
+ } else if (z < 3 * (0xffffffff / 4)) {
+ /* z in second or third quadrant, z -= pi/2 to correct */
+ z -= 0xffffffff / 4;
+ } else {
+ /* z in fourth quadrant, z -= 3pi/2 to correct */
+ x = -x;
+ z -= 3 * (0xffffffff / 4);
+ }
+
+ /* Each iteration adds roughly 1-bit of extra precision */
+ for (i = 0; i < 31; i++) {
+ x1 = x >> i;
+ y1 = y >> i;
+ z1 = atan_table[i];
+
+ /* Decide which direction to rotate the vector. Pivot point is pi/2 */
+ if (z >= 0xffffffff / 4) {
+ x -= y1;
+ y += x1;
+ z -= z1;
+ } else {
+ x += y1;
+ y -= x1;
+ z += z1;
+ }
+ }
+
+ if (cos)
+ *cos = x;
+
+ return y;
+}
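[Usage sketch, not part of the patch: phase maps 0..0xffffffff onto 0..2*pi and the result is s0.31, so sin and cos of pi/4 both come out near 1/sqrt(2):]

    fixed32 c;
    fixed32 s = fsincos(0x20000000, &c);  /* phase = 2*pi * 1/8 = pi/4 */
    /* s ~= c ~= 0x5A82799A  (0.7071 in s0.31, cf. cPI2_8) */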
diff --git a/apps/codecs/lib/mdct.h b/apps/codecs/lib/mdct.h
new file mode 100644
index 0000000000..d13da0c54d
--- /dev/null
+++ b/apps/codecs/lib/mdct.h
@@ -0,0 +1,141 @@
+/*
+ * WMA compatible decoder
+ * Copyright (c) 2002 The FFmpeg Project.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef CODECLIB_MDCT_H_INCLUDED
+#define CODECLIB_MDCT_H_INCLUDED
+
+//#include "types.h"
+#include "fft.h"
+
+void ff_imdct_calc(unsigned int nbits, fixed32 *output, const fixed32 *input);
+void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input);
+
+#ifdef CPU_ARM
+
+/* s0.31 multiply: returns (x*y) >> 31 */
+#define fixmul32b(x, y) \
+ ({ int32_t __hi; \
+ uint32_t __lo; \
+ int32_t __result; \
+ asm ("smull %0, %1, %3, %4\n\t" \
+ "mov %2, %1, lsl #1" \
+ : "=&r" (__lo), "=&r" (__hi), "=r" (__result) \
+ : "%r" (x), "r" (y) \
+ : "cc" ); \
+ __result; \
+ })
+
+#elif defined(CPU_COLDFIRE)
+
+static inline int32_t fixmul32b(int32_t x, int32_t y)
+{
+ asm (
+ "mac.l %[x], %[y], %%acc0 \n" /* multiply */
+ "movclr.l %%acc0, %[x] \n" /* get higher half */
+ : [x] "+d" (x)
+ : [y] "d" (y)
+ );
+ return x;
+}
+
+#else
+
+static inline fixed32 fixmul32b(fixed32 x, fixed32 y)
+{
+ fixed64 temp;
+
+ temp = x;
+ temp *= y;
+
+ temp >>= 31; /* Q31 * Q31 -> Q31 */
+
+ return (fixed32)temp;
+}
+#endif
+
+
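[On every port fixmul32b is the same s0.31 multiply, (x*y) >> 31. A quick check, sketch only:]

    fixed32 half    = 0x40000000;             /* 0.5 in s0.31    */
    fixed32 quarter = fixmul32b(half, half);  /* 0x20000000 = 0.25 */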
+#ifdef CPU_ARM
+static inline
+void CMUL(fixed32 *x, fixed32 *y,
+ fixed32 a, fixed32 b,
+ fixed32 t, fixed32 v)
+{
+ /* This version loses one bit of precision. Could be solved at the cost
+ * of 2 extra cycles if it becomes an issue. */
+ int x1, y1, l;
+ asm(
+ "smull %[l], %[y1], %[b], %[t] \n"
+ "smlal %[l], %[y1], %[a], %[v] \n"
+ "rsb %[b], %[b], #0 \n"
+ "smull %[l], %[x1], %[a], %[t] \n"
+ "smlal %[l], %[x1], %[b], %[v] \n"
+ : [l] "=&r" (l), [x1]"=&r" (x1), [y1]"=&r" (y1), [b] "+r" (b)
+ : [a] "r" (a), [t] "r" (t), [v] "r" (v)
+ : "cc"
+ );
+ *x = x1 << 1;
+ *y = y1 << 1;
+}
+#elif defined CPU_COLDFIRE
+static inline
+void CMUL(fixed32 *x, fixed32 *y,
+ fixed32 a, fixed32 b,
+ fixed32 t, fixed32 v)
+{
+ asm volatile ("mac.l %[a], %[t], %%acc0;"
+ "msac.l %[b], %[v], %%acc0;"
+ "mac.l %[b], %[t], %%acc1;"
+ "mac.l %[a], %[v], %%acc1;"
+ "movclr.l %%acc0, %[a];"
+ "move.l %[a], (%[x]);"
+ "movclr.l %%acc1, %[a];"
+ "move.l %[a], (%[y]);"
+ : [a] "+&r" (a)
+ : [x] "a" (x), [y] "a" (y),
+ [b] "r" (b), [t] "r" (t), [v] "r" (v)
+ : "cc", "memory");
+}
+#else
+static inline
+void CMUL(fixed32 *pre,
+ fixed32 *pim,
+ fixed32 are,
+ fixed32 aim,
+ fixed32 bre,
+ fixed32 bim)
+{
+ //int64_t x,y;
+ fixed32 _aref = are;
+ fixed32 _aimf = aim;
+ fixed32 _bref = bre;
+ fixed32 _bimf = bim;
+ fixed32 _r1 = fixmul32b(_bref, _aref);
+ fixed32 _r2 = fixmul32b(_bimf, _aimf);
+ fixed32 _r3 = fixmul32b(_bref, _aimf);
+ fixed32 _r4 = fixmul32b(_bimf, _aref);
+ *pre = _r1 - _r2;
+ *pim = _r3 + _r4;
+
+}
+#endif
+
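[All three CMUL variants compute the complex product (a + ib)(t + iv), i.e. *x = a*t - b*v and *y = b*t + a*v, in s0.31. Sketch, not part of the patch (the ARM variant loses one low bit, hence the approximation):]

    fixed32 re, im;
    /* (0.5 + 0i) * (cos(pi/4) + i*sin(pi/4)) */
    CMUL(&re, &im, 0x40000000, 0, 0x5A82799A, 0x5A82799A);
    /* re ~= 0x2D413CCD (0.3536), im ~= 0x2D413CCD (0.3536) */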
+/* Inverse gain of circular cordic rotation in s0.31 format. */
+long fsincos(unsigned long phase, fixed32 *cos);
+
+#endif // CODECLIB_MDCT_H_INCLUDED
diff --git a/apps/codecs/lib/mdct_lookup.c b/apps/codecs/lib/mdct_lookup.c
index 989bb36d82..a8ca748206 100644
--- a/apps/codecs/lib/mdct_lookup.c
+++ b/apps/codecs/lib/mdct_lookup.c
@@ -549,3 +549,324 @@ const int32_t sincos_lookup1[1024] ICONST_ATTR = {
0x5a4d1960, 0x5ab7ba6c, 0x5a70b258, 0x5a943d5e,
};
+/* split-radix bit-reverse table for FFTs of size up to 4096 (1<<12) */
+
+const uint16_t revtab[1<<12] = {
+0, 3072, 1536, 2816, 768, 3840, 1408, 2432, 384, 3456, 1920, 2752, 704,
+3776, 1216, 2240, 192, 3264, 1728, 3008, 960, 4032, 1376, 2400, 352, 3424,
+1888, 2656, 608, 3680, 1120, 2144, 96, 3168, 1632, 2912, 864, 3936, 1504,
+2528, 480, 3552, 2016, 2736, 688, 3760, 1200, 2224, 176, 3248, 1712, 2992,
+944, 4016, 1328, 2352, 304, 3376, 1840, 2608, 560, 3632, 1072, 2096, 48,
+3120, 1584, 2864, 816, 3888, 1456, 2480, 432, 3504, 1968, 2800, 752, 3824,
+1264, 2288, 240, 3312, 1776, 3056, 1008, 4080, 1368, 2392, 344, 3416, 1880,
+2648, 600, 3672, 1112, 2136, 88, 3160, 1624, 2904, 856, 3928, 1496, 2520,
+472, 3544, 2008, 2712, 664, 3736, 1176, 2200, 152, 3224, 1688, 2968, 920,
+3992, 1304, 2328, 280, 3352, 1816, 2584, 536, 3608, 1048, 2072, 24, 3096,
+1560, 2840, 792, 3864, 1432, 2456, 408, 3480, 1944, 2776, 728, 3800, 1240,
+2264, 216, 3288, 1752, 3032, 984, 4056, 1400, 2424, 376, 3448, 1912, 2680,
+632, 3704, 1144, 2168, 120, 3192, 1656, 2936, 888, 3960, 1528, 2552, 504,
+3576, 2040, 2732, 684, 3756, 1196, 2220, 172, 3244, 1708, 2988, 940, 4012,
+1324, 2348, 300, 3372, 1836, 2604, 556, 3628, 1068, 2092, 44, 3116, 1580,
+2860, 812, 3884, 1452, 2476, 428, 3500, 1964, 2796, 748, 3820, 1260, 2284,
+236, 3308, 1772, 3052, 1004, 4076, 1356, 2380, 332, 3404, 1868, 2636, 588,
+3660, 1100, 2124, 76, 3148, 1612, 2892, 844, 3916, 1484, 2508, 460, 3532,
+1996, 2700, 652, 3724, 1164, 2188, 140, 3212, 1676, 2956, 908, 3980, 1292,
+2316, 268, 3340, 1804, 2572, 524, 3596, 1036, 2060, 12, 3084, 1548, 2828,
+780, 3852, 1420, 2444, 396, 3468, 1932, 2764, 716, 3788, 1228, 2252, 204,
+3276, 1740, 3020, 972, 4044, 1388, 2412, 364, 3436, 1900, 2668, 620, 3692,
+1132, 2156, 108, 3180, 1644, 2924, 876, 3948, 1516, 2540, 492, 3564, 2028,
+2748, 700, 3772, 1212, 2236, 188, 3260, 1724, 3004, 956, 4028, 1340, 2364,
+316, 3388, 1852, 2620, 572, 3644, 1084, 2108, 60, 3132, 1596, 2876, 828,
+3900, 1468, 2492, 444, 3516, 1980, 2812, 764, 3836, 1276, 2300, 252, 3324,
+1788, 3068, 1020, 4092, 1366, 2390, 342, 3414, 1878, 2646, 598, 3670, 1110,
+2134, 86, 3158, 1622, 2902, 854, 3926, 1494, 2518, 470, 3542, 2006, 2710,
+662, 3734, 1174, 2198, 150, 3222, 1686, 2966, 918, 3990, 1302, 2326, 278,
+3350, 1814, 2582, 534, 3606, 1046, 2070, 22, 3094, 1558, 2838, 790, 3862,
+1430, 2454, 406, 3478, 1942, 2774, 726, 3798, 1238, 2262, 214, 3286, 1750,
+3030, 982, 4054, 1398, 2422, 374, 3446, 1910, 2678, 630, 3702, 1142, 2166,
+118, 3190, 1654, 2934, 886, 3958, 1526, 2550, 502, 3574, 2038, 2726, 678,
+3750, 1190, 2214, 166, 3238, 1702, 2982, 934, 4006, 1318, 2342, 294, 3366,
+1830, 2598, 550, 3622, 1062, 2086, 38, 3110, 1574, 2854, 806, 3878, 1446,
+2470, 422, 3494, 1958, 2790, 742, 3814, 1254, 2278, 230, 3302, 1766, 3046,
+998, 4070, 1350, 2374, 326, 3398, 1862, 2630, 582, 3654, 1094, 2118, 70,
+3142, 1606, 2886, 838, 3910, 1478, 2502, 454, 3526, 1990, 2694, 646, 3718,
+1158, 2182, 134, 3206, 1670, 2950, 902, 3974, 1286, 2310, 262, 3334, 1798,
+2566, 518, 3590, 1030, 2054, 6, 3078, 1542, 2822, 774, 3846, 1414, 2438,
+390, 3462, 1926, 2758, 710, 3782, 1222, 2246, 198, 3270, 1734, 3014, 966,
+4038, 1382, 2406, 358, 3430, 1894, 2662, 614, 3686, 1126, 2150, 102, 3174,
+1638, 2918, 870, 3942, 1510, 2534, 486, 3558, 2022, 2742, 694, 3766, 1206,
+2230, 182, 3254, 1718, 2998, 950, 4022, 1334, 2358, 310, 3382, 1846, 2614,
+566, 3638, 1078, 2102, 54, 3126, 1590, 2870, 822, 3894, 1462, 2486, 438,
+3510, 1974, 2806, 758, 3830, 1270, 2294, 246, 3318, 1782, 3062, 1014, 4086,
+1374, 2398, 350, 3422, 1886, 2654, 606, 3678, 1118, 2142, 94, 3166, 1630,
+2910, 862, 3934, 1502, 2526, 478, 3550, 2014, 2718, 670, 3742, 1182, 2206,
+158, 3230, 1694, 2974, 926, 3998, 1310, 2334, 286, 3358, 1822, 2590, 542,
+3614, 1054, 2078, 30, 3102, 1566, 2846, 798, 3870, 1438, 2462, 414, 3486,
+1950, 2782, 734, 3806, 1246, 2270, 222, 3294, 1758, 3038, 990, 4062, 1406,
+2430, 382, 3454, 1918, 2686, 638, 3710, 1150, 2174, 126, 3198, 1662, 2942,
+894, 3966, 1534, 2558, 510, 3582, 2046, 2731, 683, 3755, 1195, 2219, 171,
+3243, 1707, 2987, 939, 4011, 1323, 2347, 299, 3371, 1835, 2603, 555, 3627,
+1067, 2091, 43, 3115, 1579, 2859, 811, 3883, 1451, 2475, 427, 3499, 1963,
+2795, 747, 3819, 1259, 2283, 235, 3307, 1771, 3051, 1003, 4075, 1355, 2379,
+331, 3403, 1867, 2635, 587, 3659, 1099, 2123, 75, 3147, 1611, 2891, 843,
+3915, 1483, 2507, 459, 3531, 1995, 2699, 651, 3723, 1163, 2187, 139, 3211,
+1675, 2955, 907, 3979, 1291, 2315, 267, 3339, 1803, 2571, 523, 3595, 1035,
+2059, 11, 3083, 1547, 2827, 779, 3851, 1419, 2443, 395, 3467, 1931, 2763,
+715, 3787, 1227, 2251, 203, 3275, 1739, 3019, 971, 4043, 1387, 2411, 363,
+3435, 1899, 2667, 619, 3691, 1131, 2155, 107, 3179, 1643, 2923, 875, 3947,
+1515, 2539, 491, 3563, 2027, 2747, 699, 3771, 1211, 2235, 187, 3259, 1723,
+3003, 955, 4027, 1339, 2363, 315, 3387, 1851, 2619, 571, 3643, 1083, 2107,
+59, 3131, 1595, 2875, 827, 3899, 1467, 2491, 443, 3515, 1979, 2811, 763,
+3835, 1275, 2299, 251, 3323, 1787, 3067, 1019, 4091, 1363, 2387, 339, 3411,
+1875, 2643, 595, 3667, 1107, 2131, 83, 3155, 1619, 2899, 851, 3923, 1491,
+2515, 467, 3539, 2003, 2707, 659, 3731, 1171, 2195, 147, 3219, 1683, 2963,
+915, 3987, 1299, 2323, 275, 3347, 1811, 2579, 531, 3603, 1043, 2067, 19,
+3091, 1555, 2835, 787, 3859, 1427, 2451, 403, 3475, 1939, 2771, 723, 3795,
+1235, 2259, 211, 3283, 1747, 3027, 979, 4051, 1395, 2419, 371, 3443, 1907,
+2675, 627, 3699, 1139, 2163, 115, 3187, 1651, 2931, 883, 3955, 1523, 2547,
+499, 3571, 2035, 2723, 675, 3747, 1187, 2211, 163, 3235, 1699, 2979, 931,
+4003, 1315, 2339, 291, 3363, 1827, 2595, 547, 3619, 1059, 2083, 35, 3107,
+1571, 2851, 803, 3875, 1443, 2467, 419, 3491, 1955, 2787, 739, 3811, 1251,
+2275, 227, 3299, 1763, 3043, 995, 4067, 1347, 2371, 323, 3395, 1859, 2627,
+579, 3651, 1091, 2115, 67, 3139, 1603, 2883, 835, 3907, 1475, 2499, 451,
+3523, 1987, 2691, 643, 3715, 1155, 2179, 131, 3203, 1667, 2947, 899, 3971,
+1283, 2307, 259, 3331, 1795, 2563, 515, 3587, 1027, 2051, 3, 3075, 1539,
+2819, 771, 3843, 1411, 2435, 387, 3459, 1923, 2755, 707, 3779, 1219, 2243,
+195, 3267, 1731, 3011, 963, 4035, 1379, 2403, 355, 3427, 1891, 2659, 611,
+3683, 1123, 2147, 99, 3171, 1635, 2915, 867, 3939, 1507, 2531, 483, 3555,
+2019, 2739, 691, 3763, 1203, 2227, 179, 3251, 1715, 2995, 947, 4019, 1331,
+2355, 307, 3379, 1843, 2611, 563, 3635, 1075, 2099, 51, 3123, 1587, 2867,
+819, 3891, 1459, 2483, 435, 3507, 1971, 2803, 755, 3827, 1267, 2291, 243,
+3315, 1779, 3059, 1011, 4083, 1371, 2395, 347, 3419, 1883, 2651, 603, 3675,
+1115, 2139, 91, 3163, 1627, 2907, 859, 3931, 1499, 2523, 475, 3547, 2011,
+2715, 667, 3739, 1179, 2203, 155, 3227, 1691, 2971, 923, 3995, 1307, 2331,
+283, 3355, 1819, 2587, 539, 3611, 1051, 2075, 27, 3099, 1563, 2843, 795,
+3867, 1435, 2459, 411, 3483, 1947, 2779, 731, 3803, 1243, 2267, 219, 3291,
+1755, 3035, 987, 4059, 1403, 2427, 379, 3451, 1915, 2683, 635, 3707, 1147,
+2171, 123, 3195, 1659, 2939, 891, 3963, 1531, 2555, 507, 3579, 2043, 2735,
+687, 3759, 1199, 2223, 175, 3247, 1711, 2991, 943, 4015, 1327, 2351, 303,
+3375, 1839, 2607, 559, 3631, 1071, 2095, 47, 3119, 1583, 2863, 815, 3887,
+1455, 2479, 431, 3503, 1967, 2799, 751, 3823, 1263, 2287, 239, 3311, 1775,
+3055, 1007, 4079, 1359, 2383, 335, 3407, 1871, 2639, 591, 3663, 1103, 2127,
+79, 3151, 1615, 2895, 847, 3919, 1487, 2511, 463, 3535, 1999, 2703, 655,
+3727, 1167, 2191, 143, 3215, 1679, 2959, 911, 3983, 1295, 2319, 271, 3343,
+1807, 2575, 527, 3599, 1039, 2063, 15, 3087, 1551, 2831, 783, 3855, 1423,
+2447, 399, 3471, 1935, 2767, 719, 3791, 1231, 2255, 207, 3279, 1743, 3023,
+975, 4047, 1391, 2415, 367, 3439, 1903, 2671, 623, 3695, 1135, 2159, 111,
+3183, 1647, 2927, 879, 3951, 1519, 2543, 495, 3567, 2031, 2751, 703, 3775,
+1215, 2239, 191, 3263, 1727, 3007, 959, 4031, 1343, 2367, 319, 3391, 1855,
+2623, 575, 3647, 1087, 2111, 63, 3135, 1599, 2879, 831, 3903, 1471, 2495,
+447, 3519, 1983, 2815, 767, 3839, 1279, 2303, 255, 3327, 1791, 3071, 1023,
+4095, 1365, 2389, 341, 3413, 1877, 2645, 597, 3669, 1109, 2133, 85, 3157,
+1621, 2901, 853, 3925, 1493, 2517, 469, 3541, 2005, 2709, 661, 3733, 1173,
+2197, 149, 3221, 1685, 2965, 917, 3989, 1301, 2325, 277, 3349, 1813, 2581,
+533, 3605, 1045, 2069, 21, 3093, 1557, 2837, 789, 3861, 1429, 2453, 405,
+3477, 1941, 2773, 725, 3797, 1237, 2261, 213, 3285, 1749, 3029, 981, 4053,
+1397, 2421, 373, 3445, 1909, 2677, 629, 3701, 1141, 2165, 117, 3189, 1653,
+2933, 885, 3957, 1525, 2549, 501, 3573, 2037, 2725, 677, 3749, 1189, 2213,
+165, 3237, 1701, 2981, 933, 4005, 1317, 2341, 293, 3365, 1829, 2597, 549,
+3621, 1061, 2085, 37, 3109, 1573, 2853, 805, 3877, 1445, 2469, 421, 3493,
+1957, 2789, 741, 3813, 1253, 2277, 229, 3301, 1765, 3045, 997, 4069, 1349,
+2373, 325, 3397, 1861, 2629, 581, 3653, 1093, 2117, 69, 3141, 1605, 2885,
+837, 3909, 1477, 2501, 453, 3525, 1989, 2693, 645, 3717, 1157, 2181, 133,
+3205, 1669, 2949, 901, 3973, 1285, 2309, 261, 3333, 1797, 2565, 517, 3589,
+1029, 2053, 5, 3077, 1541, 2821, 773, 3845, 1413, 2437, 389, 3461, 1925,
+2757, 709, 3781, 1221, 2245, 197, 3269, 1733, 3013, 965, 4037, 1381, 2405,
+357, 3429, 1893, 2661, 613, 3685, 1125, 2149, 101, 3173, 1637, 2917, 869,
+3941, 1509, 2533, 485, 3557, 2021, 2741, 693, 3765, 1205, 2229, 181, 3253,
+1717, 2997, 949, 4021, 1333, 2357, 309, 3381, 1845, 2613, 565, 3637, 1077,
+2101, 53, 3125, 1589, 2869, 821, 3893, 1461, 2485, 437, 3509, 1973, 2805,
+757, 3829, 1269, 2293, 245, 3317, 1781, 3061, 1013, 4085, 1373, 2397, 349,
+3421, 1885, 2653, 605, 3677, 1117, 2141, 93, 3165, 1629, 2909, 861, 3933,
+1501, 2525, 477, 3549, 2013, 2717, 669, 3741, 1181, 2205, 157, 3229, 1693,
+2973, 925, 3997, 1309, 2333, 285, 3357, 1821, 2589, 541, 3613, 1053, 2077,
+29, 3101, 1565, 2845, 797, 3869, 1437, 2461, 413, 3485, 1949, 2781, 733,
+3805, 1245, 2269, 221, 3293, 1757, 3037, 989, 4061, 1405, 2429, 381, 3453,
+1917, 2685, 637, 3709, 1149, 2173, 125, 3197, 1661, 2941, 893, 3965, 1533,
+2557, 509, 3581, 2045, 2729, 681, 3753, 1193, 2217, 169, 3241, 1705, 2985,
+937, 4009, 1321, 2345, 297, 3369, 1833, 2601, 553, 3625, 1065, 2089, 41,
+3113, 1577, 2857, 809, 3881, 1449, 2473, 425, 3497, 1961, 2793, 745, 3817,
+1257, 2281, 233, 3305, 1769, 3049, 1001, 4073, 1353, 2377, 329, 3401, 1865,
+2633, 585, 3657, 1097, 2121, 73, 3145, 1609, 2889, 841, 3913, 1481, 2505,
+457, 3529, 1993, 2697, 649, 3721, 1161, 2185, 137, 3209, 1673, 2953, 905,
+3977, 1289, 2313, 265, 3337, 1801, 2569, 521, 3593, 1033, 2057, 9, 3081,
+1545, 2825, 777, 3849, 1417, 2441, 393, 3465, 1929, 2761, 713, 3785, 1225,
+2249, 201, 3273, 1737, 3017, 969, 4041, 1385, 2409, 361, 3433, 1897, 2665,
+617, 3689, 1129, 2153, 105, 3177, 1641, 2921, 873, 3945, 1513, 2537, 489,
+3561, 2025, 2745, 697, 3769, 1209, 2233, 185, 3257, 1721, 3001, 953, 4025,
+1337, 2361, 313, 3385, 1849, 2617, 569, 3641, 1081, 2105, 57, 3129, 1593,
+2873, 825, 3897, 1465, 2489, 441, 3513, 1977, 2809, 761, 3833, 1273, 2297,
+249, 3321, 1785, 3065, 1017, 4089, 1361, 2385, 337, 3409, 1873, 2641, 593,
+3665, 1105, 2129, 81, 3153, 1617, 2897, 849, 3921, 1489, 2513, 465, 3537,
+2001, 2705, 657, 3729, 1169, 2193, 145, 3217, 1681, 2961, 913, 3985, 1297,
+2321, 273, 3345, 1809, 2577, 529, 3601, 1041, 2065, 17, 3089, 1553, 2833,
+785, 3857, 1425, 2449, 401, 3473, 1937, 2769, 721, 3793, 1233, 2257, 209,
+3281, 1745, 3025, 977, 4049, 1393, 2417, 369, 3441, 1905, 2673, 625, 3697,
+1137, 2161, 113, 3185, 1649, 2929, 881, 3953, 1521, 2545, 497, 3569, 2033,
+2721, 673, 3745, 1185, 2209, 161, 3233, 1697, 2977, 929, 4001, 1313, 2337,
+289, 3361, 1825, 2593, 545, 3617, 1057, 2081, 33, 3105, 1569, 2849, 801,
+3873, 1441, 2465, 417, 3489, 1953, 2785, 737, 3809, 1249, 2273, 225, 3297,
+1761, 3041, 993, 4065, 1345, 2369, 321, 3393, 1857, 2625, 577, 3649, 1089,
+2113, 65, 3137, 1601, 2881, 833, 3905, 1473, 2497, 449, 3521, 1985, 2689,
+641, 3713, 1153, 2177, 129, 3201, 1665, 2945, 897, 3969, 1281, 2305, 257,
+3329, 1793, 2561, 513, 3585, 1025, 2049, 1, 3073, 1537, 2817, 769, 3841,
+1409, 2433, 385, 3457, 1921, 2753, 705, 3777, 1217, 2241, 193, 3265, 1729,
+3009, 961, 4033, 1377, 2401, 353, 3425, 1889, 2657, 609, 3681, 1121, 2145,
+97, 3169, 1633, 2913, 865, 3937, 1505, 2529, 481, 3553, 2017, 2737, 689,
+3761, 1201, 2225, 177, 3249, 1713, 2993, 945, 4017, 1329, 2353, 305, 3377,
+1841, 2609, 561, 3633, 1073, 2097, 49, 3121, 1585, 2865, 817, 3889, 1457,
+2481, 433, 3505, 1969, 2801, 753, 3825, 1265, 2289, 241, 3313, 1777, 3057,
+1009, 4081, 1369, 2393, 345, 3417, 1881, 2649, 601, 3673, 1113, 2137, 89,
+3161, 1625, 2905, 857, 3929, 1497, 2521, 473, 3545, 2009, 2713, 665, 3737,
+1177, 2201, 153, 3225, 1689, 2969, 921, 3993, 1305, 2329, 281, 3353, 1817,
+2585, 537, 3609, 1049, 2073, 25, 3097, 1561, 2841, 793, 3865, 1433, 2457,
+409, 3481, 1945, 2777, 729, 3801, 1241, 2265, 217, 3289, 1753, 3033, 985,
+4057, 1401, 2425, 377, 3449, 1913, 2681, 633, 3705, 1145, 2169, 121, 3193,
+1657, 2937, 889, 3961, 1529, 2553, 505, 3577, 2041, 2733, 685, 3757, 1197,
+2221, 173, 3245, 1709, 2989, 941, 4013, 1325, 2349, 301, 3373, 1837, 2605,
+557, 3629, 1069, 2093, 45, 3117, 1581, 2861, 813, 3885, 1453, 2477, 429,
+3501, 1965, 2797, 749, 3821, 1261, 2285, 237, 3309, 1773, 3053, 1005, 4077,
+1357, 2381, 333, 3405, 1869, 2637, 589, 3661, 1101, 2125, 77, 3149, 1613,
+2893, 845, 3917, 1485, 2509, 461, 3533, 1997, 2701, 653, 3725, 1165, 2189,
+141, 3213, 1677, 2957, 909, 3981, 1293, 2317, 269, 3341, 1805, 2573, 525,
+3597, 1037, 2061, 13, 3085, 1549, 2829, 781, 3853, 1421, 2445, 397, 3469,
+1933, 2765, 717, 3789, 1229, 2253, 205, 3277, 1741, 3021, 973, 4045, 1389,
+2413, 365, 3437, 1901, 2669, 621, 3693, 1133, 2157, 109, 3181, 1645, 2925,
+877, 3949, 1517, 2541, 493, 3565, 2029, 2749, 701, 3773, 1213, 2237, 189,
+3261, 1725, 3005, 957, 4029, 1341, 2365, 317, 3389, 1853, 2621, 573, 3645,
+1085, 2109, 61, 3133, 1597, 2877, 829, 3901, 1469, 2493, 445, 3517, 1981,
+2813, 765, 3837, 1277, 2301, 253, 3325, 1789, 3069, 1021, 4093, 1367, 2391,
+343, 3415, 1879, 2647, 599, 3671, 1111, 2135, 87, 3159, 1623, 2903, 855,
+3927, 1495, 2519, 471, 3543, 2007, 2711, 663, 3735, 1175, 2199, 151, 3223,
+1687, 2967, 919, 3991, 1303, 2327, 279, 3351, 1815, 2583, 535, 3607, 1047,
+2071, 23, 3095, 1559, 2839, 791, 3863, 1431, 2455, 407, 3479, 1943, 2775,
+727, 3799, 1239, 2263, 215, 3287, 1751, 3031, 983, 4055, 1399, 2423, 375,
+3447, 1911, 2679, 631, 3703, 1143, 2167, 119, 3191, 1655, 2935, 887, 3959,
+1527, 2551, 503, 3575, 2039, 2727, 679, 3751, 1191, 2215, 167, 3239, 1703,
+2983, 935, 4007, 1319, 2343, 295, 3367, 1831, 2599, 551, 3623, 1063, 2087,
+39, 3111, 1575, 2855, 807, 3879, 1447, 2471, 423, 3495, 1959, 2791, 743,
+3815, 1255, 2279, 231, 3303, 1767, 3047, 999, 4071, 1351, 2375, 327, 3399,
+1863, 2631, 583, 3655, 1095, 2119, 71, 3143, 1607, 2887, 839, 3911, 1479,
+2503, 455, 3527, 1991, 2695, 647, 3719, 1159, 2183, 135, 3207, 1671, 2951,
+903, 3975, 1287, 2311, 263, 3335, 1799, 2567, 519, 3591, 1031, 2055, 7,
+3079, 1543, 2823, 775, 3847, 1415, 2439, 391, 3463, 1927, 2759, 711, 3783,
+1223, 2247, 199, 3271, 1735, 3015, 967, 4039, 1383, 2407, 359, 3431, 1895,
+2663, 615, 3687, 1127, 2151, 103, 3175, 1639, 2919, 871, 3943, 1511, 2535,
+487, 3559, 2023, 2743, 695, 3767, 1207, 2231, 183, 3255, 1719, 2999, 951,
+4023, 1335, 2359, 311, 3383, 1847, 2615, 567, 3639, 1079, 2103, 55, 3127,
+1591, 2871, 823, 3895, 1463, 2487, 439, 3511, 1975, 2807, 759, 3831, 1271,
+2295, 247, 3319, 1783, 3063, 1015, 4087, 1375, 2399, 351, 3423, 1887, 2655,
+607, 3679, 1119, 2143, 95, 3167, 1631, 2911, 863, 3935, 1503, 2527, 479,
+3551, 2015, 2719, 671, 3743, 1183, 2207, 159, 3231, 1695, 2975, 927, 3999,
+1311, 2335, 287, 3359, 1823, 2591, 543, 3615, 1055, 2079, 31, 3103, 1567,
+2847, 799, 3871, 1439, 2463, 415, 3487, 1951, 2783, 735, 3807, 1247, 2271,
+223, 3295, 1759, 3039, 991, 4063, 1407, 2431, 383, 3455, 1919, 2687, 639,
+3711, 1151, 2175, 127, 3199, 1663, 2943, 895, 3967, 1535, 2559, 511, 3583,
+2047, 2730, 682, 3754, 1194, 2218, 170, 3242, 1706, 2986, 938, 4010, 1322,
+2346, 298, 3370, 1834, 2602, 554, 3626, 1066, 2090, 42, 3114, 1578, 2858,
+810, 3882, 1450, 2474, 426, 3498, 1962, 2794, 746, 3818, 1258, 2282, 234,
+3306, 1770, 3050, 1002, 4074, 1354, 2378, 330, 3402, 1866, 2634, 586, 3658,
+1098, 2122, 74, 3146, 1610, 2890, 842, 3914, 1482, 2506, 458, 3530, 1994,
+2698, 650, 3722, 1162, 2186, 138, 3210, 1674, 2954, 906, 3978, 1290, 2314,
+266, 3338, 1802, 2570, 522, 3594, 1034, 2058, 10, 3082, 1546, 2826, 778,
+3850, 1418, 2442, 394, 3466, 1930, 2762, 714, 3786, 1226, 2250, 202, 3274,
+1738, 3018, 970, 4042, 1386, 2410, 362, 3434, 1898, 2666, 618, 3690, 1130,
+2154, 106, 3178, 1642, 2922, 874, 3946, 1514, 2538, 490, 3562, 2026, 2746,
+698, 3770, 1210, 2234, 186, 3258, 1722, 3002, 954, 4026, 1338, 2362, 314,
+3386, 1850, 2618, 570, 3642, 1082, 2106, 58, 3130, 1594, 2874, 826, 3898,
+1466, 2490, 442, 3514, 1978, 2810, 762, 3834, 1274, 2298, 250, 3322, 1786,
+3066, 1018, 4090, 1362, 2386, 338, 3410, 1874, 2642, 594, 3666, 1106, 2130,
+82, 3154, 1618, 2898, 850, 3922, 1490, 2514, 466, 3538, 2002, 2706, 658,
+3730, 1170, 2194, 146, 3218, 1682, 2962, 914, 3986, 1298, 2322, 274, 3346,
+1810, 2578, 530, 3602, 1042, 2066, 18, 3090, 1554, 2834, 786, 3858, 1426,
+2450, 402, 3474, 1938, 2770, 722, 3794, 1234, 2258, 210, 3282, 1746, 3026,
+978, 4050, 1394, 2418, 370, 3442, 1906, 2674, 626, 3698, 1138, 2162, 114,
+3186, 1650, 2930, 882, 3954, 1522, 2546, 498, 3570, 2034, 2722, 674, 3746,
+1186, 2210, 162, 3234, 1698, 2978, 930, 4002, 1314, 2338, 290, 3362, 1826,
+2594, 546, 3618, 1058, 2082, 34, 3106, 1570, 2850, 802, 3874, 1442, 2466,
+418, 3490, 1954, 2786, 738, 3810, 1250, 2274, 226, 3298, 1762, 3042, 994,
+4066, 1346, 2370, 322, 3394, 1858, 2626, 578, 3650, 1090, 2114, 66, 3138,
+1602, 2882, 834, 3906, 1474, 2498, 450, 3522, 1986, 2690, 642, 3714, 1154,
+2178, 130, 3202, 1666, 2946, 898, 3970, 1282, 2306, 258, 3330, 1794, 2562,
+514, 3586, 1026, 2050, 2, 3074, 1538, 2818, 770, 3842, 1410, 2434, 386,
+3458, 1922, 2754, 706, 3778, 1218, 2242, 194, 3266, 1730, 3010, 962, 4034,
+1378, 2402, 354, 3426, 1890, 2658, 610, 3682, 1122, 2146, 98, 3170, 1634,
+2914, 866, 3938, 1506, 2530, 482, 3554, 2018, 2738, 690, 3762, 1202, 2226,
+178, 3250, 1714, 2994, 946, 4018, 1330, 2354, 306, 3378, 1842, 2610, 562,
+3634, 1074, 2098, 50, 3122, 1586, 2866, 818, 3890, 1458, 2482, 434, 3506,
+1970, 2802, 754, 3826, 1266, 2290, 242, 3314, 1778, 3058, 1010, 4082, 1370,
+2394, 346, 3418, 1882, 2650, 602, 3674, 1114, 2138, 90, 3162, 1626, 2906,
+858, 3930, 1498, 2522, 474, 3546, 2010, 2714, 666, 3738, 1178, 2202, 154,
+3226, 1690, 2970, 922, 3994, 1306, 2330, 282, 3354, 1818, 2586, 538, 3610,
+1050, 2074, 26, 3098, 1562, 2842, 794, 3866, 1434, 2458, 410, 3482, 1946,
+2778, 730, 3802, 1242, 2266, 218, 3290, 1754, 3034, 986, 4058, 1402, 2426,
+378, 3450, 1914, 2682, 634, 3706, 1146, 2170, 122, 3194, 1658, 2938, 890,
+3962, 1530, 2554, 506, 3578, 2042, 2734, 686, 3758, 1198, 2222, 174, 3246,
+1710, 2990, 942, 4014, 1326, 2350, 302, 3374, 1838, 2606, 558, 3630, 1070,
+2094, 46, 3118, 1582, 2862, 814, 3886, 1454, 2478, 430, 3502, 1966, 2798,
+750, 3822, 1262, 2286, 238, 3310, 1774, 3054, 1006, 4078, 1358, 2382, 334,
+3406, 1870, 2638, 590, 3662, 1102, 2126, 78, 3150, 1614, 2894, 846, 3918,
+1486, 2510, 462, 3534, 1998, 2702, 654, 3726, 1166, 2190, 142, 3214, 1678,
+2958, 910, 3982, 1294, 2318, 270, 3342, 1806, 2574, 526, 3598, 1038, 2062,
+14, 3086, 1550, 2830, 782, 3854, 1422, 2446, 398, 3470, 1934, 2766, 718,
+3790, 1230, 2254, 206, 3278, 1742, 3022, 974, 4046, 1390, 2414, 366, 3438,
+1902, 2670, 622, 3694, 1134, 2158, 110, 3182, 1646, 2926, 878, 3950, 1518,
+2542, 494, 3566, 2030, 2750, 702, 3774, 1214, 2238, 190, 3262, 1726, 3006,
+958, 4030, 1342, 2366, 318, 3390, 1854, 2622, 574, 3646, 1086, 2110, 62,
+3134, 1598, 2878, 830, 3902, 1470, 2494, 446, 3518, 1982, 2814, 766, 3838,
+1278, 2302, 254, 3326, 1790, 3070, 1022, 4094, 1364, 2388, 340, 3412, 1876,
+2644, 596, 3668, 1108, 2132, 84, 3156, 1620, 2900, 852, 3924, 1492, 2516,
+468, 3540, 2004, 2708, 660, 3732, 1172, 2196, 148, 3220, 1684, 2964, 916,
+3988, 1300, 2324, 276, 3348, 1812, 2580, 532, 3604, 1044, 2068, 20, 3092,
+1556, 2836, 788, 3860, 1428, 2452, 404, 3476, 1940, 2772, 724, 3796, 1236,
+2260, 212, 3284, 1748, 3028, 980, 4052, 1396, 2420, 372, 3444, 1908, 2676,
+628, 3700, 1140, 2164, 116, 3188, 1652, 2932, 884, 3956, 1524, 2548, 500,
+3572, 2036, 2724, 676, 3748, 1188, 2212, 164, 3236, 1700, 2980, 932, 4004,
+1316, 2340, 292, 3364, 1828, 2596, 548, 3620, 1060, 2084, 36, 3108, 1572,
+2852, 804, 3876, 1444, 2468, 420, 3492, 1956, 2788, 740, 3812, 1252, 2276,
+228, 3300, 1764, 3044, 996, 4068, 1348, 2372, 324, 3396, 1860, 2628, 580,
+3652, 1092, 2116, 68, 3140, 1604, 2884, 836, 3908, 1476, 2500, 452, 3524,
+1988, 2692, 644, 3716, 1156, 2180, 132, 3204, 1668, 2948, 900, 3972, 1284,
+2308, 260, 3332, 1796, 2564, 516, 3588, 1028, 2052, 4, 3076, 1540, 2820,
+772, 3844, 1412, 2436, 388, 3460, 1924, 2756, 708, 3780, 1220, 2244, 196,
+3268, 1732, 3012, 964, 4036, 1380, 2404, 356, 3428, 1892, 2660, 612, 3684,
+1124, 2148, 100, 3172, 1636, 2916, 868, 3940, 1508, 2532, 484, 3556, 2020,
+2740, 692, 3764, 1204, 2228, 180, 3252, 1716, 2996, 948, 4020, 1332, 2356,
+308, 3380, 1844, 2612, 564, 3636, 1076, 2100, 52, 3124, 1588, 2868, 820,
+3892, 1460, 2484, 436, 3508, 1972, 2804, 756, 3828, 1268, 2292, 244, 3316,
+1780, 3060, 1012, 4084, 1372, 2396, 348, 3420, 1884, 2652, 604, 3676, 1116,
+2140, 92, 3164, 1628, 2908, 860, 3932, 1500, 2524, 476, 3548, 2012, 2716,
+668, 3740, 1180, 2204, 156, 3228, 1692, 2972, 924, 3996, 1308, 2332, 284,
+3356, 1820, 2588, 540, 3612, 1052, 2076, 28, 3100, 1564, 2844, 796, 3868,
+1436, 2460, 412, 3484, 1948, 2780, 732, 3804, 1244, 2268, 220, 3292, 1756,
+3036, 988, 4060, 1404, 2428, 380, 3452, 1916, 2684, 636, 3708, 1148, 2172,
+124, 3196, 1660, 2940, 892, 3964, 1532, 2556, 508, 3580, 2044, 2728, 680,
+3752, 1192, 2216, 168, 3240, 1704, 2984, 936, 4008, 1320, 2344, 296, 3368,
+1832, 2600, 552, 3624, 1064, 2088, 40, 3112, 1576, 2856, 808, 3880, 1448,
+2472, 424, 3496, 1960, 2792, 744, 3816, 1256, 2280, 232, 3304, 1768, 3048,
+1000, 4072, 1352, 2376, 328, 3400, 1864, 2632, 584, 3656, 1096, 2120, 72,
+3144, 1608, 2888, 840, 3912, 1480, 2504, 456, 3528, 1992, 2696, 648, 3720,
+1160, 2184, 136, 3208, 1672, 2952, 904, 3976, 1288, 2312, 264, 3336, 1800,
+2568, 520, 3592, 1032, 2056, 8, 3080, 1544, 2824, 776, 3848, 1416, 2440,
+392, 3464, 1928, 2760, 712, 3784, 1224, 2248, 200, 3272, 1736, 3016, 968,
+4040, 1384, 2408, 360, 3432, 1896, 2664, 616, 3688, 1128, 2152, 104, 3176,
+1640, 2920, 872, 3944, 1512, 2536, 488, 3560, 2024, 2744, 696, 3768, 1208,
+2232, 184, 3256, 1720, 3000, 952, 4024, 1336, 2360, 312, 3384, 1848, 2616,
+568, 3640, 1080, 2104, 56, 3128, 1592, 2872, 824, 3896, 1464, 2488, 440,
+3512, 1976, 2808, 760, 3832, 1272, 2296, 248, 3320, 1784, 3064, 1016, 4088,
+1360, 2384, 336, 3408, 1872, 2640, 592, 3664, 1104, 2128, 80, 3152, 1616,
+2896, 848, 3920, 1488, 2512, 464, 3536, 2000, 2704, 656, 3728, 1168, 2192,
+144, 3216, 1680, 2960, 912, 3984, 1296, 2320, 272, 3344, 1808, 2576, 528,
+3600, 1040, 2064, 16, 3088, 1552, 2832, 784, 3856, 1424, 2448, 400, 3472,
+1936, 2768, 720, 3792, 1232, 2256, 208, 3280, 1744, 3024, 976, 4048, 1392,
+2416, 368, 3440, 1904, 2672, 624, 3696, 1136, 2160, 112, 3184, 1648, 2928,
+880, 3952, 1520, 2544, 496, 3568, 2032, 2720, 672, 3744, 1184, 2208, 160,
+3232, 1696, 2976, 928, 4000, 1312, 2336, 288, 3360, 1824, 2592, 544, 3616,
+1056, 2080, 32, 3104, 1568, 2848, 800, 3872, 1440, 2464, 416, 3488, 1952,
+2784, 736, 3808, 1248, 2272, 224, 3296, 1760, 3040, 992, 4064, 1344, 2368,
+320, 3392, 1856, 2624, 576, 3648, 1088, 2112, 64, 3136, 1600, 2880, 832,
+3904, 1472, 2496, 448, 3520, 1984, 2688, 640, 3712, 1152, 2176, 128, 3200,
+1664, 2944, 896, 3968, 1280, 2304, 256, 3328, 1792, 2560, 512, 3584, 1024,
+2048};
+
+
diff --git a/apps/codecs/lib/mdct_lookup.h b/apps/codecs/lib/mdct_lookup.h
index 67e166b783..909b95ddbb 100644
--- a/apps/codecs/lib/mdct_lookup.h
+++ b/apps/codecs/lib/mdct_lookup.h
@@ -18,6 +18,7 @@
extern const int32_t sincos_lookup0[1026];
extern const int32_t sincos_lookup1[1024];
+extern const uint16_t revtab[1<<12];
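
[Editor's note] The 4096-entry table above (revtab) is the input permutation for the shared split-radix FFT in fft-ffmpeg.c; it is not a plain bit-reversal table. In the ffmpeg FFT this code derives from, the table is built as the inverse of a split-radix permutation, roughly as in the sketch below; the Rockbox port ships the precomputed result as the constant instead. The helper names follow upstream ffmpeg and are assumptions here, not part of this patch.

    #include <stdint.h>

    /* Split-radix index permutation, as in ffmpeg's fft.c (sketch). */
    static int split_radix_permutation(int i, int n, int inverse)
    {
        int m;
        if (n <= 2)
            return i & 1;
        m = n >> 1;
        if (!(i & m))
            return split_radix_permutation(i, m, inverse) * 2;
        m >>= 1;
        if (inverse == !(i & m))
            return split_radix_permutation(i, m, inverse) * 4 + 1;
        else
            return split_radix_permutation(i, m, inverse) * 4 - 1;
    }

    /* Fill revtab for an n = 1 << nbits transform (nbits == 12 above). */
    static void build_revtab(uint16_t *revtab, int nbits, int inverse)
    {
        int i, n = 1 << nbits;
        for (i = 0; i < n; i++)
            revtab[-split_radix_permutation(i, n, inverse) & (n - 1)] = i;
    }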
diff --git a/apps/codecs/liba52/a52_internal.h b/apps/codecs/liba52/a52_internal.h
index 0db16a8bcf..1e3b4a7edf 100644
--- a/apps/codecs/liba52/a52_internal.h
+++ b/apps/codecs/liba52/a52_internal.h
@@ -111,13 +111,13 @@ int a52_downmix_init (int input, int flags, level_t * level,
level_t clev, level_t slev);
int a52_downmix_coeff (level_t * coeff, int acmod, int output, level_t level,
level_t clev, level_t slev);
-void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
+void a52_downmix (sample_t * samples, int acmod, int output,
level_t clev, level_t slev);
void a52_upmix (sample_t * samples, int acmod, int output);
void a52_imdct_init (uint32_t mm_accel);
-void a52_imdct_256 (sample_t * data, sample_t * delay, sample_t bias);
-void a52_imdct_512 (sample_t * data, sample_t * delay, sample_t bias);
+void a52_imdct_256 (sample_t * data, sample_t * delay);
+void a52_imdct_512 (sample_t * data, sample_t * delay);
#define ROUND(x) ((int)((x) + ((x) > 0 ? 0.5 : -0.5)))
@@ -210,6 +210,6 @@ typedef int16_t quantizer_t;
#define MUL_C(a,b) MUL_L (a, LEVEL (b))
#define DIV(a,b) ((((int64_t)LEVEL (a)) << 26) / (b))
-#define BIAS(x) ((x) + (bias*0))
+#define BIAS(x) ((x))
#endif
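
[Editor's note] Dropping the bias argument is safe here: in this fixed-point build BIAS(x) already expanded to (x) + (bias*0), a no-op that only referenced the parameter to silence warnings. (Upstream float liba52 uses the bias to fold a DC offset into output conversion; fixed-point output does not need it.) The downmix, IMDCT and parse changes below are the mechanical removal of that argument.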
diff --git a/apps/codecs/liba52/downmix.c b/apps/codecs/liba52/downmix.c
index 7bbf3793f0..2e8567bceb 100644
--- a/apps/codecs/liba52/downmix.c
+++ b/apps/codecs/liba52/downmix.c
@@ -329,7 +329,7 @@ int a52_downmix_coeff (level_t * coeff, int acmod, int output, level_t level,
return -1; /* NOTREACHED */
}
-static void mix2to1 (sample_t * dest, sample_t * src, sample_t bias)
+static void mix2to1 (sample_t * dest, sample_t * src)
{
int i;
@@ -337,7 +337,7 @@ static void mix2to1 (sample_t * dest, sample_t * src, sample_t bias)
dest[i] += BIAS (src[i]);
}
-static void mix3to1 (sample_t * samples, sample_t bias)
+static void mix3to1 (sample_t * samples)
{
int i;
@@ -345,7 +345,7 @@ static void mix3to1 (sample_t * samples, sample_t bias)
samples[i] += BIAS (samples[i + 256] + samples[i + 512]);
}
-static void mix4to1 (sample_t * samples, sample_t bias)
+static void mix4to1 (sample_t * samples)
{
int i;
@@ -354,7 +354,7 @@ static void mix4to1 (sample_t * samples, sample_t bias)
samples[i + 768]);
}
-static void mix5to1 (sample_t * samples, sample_t bias)
+static void mix5to1 (sample_t * samples)
{
int i;
@@ -363,7 +363,7 @@ static void mix5to1 (sample_t * samples, sample_t bias)
samples[i + 768] + samples[i + 1024]);
}
-static void mix3to2 (sample_t * samples, sample_t bias)
+static void mix3to2 (sample_t * samples)
{
int i;
sample_t common;
@@ -375,7 +375,7 @@ static void mix3to2 (sample_t * samples, sample_t bias)
}
}
-static void mix21to2 (sample_t * left, sample_t * right, sample_t bias)
+static void mix21to2 (sample_t * left, sample_t * right)
{
int i;
sample_t common;
@@ -387,7 +387,7 @@ static void mix21to2 (sample_t * left, sample_t * right, sample_t bias)
}
}
-static void mix21toS (sample_t * samples, sample_t bias)
+static void mix21toS (sample_t * samples)
{
int i;
sample_t surround;
@@ -399,7 +399,7 @@ static void mix21toS (sample_t * samples, sample_t bias)
}
}
-static void mix31to2 (sample_t * samples, sample_t bias)
+static void mix31to2 (sample_t * samples)
{
int i;
sample_t common;
@@ -411,7 +411,7 @@ static void mix31to2 (sample_t * samples, sample_t bias)
}
}
-static void mix31toS (sample_t * samples, sample_t bias)
+static void mix31toS (sample_t * samples)
{
int i;
sample_t common, surround;
@@ -424,7 +424,7 @@ static void mix31toS (sample_t * samples, sample_t bias)
}
}
-static void mix22toS (sample_t * samples, sample_t bias)
+static void mix22toS (sample_t * samples)
{
int i;
sample_t surround;
@@ -436,7 +436,7 @@ static void mix22toS (sample_t * samples, sample_t bias)
}
}
-static void mix32to2 (sample_t * samples, sample_t bias)
+static void mix32to2 (sample_t * samples)
{
int i;
sample_t common;
@@ -448,7 +448,7 @@ static void mix32to2 (sample_t * samples, sample_t bias)
}
}
-static void mix32toS (sample_t * samples, sample_t bias)
+static void mix32toS (sample_t * samples)
{
int i;
sample_t common, surround;
@@ -461,7 +461,7 @@ static void mix32toS (sample_t * samples, sample_t bias)
}
}
-static void move2to1 (sample_t * src, sample_t * dest, sample_t bias)
+static void move2to1 (sample_t * src, sample_t * dest)
{
int i;
@@ -477,7 +477,7 @@ static void zero (sample_t * samples)
samples[i] = 0;
}
-void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
+void a52_downmix (sample_t * samples, int acmod, int output,
level_t clev, level_t slev)
{
/* avoid compiler warning */
@@ -492,7 +492,7 @@ void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
case CONVERT (A52_CHANNEL, A52_MONO):
case CONVERT (A52_STEREO, A52_MONO):
mix_2to1:
- mix2to1 (samples, samples + 256, bias);
+ mix2to1 (samples, samples + 256);
break;
case CONVERT (A52_2F1R, A52_MONO):
@@ -500,7 +500,7 @@ void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
goto mix_2to1;
case CONVERT (A52_3F, A52_MONO):
mix_3to1:
- mix3to1 (samples, bias);
+ mix3to1 (samples);
break;
case CONVERT (A52_3F1R, A52_MONO):
@@ -509,13 +509,13 @@ void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
case CONVERT (A52_2F2R, A52_MONO):
if (slev == 0)
goto mix_2to1;
- mix4to1 (samples, bias);
+ mix4to1 (samples);
break;
case CONVERT (A52_3F2R, A52_MONO):
if (slev == 0)
goto mix_3to1;
- mix5to1 (samples, bias);
+ mix5to1 (samples);
break;
case CONVERT (A52_MONO, A52_DOLBY):
@@ -525,79 +525,79 @@ void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
case CONVERT (A52_3F, A52_STEREO):
case CONVERT (A52_3F, A52_DOLBY):
mix_3to2:
- mix3to2 (samples, bias);
+ mix3to2 (samples);
break;
case CONVERT (A52_2F1R, A52_STEREO):
if (slev == 0)
break;
- mix21to2 (samples, samples + 256, bias);
+ mix21to2 (samples, samples + 256);
break;
case CONVERT (A52_2F1R, A52_DOLBY):
- mix21toS (samples, bias);
+ mix21toS (samples);
break;
case CONVERT (A52_3F1R, A52_STEREO):
if (slev == 0)
goto mix_3to2;
- mix31to2 (samples, bias);
+ mix31to2 (samples);
break;
case CONVERT (A52_3F1R, A52_DOLBY):
- mix31toS (samples, bias);
+ mix31toS (samples);
break;
case CONVERT (A52_2F2R, A52_STEREO):
if (slev == 0)
break;
- mix2to1 (samples, samples + 512, bias);
- mix2to1 (samples + 256, samples + 768, bias);
+ mix2to1 (samples, samples + 512);
+ mix2to1 (samples + 256, samples + 768);
break;
case CONVERT (A52_2F2R, A52_DOLBY):
- mix22toS (samples, bias);
+ mix22toS (samples);
break;
case CONVERT (A52_3F2R, A52_STEREO):
if (slev == 0)
goto mix_3to2;
- mix32to2 (samples, bias);
+ mix32to2 (samples);
break;
case CONVERT (A52_3F2R, A52_DOLBY):
- mix32toS (samples, bias);
+ mix32toS (samples);
break;
case CONVERT (A52_3F1R, A52_3F):
if (slev == 0)
break;
- mix21to2 (samples, samples + 512, bias);
+ mix21to2 (samples, samples + 512);
break;
case CONVERT (A52_3F2R, A52_3F):
if (slev == 0)
break;
- mix2to1 (samples, samples + 768, bias);
- mix2to1 (samples + 512, samples + 1024, bias);
+ mix2to1 (samples, samples + 768);
+ mix2to1 (samples + 512, samples + 1024);
break;
case CONVERT (A52_3F1R, A52_2F1R):
- mix3to2 (samples, bias);
+ mix3to2 (samples);
memcpy (samples + 512, samples + 768, 256 * sizeof (sample_t));
break;
case CONVERT (A52_2F2R, A52_2F1R):
- mix2to1 (samples + 512, samples + 768, bias);
+ mix2to1 (samples + 512, samples + 768);
break;
case CONVERT (A52_3F2R, A52_2F1R):
- mix3to2 (samples, bias);
- move2to1 (samples + 768, samples + 512, bias);
+ mix3to2 (samples);
+ move2to1 (samples + 768, samples + 512);
break;
case CONVERT (A52_3F2R, A52_3F1R):
- mix2to1 (samples + 768, samples + 1024, bias);
+ mix2to1 (samples + 768, samples + 1024);
break;
case CONVERT (A52_2F1R, A52_2F2R):
@@ -605,12 +605,12 @@ void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
break;
case CONVERT (A52_3F1R, A52_2F2R):
- mix3to2 (samples, bias);
+ mix3to2 (samples);
memcpy (samples + 512, samples + 768, 256 * sizeof (sample_t));
break;
case CONVERT (A52_3F2R, A52_2F2R):
- mix3to2 (samples, bias);
+ mix3to2 (samples);
memcpy (samples + 512, samples + 768, 256 * sizeof (sample_t));
memcpy (samples + 768, samples + 1024, 256 * sizeof (sample_t));
break;
diff --git a/apps/codecs/liba52/imdct.c b/apps/codecs/liba52/imdct.c
index d1035030fd..4483bd0667 100644
--- a/apps/codecs/liba52/imdct.c
+++ b/apps/codecs/liba52/imdct.c
@@ -72,6 +72,8 @@ static const uint8_t fftorder[] = {
//static sample_t a52_imdct_window[256];
#include "imdct_lookups.h"
+
+/*
static void (* ifft128) (complex_t * buf);
static void (* ifft64) (complex_t * buf);
@@ -109,7 +111,7 @@ static inline void ifft4 (complex_t * buf)
buf[3].real = tmp5 - tmp7;
buf[3].imag = tmp6 - tmp8;
}
-
+*/
/* basic radix-2 ifft butterfly */
#define BUTTERFLY_0(t0,t1,W0,W1,d0,d1) do { \
@@ -161,7 +163,7 @@ static inline void ifft4 (complex_t * buf)
} while (0)
/* split-radix ifft butterfly, specialized for wr=wi */
-
+/*
#define BUTTERFLY_HALF(a0,a1,a2,a3,w) do { \
tmp5 = MUL (a2.real + a2.imag, w); \
tmp6 = MUL (a2.imag - a2.real, w); \
@@ -255,93 +257,96 @@ static void ifft128_c (complex_t * buf)
ifft32 (buf + 96);
ifft_pass (buf, roots128, 32);
}
-
-void a52_imdct_512 (sample_t * data, sample_t * delay, sample_t bias)
+*/
+void a52_imdct_512 (sample_t * data, sample_t * delay)
{
int i, k;
sample_t t_r, t_i, a_r, a_i, b_r, b_i, w_1, w_2;
const sample_t * window = a52_imdct_window;
- complex_t buf[128];
+ FFTComplex buf[128];
for (i = 0; i < 128; i++) {
- k = fftorder[i];
- t_r = pre1[i].real;
- t_i = pre1[i].imag;
- BUTTERFLY_0 (buf[i].real, buf[i].imag, t_r, t_i, data[k], data[255-k]);
+ k = fftorder[i];
+ t_r = pre1[i].real;
+ t_i = pre1[i].imag;
+ BUTTERFLY_0 (buf[i].re, buf[i].im, t_r, t_i, data[k], data[255-k]);
}
- ifft128 (buf);
+ //ifft128 (buf);
+    ff_fft_calc_c(7, buf);
/* Post IFFT complex multiply plus IFFT complex conjugate*/
/* Window and convert to real valued signal */
for (i = 0; i < 64; i++) {
- /* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
- t_r = post1[i].real;
- t_i = post1[i].imag;
- BUTTERFLY_0 (a_r, a_i, t_i, t_r, buf[i].imag, buf[i].real);
- BUTTERFLY_0 (b_r, b_i, t_r, t_i, buf[127-i].imag, buf[127-i].real);
-
- w_1 = window[2*i];
- w_2 = window[255-2*i];
- BUTTERFLY_B (data[255-2*i], data[2*i], w_2, w_1, a_r, delay[2*i]);
- delay[2*i] = a_i;
-
- w_1 = window[2*i+1];
- w_2 = window[254-2*i];
- BUTTERFLY_B (data[2*i+1], data[254-2*i], w_1, w_2, b_r, delay[2*i+1]);
- delay[2*i+1] = b_i;
+ /* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
+ t_r = post1[i].real;
+ t_i = post1[i].imag;
+ BUTTERFLY_0 (a_r, a_i, t_i, t_r, buf[i].im, buf[i].re);
+ BUTTERFLY_0 (b_r, b_i, t_r, t_i, buf[127-i].im, buf[127-i].re);
+
+ w_1 = window[2*i];
+ w_2 = window[255-2*i];
+ BUTTERFLY_B (data[255-2*i], data[2*i], w_2, w_1, a_r, delay[2*i]);
+ delay[2*i] = a_i;
+
+ w_1 = window[2*i+1];
+ w_2 = window[254-2*i];
+ BUTTERFLY_B (data[2*i+1], data[254-2*i], w_1, w_2, b_r, delay[2*i+1]);
+ delay[2*i+1] = b_i;
}
}
-void a52_imdct_256 (sample_t * data, sample_t * delay, sample_t bias)
+void a52_imdct_256 (sample_t * data, sample_t * delay)
{
int i, k;
sample_t t_r, t_i, a_r, a_i, b_r, b_i, c_r, c_i, d_r, d_i, w_1, w_2;
const sample_t * window = a52_imdct_window;
- complex_t buf1[64], buf2[64];
+ FFTComplex buf1[64], buf2[64];
/* Pre IFFT complex multiply plus IFFT cmplx conjugate */
for (i = 0; i < 64; i++) {
- k = fftorder[i];
- t_r = pre2[i].real;
- t_i = pre2[i].imag;
- BUTTERFLY_0 (buf1[i].real, buf1[i].imag, t_r, t_i, data[k], data[254-k]);
- BUTTERFLY_0 (buf2[i].real, buf2[i].imag, t_r, t_i, data[k+1], data[255-k]);
+ k = fftorder[i];
+ t_r = pre2[i].real;
+ t_i = pre2[i].imag;
+ BUTTERFLY_0 (buf1[i].re, buf1[i].im, t_r, t_i, data[k], data[254-k]);
+ BUTTERFLY_0 (buf2[i].re, buf2[i].im, t_r, t_i, data[k+1], data[255-k]);
}
- ifft64 (buf1);
- ifft64 (buf2);
+ //ifft64 (buf1);
+ //ifft64 (buf2);
+    ff_fft_calc_c(6, buf1);
+    ff_fft_calc_c(6, buf2);
/* Post IFFT complex multiply */
/* Window and convert to real valued signal */
for (i = 0; i < 32; i++) {
- /* y1[n] = z1[n] * (xcos2[n] + j * xs in2[n]) ; */
- t_r = post2[i].real;
- t_i = post2[i].imag;
- BUTTERFLY_0 (a_r, a_i, t_i, t_r, buf1[i].imag, buf1[i].real);
- BUTTERFLY_0 (b_r, b_i, t_r, t_i, buf1[63-i].imag, buf1[63-i].real);
- BUTTERFLY_0 (c_r, c_i, t_i, t_r, buf2[i].imag, buf2[i].real);
- BUTTERFLY_0 (d_r, d_i, t_r, t_i, buf2[63-i].imag, buf2[63-i].real);
-
- w_1 = window[2*i];
- w_2 = window[255-2*i];
- BUTTERFLY_B (data[255-2*i], data[2*i], w_2, w_1, a_r, delay[2*i]);
- delay[2*i] = c_i;
-
- w_1 = window[128+2*i];
- w_2 = window[127-2*i];
- BUTTERFLY_B (data[128+2*i], data[127-2*i], w_1, w_2, a_i, delay[127-2*i]);
- delay[127-2*i] = c_r;
-
- w_1 = window[2*i+1];
- w_2 = window[254-2*i];
- BUTTERFLY_B (data[254-2*i], data[2*i+1], w_2, w_1, b_i, delay[2*i+1]);
- delay[2*i+1] = d_r;
-
- w_1 = window[129+2*i];
- w_2 = window[126-2*i];
- BUTTERFLY_B (data[129+2*i], data[126-2*i], w_1, w_2, b_r, delay[126-2*i]);
- delay[126-2*i] = d_i;
+        /* y1[n] = z1[n] * (xcos2[n] + j * xsin2[n]) ; */
+ t_r = post2[i].real;
+ t_i = post2[i].imag;
+ BUTTERFLY_0 (a_r, a_i, t_i, t_r, buf1[i].im, buf1[i].re);
+ BUTTERFLY_0 (b_r, b_i, t_r, t_i, buf1[63-i].im, buf1[63-i].re);
+ BUTTERFLY_0 (c_r, c_i, t_i, t_r, buf2[i].im, buf2[i].re);
+ BUTTERFLY_0 (d_r, d_i, t_r, t_i, buf2[63-i].im, buf2[63-i].re);
+
+ w_1 = window[2*i];
+ w_2 = window[255-2*i];
+ BUTTERFLY_B (data[255-2*i], data[2*i], w_2, w_1, a_r, delay[2*i]);
+ delay[2*i] = c_i;
+
+ w_1 = window[128+2*i];
+ w_2 = window[127-2*i];
+ BUTTERFLY_B (data[128+2*i], data[127-2*i], w_1, w_2, a_i, delay[127-2*i]);
+ delay[127-2*i] = c_r;
+
+ w_1 = window[2*i+1];
+ w_2 = window[254-2*i];
+ BUTTERFLY_B (data[254-2*i], data[2*i+1], w_2, w_1, b_i, delay[2*i+1]);
+ delay[2*i+1] = d_r;
+
+ w_1 = window[129+2*i];
+ w_2 = window[126-2*i];
+ BUTTERFLY_B (data[129+2*i], data[126-2*i], w_1, w_2, b_r, delay[126-2*i]);
+ delay[126-2*i] = d_i;
}
}
@@ -361,6 +366,9 @@ static double besselI0 (double x)
void a52_imdct_init (uint32_t mm_accel)
{
(void)mm_accel;
+ //ff_fft_init(&s128, 7, 1);
+ //ff_fft_init(&s64, 6, 1);
+
/* int i, k;
double sum;
double local_imdct_window[256];*/
@@ -457,7 +465,7 @@ void a52_imdct_init (uint32_t mm_accel)
printf("static complex_t post2[32]={");
for (i=0;i<32;i++) { printf("{%d,%d}%s",post2[i].real,post2[i].imag,(i < 31 ? "," : "")); }
printf("};\n");
- */
+
#ifdef LIBA52_DJBFFT
if (mm_accel & MM_ACCEL_DJBFFT) {
@@ -474,4 +482,5 @@ void a52_imdct_init (uint32_t mm_accel)
ifft128 = ifft128_c;
ifft64 = ifft64_c;
}
+ */
}
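
[Editor's note] The old function-pointer dispatch (ifft128/ifft64, selected in a52_imdct_init) is commented out above and replaced with direct calls into the shared codec-lib FFT. Going by the calls in this diff, the new entry point takes log2 of the transform size plus an in-place buffer of FFTComplex {re, im}; a minimal usage sketch under that assumption:

    #include "fft.h"    /* presumably the new apps/codecs/lib FFT header */

    /* 128-point in-place complex IFFT, the a52_imdct_512 case above:
     * nbits == 7 because 1 << 7 == 128. */
    static void run_ifft128(FFTComplex buf[128])
    {
        ff_fft_calc_c(7, buf);
    }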
diff --git a/apps/codecs/liba52/parse.c b/apps/codecs/liba52/parse.c
index 2a065b4fc4..f2b0ce4f6d 100644
--- a/apps/codecs/liba52/parse.c
+++ b/apps/codecs/liba52/parse.c
@@ -881,7 +881,7 @@ int a52_block (a52_state_t * state)
state->dynrng, 0, 7);
for (i = 7; i < 256; i++)
(samples-256)[i] = 0;
- a52_imdct_512 (samples - 256, samples + 1536 - 256, state->bias);
+ a52_imdct_512 (samples - 256, samples + 1536 - 256);
} else {
/* just skip the LFE coefficients */
coeff_get (state, samples + 1280, &state->lfe_expbap, &quant,
@@ -910,11 +910,9 @@ int a52_block (a52_state_t * state)
if (coeff[i]) {
if (blksw[i])
- a52_imdct_256 (samples + 256 * i, samples + 1536 + 256 * i,
- bias);
+ a52_imdct_256 (samples + 256 * i, samples + 1536 + 256 * i);
else
- a52_imdct_512 (samples + 256 * i, samples + 1536 + 256 * i,
- bias);
+ a52_imdct_512 (samples + 256 * i, samples + 1536 + 256 * i);
} else {
int j;
@@ -923,28 +921,26 @@ int a52_block (a52_state_t * state)
}
}
- a52_downmix (samples, state->acmod, state->output, state->bias,
+ a52_downmix (samples, state->acmod, state->output,
state->clev, state->slev);
} else {
nfchans = nfchans_tbl[state->output & A52_CHANNEL_MASK];
- a52_downmix (samples, state->acmod, state->output, 0,
+ a52_downmix (samples, state->acmod, state->output,
state->clev, state->slev);
if (!state->downmixed) {
state->downmixed = 1;
- a52_downmix (samples + 1536, state->acmod, state->output, 0,
+ a52_downmix (samples + 1536, state->acmod, state->output,
state->clev, state->slev);
}
if (blksw[0])
for (i = 0; i < nfchans; i++)
- a52_imdct_256 (samples + 256 * i, samples + 1536 + 256 * i,
- state->bias);
+ a52_imdct_256 (samples + 256 * i, samples + 1536 + 256 * i);
else
for (i = 0; i < nfchans; i++)
- a52_imdct_512 (samples + 256 * i, samples + 1536 + 256 * i,
- state->bias);
+ a52_imdct_512 (samples + 256 * i, samples + 1536 + 256 * i);
}
return 0;
diff --git a/apps/codecs/libatrac/atrac3.c b/apps/codecs/libatrac/atrac3.c
index ad57ad6237..3555f74cfb 100644
--- a/apps/codecs/libatrac/atrac3.c
+++ b/apps/codecs/libatrac/atrac3.c
@@ -40,7 +40,6 @@
#include "atrac3data.h"
#include "atrac3data_fixed.h"
#include "fixp_math.h"
-#include "../lib/mdct2.h"
#define JOINT_STEREO 0x12
#define STEREO 0x2
@@ -260,7 +259,7 @@ static void iqmf (int32_t *inlo, int32_t *inhi, unsigned int nIn, int32_t *pOut,
static void IMLT(int32_t *pInput, int32_t *pOutput)
{
/* Apply the imdct. */
- mdct_backward(512, pInput, pOutput);
+ ff_imdct_calc(9, pOutput, pInput);
/* Windowing. */
atrac3_imdct_windowing(pOutput, window_lookup);
@@ -297,7 +296,7 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
}
-static void init_atrac3_transforms(void) {
+static void init_atrac3_transforms() {
int32_t s;
int i;
@@ -312,7 +311,7 @@ static void init_atrac3_transforms(void) {
qmf_window[i] = s;
qmf_window[47 - i] = s;
}
-}
+ }
/**
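
[Editor's note] This hunk shows the calling-convention change repeated across the codecs below: mdct_backward() took the full MDCT size with (input, output) ordering, while ff_imdct_calc() takes its log2 with (output, input) ordering. For ATRAC3 the sizes line up as:

    /* Old and new IMLT calls transform the same 512-sample block:
     *     mdct_backward(512, pInput, pOutput);   old: size, in, out
     *     ff_imdct_calc(9, pOutput, pInput);     new: log2(512) == 9, out, in
     */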
diff --git a/apps/codecs/libatrac/atrac3.h b/apps/codecs/libatrac/atrac3.h
index 1878efeb1b..d3fdc5056a 100644
--- a/apps/codecs/libatrac/atrac3.h
+++ b/apps/codecs/libatrac/atrac3.h
@@ -21,9 +21,7 @@
#include "ffmpeg_bitstream.h"
#include "../librm/rm.h"
-#ifdef ROCKBOX
#include "codeclib.h"
-#endif
#if (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024) || (CONFIG_CPU == MCF5250)
/* PP5022/24 and MCF5250 have larger IRAM */
diff --git a/apps/codecs/libcook/cook.c b/apps/codecs/libcook/cook.c
index 3212c57abb..8d9611c4d9 100644
--- a/apps/codecs/libcook/cook.c
+++ b/apps/codecs/libcook/cook.c
@@ -797,6 +797,7 @@ static void dump_cook_context(COOKContext *q)
/* Initialize variable relations */
q->numvector_size = (1 << q->log2_numvector_size);
+ q->mdct_nbits = av_log2(q->samples_per_channel)+1;
/* Generate tables */
if (init_cook_vlc_tables(q) != 0)
diff --git a/apps/codecs/libcook/cook.h b/apps/codecs/libcook/cook.h
index 4fb7b1c0db..0672553895 100644
--- a/apps/codecs/libcook/cook.h
+++ b/apps/codecs/libcook/cook.h
@@ -63,6 +63,7 @@ typedef struct cook {
int num_vectors;
int bits_per_subpacket;
int cookversion;
+    int mdct_nbits; /* log2 of full MDCT size; == av_log2(samples_per_channel)+1 (set in cook.c) */
/* states */
int random_state;
diff --git a/apps/codecs/libcook/cook_fixpoint.h b/apps/codecs/libcook/cook_fixpoint.h
index b17d99eeeb..30e5a3eee2 100644
--- a/apps/codecs/libcook/cook_fixpoint.h
+++ b/apps/codecs/libcook/cook_fixpoint.h
@@ -165,15 +165,14 @@ static void scalar_dequant_math(COOKContext *q, int index,
* @param mlt_tmp pointer to temporary storage space
*/
#include "../lib/mdct_lookup.h"
-#include "../lib/mdct2.h"
static inline void imlt_math(COOKContext *q, FIXP *in)
{
const int n = q->samples_per_channel;
const int step = 2 << (10 - av_log2(n));
int i = 0, j = 0;
-
- mdct_backward(2 * n, in, q->mono_mdct_output);
+
+ ff_imdct_calc(q->mdct_nbits, q->mono_mdct_output, in);
do {
FIXP tmp = q->mono_mdct_output[i];
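
[Editor's note] This also answers the question left in cook.h above: mdct_nbits is derivable from samples_per_channel. The old call was mdct_backward(2 * n, ...) with n = samples_per_channel, and cook.c sets mdct_nbits = av_log2(samples_per_channel) + 1, so for the power-of-two sizes Cook uses the two agree; a sketch of the check:

    #include <assert.h>

    /* For power-of-two n, av_log2(n) == log2(n), hence
     * 1 << (av_log2(n) + 1) == 2 * n, the old mdct_backward() size. */
    static void check_cook_mdct_size(int samples_per_channel)
    {
        int nbits = av_log2(samples_per_channel) + 1;
        assert((1 << nbits) == 2 * samples_per_channel);
    }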
diff --git a/apps/codecs/libtremor/block.c b/apps/codecs/libtremor/block.c
index fe736c8def..b4ca8f3f11 100644
--- a/apps/codecs/libtremor/block.c
+++ b/apps/codecs/libtremor/block.c
@@ -25,6 +25,7 @@
#include "window.h"
#include "registry.h"
#include "misc.h"
+//#include <codecs/lib/codeclib.h>
static int ilog(unsigned int v){
int ret=0;
@@ -239,6 +240,7 @@ static int _vds_init(vorbis_dsp_state *v,vorbis_info *vi){
b->mode[i]=_mapping_P[maptype]->look(v,ci->mode_param[i],
ci->map_param[mapnum]);
}
+
return(0);
}
diff --git a/apps/codecs/libtremor/codec_internal.h b/apps/codecs/libtremor/codec_internal.h
index 3ca7f54724..3cbd7cde89 100644
--- a/apps/codecs/libtremor/codec_internal.h
+++ b/apps/codecs/libtremor/codec_internal.h
@@ -60,7 +60,8 @@ typedef struct codec_setup_info {
/* Vorbis supports only short and long blocks, but allows the
encoder to choose the sizes */
- long blocksizes[2];
+ int blocksizes_nbits[2];
+ long blocksizes[2]; /* = 1<<nbits */
/* modes are the primary means of supporting on-the-fly different
blocksizes, different channel mappings (LR or M/A),
diff --git a/apps/codecs/libtremor/info.c b/apps/codecs/libtremor/info.c
index 4273f97dc1..afa9497cf0 100644
--- a/apps/codecs/libtremor/info.c
+++ b/apps/codecs/libtremor/info.c
@@ -120,8 +120,10 @@ static int _vorbis_unpack_info(vorbis_info *vi,oggpack_buffer *opb){
vi->bitrate_nominal=oggpack_read(opb,32);
vi->bitrate_lower=oggpack_read(opb,32);
- ci->blocksizes[0]=1<<oggpack_read(opb,4);
- ci->blocksizes[1]=1<<oggpack_read(opb,4);
+ ci->blocksizes_nbits[0]=oggpack_read(opb,4);
+ ci->blocksizes_nbits[1]=oggpack_read(opb,4);
+ ci->blocksizes[0]=1<<(ci->blocksizes_nbits[0]);
+ ci->blocksizes[1]=1<<(ci->blocksizes_nbits[1]);
if(vi->rate<1)goto err_out;
if(vi->channels<1)goto err_out;
diff --git a/apps/codecs/libtremor/ivorbiscodec.h b/apps/codecs/libtremor/ivorbiscodec.h
index c2836ad8a9..f17c57a86d 100644
--- a/apps/codecs/libtremor/ivorbiscodec.h
+++ b/apps/codecs/libtremor/ivorbiscodec.h
@@ -24,6 +24,7 @@ extern "C"
#endif /* __cplusplus */
#include "ogg.h"
+//#include <codecs/lib/codeclib.h>
typedef struct vorbis_info{
int version;
@@ -105,7 +106,6 @@ typedef struct vorbis_block{
long localalloc;
long totaluse;
struct alloc_chain *reap;
-
} vorbis_block;
/* vorbis_block is a single block of data to be processed as part of
diff --git a/apps/codecs/libtremor/mapping0.c b/apps/codecs/libtremor/mapping0.c
index ecee6db0c7..bd0e0322fe 100644
--- a/apps/codecs/libtremor/mapping0.c
+++ b/apps/codecs/libtremor/mapping0.c
@@ -27,8 +27,7 @@
#include "window.h"
#include "registry.h"
#include "misc.h"
-
-
+#include <codecs/lib/codeclib.h>
/* simplistic, wasteful way of doing this (unique lookup for each
mode/submapping); there should be a central repository for
@@ -291,7 +290,10 @@ static int mapping0_inverse(vorbis_block *vb,vorbis_look_mapping *l){
/* compute and apply spectral envelope */
look->floor_func[submap]->
inverse2(vb,look->floor_look[submap],floormemo[i],pcm);
- mdct_backward(n, (int32_t*) pcm, (int32_t*) pcm);
+
+ ff_imdct_calc(ci->blocksizes_nbits[vb->W],
+ (int32_t*)pcm,
+ (int32_t*)pcm);
/* window the data */
_vorbis_apply_window(pcm,b->window,ci->blocksizes,vb->lW,vb->W,vb->nW);
}
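
[Editor's note] Tremor now stores both each block size and its log2 (info.c above) so the IMDCT gets nbits directly instead of recomputing it from n. The mapping0 call runs in place over pcm, and the invariant kept by info.c makes it size-equivalent to the old call:

    /* For W in {0, 1}:
     *     ci->blocksizes[W] == 1 << ci->blocksizes_nbits[W]
     * so ff_imdct_calc(ci->blocksizes_nbits[vb->W], pcm, pcm)
     * covers the same n as the old mdct_backward(n, pcm, pcm). */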
diff --git a/apps/codecs/libtremor/synthesis.c b/apps/codecs/libtremor/synthesis.c
index a882a6d07a..464c777605 100644
--- a/apps/codecs/libtremor/synthesis.c
+++ b/apps/codecs/libtremor/synthesis.c
@@ -26,6 +26,7 @@
static ogg_int32_t *ipcm_vect[CHANNELS] IBSS_ATTR;
+int32_t staticbuffer[16384];
int vorbis_synthesis(vorbis_block *vb,ogg_packet *op,int decodep)
ICODE_ATTR_TREMOR_NOT_MDCT;
@@ -67,7 +68,8 @@ int vorbis_synthesis(vorbis_block *vb,ogg_packet *op,int decodep){
vb->sequence=op->packetno-3; /* first block is third packet */
vb->eofflag=op->e_o_s;
- if(decodep && vi->channels<=CHANNELS){
+ if(decodep && vi->channels<=CHANNELS)
+ {
vb->pcm = ipcm_vect;
/* set pcm end point */
@@ -81,6 +83,7 @@ int vorbis_synthesis(vorbis_block *vb,ogg_packet *op,int decodep){
by simply flipping pointers */
for(i=0; i<vi->channels; i++)
vb->pcm[i] = &vd->first_pcm[i*ci->blocksizes[1]];
+
}
vd->reset_pcmb = false;
diff --git a/apps/codecs/libwma/wmadec.h b/apps/codecs/libwma/wmadec.h
index a547ece157..4efaa9b8a2 100644
--- a/apps/codecs/libwma/wmadec.h
+++ b/apps/codecs/libwma/wmadec.h
@@ -23,8 +23,6 @@
#include "asf.h"
#include "bitstream.h" /* For GetBitContext */
#include "types.h"
-//#include "dsputil.h" /* For MDCTContext */
-
//#define TRACE
/* size of blocks */
diff --git a/apps/codecs/libwma/wmadeci.c b/apps/codecs/libwma/wmadeci.c
index ae1a93ecf2..6ff6a176ee 100644
--- a/apps/codecs/libwma/wmadeci.c
+++ b/apps/codecs/libwma/wmadeci.c
@@ -452,17 +452,6 @@ int wma_decode_init(WMADecodeContext* s, asf_waveformatex_t *wfx)
}
}
- /*Not using the ffmpeg IMDCT anymore*/
-
- /* mdct_init_global();
-
- for(i = 0; i < s->nb_block_sizes; ++i)
- {
- ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 1);
-
- }
- */
-
/* ffmpeg uses malloc to only allocate as many window sizes as needed.
* However, we're really only interested in the worst case memory usage.
* In the worst case you can have 5 window sizes, 128 doubling up 2048
@@ -1253,14 +1242,9 @@ static int wma_decode_block(WMADecodeContext *s, int32_t *scratch_buffer)
n4 = s->block_len >>1;
- /*faster IMDCT from Vorbis*/
- mdct_backward( (1 << (s->block_len_bits+1)), (int32_t*)(*(s->coefs))[ch], (int32_t*)scratch_buffer);
-
- /*slower but more easily understood IMDCT from FFMPEG*/
- //ff_imdct_calc(&s->mdct_ctx[bsize],
- // output,
- // (*(s->coefs))[ch]);
-
+ ff_imdct_calc( (s->frame_len_bits - bsize + 1),
+ (int32_t*)scratch_buffer,
+ (*(s->coefs))[ch]);
/* add in the frame */
index = (s->frame_len / 2) + s->block_pos - n4;
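
[Editor's note] The WMA size argument is the same arithmetic: with frame_len == 1 << frame_len_bits and block_len == frame_len >> bsize (bsize counting block-size halvings, as elsewhere in wmadeci.c), block_len_bits == frame_len_bits - bsize, so:

    /* Old and new size arguments agree:
     *     1 << (block_len_bits + 1) == 1 << (frame_len_bits - bsize + 1)
     * i.e. the full MDCT size 2 * block_len. */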
diff --git a/apps/codecs/libwma/wmafixed.c b/apps/codecs/libwma/wmafixed.c
index 5569309145..1472ed081c 100644
--- a/apps/codecs/libwma/wmafixed.c
+++ b/apps/codecs/libwma/wmafixed.c
@@ -250,113 +250,3 @@ fixed64 fixdiv64(fixed64 x, fixed64 y)
return (fixed32)(r << (PRECISION / 2));
}
-
-
-
-/* Inverse gain of circular cordic rotation in s0.31 format. */
-static const long cordic_circular_gain = 0xb2458939; /* 0.607252929 */
-
-/* Table of values of atan(2^-i) in 0.32 format fractions of pi where pi = 0xffffffff / 2 */
-static const unsigned long atan_table[] = {
- 0x1fffffff, /* +0.785398163 (or pi/4) */
- 0x12e4051d, /* +0.463647609 */
- 0x09fb385b, /* +0.244978663 */
- 0x051111d4, /* +0.124354995 */
- 0x028b0d43, /* +0.062418810 */
- 0x0145d7e1, /* +0.031239833 */
- 0x00a2f61e, /* +0.015623729 */
- 0x00517c55, /* +0.007812341 */
- 0x0028be53, /* +0.003906230 */
- 0x00145f2e, /* +0.001953123 */
- 0x000a2f98, /* +0.000976562 */
- 0x000517cc, /* +0.000488281 */
- 0x00028be6, /* +0.000244141 */
- 0x000145f3, /* +0.000122070 */
- 0x0000a2f9, /* +0.000061035 */
- 0x0000517c, /* +0.000030518 */
- 0x000028be, /* +0.000015259 */
- 0x0000145f, /* +0.000007629 */
- 0x00000a2f, /* +0.000003815 */
- 0x00000517, /* +0.000001907 */
- 0x0000028b, /* +0.000000954 */
- 0x00000145, /* +0.000000477 */
- 0x000000a2, /* +0.000000238 */
- 0x00000051, /* +0.000000119 */
- 0x00000028, /* +0.000000060 */
- 0x00000014, /* +0.000000030 */
- 0x0000000a, /* +0.000000015 */
- 0x00000005, /* +0.000000007 */
- 0x00000002, /* +0.000000004 */
- 0x00000001, /* +0.000000002 */
- 0x00000000, /* +0.000000001 */
- 0x00000000, /* +0.000000000 */
-};
-
-
-/*
-
- Below here functions do not use standard fixed precision!
-*/
-
-
-/**
- * Implements sin and cos using CORDIC rotation.
- *
- * @param phase has range from 0 to 0xffffffff, representing 0 and
- * 2*pi respectively.
- * @param cos return address for cos
- * @return sin of phase, value is a signed value from LONG_MIN to LONG_MAX,
- * representing -1 and 1 respectively.
- *
- * Gives at least 24 bits precision (last 2-8 bits or so are probably off)
- */
-long fsincos(unsigned long phase, fixed32 *cos)
-{
- int32_t x, x1, y, y1;
- unsigned long z, z1;
- int i;
-
- /* Setup initial vector */
- x = cordic_circular_gain;
- y = 0;
- z = phase;
-
- /* The phase has to be somewhere between 0..pi for this to work right */
- if (z < 0xffffffff / 4) {
- /* z in first quadrant, z += pi/2 to correct */
- x = -x;
- z += 0xffffffff / 4;
- } else if (z < 3 * (0xffffffff / 4)) {
- /* z in third quadrant, z -= pi/2 to correct */
- z -= 0xffffffff / 4;
- } else {
- /* z in fourth quadrant, z -= 3pi/2 to correct */
- x = -x;
- z -= 3 * (0xffffffff / 4);
- }
-
- /* Each iteration adds roughly 1-bit of extra precision */
- for (i = 0; i < 31; i++) {
- x1 = x >> i;
- y1 = y >> i;
- z1 = atan_table[i];
-
- /* Decided which direction to rotate vector. Pivot point is pi/2 */
- if (z >= 0xffffffff / 4) {
- x -= y1;
- y += x1;
- z -= z1;
- } else {
- x += y1;
- y -= x1;
- z += z1;
- }
- }
-
- if (cos)
- *cos = x;
-
- return y;
-}
-
-
diff --git a/apps/codecs/libwma/wmafixed.h b/apps/codecs/libwma/wmafixed.h
index 6b5137e044..0ecdc5cfbc 100644
--- a/apps/codecs/libwma/wmafixed.h
+++ b/apps/codecs/libwma/wmafixed.h
@@ -52,10 +52,10 @@ fixed64 fixdiv64(fixed64 x, fixed64 y);
fixed32 fixsqrt32(fixed32 x);
long fsincos(unsigned long phase, fixed32 *cos);
+
#ifdef CPU_ARM
/*Sign-15.16 format */
-
#define fixmul32(x, y) \
({ int32_t __hi; \
uint32_t __lo; \
@@ -70,18 +70,6 @@ long fsincos(unsigned long phase, fixed32 *cos);
__result; \
})
-#define fixmul32b(x, y) \
- ({ int32_t __hi; \
- uint32_t __lo; \
- int32_t __result; \
- asm ("smull %0, %1, %3, %4\n\t" \
- "movs %2, %1, lsl #1" \
- : "=&r" (__lo), "=&r" (__hi), "=r" (__result) \
- : "%r" (x), "r" (y) \
- : "cc"); \
- __result; \
- })
-
#elif defined(CPU_COLDFIRE)
static inline int32_t fixmul32(int32_t x, int32_t y)
@@ -91,9 +79,9 @@ static inline int32_t fixmul32(int32_t x, int32_t y)
#endif
int32_t t1;
asm (
- "mac.l %[x], %[y], %%acc0 \n" /* multiply */
- "mulu.l %[y], %[x] \n" /* get lower half, avoid emac stall */
- "movclr.l %%acc0, %[t1] \n" /* get higher half */
+ "mac.l %[x], %[y], %%acc0 \n" // multiply
+ "mulu.l %[y], %[x] \n" // get lower half, avoid emac stall
+ "movclr.l %%acc0, %[t1] \n" // get higher half
"lsr.l #1, %[t1] \n"
"move.w %[t1], %[x] \n"
"swap %[x] \n"
@@ -103,17 +91,6 @@ static inline int32_t fixmul32(int32_t x, int32_t y)
return x;
}
-static inline int32_t fixmul32b(int32_t x, int32_t y)
-{
- asm (
- "mac.l %[x], %[y], %%acc0 \n" /* multiply */
- "movclr.l %%acc0, %[x] \n" /* get higher half */
- : [x] "+d" (x)
- : [y] "d" (y)
- );
- return x;
-}
-
#else
static inline fixed32 fixmul32(fixed32 x, fixed32 y)
@@ -127,17 +104,7 @@ static inline fixed32 fixmul32(fixed32 x, fixed32 y)
return (fixed32)temp;
}
-static inline fixed32 fixmul32b(fixed32 x, fixed32 y)
-{
- fixed64 temp;
-
- temp = x;
- temp *= y;
-
- temp >>= 31; //16+31-16 = 31 bits
-
- return (fixed32)temp;
-}
-
#endif
+
+/* get fixmul32b from codeclib */
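
[Editor's note] The three per-CPU fixmul32b variants removed above all compute the same s0.31 product; the codec lib is now expected to provide them. A portable reference matching the generic variant removed here:

    #include <stdint.h>

    /* fixmul32b: multiply two s0.31 fixed-point values, keeping bits
     * 62..31 of the 64-bit product (equivalent to the removed generic
     * C version; codeclib supplies optimized ARM/Coldfire asm). */
    static inline int32_t fixmul32b_ref(int32_t x, int32_t y)
    {
        return (int32_t)(((int64_t)x * (int64_t)y) >> 31);
    }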