summaryrefslogtreecommitdiffstats
path: root/apps/codecs/libtremor/asm_arm.h
blob: 9531f2165790c3a7036725b8c7ec1f586f4a76d0 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/

#ifdef _ARM_ASSEM_

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH

/* 32x32->64 signed multiply returning only the high 32 bits of the
   product, i.e. ((ogg_int64_t)x * y) >> 32.  'lo' receives the low
   word and is intentionally unused; SMULL requires two distinct
   destination registers. */
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)  /* "&": outputs must not share registers with inputs */
               : "%r"(x),"r"(y) );    /* "%": operands are commutative, compiler may swap */
  return(hi);
}

/* Q31 fixed-point multiply: returns ((ogg_int64_t)x * y) >> 31,
   computed as the high word of the product (MULT32) doubled.
   The doubling is performed in unsigned arithmetic because
   left-shifting a negative signed value is undefined behavior in C
   (C11 6.5.7p4); the converted result is bit-identical on
   two's-complement targets such as ARM. */
static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (ogg_int32_t)((ogg_uint32_t)MULT32(x,y)<<1);
}

/* Q15 multiply: returns ((ogg_int64_t)x * y) >> 15, rounded to
   nearest.  SMULL leaves the 64-bit product in hi:lo; bits [46:15]
   are reassembled from (lo >> 15) and (hi << 17), and the MOVS/ADC
   pair adds back the last bit shifted out (product bit 14) as a
   rounding carry. */
static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull   %0, %1, %2, %3\n\t"
               "movs    %0, %0, lsr #15\n\t"   /* carry := product bit 14 */
               "adc     %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");                        /* MOVS/ADC clobber the flags */
  return(hi);
}

#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* asm versions of vector operations for block.c, window.c */
/* SOME IMPORTANT NOTES: this implementation of vect_mult_bw does
   NOT do a final shift, meaning that the result of vect_mult_bw is
   only 31 bits not 32.  This is so that we can do the shift in-place
   in vect_add_xxxx instead to save one instruction for each mult on arm */
/* Overlap-add: x[i] = y[i] + (x[i] << 1), result overwriting x.
   The <<1 on x restores the doubling that vect_mult_bw omitted (see
   the note above).  Each asm statement handles two LDM/STM batches of
   four words (8 samples), so n must be a positive multiple of 8 or
   the do/while never terminates -- callers guarantee this. */
static inline 
void vect_add_right_left(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  /* first arg is right subframe of previous frame and second arg
     is left subframe of current frame.  overlap left onto right overwriting
     the right subframe */
     
  do{
    asm volatile (
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r4, r0, lsl #1;"
                  "add r1, r5, r1, lsl #1;"
                  "add r2, r6, r2, lsl #1;"
                  "add r3, r7, r3, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r4, r0, lsl #1;"
                  "add r1, r5, r1, lsl #1;"
                  "add r2, r6, r2, lsl #1;"
                  "add r3, r7, r3, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7",
                  "memory");
    n -= 8;
  } while (n);
}

/* Overlap-add: x[i] = x[i] + (y[i] << 1), result overwriting x.
   Mirror of vect_add_right_left: here the <<1 lands on y, the
   previous frame's right subframe (the 31-bit vect_mult_bw output --
   see the note above).  8 samples per iteration, so n must be a
   positive multiple of 8 -- callers guarantee this. */
static inline 
void vect_add_left_right(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  /* first arg is left subframe of current frame and second arg
     is right subframe of previous frame.  overlap right onto left overwriting
     the LEFT subframe */
  do{
    asm volatile (
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4, lsl #1;"
                  "add r1, r1, r5, lsl #1;"
                  "add r2, r2, r6, lsl #1;"
                  "add r3, r3, r7, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4, lsl #1;"
                  "add r1, r1, r5, lsl #1;"
                  "add r2, r2, r6, lsl #1;"
                  "add r3, r3, r7, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7",
                  "memory");
    n -= 8;
  } while (n);
}

#if ARM_ARCH >= 6
/* In-place forward windowing (ARMv6+ path):
   data[i] = MULT31(data[i], window[i]) -- high word of the 64-bit
   product, doubled.  SMMUL (ARMv6) yields the high word directly; the
   MOVs supply MULT31's final <<1.  4 samples per iteration, so n must
   be a positive multiple of 4 -- callers guarantee this. */
static inline 
void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  /* Note, mult_fw uses MULT31 */
  do{
    asm volatile (
                  "ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smmul r0, r4, r0;"
                  "smmul r1, r5, r1;"
                  "smmul r2, r6, r2;"
                  "smmul r3, r7, r3;"
                  "mov   r0, r0, lsl #1;"
                  "mov   r1, r1, lsl #1;"
                  "mov   r2, r2, lsl #1;"
                  "mov   r3, r3, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7",
                  "memory" );
    n -= 4;
  } while (n);
}
#else
/* In-place forward windowing (pre-ARMv6 path):
   data[i] = MULT31(data[i], window[i]).  SMULL writes the unused low
   word of each product into the r8 scratch register, the high word
   into rN, then <<1 completes the Q31 multiply.  4 samples per
   iteration, so n must be a positive multiple of 4 -- callers
   guarantee this. */
static inline 
void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  /* Note, mult_fw uses MULT31 */
  do{
    asm volatile (
                  "ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r0, r4, r0;"
                  "mov   r0, r0, lsl #1;"
                  "smull r8, r1, r5, r1;"
                  "mov   r1, r1, lsl #1;"
                  "smull r8, r2, r6, r2;"
                  "mov   r2, r2, lsl #1;"
                  "smull r8, r3, r7, r3;"
                  "mov   r3, r3, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7", "r8",
                  "memory" );
    n -= 4;
  } while (n);
}
#endif

#if ARM_ARCH >= 6
/* In-place backward windowing (ARMv6+ path): the window pointer walks
   DOWN while data walks up.  LDMDA loads w[-3]..w[0] into r4..r7 and
   post-decrements w by 16 bytes, so the pairings r7*data[0] ...
   r4*data[3] apply the window in reverse order.  Result is the plain
   high word (MULT32) -- deliberately NOT doubled; the <<1 is fused
   into the subsequent vect_add_* (see the note above).  4 samples per
   iteration, so n must be a positive multiple of 4 -- callers
   guarantee this. */
static inline
void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  /* NOTE mult_bw uses MULT_32 i.e. doesn't shift result left at end */
  /* On ARM, we can do the shift at the same time as the overlap-add */
  do{
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smmul r0, r7, r0;"
                  "smmul r1, r6, r1;"
                  "smmul r2, r5, r2;"
                  "smmul r3, r4, r3;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7",
                  "memory" );
    n -= 4;
  } while (n);
}
#else
/* In-place backward windowing (pre-ARMv6 path): same contract as the
   ARMv6 version above -- window applied in reverse, result NOT
   doubled (the <<1 is fused into vect_add_*; see the note above).
   Each SMULL dumps its discarded low word into a register whose
   window value has already been consumed (r8, then r7, r6, r5),
   avoiding an extra scratch register.  4 samples per iteration, so n
   must be a positive multiple of 4 -- callers guarantee this. */
static inline
void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  /* NOTE mult_bw uses MULT_32 i.e. doesn't shift result left at end */
  /* On ARM, we can do the shift at the same time as the overlap-add */
  do{
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r0, r7, r0;"
                  "smull r7, r1, r6, r1;"
                  "smull r6, r2, r5, r2;"
                  "smull r5, r3, r4, r3;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                  "r4", "r5", "r6", "r7", "r8",
                  "memory" );
    n -= 4;
  } while (n);
}
#endif

/* Copy n 32-bit samples from y into x.  Plain memcpy semantics: the
   two regions must not overlap. */
static inline void vect_copy(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  memcpy(x, y, (size_t)n * sizeof(*x));
}

#endif

#endif
/* not used anymore */
/*
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int tmp;
  asm volatile("subs    %1, %0, #32768\n\t"
               "movpl   %0, #0x7f00\n\t"
               "orrpl   %0, %0, #0xff\n"
               "adds    %1, %0, #32768\n\t"
               "movmi   %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}

#endif
*/

#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM

/* Core product loop of the LSP curve computation.  Accumulates
     qi *= labs(ilsp[2j]   - wi)
     pi *= labs(ilsp[2j+1] - wi)
   over the m/2 coefficient pairs (walking ilsp downward from the end
   with LDMDB).  Both running products are kept in 32 bits: whenever a
   64-bit UMULL result has a nonzero high word, qi and pi are both
   shifted down 16 bits and the block exponent qexp is raised by 16.
   An odd m contributes one extra |ilsp[m-1]-wi| factor to qi and a
   constant 0x4000 factor to pi.  Finally both products are normalized
   down to at most 16 significant bits, the shift again added to qexp.
   Results are written back through qip/pip/qexpp. */
static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp,ogg_int32_t wi,
                                ogg_int32_t m){
  
  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov     r0,%3;"               // r0 = ilsp
      "movs    r1,%5,asr#1;"         // r1 = m/2 pairs; skip loop if zero
      "add     r0,r0,r1,lsl#3;"      // r0 = one past the last pair
      "beq 2f;\n"
      "1:"
      
      "ldmdb   r0!,{r1,r3};"         // walk down: r1=ilsp[2j], r3=ilsp[2j+1]
      "subs    r1,r1,%4;"          //ilsp[j]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;"       //qi*=labs(ilsp[j]-wi)
      
      "subs    r1,r3,%4;"          //ilsp[j+1]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j+1]-wi)
      "umull   %1,r3,r1,%1;"       //pi*=labs(ilsp[j+1]-wi)
      
      "cmn     r2,r3;"             // shift down 16 if either high word nonzero
      "beq     0f;"
      "add     %2,%2,#16;"
      "mov     %0,%0,lsr #16;"
      "orr     %0,%0,r2,lsl #16;"
      "mov     %1,%1,lsr #16;"
      "orr     %1,%1,r3,lsl #16;"
      "0:"
      "cmp     r0,%3;\n"             // more pairs left?
      "bhi     1b;\n"
      
      "2:"
      // odd filter asymmetry: one extra qi factor, constant pi factor
      "ands    r0,%5,#1;\n"
      "beq     3f;\n"
      "add     r0,%3,%5,lsl#2;\n"    // r0 = &ilsp[m]
      
      "ldr     r1,[r0,#-4];\n"       // r1 = ilsp[m-1]
      "mov     r0,#0x4000;\n"
      
      "subs    r1,r1,%4;\n"          //ilsp[j]-wi
      "rsbmi   r1,r1,#0;\n"          //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;\n"       //qi*=labs(ilsp[j]-wi)
      "umull   %1,r3,r0,%1;\n"       //pi*=0x4000
      
      "cmn     r2,r3;\n"             // shift down 16?
      "beq     3f;\n"
      "add     %2,%2,#16;\n"
      "mov     %0,%0,lsr #16;\n"
      "orr     %0,%0,r2,lsl #16;\n"
      "mov     %1,%1,lsr #16;\n"
      "orr     %1,%1,r3,lsl #16;\n"
      
      //qi=(pi>>shift)*labs(ilsp[j]-wi);
      //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
      //qexp+=shift;

      //}

      /* normalize to max 16 sig figs: r2 = bit count of (qi|pi) above
         bit 15, found by a branchless binary search on the OR */
      "3:"
      "mov     r2,#0;"
      "orr     r1,%0,%1;"
      "tst     r1,#0xff000000;"
      "addne   r2,r2,#8;"
      "movne   r1,r1,lsr #8;"
      "tst     r1,#0x00f00000;"
      "addne   r2,r2,#4;"
      "movne   r1,r1,lsr #4;"
      "tst     r1,#0x000c0000;"
      "addne   r2,r2,#2;"
      "movne   r1,r1,lsr #2;"
      "tst     r1,#0x00020000;"
      "addne   r2,r2,#1;"
      "movne   r1,r1,lsr #1;"
      "tst     r1,#0x00010000;"
      "addne   r2,r2,#1;"
      "mov     %0,%0,lsr r2;"
      "mov     %1,%1,lsr r2;"
      "add     %2,%2,r2;"
      
      : "+r"(qi),"+r"(pi),"+r"(qexp)
      : "r"(ilsp),"r"(wi),"r"(m)
      : "r0","r1","r2","r3","cc");
  
  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}

/* Normalize qi upward so that bit 15 is its most significant set bit,
   decrementing the exponent qexp by the shift applied (binary search
   over 8/4/2/1-bit steps).  NOTE(review): assumes qi is nonzero and
   already fits in the low 16 bits on entry (as produced by
   lsp_loop_asm's final normalization) -- confirm against callers. */
static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst     %0,#0x0000ff00;"
      "moveq   %0,%0,lsl #8;"
      "subeq   %1,%1,#8;"
      "tst     %0,#0x0000f000;"
      "moveq   %0,%0,lsl #4;"
      "subeq   %1,%1,#4;"
      "tst     %0,#0x0000c000;"
      "moveq   %0,%0,lsl #2;"
      "subeq   %1,%1,#2;"
      "tst     %0,#0x00008000;"
      "moveq   %0,%0,lsl #1;"
      "subeq   %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)
      :
      : "cc");
  *qip=qi;
  *qexpp=qexp;
}

#endif
#endif