path: root/apps/plugins/mpegplayer/libmpeg2/idct_armv6.S
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2009 by Jens Arnold
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"

    .syntax unified

    .global     mpeg2_idct_copy
    .type       mpeg2_idct_copy, %function
    .global     mpeg2_idct_add
    .type       mpeg2_idct_add, %function

/* Custom calling convention:
 * r0 contains the block pointer and is non-volatile;
 * all other non-volatile C context is saved and restored on its behalf
 * by the callers.
 */
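/* .idct: fixed-point 8x8 inverse DCT, performed as two 1-D passes:
 * - .row_loop reads the block at r0 and stores each row's results down a
 *   column of the temp buffer at r0+128 ('+1' on f0 adds the rounding
 *   bias, outputs are scaled down by '>> 12')
 * - .col_loop reads the temp buffer and writes the final values back into
 *   the original block ('+32' on f0, outputs scaled down by '>> 17')
 * Each 1-D pass computes an even part a0..a3 from f0/f2/f4/f6 and an odd
 * part b0..b3 from f1/f3/f5/f7, then stores the butterfly results
 * (a_i + b_i) and (a_i - b_i).
 */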
.idct:
    str     lr, [sp, #-4]!      @ lr is used
    add     r1, r0, #128        @ secondary, transposed temp buffer
    mov     r14, #8             @ loop counter

.row_loop:
    ldmia   r0!, {r2, r3, r10, r11} @ fetch f0, f2, f4, f6, f1, f3, f5, f7
    ldrd    r4, L_W1357         @ load  W1, W3, W5, W7

    smuad   r6, r4, r10         @ b0 = W1 * f1 + W3 * f3
    smultt  r7, r5, r10         @ -b1 = W7 * f3
    smulbt  r8, r4, r10         @ -b2 = W1 * f3

    smusdx  r9, r10, r5         @ b3 = f1 * W7 - f3 * W5
    smlabb  r7, r4, r11, r7     @ -b1 += W1 * f5
    rsb     r8, r8, #0          @ b2 = -b2
    smlabb  r8, r5, r10, r8     @ b2 += W5 * f1

    smlad   r6, r5, r11, r6     @ b0 += W5 * f5 + W7 * f7
    smlabt  r7, r5, r11, r7     @ -b1 += W5 * f7
    smlatb  r8, r5, r11, r8     @ b2 += W7 * f5         

    smlsdx  r9, r11, r4, r9     @ b3 += f5 * W3 - f7 * W1
    rsb     r7, r7, #0          @ b1 = -b1
    smlatb  r7, r4, r10, r7     @ b1 += W3 * f1
    smlatt  r8, r4, r11, r8     @ b2 += W3 * f7

    ldrd    r4, L_W0246         @ load  W0, W2, W4, W6
    add     r2, r2, #1          @ f0 += 1

    smulbb  r10, r5, r3         @ a0' = W4 * f4
    smultt  r12, r5, r3         @ a3' = W6 * f6
    smultt  r3, r4, r3          @ -a2' = W2 * f6

    rsb     r11, r10, #0        @ a1' = -W4 * f4
    smlabb  r10, r4, r2, r10    @ a0' += W0 * f0
    smlabb  r11, r4, r2, r11    @ a1' += W0 * f0
    smlatt  r12, r4, r2, r12    @ a3' += W2 * f2
    rsb     r3, r3, #0          @ a2' = -a2'
    smlatt  r3, r5, r2, r3      @ a2' += W6 * f2

    add     r10, r10, r12       @ a0  = a0' + a3'
    sub     r12, r10, r12, lsl #1  @ a3  = a0 - 2 * a3'
    add     r11, r11, r3        @ a1  = a1' + a2'
    sub     r3, r11, r3, lsl #1 @ a2  = a1 - 2 * a2'
    
    subs    r14, r14, #1        @ decrease loop count

    @ Special store order, so that the column pass processes the columns in
    @ the order 0-2-1-3-4-6-5-7, which allows uxtab16 use in later stages.
    sub     r2, r10, r6         @ block[7] = (a0 - b0)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #7*16]
    sub     r2, r11, r7         @ block[6] = (a1 - b1)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #5*16]
    sub     r2, r3, r8          @ block[5] = (a2 - b2)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #6*16]
    sub     r2, r12, r9         @ block[4] = (a3 - b3)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #4*16]
    add     r2, r12, r9         @ block[3] = (a3 + b3)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #3*16]
    add     r2, r3, r8          @ block[2] = (a2 + b2)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #1*16]
    add     r2, r11, r7         @ block[1] = (a1 + b1)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1, #2*16]
    add     r2, r10, r6         @ block[0] = (a0 + b0)
    mov     r2, r2, asr #12     @            >> 12
    strh    r2, [r1], #2        @ advance to next temp column
    
    bne     .row_loop
    b       .col_start

    @placed here because of ldrd's offset limit
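    @ W1, W3, W5, W7 below are round(2048 * sqrt(2) * cos(n*pi/16)) for
    @ n = 1, 3, 5, 7, and likewise W2 and W6 in the second table. W4 works
    @ out to exactly 2048; W0 is simply 2048 (1 << 11), used to scale f0.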
L_W1357:
    .short  2841
    .short  2408
    .short  1609
    .short   565

L_W0246:
    .short  2048
    .short  2676
    .short  2048
    .short  1108

.col_start:
    @ r0 now points to the temp buffer, where we need it.
    sub     r1, r1, #128+16     @ point r1 back to the input block
    mov     r14, #8             @ loop counter

.col_loop:
    ldmia   r0!, {r2, r3, r10, r11} @ fetch f0, f2, f4, f6, f1, f3, f5, f7
    ldrd    r4, L_W1357         @ load  W1, W3, W5, W7

    smuad   r6, r4, r10         @ b0 = W1 * f1 + W3 * f3
    smultt  r7, r5, r10         @ -b1 = W7 * f3
    smulbt  r8, r4, r10         @ -b2 = W1 * f3

    smusdx  r9, r10, r5         @ b3 = f1 * W7 - f3 * W5
    smlabb  r7, r4, r11, r7     @ -b1 += W1 * f5
    rsb     r8, r8, #0          @ b2 = -b2
    smlabb  r8, r5, r10, r8     @ b2 += W5 * f1

    smlad   r6, r5, r11, r6     @ b0 += W5 * f5 + W7 * f7
    smlabt  r7, r5, r11, r7     @ -b1 += W5 * f7
    smlatb  r8, r5, r11, r8     @ b2 += W7 * f5

    smlsdx  r9, r11, r4, r9     @ b3 += f5 * W3 - f7 * W1
    rsb     r7, r7, #0          @ b1 = -b1
    smlatb  r7, r4, r10, r7     @ b1 += W3 * f1
    smlatt  r8, r4, r11, r8     @ b2 += W3 * f7

    ldrd    r4, L_W0246         @ load  W0, W2, W4, W6
    add     r2, r2, #32         @ DC offset: 0.5

    smulbb  r10, r5, r3         @ a0' = W4 * f4
    smultt  r12, r5, r3         @ a3' = W6 * f6
    smultt  r3, r4, r3          @ -a2' = W2 * f6

    rsb     r11, r10, #0        @ a1' = -W4 * f4
    smlabb  r10, r4, r2, r10    @ a0' += W0 * f0
    smlabb  r11, r4, r2, r11    @ a1' += W0 * f0
    smlatt  r12, r4, r2, r12    @ a3' += W2 * f2
    rsb     r3, r3, #0          @ a2' = -a2'
    smlatt  r3, r5, r2, r3      @ a2' += W6 * f2

    add     r10, r10, r12       @ a0  = a0' + a3'
    sub     r12, r10, r12, lsl #1  @ a3  = a0 - 2 * a3'
    add     r11, r11, r3        @ a1  = a1' + a2'
    sub     r3, r11, r3, lsl #1 @ a2  = a1 - 2 * a2'
    
    subs    r14, r14, #1        @ decrease loop count

    sub     r2, r10, r6         @ block[7] = (a0 - b0)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #7*16]
    sub     r2, r11, r7         @ block[6] = (a1 - b1)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #6*16]
    sub     r2, r3, r8          @ block[5] = (a2 - b2)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #5*16]
    sub     r2, r12, r9         @ block[4] = (a3 - b3)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #4*16]
    add     r2, r12, r9         @ block[3] = (a3 + b3)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #3*16]
    add     r2, r3, r8          @ block[2] = (a2 + b2)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #2*16]
    add     r2, r11, r7         @ block[1] = (a1 + b1)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1, #1*16]
    add     r2, r10, r6         @ block[0] = (a0 + b0)
    mov     r2, r2, asr #17     @            >> 17
    strh    r2, [r1], #2        @ advance to next column

    bne     .col_loop

    sub     r0, r0, #256        @ point r0 back to the input block
    ldr     pc, [sp], #4


mpeg2_idct_copy:
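    @ void mpeg2_idct_copy(int16_t *block, uint8_t *dest, int stride)
    @ r0 = coefficient block, r1 = destination pixels, r2 = line stride.
    @ Runs the full IDCT, clamps the results to 0..255, writes them to the
    @ destination and zeroes the coefficient block for reuse.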
    stmfd  sp!, {r1-r2, r4-r11, lr}
    bl     .idct
    ldmfd  sp!, {r1-r2}

    add    r3, r0, #128
    mov    r8, #0
    mov    r9, #0
    mov    r10, #0
    mov    r11, #0
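    @ Per iteration: load 8 result halfwords, zero them behind us, clamp
    @ each value to 0..255 with usat16, then merge the two registers so the
    @ permuted halfword order becomes natural byte order, and store 8
    @ pixels with strd.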
1:                              @ idct data is in order 0-2-1-3-4-6-5-7,
    ldmia  r0,  {r4-r7}         @ see above
    stmia  r0!, {r8-r11}
    usat16 r4, #8, r4
    usat16 r5, #8, r5
    orr    r4, r4, r5, lsl #8
    usat16 r6, #8, r6
    usat16 r7, #8, r7
    orr    r5, r6, r7, lsl #8
    strd   r4, [r1]             @ r4, r5
    add    r1, r1, r2
    cmp    r0, r3
    blo    1b

    ldmfd  sp!, {r4-r11, pc}

mpeg2_idct_add:
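    @ void mpeg2_idct_add(int last, int16_t *block, uint8_t *dest, int stride)
    @ r0 = last, r1 = coefficient block, r2 = destination pixels,
    @ r3 = line stride.
    @ Takes the DC-only short cut at label 3: when last == 129 and
    @ (block[0] & 0x70) != 0x40; otherwise runs the full IDCT and adds the
    @ result to the destination pixels.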
    cmp    r0, #129
    mov    r0, r1
    ldrsheq r1, [r0, #0]
    bne    1f
    and    r1, r1, #0x70
    cmp    r1, #0x40
    bne    3f
1:
    stmfd  sp!, {r2-r11, lr}
    bl     .idct
    ldmfd  sp!, {r1-r2}

    add    r3, r0, #128
    mov    r10, #0
    mov    r11, #0
    mov    r12, #0
    mov    lr, #0
    ldrd   r8, [r1]             @ r8, r9
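    @ Per iteration: load 8 result halfwords, zero them behind us, add the
    @ 8 destination bytes (preloaded into r8/r9) with uxtab16 (plain for
    @ byte lanes 0/2, ror #8 for lanes 1/3), clamp to 0..255, repack to
    @ bytes and store with strd.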
2:                              @ idct data is in order 0-2-1-3-4-6-5-7,
    ldmia  r0,  {r4-r7}         @ see above
    stmia  r0!, {r10-r12, lr}
    uxtab16 r4, r4, r8
    uxtab16 r5, r5, r8, ror #8
    usat16 r4, #8, r4
    usat16 r5, #8, r5
    orr    r4, r4, r5, lsl #8
    uxtab16 r6, r6, r9
    uxtab16 r7, r7, r9, ror #8
    usat16 r6, #8, r6
    usat16 r7, #8, r7
    orr    r5, r6, r7, lsl #8
    strd   r4, [r1]             @ r4, r5
    add    r1, r1, r2
    cmp    r0, r3
    ldrdlo r8, [r1]             @ r8, r9
    blo    2b

    ldmfd  sp!, {r4-r11, pc}

3:
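    @ DC-only block: only block[0] (and possibly block[63], toggled by the
    @ MPEG-2 mismatch control) can be non-zero, so clear just those two,
    @ compute DC = (block[0] + 64) >> 7, replicate it into both halfwords
    @ of r4 and add it to all 64 destination pixels with saturation.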
    stmfd  sp!, {r4, lr}
    ldrsh  r4, [r0, #0]         @ r4 = block[0]
    mov    r12, #0
    strh   r12, [r0, #0]        @ block[0] = 0
    strh   r12, [r0, #126]      @ block[63] = 0
    add    r4, r4, #64
    mov    r4, r4, asr #7       @ r4 = DC
    mov    r4, r4, lsl #16      @ spread to 2 halfwords
    orr    r4, r4, r4, lsr #16
    ldrd   r0, [r2]             @ r0, r1
    add    r12, r2, r3, asl #3
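    @ Per iteration: add the replicated DC to 8 destination bytes using
    @ uxtab16 (ror #8 selects the odd byte lanes), clamp each sum to
    @ 0..255 with usat16, re-interleave and store 8 pixels with strd.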
4:
    uxtab16 lr, r4, r0, ror #8
    uxtab16 r0, r4, r0
    usat16 lr, #8, lr
    usat16 r0, #8, r0
    orr    r0, r0, lr, lsl #8
    uxtab16 lr, r4, r1, ror #8
    uxtab16 r1, r4, r1
    usat16 lr, #8, lr
    usat16 r1, #8, r1
    orr    r1, r1, lr, lsl #8
    strd   r0, [r2]             @ r0, r1
    add    r2, r2, r3
    cmp    r2, r12
    ldrdlo r0, [r2]             @ r0, r1
    blo    4b

    ldmfd  sp!, {r4, pc}