/*
 * ARM NEON optimised MDCT
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "asm.S"

        .fpu neon
        .text

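@ ff_imdct_half_neon(MDCTContext *s, FFTSample *output, const FFTSample *input)
@ r0 = context, r1 = output (n/4 complex values), r2 = input
@
@ Half inverse MDCT of size n = 1 << nbits, in the usual three stages:
@ pre-rotation with bit-reversed scatter, in-place complex FFT, then
@ post-rotation.  The field offsets used below (nbits at +4, tcos at +8,
@ tsin at +12, revtab at +24) match this revision's MDCTContext, whose
@ embedded FFTContext apparently starts at +16, as the "add r0, r0, #16"
@ before the ff_fft_calc_neon call suggests.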
function ff_imdct_half_neon, export=1
        push            {r4-r8,lr}

        mov             r12, #1
        ldr             lr,  [r0, #4]           @ nbits
        ldr             r4,  [r0, #8]           @ tcos
        ldr             r5,  [r0, #12]          @ tsin
        ldr             r3,  [r0, #24]          @ revtab
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #2            @ n4 = n >> 2
        add             r7,  r2,  r12, lsl #1
        mov             r12, #-16
        sub             r7,  r7,  #16

        vld2.32         {d16-d17},[r7,:128],r12 @ d16=x,n1 d17=x,n0
        vld2.32         {d0-d1},  [r2,:128]!    @ d0 =m0,x d1 =m1,x
        vrev64.32       d17, d17
        vld1.32         {d2},     [r4,:64]!     @ d2=c0,c1
        vmul.f32        d6,  d17, d2
        vld1.32         {d3},     [r5,:64]!     @ d3=s0,s1
        vmul.f32        d7,  d0,  d2
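
@ Pre-rotation, two samples per iteration, software pipelined: the loads
@ and leading multiplies for iteration k+1 sit between the arithmetic and
@ the scatter stores of iteration k.  Each 32-bit ldr fetches two 16-bit
@ revtab indices, split with uxth and scaled by sizeof(FFTComplex) = 8.
@ Roughly, as a sketch of the scalar reference:
@     for (k = 0; k < n4; k++) {
@         j = revtab[k];  // in1 = input[2k], in2 = input[n2-1-2k]
@         z[j].re = in2*tcos[k] - in1*tsin[k];
@         z[j].im = in2*tsin[k] + in1*tcos[k];
@     }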
1:
        subs            lr,  lr,  #2
        ldr             r6,  [r3], #4
        vmul.f32        d4,  d0,  d3
        vmul.f32        d5,  d17, d3
        vsub.f32        d4,  d6,  d4
        vadd.f32        d5,  d5,  d7
        uxth            r8,  r6,  ror #16
        uxth            r6,  r6
        add             r8,  r1,  r8,  lsl #3
        add             r6,  r1,  r6,  lsl #3
        beq             1f
        vld2.32         {d16-d17},[r7,:128],r12
        vld2.32         {d0-d1},  [r2,:128]!
        vrev64.32       d17, d17
        vld1.32         {d2},     [r4,:64]!
        vmul.f32        d6,  d17, d2
        vld1.32         {d3},     [r5,:64]!
        vmul.f32        d7,  d0,  d2
        vst2.32         {d4[0],d5[0]}, [r6,:64]
        vst2.32         {d4[1],d5[1]}, [r8,:64]
        b               1b
1:
        vst2.32         {d4[0],d5[0]}, [r6,:64]
        vst2.32         {d4[1],d5[1]}, [r8,:64]

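@ Run the half-size complex FFT in place over the buffer just written:
@ r0 is advanced 16 bytes to the FFTContext embedded in the MDCTContext,
@ and r1 already points at the FFTComplex array.  r4/r6 preserve the
@ context and output pointers across the call.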
        mov             r4,  r0
        mov             r6,  r1
        add             r0,  r0,  #16
        bl              ff_fft_calc_neon

        mov             r12, #1
        ldr             lr,  [r4, #4]           @ nbits
        ldr             r5,  [r4, #12]          @ tsin
        ldr             r4,  [r4, #8]           @ tcos
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #3            @ n8 = n >> 3

        add             r4,  r4,  lr,  lsl #2
        add             r5,  r5,  lr,  lsl #2
        add             r6,  r6,  lr,  lsl #3
        sub             r1,  r4,  #8
        sub             r2,  r5,  #8
        sub             r3,  r6,  #16

        mov             r7,  #-16
        mov             r12, #-8
        mov             r8,  r6
        mov             r0,  r3

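@ Post-rotation and reordering, two complex values from each end per
@ iteration: r0/r3 walk down from the midpoint while r6/r8 walk up, with
@ the twiddle pointers advancing in both directions in step.  Approximately
@ the scalar reference, where CMUL(dre,dim,a,b,c,s) is dre = a*c - b*s,
@ dim = a*s + b*c (a sketch; NEON lane ordering differs):
@     for (k = 0; k < n8; k++) {
@         CMUL(r0, i1, z[n8-k-1].im, z[n8-k-1].re, tsin[n8-k-1], tcos[n8-k-1]);
@         CMUL(r1, i0, z[n8+k].im,   z[n8+k].re,   tsin[n8+k],   tcos[n8+k]);
@         z[n8-k-1].re = r0; z[n8-k-1].im = i0;
@         z[n8+k].re   = r1; z[n8+k].im   = i1;
@     }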
        vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =i1,r1 d1 =i0,r0
        vld2.32         {d20-d21},[r6,:128]!    @ d20=i2,r2 d21=i3,r3
        vld1.32         {d18},    [r2,:64], r12 @ d18=s1,s0
1:
        subs            lr,  lr,  #2
        vmul.f32        d7,  d0,  d18
        vld1.32         {d19},    [r5,:64]!     @ d19=s2,s3
        vmul.f32        d4,  d1,  d18
        vld1.32         {d16},    [r1,:64], r12 @ d16=c1,c0
        vmul.f32        d5,  d21, d19
        vld1.32         {d17},    [r4,:64]!     @ d17=c2,c3
        vmul.f32        d6,  d20, d19
        vmul.f32        d22, d1,  d16
        vmul.f32        d23, d21, d17
        vmul.f32        d24, d0,  d16
        vmul.f32        d25, d20, d17
        vadd.f32        d7,  d7,  d22
        vadd.f32        d6,  d6,  d23
        vsub.f32        d4,  d4,  d24
        vsub.f32        d5,  d5,  d25
        beq             1f
        vld2.32         {d0-d1},  [r3,:128], r7
        vld2.32         {d20-d21},[r6,:128]!
        vld1.32         {d18},    [r2,:64], r12
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128], r7
        vst2.32         {d5,d7},  [r8,:128]!
        b               1b
1:
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128]
        vst2.32         {d5,d7},  [r8,:128]

        pop             {r4-r8,pc}
.endfunc
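
@ ff_imdct_calc_neon(MDCTContext *s, FFTSample *output, const FFTSample *input)
@
@ Full inverse MDCT on top of the half transform: the half iMDCT is
@ written to output + n4, then mirrored outwards.  As a sketch of the
@ scalar equivalent (n2 = n/2, n4 = n/4):
@     ff_imdct_half(s, output + n4, input);
@     for (k = 0; k < n4; k++) {
@         output[k]         = -output[n2 - k - 1];
@         output[n - k - 1] =  output[n2 + k];
@     }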
function ff_imdct_calc_neon, export=1
        push            {r4-r6,lr}

        ldr             r3,  [r0, #4]
        mov             r4,  #1
        mov             r5,  r1
        lsl             r4,  r4,  r3
        add             r1,  r1,  r4

        bl              ff_imdct_half_neon

        add             r0,  r5,  r4,  lsl #2
        add             r1,  r5,  r4,  lsl #1
        sub             r0,  r0,  #8
        sub             r2,  r1,  #16
        mov             r3,  #-16
        mov             r6,  #-8
        vmov.i32        d30, #1<<31
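
@ d30 = 0x80000000 in each lane: XORing an IEEE-754 single with 1<<31
@ flips its sign bit, so the veor instructions below negate floats
@ without any NEON arithmetic operation.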
1:
        vld1.32         {d0-d1}, [r2,:128], r3
        pld             [r0, #-16]
        vrev64.32       q0,  q0
        vld1.32         {d2-d3}, [r1,:128]!
        veor            d4,  d1,  d30
        pld             [r2, #-16]
        vrev64.32       q1,  q1
        veor            d5,  d0,  d30
        vst1.32         {d2},    [r0,:64], r6
        vst1.32         {d3},    [r0,:64], r6
        vst1.32         {d4-d5}, [r5,:128]!
        subs            r4,  r4,  #16
        bgt             1b

        pop             {r4-r6,pc}
.endfunc
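
@ ff_mdct_calc_neon(MDCTContext *s, FFTSample *out, const FFTSample *input)
@
@ Forward MDCT via the same FFT factorisation, run in reverse: two
@ pre-rotation passes fold the input window into n/4 complex values
@ (n/8 revtab entries each; the u/d suffixes in the comments mark
@ quarters of the input read upwards or downwards), then the in-place
@ FFT and a post-rotation much like the one in ff_imdct_half_neon
@ produce the coefficients.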
function ff_mdct_calc_neon, export=1
        push            {r4-r10,lr}

        mov             r12, #1
        ldr             lr,  [r0, #4]           @ nbits
        ldr             r4,  [r0, #8]           @ tcos
        ldr             r5,  [r0, #12]          @ tsin
        ldr             r3,  [r0, #24]          @ revtab
        lsl             lr,  r12, lr            @ n = 1 << nbits
        add             r7,  r2,  lr            @ in4u
        sub             r9,  r7,  #16           @ in4d
        add             r2,  r7,  lr,  lsl #1   @ in3u
        add             r8,  r9,  lr,  lsl #1   @ in3d
        mov             r12, #-16

        vld2.32         {d16,d18},[r9,:128],r12 @ x,x in4d1,in4d0
        vld2.32         {d17,d19},[r8,:128],r12 @ x,x in3d1,in3d0
        vld2.32         {d20,d21},[r7,:128]!    @ in4u0,in4u1 x,x
        vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
        vld2.32         {d0, d1}, [r2,:128]!    @ in3u0,in3u1 x,x
        vsub.f32        d20, d18, d20           @ in4d-in4u      I
        vld1.32         {d2},     [r4,:64]!     @ c0,c1
        vadd.f32        d0,  d0,  d19           @ in3u+in3d     -R
        vld1.32         {d3},     [r5,:64]!     @ s0,s1
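
@ First pre-rotation pass: folds the in3/in4 quarters into complex values
@ I = in4d - in4u and -R = in3u + in3d, rotates them by the (tcos, tsin)
@ twiddles, and scatters the results through revtab (two 16-bit indices
@ unpacked per 32-bit load, each scaled by sizeof(FFTComplex) = 8).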
1:
        vmul.f32        d7,  d20, d3            @ I*s
        vmul.f32        d6,  d0,  d2            @ -R*c
        ldr             r6,  [r3], #4
        vmul.f32        d4,  d0,  d3            @ -R*s
        vmul.f32        d5,  d20, d2            @ I*c
        subs            lr,  lr,  #16
        vsub.f32        d6,  d6,  d7            @ -R*c-I*s
        vadd.f32        d7,  d4,  d5            @ -R*s+I*c
        uxth            r10, r6,  ror #16
        uxth            r6,  r6
        add             r10, r1,  r10, lsl #3
        add             r6,  r1,  r6,  lsl #3
        beq             1f
        vld2.32         {d16,d18},[r9,:128],r12 @ x,x in4d1,in4d0
        vld2.32         {d17,d19},[r8,:128],r12 @ x,x in3d1,in3d0
        vneg.f32        d7,  d7                 @ R*s-I*c
        vld2.32         {d20,d21},[r7,:128]!    @ in4u0,in4u1 x,x
        vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
        vld2.32         {d0, d1}, [r2,:128]!    @ in3u0,in3u1 x,x
        vsub.f32        d20, d18, d20           @ in4d-in4u      I
        vld1.32         {d2},     [r4,:64]!     @ c0,c1
        vadd.f32        d0,  d0,  d19           @ in3u+in3d     -R
        vld1.32         {d3},     [r5,:64]!     @ s0,s1
        vst2.32         {d6[0],d7[0]}, [r6,:64]
        vst2.32         {d6[1],d7[1]}, [r10,:64]
        b               1b
1:
        vneg.f32        d7,  d7                 @ R*s-I*c
        vst2.32         {d6[0],d7[0]}, [r6,:64]
        vst2.32         {d6[1],d7[1]}, [r10,:64]

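@ Second pre-rotation pass, same pipeline over the remaining quarters:
@ R = in0u - in2d and -I = in2u + in1d.  r3, r4 and r5 are not reset,
@ so this pass consumes the revtab and twiddle entries where the first
@ pass stopped.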
        mov             r12, #1
        ldr             lr,  [r0, #4]           @ nbits
        lsl             lr,  r12, lr            @ n = 1 << nbits
        sub             r8,  r2,  #16           @ in1d
        add             r2,  r9,  #16           @ in0u
        sub             r9,  r7,  #16           @ in2d
        mov             r12, #-16

        vld2.32         {d16,d18},[r9,:128],r12 @ x,x in2d1,in2d0
        vld2.32         {d17,d19},[r8,:128],r12 @ x,x in1d1,in1d0
        vld2.32         {d20,d21},[r7,:128]!    @ in2u0,in2u1 x,x
        vrev64.32       q9,  q9                 @ in2d0,in2d1 in1d0,in1d1
        vld2.32         {d0, d1}, [r2,:128]!    @ in0u0,in0u1 x,x
        vsub.f32        d0,  d0,  d18           @ in0u-in2d      R
        vld1.32         {d2},     [r4,:64]!     @ c0,c1
        vadd.f32        d20, d20, d19           @ in2u+in1d     -I
        vld1.32         {d3},     [r5,:64]!     @ s0,s1
1:
        vmul.f32        d6,  d0,  d2            @ R*c
        vmul.f32        d7,  d20, d3            @ -I*s
        ldr             r6,  [r3], #4
        vmul.f32        d4,  d0,  d3            @ R*s
        vmul.f32        d5,  d20, d2            @ I*c
        subs            lr,  lr,  #16
        vsub.f32        d6,  d7,  d6            @ I*s-R*c
        vadd.f32        d7,  d4,  d5            @ R*s-I*c
        uxth            r10, r6,  ror #16
        uxth            r6,  r6
        add             r10, r1,  r10, lsl #3
        add             r6,  r1,  r6,  lsl #3
        beq             1f
        vld2.32         {d16,d18},[r9,:128],r12 @ x,x in2d1,in2d0
        vld2.32         {d17,d19},[r8,:128],r12 @ x,x in1d1,in1d0
        vld2.32         {d20,d21},[r7,:128]!    @ in2u0,in2u1 x,x
        vrev64.32       q9,  q9                 @ in2d0,in2d1 in1d0,in1d1
        vld2.32         {d0, d1}, [r2,:128]!    @ in0u0,in0u1 x,x
        vsub.f32        d0,  d0,  d18           @ in0u-in2d      R
        vld1.32         {d2},     [r4,:64]!     @ c0,c1
        vadd.f32        d20, d20, d19           @ in2u+in1d     -I
        vld1.32         {d3},     [r5,:64]!     @ s0,s1
        vst2.32         {d6[0],d7[0]}, [r6,:64]
        vst2.32         {d6[1],d7[1]}, [r10,:64]
        b               1b
1:
        vst2.32         {d6[0],d7[0]}, [r6,:64]
        vst2.32         {d6[1],d7[1]}, [r10,:64]

        mov             r4,  r0
        mov             r6,  r1
        add             r0,  r0,  #16
        bl              ff_fft_calc_neon

        mov             r12, #1
        ldr             lr,  [r4, #4]           @ nbits
        ldr             r5,  [r4, #12]          @ tsin
        ldr             r4,  [r4, #8]           @ tcos
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #3            @ n8 = n >> 3

        add             r4,  r4,  lr,  lsl #2
        add             r5,  r5,  lr,  lsl #2
        add             r6,  r6,  lr,  lsl #3
        sub             r1,  r4,  #8
        sub             r2,  r5,  #8
        sub             r3,  r6,  #16

        mov             r7,  #-16
        mov             r12, #-8
        mov             r8,  r6
        mov             r0,  r3

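@ Post-rotation, mirroring the epilogue of ff_imdct_half_neon: the same
@ pipelined from-both-ends loop, with each product spelled out in the
@ comments and an extra vneg.f32 on q2 to account for the forward
@ transform's sign convention before the results are stored back.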
        vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =r1,i1 d1 =r0,i0
        vld2.32         {d20-d21},[r6,:128]!    @ d20=r2,i2 d21=r3,i3
        vld1.32         {d18},    [r2,:64], r12 @ d18=s1,s0
1:
        subs            lr,  lr,  #2
        vmul.f32        d7,  d0,  d18           @ r1*s1,r0*s0
        vld1.32         {d19},    [r5,:64]!     @ s2,s3
        vmul.f32        d4,  d1,  d18           @ i1*s1,i0*s0
        vld1.32         {d16},    [r1,:64], r12 @ c1,c0
        vmul.f32        d5,  d21, d19           @ i2*s2,i3*s3
        vld1.32         {d17},    [r4,:64]!     @ c2,c3
        vmul.f32        d6,  d20, d19           @ r2*s2,r3*s3
        vmul.f32        d24, d0,  d16           @ r1*c1,r0*c0
        vmul.f32        d25, d20, d17           @ r2*c2,r3*c3
        vmul.f32        d22, d21, d17           @ i2*c2,i3*c3
        vmul.f32        d23, d1,  d16           @ i1*c1,i0*c0
        vadd.f32        d4,  d4,  d24           @ i1*s1+r1*c1,i0*s0+r0*c0
        vadd.f32        d5,  d5,  d25           @ i2*s2+r2*c2,i3*s3+r3*c3
        vsub.f32        d6,  d22, d6            @ i2*c2-r2*s2,i3*c3-r3*s3
        vsub.f32        d7,  d23, d7            @ i1*c1-r1*s1,i0*c0-r0*s0
        vneg.f32        q2,  q2
        beq             1f
        vld2.32         {d0-d1},  [r3,:128], r7
        vld2.32         {d20-d21},[r6,:128]!
        vld1.32         {d18},    [r2,:64], r12
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128], r7
        vst2.32         {d5,d7},  [r8,:128]!
        b               1b
1:
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128]
        vst2.32         {d5,d7},  [r8,:128]

        pop             {r4-r10,pc}
.endfunc