/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "asm.S"

        .fpu neon

/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
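/* The 2-D case implements the usual H.264 bilinear chroma interpolation:
 *     dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride]
 *               + D*src[i+stride+1] + 32) >> 6
 * with A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y.
 * The scalar code below computes D in r7, C in r6, B in ip and A in r4,
 * then broadcasts them to d3/d2/d1/d0.  When x or y is zero the filter
 * degenerates to a 1-D two-tap filter, handled on separate paths. */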
.macro  h264_chroma_mc8 avg=0
        push            {r4-r7, lr}
        ldrd            r4,  [sp, #20]          @ r4 = x, r5 = y
.if \avg
        mov             lr,  r0
.endif
        pld             [r1]
        pld             [r1, r2]

        muls            r7,  r4,  r5            @ r7 = x*y              (D)
        rsb             r6,  r7,  r5,  lsl #3   @ r6 = (8-x)*y          (C)
        rsb             ip,  r7,  r4,  lsl #3   @ ip = x*(8-y)          (B)
        sub             r4,  r7,  r4,  lsl #3
        sub             r4,  r4,  r5,  lsl #3
        add             r4,  r4,  #64           @ r4 = (8-x)*(8-y)      (A)

        beq             2f                      @ x*y == 0: 1-D filter

        add             r5,  r1,  r2

        vdup.8          d0,  r4
        lsl             r4,  r2,  #1
        vdup.8          d1,  ip
        vld1.64         {d4, d5}, [r1], r4
        vdup.8          d2,  r6
        vld1.64         {d6, d7}, [r5], r4
        vdup.8          d3,  r7

        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1

1:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
        vld1.64         {d4, d5}, [r1], r4
        vmlal.u8        q8,  d6,  d2
        vext.8          d5,  d4,  d5,  #1
        vmlal.u8        q8,  d7,  d3
        vmull.u8        q9,  d6,  d0
        subs            r3,  r3,  #2
        vmlal.u8        q9,  d7,  d1
        vmlal.u8        q9,  d4,  d2
        vmlal.u8        q9,  d5,  d3
        vrshrn.u16      d16, q8,  #6
        vld1.64         {d6, d7}, [r5], r4
        pld             [r1]
        vrshrn.u16      d17, q9,  #6
.if \avg
        vld1.64         {d20}, [lr,:64], r2
        vld1.64         {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
.endif
        vext.8          d7,  d6,  d7,  #1
        vst1.64         {d16}, [r0,:64], r2
        vst1.64         {d17}, [r0,:64], r2
        bgt             1b

        pop             {r4-r7, pc}

2:      tst             r6,  r6                 @ with x*y == 0, r6 = 8*y
        add             ip,  ip,  r6            @ ip = 8*(x+y), r4 = 64 - 8*(x+y)
        vdup.8          d0,  r4
        vdup.8          d1,  ip

        beq             4f                      @ y == 0: horizontal only

        @ vertical-only filter (x == 0), two rows per iteration
        add             r5,  r1,  r2
        lsl             r4,  r2,  #1
        vld1.64         {d4}, [r1], r4
        vld1.64         {d6}, [r5], r4

3:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d6,  d1
        vld1.64         {d4}, [r1], r4
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d4,  d1
        vld1.64         {d6}, [r5], r4
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
.if \avg
        vld1.64         {d20}, [lr,:64], r2
        vld1.64         {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
.endif
        subs            r3,  r3,  #2
        pld             [r1]
        vst1.64         {d16}, [r0,:64], r2
        vst1.64         {d17}, [r0,:64], r2
        bgt             3b

        pop             {r4-r7, pc}

        @ horizontal-only filter (y == 0)
4:      vld1.64         {d4, d5}, [r1], r2
        vld1.64         {d6, d7}, [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1

5:      pld             [r1]
        subs            r3,  r3,  #2
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
        vld1.64         {d4, d5}, [r1], r2
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d7,  d1
        pld             [r1]
        vext.8          d5,  d4,  d5,  #1
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
.if \avg
        vld1.64         {d20}, [lr,:64], r2
        vld1.64         {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
.endif
        vld1.64         {d6, d7}, [r1], r2
        vext.8          d7,  d6,  d7,  #1
        vst1.64         {d16}, [r0,:64], r2
        vst1.64         {d17}, [r0,:64], r2
        bgt             5b

        pop             {r4-r7, pc}
.endm

/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
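/* The 4-pixel-wide variant packs two 4-pixel vectors into each d
 * register with vtrn.32: in the 2-D case d4 carries src[0..3] in its
 * low half and src[1..4] in its high half, with the weights arranged
 * to match in d0/d2, so one vmull.u8 forms both products of a tap
 * pair and a vadd.i16 of the two halves finishes the filter. */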
.macro  h264_chroma_mc4 avg=0
        push            {r4-r7, lr}
        ldrd            r4,  [sp, #20]          @ r4 = x, r5 = y
.if \avg
        mov             lr,  r0
.endif
        pld             [r1]
        pld             [r1, r2]

        muls            r7,  r4,  r5            @ same weights as in mc8
        rsb             r6,  r7,  r5,  lsl #3
        rsb             ip,  r7,  r4,  lsl #3
        sub             r4,  r7,  r4,  lsl #3
        sub             r4,  r4,  r5,  lsl #3
        add             r4,  r4,  #64

        beq             2f

        add             r5,  r1,  r2

        vdup.8          d0,  r4
        lsl             r4,  r2,  #1
        vdup.8          d1,  ip
        vld1.64         {d4}, [r1], r4
        vdup.8          d2,  r6
        vld1.64         {d6}, [r5], r4
        vdup.8          d3,  r7

        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d4,  d5                 @ d4 = {src[0..3], src[1..4]}
        vtrn.32         d6,  d7

        vtrn.32         d0,  d1                 @ d0 = {A,A,A,A,B,B,B,B}
        vtrn.32         d2,  d3                 @ d2 = {C,C,C,C,D,D,D,D}

1:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d6,  d2
        vld1.64         {d4}, [r1], r4
        vext.8          d5,  d4,  d5,  #1
        vtrn.32         d4,  d5
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d4,  d2
        vld1.64         {d6}, [r5], r4
        vadd.i16        d16, d16, d17           @ fold the two tap pairs
        vadd.i16        d17, d18, d19
        vrshrn.u16      d16, q8,  #6
        subs            r3,  r3,  #2
        pld             [r1]
.if \avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
.endif
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d6,  d7
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             1b

        pop             {r4-r7, pc}

2:      tst             r6,  r6
        add             ip,  ip,  r6
        vdup.8          d0,  r4
        vdup.8          d1,  ip
        vtrn.32         d0,  d1

        beq             4f

        @ vertical-only filter (x == 0)
        vext.32         d1,  d0,  d1,  #1
        add             r5,  r1,  r2
        lsl             r4,  r2,  #1
        vld1.32         {d4[0]}, [r1], r4
        vld1.32         {d4[1]}, [r5], r4

3:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vld1.32         {d4[0]}, [r1], r4
        vmull.u8        q9,  d4,  d1
        vld1.32         {d4[1]}, [r5], r4
        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
        vrshrn.u16      d16, q8,  #6
.if \avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
.endif
        subs            r3,  r3,  #2
        pld             [r1]
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             3b

        pop             {r4-r7, pc}

        @ horizontal-only filter (y == 0)
4:      vld1.64         {d4}, [r1], r2
        vld1.64         {d6}, [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d4,  d5
        vtrn.32         d6,  d7

5:      vmull.u8        q8,  d4,  d0
        vmull.u8        q9,  d6,  d0
        subs            r3,  r3,  #2
        vld1.64         {d4}, [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vtrn.32         d4,  d5
        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
        pld             [r1]
        vrshrn.u16      d16, q8,  #6
.if \avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
.endif
        vld1.64         {d6}, [r1], r2
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d6,  d7
        pld             [r1]
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             5b

        pop             {r4-r7, pc}
.endm

        .text
        .align

function ff_put_h264_chroma_mc8_neon, export=1
        h264_chroma_mc8
.endfunc

function ff_avg_h264_chroma_mc8_neon, export=1
        h264_chroma_mc8 avg=1
.endfunc

function ff_put_h264_chroma_mc4_neon, export=1
        h264_chroma_mc4
.endfunc

function ff_avg_h264_chroma_mc4_neon, export=1
        h264_chroma_mc4 avg=1
.endfunc

/* H.264 loop filter */

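/* Common entry code for the loop filter functions below.  Their C
 * prototype is (uint8_t *pix, int stride, int alpha, int beta,
 * int8_t *tc0), so r2/r3 hold alpha/beta and [sp] the tc0 pointer.
 * The four packed tc0 bytes are parked in d24[0]; the function
 * returns early if alpha or beta is zero, or if all four tc0 values
 * are negative (nothing to filter). */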
.macro  h264_loop_filter_start
        ldr             ip,  [sp]
        tst             r2,  r2
        ldr             ip,  [ip]               @ load the four tc0 bytes
        tstne           r3,  r3
        vmov.32         d24[0], ip
        and             ip,  ip,  ip,  lsl #16
        bxeq            lr                      @ alpha == 0 || beta == 0
        ands            ip,  ip,  ip,  lsl #8   @ AND the tc0 sign bits together
        bxlt            lr                      @ all tc0 < 0: skip
.endm

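/* d8-d15 are callee-saved under the ARM AAPCS and the luma filter uses
 * q4-q7, so spill them to a 16-byte-aligned block carved out below sp;
 * ip carries the total adjustment for align_pop_regs. */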
.macro  align_push_regs
        and             ip,  sp,  #15
        add             ip,  ip,  #32
        sub             sp,  sp,  ip
        vst1.64         {d12-d15}, [sp,:128]
        sub             sp,  sp,  #32
        vst1.64         {d8-d11},  [sp,:128]
.endm

.macro  align_pop_regs
        vld1.64         {d8-d11},  [sp,:128]!
        vld1.64         {d12-d15}, [sp,:128], ip
.endm

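/* Normal (bS < 4) luma filtering as in the H.264 spec:
 *     delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc)
 *     p0'   = clip(p0 + delta), q0' = clip(q0 - delta)
 * with tc = tc0 + (abs(p2-p0) < beta) + (abs(q2-q0) < beta), and p1/q1
 * conditionally replaced by clipped averages when the matching p2/q2
 * test passes.  Expects p2/p1/p0 in q10/q9/q8 and q0/q1/q2 in q0/q1/q2;
 * results land in q4 (p1), q8 (p0), q0 (q0) and q5 (q1). */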
.macro  h264_loop_filter_luma
        vdup.8          q11, r2                 @ alpha
        vmovl.u8        q12, d24
        vabd.u8         q6,  q8,  q0            @ abs(p0 - q0)
        vmovl.u16       q12, d24
        vabd.u8         q14, q9,  q8            @ abs(p1 - p0)
        vsli.16         q12, q12, #8
        vabd.u8         q15, q1,  q0            @ abs(q1 - q0)
        vsli.32         q12, q12, #16           @ q12 = tc0, replicated per pixel
        vclt.u8         q6,  q6,  q11           @ < alpha
        vdup.8          q11, r3                 @ beta
        vclt.s8         q7,  q12, #0
        vclt.u8         q14, q14, q11           @ < beta
        vclt.u8         q15, q15, q11           @ < beta
        vbic            q6,  q6,  q7
        vabd.u8         q4,  q10, q8            @ abs(p2 - p0)
        vand            q6,  q6,  q14
        vabd.u8         q5,  q2,  q0            @ abs(q2 - q0)
        vclt.u8         q4,  q4,  q11           @ < beta
        vand            q6,  q6,  q15
        vclt.u8         q5,  q5,  q11           @ < beta
        vand            q4,  q4,  q6
        vand            q5,  q5,  q6
        vand            q12, q12, q6
        vrhadd.u8       q14, q8,  q0
        vsub.i8         q6,  q12, q4            @ tc = tc0 + masks (q4/q5 are 0/-1)
        vqadd.u8        q7,  q9,  q12
        vhadd.u8        q10, q10, q14
        vsub.i8         q6,  q6,  q5
        vhadd.u8        q14, q2,  q14
        vmin.u8         q7,  q7,  q10
        vqsub.u8        q11, q9,  q12
        vqadd.u8        q2,  q1,  q12
        vmax.u8         q7,  q7,  q11           @ new p1, clipped to p1 +/- tc0
        vqsub.u8        q11, q1,  q12
        vmin.u8         q14, q2,  q14
        vmovl.u8        q2,  d0
        vmax.u8         q14, q14, q11           @ new q1, clipped to q1 +/- tc0
        vmovl.u8        q10, d1
        vsubw.u8        q2,  q2,  d16
        vsubw.u8        q10, q10, d17
        vshl.i16        q2,  q2,  #2
        vshl.i16        q10, q10, #2
        vaddw.u8        q2,  q2,  d18
        vaddw.u8        q10, q10, d19
        vsubw.u8        q2,  q2,  d2
        vsubw.u8        q10, q10, d3
        vrshrn.i16      d4,  q2,  #3            @ delta before clipping
        vrshrn.i16      d5,  q10, #3
        vbsl            q4,  q7,  q9
        vbsl            q5,  q14, q1
        vneg.s8         q7,  q6
        vmovl.u8        q14, d16
        vmin.s8         q2,  q2,  q6            @ clip delta to +/- tc
        vmovl.u8        q6,  d17
        vmax.s8         q2,  q2,  q7
        vmovl.u8        q11, d0
        vmovl.u8        q12, d1
        vaddw.s8        q14, q14, d4            @ p0 + delta
        vaddw.s8        q6,  q6,  d5
        vsubw.s8        q11, q11, d4            @ q0 - delta
        vsubw.s8        q12, q12, d5
        vqmovun.s16     d16, q14
        vqmovun.s16     d17, q6
        vqmovun.s16     d0,  q11
        vqmovun.s16     d1,  q12
.endm

function ff_h264_v_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        vld1.64         {d0, d1},  [r0,:128], r1        @ q0
        vld1.64         {d2, d3},  [r0,:128], r1        @ q1
        vld1.64         {d4, d5},  [r0,:128], r1        @ q2
        sub             r0,  r0,  r1,  lsl #2
        sub             r0,  r0,  r1,  lsl #1           @ back up to p2
        vld1.64         {d20,d21}, [r0,:128], r1        @ p2
        vld1.64         {d18,d19}, [r0,:128], r1        @ p1
        vld1.64         {d16,d17}, [r0,:128], r1        @ p0

        align_push_regs

        h264_loop_filter_luma

        sub             r0,  r0,  r1,  lsl #1
        vst1.64         {d8, d9},  [r0,:128], r1        @ p1
        vst1.64         {d16,d17}, [r0,:128], r1        @ p0
        vst1.64         {d0, d1},  [r0,:128], r1        @ q0
        vst1.64         {d10,d11}, [r0,:128]            @ q1

        align_pop_regs
        bx              lr
.endfunc

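/* The horizontal luma filter loads 16 rows of 8 pixels around the
 * vertical edge, transposes them with vtrn so each pixel line p3..q3
 * ends up in one q register, runs the same filter macro, then
 * transposes back and stores. */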
function ff_h264_h_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #4
        vld1.64         {d6},  [r0], r1
        vld1.64         {d20}, [r0], r1
        vld1.64         {d18}, [r0], r1
        vld1.64         {d16}, [r0], r1
        vld1.64         {d0},  [r0], r1
        vld1.64         {d2},  [r0], r1
        vld1.64         {d4},  [r0], r1
        vld1.64         {d26}, [r0], r1
        vld1.64         {d7},  [r0], r1
        vld1.64         {d21}, [r0], r1
        vld1.64         {d19}, [r0], r1
        vld1.64         {d17}, [r0], r1
        vld1.64         {d1},  [r0], r1
        vld1.64         {d3},  [r0], r1
        vld1.64         {d5},  [r0], r1
        vld1.64         {d27}, [r0], r1

        vtrn.32         q3,  q0
        vtrn.32         q10, q1
        vtrn.32         q9,  q2
        vtrn.32         q8,  q13
        vtrn.16         q3,  q9
        vtrn.16         q10, q8
        vtrn.16         q0,  q2
        vtrn.16         q1,  q13
        vtrn.8          q3,  q10
        vtrn.8          q9,  q8
        vtrn.8          q0,  q1
        vtrn.8          q2,  q13

        align_push_regs
        sub             sp,  sp,  #16
        vst1.64         {d4, d5},  [sp,:128]    @ save p2 (q10) and q2 (q2);
        sub             sp,  sp,  #16           @ the filter clobbers them
        vst1.64         {d20,d21}, [sp,:128]

        h264_loop_filter_luma

        vld1.64         {d20,d21}, [sp,:128]!
        vld1.64         {d4, d5},  [sp,:128]!

        vtrn.32         q3,  q0
        vtrn.32         q10, q5
        vtrn.32         q4,  q2
        vtrn.32         q8,  q13
        vtrn.16         q3,  q4
        vtrn.16         q10, q8
        vtrn.16         q0,  q2
        vtrn.16         q5,  q13
        vtrn.8          q3,  q10
        vtrn.8          q4,  q8
        vtrn.8          q0,  q5
        vtrn.8          q2,  q13

        sub             r0,  r0,  r1,  lsl #4
        vst1.64         {d6},  [r0], r1
        vst1.64         {d20}, [r0], r1
        vst1.64         {d8},  [r0], r1
        vst1.64         {d16}, [r0], r1
        vst1.64         {d0},  [r0], r1
        vst1.64         {d10}, [r0], r1
        vst1.64         {d4},  [r0], r1
        vst1.64         {d26}, [r0], r1
        vst1.64         {d7},  [r0], r1
        vst1.64         {d21}, [r0], r1
        vst1.64         {d9},  [r0], r1
        vst1.64         {d17}, [r0], r1
        vst1.64         {d1},  [r0], r1
        vst1.64         {d11}, [r0], r1
        vst1.64         {d5},  [r0], r1
        vst1.64         {d27}, [r0], r1

        align_pop_regs
        bx              lr
.endfunc

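/* Chroma filtering uses the same delta term,
 *     delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc)
 * but only p0/q0 are updated; tc comes from the packed tc0 bytes in
 * d24.  Expects p1/p0 in d18/d16 and q0/q1 in d0/d2. */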
.macro  h264_loop_filter_chroma
        vdup.8          d22, r2                 @ alpha
        vmovl.u8        q12, d24
        vabd.u8         d26, d16, d0            @ abs(p0 - q0)
        vmovl.u8        q2,  d0
        vabd.u8         d28, d18, d16           @ abs(p1 - p0)
        vsubw.u8        q2,  q2,  d16
        vsli.16         d24, d24, #8
        vshl.i16        q2,  q2,  #2
        vabd.u8         d30, d2,  d0            @ abs(q1 - q0)
        vaddw.u8        q2,  q2,  d18
        vclt.u8         d26, d26, d22           @ < alpha
        vsubw.u8        q2,  q2,  d2
        vdup.8          d22, r3                 @ beta
        vclt.s8         d25, d24, #0
        vrshrn.i16      d4,  q2,  #3            @ delta before clipping
        vclt.u8         d28, d28, d22           @ < beta
        vbic            d26, d26, d25
        vclt.u8         d30, d30, d22           @ < beta
        vand            d26, d26, d28
        vneg.s8         d25, d24
        vand            d26, d26, d30
        vmin.s8         d4,  d4,  d24           @ clip delta to +/- tc
        vmovl.u8        q14, d16
        vand            d4,  d4,  d26           @ apply the filter mask
        vmax.s8         d4,  d4,  d25
        vmovl.u8        q11, d0
        vaddw.s8        q14, q14, d4            @ p0 + delta
        vsubw.s8        q11, q11, d4            @ q0 - delta
        vqmovun.s16     d16, q14
        vqmovun.s16     d0,  q11
.endm

function ff_h264_v_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  r1,  lsl #1
        vld1.64         {d18}, [r0,:64], r1             @ p1
        vld1.64         {d16}, [r0,:64], r1             @ p0
        vld1.64         {d0},  [r0,:64], r1             @ q0
        vld1.64         {d2},  [r0,:64]                 @ q1

        h264_loop_filter_chroma

        sub             r0,  r0,  r1,  lsl #1
        vst1.64         {d16}, [r0,:64], r1             @ p0
        vst1.64         {d0},  [r0,:64], r1             @ q0

        bx              lr
.endfunc

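/* Horizontal chroma filter: eight rows of four pixels straddling the
 * vertical edge are loaded as 32-bit lanes and transposed so that
 * d18/d16/d0/d2 hold the p1/p0/q0/q1 columns; after filtering they
 * are transposed back and written out the same way. */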
function ff_h264_h_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #2
        vld1.32         {d18[0]}, [r0], r1
        vld1.32         {d16[0]}, [r0], r1
        vld1.32         {d0[0]},  [r0], r1
        vld1.32         {d2[0]},  [r0], r1
        vld1.32         {d18[1]}, [r0], r1
        vld1.32         {d16[1]}, [r0], r1
        vld1.32         {d0[1]},  [r0], r1
        vld1.32         {d2[1]},  [r0], r1

        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        h264_loop_filter_chroma

        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        sub             r0,  r0,  r1,  lsl #3
        vst1.32         {d18[0]}, [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d2[0]},  [r0], r1
        vst1.32         {d18[1]}, [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d2[1]},  [r0], r1

        bx              lr
.endfunc