changeset 8197:06acc3ab4bdc libavcodec
ARM: move dct_unquantize_h263_*_armv5te asm to separate file
author | mru |
---|---|
date | Sun, 23 Nov 2008 19:11:44 +0000 |
parents | 2717c68de07f |
children | de344498875e |
files | Makefile armv4l/mpegvideo_armv5te.c armv4l/mpegvideo_armv5te_s.S |
diffstat | 3 files changed, 122 insertions(+), 110 deletions(-) |
```diff
--- a/Makefile	Sat Nov 22 16:36:50 2008 +0000
+++ b/Makefile	Sun Nov 23 19:11:44 2008 +0000
@@ -432,6 +432,7 @@
                                           armv4l/simple_idct_arm.o      \
 
 OBJS-$(HAVE_ARMV5TE)                   += armv4l/mpegvideo_armv5te.o    \
+                                          armv4l/mpegvideo_armv5te_s.o  \
                                           armv4l/simple_idct_armv5te.o  \
 
 OBJS-$(HAVE_ARMV6)                     += armv4l/simple_idct_armv6.o    \
```
```diff
--- a/armv4l/mpegvideo_armv5te.c	Sat Nov 22 16:36:50 2008 +0000
+++ b/armv4l/mpegvideo_armv5te.c	Sun Nov 23 19:11:44 2008 +0000
@@ -23,6 +23,8 @@
 #include "libavcodec/dsputil.h"
 #include "libavcodec/mpegvideo.h"
 
+extern void ff_dct_unquantize_h263_armv5te(DCTELEM *block, int qmul, int qadd,
+                                           int count);
 
 #ifdef ENABLE_ARM_TESTS
 /**
@@ -47,108 +49,6 @@
 }
 #endif
 
-/* GCC 3.1 or higher is required to support symbolic names in assembly code */
-#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
-
-/**
- * Special optimized version of dct_unquantize_h263_helper_c, it requires the block
- * to be at least 8 bytes aligned, and may process more elements than requested.
- * But it is guaranteed to never process more than 64 elements provided that
- * xxcount argument is <= 64, so it is safe. This macro is optimized for a common
- * distribution of values for nCoeffs (they are mostly multiple of 8 plus one or
- * two extra elements). So this macro processes data as 8 elements per loop iteration
- * and contains optional 2 elements processing in the end.
- *
- * Inner loop should take 6 cycles per element on arm926ej-s (Nokia 770)
- */
-#define dct_unquantize_h263_special_helper_armv5te(xxblock, xxqmul, xxqadd, xxcount) \
-({ DCTELEM *xblock = xxblock; \
-   int xqmul = xxqmul, xqadd = xxqadd, xcount = xxcount, xtmp; \
-   int xdata1, xdata2; \
-__asm__ volatile( \
-        "subs %[count], %[count], #2              \n\t" \
-        "ble 2f                                   \n\t" \
-        "ldrd r4, [%[block], #0]                  \n\t" \
-        "1:                                       \n\t" \
-        "ldrd r6, [%[block], #8]                  \n\t" \
-\
-        "rsbs %[data1], %[zero], r4, asr #16      \n\t" \
-        "addgt %[data1], %[qadd], #0              \n\t" \
-        "rsblt %[data1], %[qadd], #0              \n\t" \
-        "smlatbne %[data1], r4, %[qmul], %[data1] \n\t" \
-\
-        "rsbs %[data2], %[zero], r5, asr #16      \n\t" \
-        "addgt %[data2], %[qadd], #0              \n\t" \
-        "rsblt %[data2], %[qadd], #0              \n\t" \
-        "smlatbne %[data2], r5, %[qmul], %[data2] \n\t" \
-\
-        "rsbs %[tmp], %[zero], r4, asl #16        \n\t" \
-        "addgt %[tmp], %[qadd], #0                \n\t" \
-        "rsblt %[tmp], %[qadd], #0                \n\t" \
-        "smlabbne r4, r4, %[qmul], %[tmp]         \n\t" \
-\
-        "rsbs %[tmp], %[zero], r5, asl #16        \n\t" \
-        "addgt %[tmp], %[qadd], #0                \n\t" \
-        "rsblt %[tmp], %[qadd], #0                \n\t" \
-        "smlabbne r5, r5, %[qmul], %[tmp]         \n\t" \
-\
-        "strh r4, [%[block]], #2                  \n\t" \
-        "strh %[data1], [%[block]], #2            \n\t" \
-        "strh r5, [%[block]], #2                  \n\t" \
-        "strh %[data2], [%[block]], #2            \n\t" \
-\
-        "rsbs %[data1], %[zero], r6, asr #16      \n\t" \
-        "addgt %[data1], %[qadd], #0              \n\t" \
-        "rsblt %[data1], %[qadd], #0              \n\t" \
-        "smlatbne %[data1], r6, %[qmul], %[data1] \n\t" \
-\
-        "rsbs %[data2], %[zero], r7, asr #16      \n\t" \
-        "addgt %[data2], %[qadd], #0              \n\t" \
-        "rsblt %[data2], %[qadd], #0              \n\t" \
-        "smlatbne %[data2], r7, %[qmul], %[data2] \n\t" \
-\
-        "rsbs %[tmp], %[zero], r6, asl #16        \n\t" \
-        "addgt %[tmp], %[qadd], #0                \n\t" \
-        "rsblt %[tmp], %[qadd], #0                \n\t" \
-        "smlabbne r6, r6, %[qmul], %[tmp]         \n\t" \
-\
-        "rsbs %[tmp], %[zero], r7, asl #16        \n\t" \
-        "addgt %[tmp], %[qadd], #0                \n\t" \
-        "rsblt %[tmp], %[qadd], #0                \n\t" \
-        "smlabbne r7, r7, %[qmul], %[tmp]         \n\t" \
-\
-        "strh r6, [%[block]], #2                  \n\t" \
-        "strh %[data1], [%[block]], #2            \n\t" \
-        "strh r7, [%[block]], #2                  \n\t" \
-        "strh %[data2], [%[block]], #2            \n\t" \
-\
-        "subs %[count], %[count], #8              \n\t" \
-        "ldrgtd r4, [%[block], #0]                \n\t" /* load data early to avoid load/use pipeline stall */ \
-        "bgt 1b                                   \n\t" \
-\
-        "adds %[count], %[count], #2              \n\t" \
-        "ble 3f                                   \n\t" \
-        "2:                                       \n\t" \
-        "ldrsh %[data1], [%[block], #0]           \n\t" \
-        "ldrsh %[data2], [%[block], #2]           \n\t" \
-        "mov %[tmp], %[qadd]                      \n\t" \
-        "cmp %[data1], #0                         \n\t" \
-        "rsblt %[tmp], %[qadd], #0                \n\t" \
-        "smlabbne %[data1], %[data1], %[qmul], %[tmp] \n\t" \
-        "mov %[tmp], %[qadd]                      \n\t" \
-        "cmp %[data2], #0                         \n\t" \
-        "rsblt %[tmp], %[qadd], #0                \n\t" \
-        "smlabbne %[data2], %[data2], %[qmul], %[tmp] \n\t" \
-        "strh %[data1], [%[block]], #2            \n\t" \
-        "strh %[data2], [%[block]], #2            \n\t" \
-        "3:                                       \n\t" \
-        : [block] "+&r" (xblock), [count] "+&r" (xcount), [tmp] "=&r" (xtmp), \
-          [data1] "=&r" (xdata1), [data2] "=&r" (xdata2) \
-        : [qmul] "r" (xqmul), [qadd] "r" (xqadd), [zero] "r" (0) \
-        : "r4", "r5", "r6", "r7", "cc", "memory" \
-); \
-})
-
 static void dct_unquantize_h263_intra_armv5te(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
 {
@@ -174,7 +74,7 @@
     else
         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 
-    dct_unquantize_h263_special_helper_armv5te(block, qmul, qadd, nCoeffs + 1);
+    ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 
     block[0] = level;
 }
@@ -191,17 +91,11 @@
 
     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 
-    dct_unquantize_h263_special_helper_armv5te(block, qmul, qadd, nCoeffs + 1);
+    ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 }
 
-#define HAVE_DCT_UNQUANTIZE_H263_ARMV5TE_OPTIMIZED
-
-#endif
-
 void MPV_common_init_armv5te(MpegEncContext *s)
 {
-#ifdef HAVE_DCT_UNQUANTIZE_H263_ARMV5TE_OPTIMIZED
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
-#endif
 }
```
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/armv4l/mpegvideo_armv5te_s.S	Sun Nov 23 19:11:44 2008 +0000
@@ -0,0 +1,117 @@
+/*
+ * Optimization of some functions from mpegvideo.c for armv5te
+ * Copyright (c) 2007 Siarhei Siamashka <ssvb@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "asm.S"
+
+/*
+ * Special optimized version of dct_unquantize_h263_helper_c, it
+ * requires the block to be at least 8 bytes aligned, and may process
+ * more elements than requested.  But it is guaranteed to never
+ * process more than 64 elements provided that count argument is <= 64,
+ * so it is safe.  This function is optimized for a common distribution
+ * of values for nCoeffs (they are mostly multiple of 8 plus one or
+ * two extra elements).  So this function processes data as 8 elements
+ * per loop iteration and contains optional 2 elements processing in
+ * the end.
+ *
+ * Inner loop should take 6 cycles per element on arm926ej-s (Nokia 770)
+ */
+function ff_dct_unquantize_h263_armv5te, export=1
+        push            {r4-r9,lr}
+        mov             ip, #0
+        subs            r3, r3, #2
+        ble             2f
+        ldrd            r4, [r0, #0]
+1:
+        ldrd            r6, [r0, #8]
+
+        rsbs            r9, ip, r4, asr #16
+        addgt           r9, r2, #0
+        rsblt           r9, r2, #0
+        smlatbne        r9, r4, r1, r9
+
+        rsbs            lr, ip, r5, asr #16
+        addgt           lr, r2, #0
+        rsblt           lr, r2, #0
+        smlatbne        lr, r5, r1, lr
+
+        rsbs            r8, ip, r4, asl #16
+        addgt           r8, r2, #0
+        rsblt           r8, r2, #0
+        smlabbne        r4, r4, r1, r8
+
+        rsbs            r8, ip, r5, asl #16
+        addgt           r8, r2, #0
+        rsblt           r8, r2, #0
+        smlabbne        r5, r5, r1, r8
+
+        strh            r4, [r0], #2
+        strh            r9, [r0], #2
+        strh            r5, [r0], #2
+        strh            lr, [r0], #2
+
+        rsbs            r9, ip, r6, asr #16
+        addgt           r9, r2, #0
+        rsblt           r9, r2, #0
+        smlatbne        r9, r6, r1, r9
+
+        rsbs            lr, ip, r7, asr #16
+        addgt           lr, r2, #0
+        rsblt           lr, r2, #0
+        smlatbne        lr, r7, r1, lr
+
+        rsbs            r8, ip, r6, asl #16
+        addgt           r8, r2, #0
+        rsblt           r8, r2, #0
+        smlabbne        r6, r6, r1, r8
+
+        rsbs            r8, ip, r7, asl #16
+        addgt           r8, r2, #0
+        rsblt           r8, r2, #0
+        smlabbne        r7, r7, r1, r8
+
+        strh            r6, [r0], #2
+        strh            r9, [r0], #2
+        strh            r7, [r0], #2
+        strh            lr, [r0], #2
+
+        subs            r3, r3, #8
+        ldrgtd          r4, [r0, #0]  /* load data early to avoid load/use pipeline stall */
+        bgt             1b
+
+        adds            r3, r3, #2
+        pople           {r4-r9,pc}
+2:
+        ldrsh           r9, [r0, #0]
+        ldrsh           lr, [r0, #2]
+        mov             r8, r2
+        cmp             r9, #0
+        rsblt           r8, r2, #0
+        smlabbne        r9, r9, r1, r8
+        mov             r8, r2
+        cmp             lr, #0
+        rsblt           r8, r2, #0
+        smlabbne        lr, lr, r1, r8
+        strh            r9, [r0], #2
+        strh            lr, [r0], #2
+        pop             {r4-r9,pc}
+        .endfunc
```
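The standalone routine keeps the macro's contract: per the AAPCS, the arguments of the C prototype arrive as r0 = block, r1 = qmul, r2 = qadd, r3 = count. A hypothetical self-test in the spirit of the existing `ENABLE_ARM_TESTS` helper is sketched below; it assumes the `dct_unquantize_h263_ref` sketch above is compiled in the same file, and the qmul/qadd derivation shown is the usual H.263 one, not taken from this patch. Only the extern prototype comes from the diff; the rest is scaffolding.

```c
#include <stdio.h>
#include <string.h>

extern void ff_dct_unquantize_h263_armv5te(DCTELEM *block, int qmul, int qadd,
                                           int count);

int main(void)
{
    /* 8-byte alignment and a full 64-element block, as the routine requires. */
    DCTELEM asm_blk[64] __attribute__((aligned(8)));
    DCTELEM ref_blk[64] __attribute__((aligned(8)));
    int qscale = 5;                    /* arbitrary test value */
    int qmul   = qscale << 1;          /* usual H.263 derivation (assumption) */
    int qadd   = (qscale - 1) | 1;
    int i;

    /* Fill both blocks with identical pseudo-random coefficients. */
    for (i = 0; i < 64; i++)
        asm_blk[i] = ref_blk[i] = (DCTELEM)((i * 7919) % 61 - 30);

    ff_dct_unquantize_h263_armv5te(asm_blk, qmul, qadd, 64);
    dct_unquantize_h263_ref(ref_blk, qmul, qadd, 64);

    puts(memcmp(asm_blk, ref_blk, sizeof(asm_blk)) ? "MISMATCH" : "ok");
    return 0;
}
```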