changeset 10623:f52d07b169b4 libavcodec

ARM: NEON optimised H264 16x16, 8x8 pred
author mru
date Wed, 02 Dec 2009 14:56:45 +0000
parents 2474aceea736
children 8db678424a18
files Makefile arm/h264pred_init_arm.c arm/h264pred_neon.S h264pred.c h264pred.h
diffstat 5 files changed, 444 insertions(+), 0 deletions(-)
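
The patch provides NEON replacements for the C intra prediction functions used
for whole 16x16 luma and 8x8 chroma blocks. As a rough point of reference for
what two of the simpler routines added below (ff_pred16x16_vert_neon and
ff_pred16x16_dc_neon) compute, here is a scalar sketch; it is not code from
this patch and the _ref names are illustrative. Vertical prediction copies the
row above the block into every row, and DC prediction fills the block with the
rounded average of the 16 pixels above and the 16 pixels to the left:

    #include <stdint.h>
    #include <string.h>

    /* Copy the row above the block into all 16 rows. */
    static void pred16x16_vertical_ref(uint8_t *src, int stride)
    {
        int y;
        for (y = 0; y < 16; y++)
            memcpy(src + y * stride, src - stride, 16);
    }

    /* Fill the block with the rounded average of top row and left column. */
    static void pred16x16_dc_ref(uint8_t *src, int stride)
    {
        int i, y, sum = 0;
        for (i = 0; i < 16; i++)
            sum += src[i - stride] + src[i * stride - 1];
        for (y = 0; y < 16; y++)
            memset(src + y * stride, (sum + 16) >> 5, 16);
    }
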
--- a/Makefile	Wed Dec 02 09:12:05 2009 +0000
+++ b/Makefile	Wed Dec 02 14:56:45 2009 +0000
@@ -601,6 +601,7 @@
 OBJS-$(ARCH_ARM)                       += arm/dsputil_init_arm.o        \
                                           arm/dsputil_arm.o             \
                                           arm/fft_init_arm.o            \
+                                          arm/h264pred_init_arm.o       \
                                           arm/jrevdct_arm.o             \
                                           arm/mpegvideo_arm.o           \
                                           arm/simple_idct_arm.o         \
@@ -626,6 +627,7 @@
 
 NEON-OBJS-$(CONFIG_H264_DECODER)       += arm/h264dsp_neon.o            \
                                           arm/h264idct_neon.o           \
+                                          arm/h264pred_neon.o           \
 
 NEON-OBJS-$(CONFIG_VP3_DECODER)        += arm/vp3dsp_neon.o
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/arm/h264pred_init_arm.c	Wed Dec 02 14:56:45 2009 +0000
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavcodec/h264pred.h"
+
+void ff_pred16x16_vert_neon(uint8_t *src, int stride);
+void ff_pred16x16_hor_neon(uint8_t *src, int stride);
+void ff_pred16x16_plane_neon(uint8_t *src, int stride);
+void ff_pred16x16_dc_neon(uint8_t *src, int stride);
+void ff_pred16x16_128_dc_neon(uint8_t *src, int stride);
+void ff_pred16x16_left_dc_neon(uint8_t *src, int stride);
+void ff_pred16x16_top_dc_neon(uint8_t *src, int stride);
+
+void ff_pred8x8_vert_neon(uint8_t *src, int stride);
+void ff_pred8x8_hor_neon(uint8_t *src, int stride);
+void ff_pred8x8_plane_neon(uint8_t *src, int stride);
+void ff_pred8x8_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_128_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_left_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_top_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_l0t_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_0lt_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_l00_dc_neon(uint8_t *src, int stride);
+void ff_pred8x8_0l0_dc_neon(uint8_t *src, int stride);
+
+#if HAVE_NEON
+static void ff_h264_pred_init_neon(H264PredContext *h, int codec_id)
+{
+    h->pred8x8[VERT_PRED8x8     ] = ff_pred8x8_vert_neon;
+    h->pred8x8[HOR_PRED8x8      ] = ff_pred8x8_hor_neon;
+    h->pred8x8[PLANE_PRED8x8    ] = ff_pred8x8_plane_neon;
+    h->pred8x8[DC_128_PRED8x8   ] = ff_pred8x8_128_dc_neon;
+    if (codec_id != CODEC_ID_RV40) {
+        h->pred8x8[DC_PRED8x8     ] = ff_pred8x8_dc_neon;
+        h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
+        h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
+        h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
+        h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
+        h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
+        h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon;
+    }
+
+    h->pred16x16[DC_PRED8x8     ] = ff_pred16x16_dc_neon;
+    h->pred16x16[VERT_PRED8x8   ] = ff_pred16x16_vert_neon;
+    h->pred16x16[HOR_PRED8x8    ] = ff_pred16x16_hor_neon;
+    h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
+    h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
+    h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
+    if (codec_id != CODEC_ID_SVQ3 && codec_id != CODEC_ID_RV40)
+        h->pred16x16[PLANE_PRED8x8  ] = ff_pred16x16_plane_neon;
+}
+#endif
+
+void ff_h264_pred_init_arm(H264PredContext *h, int codec_id)
+{
+    if (HAVE_NEON)    ff_h264_pred_init_neon(h, codec_id);
+}
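
The init function above only swaps entries in the existing H264PredContext
tables, using the same VERT_PRED8x8/DC_PRED8x8/... indices as the C init, so
no decoder call sites change. A minimal sketch of how an installed pointer is
reached (illustrative only; predict_16x16 is not a function from the decoder):

    #include "libavcodec/h264pred.h"

    /* After ff_h264_pred_init() has run (and, on ARM, called
     * ff_h264_pred_init_arm()), prediction goes through the table. */
    static void predict_16x16(H264PredContext *pred, int mode,
                              uint8_t *dst, int stride)
    {
        pred->pred16x16[mode](dst, stride); /* NEON routine on a NEON build */
    }
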
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/arm/h264pred_neon.S	Wed Dec 02 14:56:45 2009 +0000
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+        .macro ldcol.8  rd,  rs,  rt,  n=8,  hi=0
+.if \n == 8 || \hi == 0
+        vld1.8          {\rd[0]}, [\rs], \rt
+        vld1.8          {\rd[1]}, [\rs], \rt
+        vld1.8          {\rd[2]}, [\rs], \rt
+        vld1.8          {\rd[3]}, [\rs], \rt
+.endif
+.if \n == 8 || \hi == 1
+        vld1.8          {\rd[4]}, [\rs], \rt
+        vld1.8          {\rd[5]}, [\rs], \rt
+        vld1.8          {\rd[6]}, [\rs], \rt
+        vld1.8          {\rd[7]}, [\rs], \rt
+.endif
+        .endm
+
+        .macro add16x8  dq,  dl,  dh,  rl,  rh
+        vaddl.u8        \dq, \rl, \rh
+        vadd.u16        \dl, \dl, \dh
+        vpadd.u16       \dl, \dl, \dl
+        vpadd.u16       \dl, \dl, \dl
+        .endm
+
+function ff_pred16x16_128_dc_neon, export=1
+        vmov.i8         q0,  #128
+        b               .L_pred16x16_dc_end
+        .endfunc
+
+function ff_pred16x16_top_dc_neon, export=1
+        sub             r2,  r0,  r1
+        vld1.8          {q0},     [r2,:128]
+        add16x8         q0,  d0,  d1,  d0,  d1
+        vrshrn.u16      d0,  q0,  #4
+        vdup.8          q0,  d0[0]
+        b               .L_pred16x16_dc_end
+        .endfunc
+
+function ff_pred16x16_left_dc_neon, export=1
+        sub             r2,  r0,  #1
+        ldcol.8         d0,  r2,  r1
+        ldcol.8         d1,  r2,  r1
+        add16x8         q0,  d0,  d1,  d0,  d1
+        vrshrn.u16      d0,  q0,  #4
+        vdup.8          q0,  d0[0]
+        b               .L_pred16x16_dc_end
+        .endfunc
+
+function ff_pred16x16_dc_neon, export=1
+        sub             r2,  r0,  r1
+        vld1.8          {q0},     [r2,:128]
+        sub             r2,  r0,  #1
+        ldcol.8         d2,  r2,  r1
+        ldcol.8         d3,  r2,  r1
+        vaddl.u8        q0,  d0,  d1
+        vaddl.u8        q1,  d2,  d3
+        vadd.u16        q0,  q0,  q1
+        vadd.u16        d0,  d0,  d1
+        vpadd.u16       d0,  d0,  d0
+        vpadd.u16       d0,  d0,  d0
+        vrshrn.u16      d0,  q0,  #5
+        vdup.8          q0,  d0[0]
+.L_pred16x16_dc_end:
+        mov             r3,  #8
+6:      vst1.8          {q0},     [r0,:128], r1
+        vst1.8          {q0},     [r0,:128], r1
+        subs            r3,  r3,  #1
+        bne             6b
+        bx              lr
+        .endfunc
+
+function ff_pred16x16_hor_neon, export=1
+        sub             r2,  r0,  #1
+        mov             r3,  #16
+1:      vld1.8          {d0[],d1[]},[r2],      r1
+        vst1.8          {q0},       [r0,:128], r1
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+        .endfunc
+
+function ff_pred16x16_vert_neon, export=1
+        sub             r0,  r0,  r1
+        vld1.8          {q0},     [r0,:128], r1
+        mov             r3,  #8
+1:      vst1.8          {q0},     [r0,:128], r1
+        vst1.8          {q0},     [r0,:128], r1
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+        .endfunc
+
+function ff_pred16x16_plane_neon, export=1
+        sub             r3,  r0,  r1
+        add             r2,  r3,  #8
+        sub             r3,  r3,  #1
+        vld1.8          {d0},     [r3]
+        vld1.8          {d2},     [r2,:64], r1
+        ldcol.8         d1,  r3,  r1
+        add             r3,  r3,  r1
+        ldcol.8         d3,  r3,  r1
+        vrev64.8        q0,  q0
+        vaddl.u8        q8,  d2,  d3
+        vsubl.u8        q2,  d2,  d0
+        vsubl.u8        q3,  d3,  d1
+        movrel          r3,  p16weight
+        vld1.8          {q0},     [r3,:128]
+        vmul.s16        q2,  q2,  q0
+        vmul.s16        q3,  q3,  q0
+        vadd.i16        d4,  d4,  d5
+        vadd.i16        d5,  d6,  d7
+        vpadd.i16       d4,  d4,  d5
+        vpadd.i16       d4,  d4,  d4
+        vshl.i16        d5,  d4,  #2
+        vaddl.s16       q2,  d4,  d5
+        vrshrn.s32      d4,  q2,  #6
+        mov             r3,  #0
+        vtrn.16         d4,  d5
+        vadd.i16        d2,  d4,  d5
+        vshl.i16        d3,  d2,  #3
+        vrev64.16       d16, d17
+        vsub.i16        d3,  d3,  d2
+        vadd.i16        d16, d16, d0
+        vshl.i16        d2,  d16, #4
+        vsub.i16        d2,  d2,  d3
+        vshl.i16        d3,  d4,  #4
+        vext.16         q0,  q0,  q0,  #7
+        vsub.i16        d6,  d5,  d3
+        vmov.16         d0[0], r3
+        vmul.i16        q0,  q0,  d4[0]
+        vdup.16         q1,  d2[0]
+        vdup.16         q2,  d4[0]
+        vdup.16         q3,  d6[0]
+        vshl.i16        q2,  q2,  #3
+        vadd.i16        q1,  q1,  q0
+        vadd.i16        q3,  q3,  q2
+        mov             r3,  #16
+1:
+        vqshrun.s16     d0,  q1,  #5
+        vadd.i16        q1,  q1,  q2
+        vqshrun.s16     d1,  q1,  #5
+        vadd.i16        q1,  q1,  q3
+        vst1.8          {q0},     [r0,:128], r1
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+        .endfunc
+
+        .section        .rodata
+        .align          4
+p16weight:
+        .short          1,2,3,4,5,6,7,8
+
+        .text
+
+function ff_pred8x8_hor_neon, export=1
+        sub             r2,  r0,  #1
+        mov             r3,  #8
+1:      vld1.8          {d0[]},   [r2],     r1
+        vst1.8          {d0},     [r0,:64], r1
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+        .endfunc
+
+function ff_pred8x8_vert_neon, export=1
+        sub             r0,  r0,  r1
+        vld1.8          {d0},     [r0,:64], r1
+        mov             r3,  #4
+1:      vst1.8          {d0},     [r0,:64], r1
+        vst1.8          {d0},     [r0,:64], r1
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+        .endfunc
+
+function ff_pred8x8_plane_neon, export=1
+        sub             r3,  r0,  r1
+        add             r2,  r3,  #4
+        sub             r3,  r3,  #1
+        vld1.32         {d0[0]},  [r3]
+        vld1.32         {d2[0]},  [r2,:32], r1
+        ldcol.8         d0,  r3,  r1,  4,  hi=1
+        add             r3,  r3,  r1
+        ldcol.8         d3,  r3,  r1,  4
+        vaddl.u8        q8,  d2,  d3
+        vrev32.8        d0,  d0
+        vtrn.32         d2,  d3
+        vsubl.u8        q2,  d2,  d0
+        movrel          r3,  p16weight
+        vld1.16         {q0},     [r3,:128]
+        vmul.s16        d4,  d4,  d0
+        vmul.s16        d5,  d5,  d0
+        vpadd.i16       d4,  d4,  d5
+        vpaddl.s16      d4,  d4
+        vshl.i32        d5,  d4,  #4
+        vadd.s32        d4,  d4,  d5
+        vrshrn.s32      d4,  q2,  #5
+        mov             r3,  #0
+        vtrn.16         d4,  d5
+        vadd.i16        d2,  d4,  d5
+        vshl.i16        d3,  d2,  #2
+        vrev64.16       d16, d16
+        vsub.i16        d3,  d3,  d2
+        vadd.i16        d16, d16, d0
+        vshl.i16        d2,  d16, #4
+        vsub.i16        d2,  d2,  d3
+        vshl.i16        d3,  d4,  #3
+        vext.16         q0,  q0,  q0,  #7
+        vsub.i16        d6,  d5,  d3
+        vmov.16         d0[0], r3
+        vmul.i16        q0,  q0,  d4[0]
+        vdup.16         q1,  d2[0]
+        vdup.16         q2,  d4[0]
+        vdup.16         q3,  d6[0]
+        vshl.i16        q2,  q2,  #3
+        vadd.i16        q1,  q1,  q0
+        vadd.i16        q3,  q3,  q2
+        mov             r3,  #8
+1:
+        vqshrun.s16     d0,  q1,  #5
+        vadd.i16        q1,  q1,  q3
+        vst1.8          {d0},     [r0,:64], r1
+        subs            r3,  r3,  #1
+        bne             1b
+        bx              lr
+        .endfunc
+
+function ff_pred8x8_128_dc_neon, export=1
+        vmov.i8         q0,  #128
+        b               .L_pred8x8_dc_end
+        .endfunc
+
+function ff_pred8x8_top_dc_neon, export=1
+        sub             r2,  r0,  r1
+        vld1.8          {d0},     [r2,:64]
+        vpaddl.u8       d0,  d0
+        vpadd.u16       d0,  d0,  d0
+        vrshrn.u16      d0,  q0,  #2
+        vdup.8          d1,  d0[1]
+        vdup.8          d0,  d0[0]
+        vtrn.32         d0,  d1
+        b               .L_pred8x8_dc_end
+        .endfunc
+
+function ff_pred8x8_left_dc_neon, export=1
+        sub             r2,  r0,  #1
+        ldcol.8         d0,  r2,  r1
+        vpaddl.u8       d0,  d0
+        vpadd.u16       d0,  d0,  d0
+        vrshrn.u16      d0,  q0,  #2
+        vdup.8          d1,  d0[1]
+        vdup.8          d0,  d0[0]
+        b               .L_pred8x8_dc_end
+        .endfunc
+
+function ff_pred8x8_dc_neon, export=1
+        sub             r2,  r0,  r1
+        vld1.8          {d0},     [r2,:64]
+        sub             r2,  r0,  #1
+        ldcol.8         d1,  r2,  r1
+        vtrn.32         d0,  d1
+        vpaddl.u8       q0,  q0
+        vpadd.u16       d0,  d0,  d1
+        vpadd.u16       d1,  d0,  d0
+        vrshrn.u16      d2,  q0,  #3
+        vrshrn.u16      d3,  q0,  #2
+        vdup.8          d0,  d2[4]
+        vdup.8          d1,  d3[3]
+        vdup.8          d4,  d3[2]
+        vdup.8          d5,  d2[5]
+        vtrn.32         q0,  q2
+.L_pred8x8_dc_end:
+        mov             r3,  #4
+        add             r2,  r0,  r1,  lsl #2
+6:      vst1.8          {d0},     [r0,:64], r1
+        vst1.8          {d1},     [r2,:64], r1
+        subs            r3,  r3,  #1
+        bne             6b
+        bx              lr
+        .endfunc
+
+function ff_pred8x8_l0t_dc_neon, export=1
+        sub             r2,  r0,  r1
+        vld1.8          {d0},     [r2,:64]
+        sub             r2,  r0,  #1
+        ldcol.8         d1,  r2,  r1,  4
+        vtrn.32         d0,  d1
+        vpaddl.u8       q0,  q0
+        vpadd.u16       d0,  d0,  d1
+        vpadd.u16       d1,  d0,  d0
+        vrshrn.u16      d2,  q0,  #3
+        vrshrn.u16      d3,  q0,  #2
+        vdup.8          d0,  d2[4]
+        vdup.8          d1,  d3[0]
+        vdup.8          q2,  d3[2]
+        vtrn.32         q0,  q2
+        b               .L_pred8x8_dc_end
+        .endfunc
+
+function ff_pred8x8_l00_dc_neon, export=1
+        sub             r2,  r0,  #1
+        ldcol.8         d0,  r2,  r1,  4
+        vpaddl.u8       d0,  d0
+        vpadd.u16       d0,  d0,  d0
+        vrshrn.u16      d0,  q0,  #2
+        vmov.i8         d1,  #128
+        vdup.8          d0,  d0[0]
+        b               .L_pred8x8_dc_end
+        .endfunc
+
+function ff_pred8x8_0lt_dc_neon, export=1
+        sub             r2,  r0,  r1
+        vld1.8          {d0},     [r2,:64]
+        add             r2,  r0,  r1,  lsl #2
+        sub             r2,  r2,  #1
+        ldcol.8         d1,  r2,  r1,  4,  hi=1
+        vtrn.32         d0,  d1
+        vpaddl.u8       q0,  q0
+        vpadd.u16       d0,  d0,  d1
+        vpadd.u16       d1,  d0,  d0
+        vrshrn.u16      d3,  q0,  #2
+        vrshrn.u16      d2,  q0,  #3
+        vdup.8          d0,  d3[0]
+        vdup.8          d1,  d3[3]
+        vdup.8          d4,  d3[2]
+        vdup.8          d5,  d2[5]
+        vtrn.32         q0,  q2
+        b               .L_pred8x8_dc_end
+        .endfunc
+
+function ff_pred8x8_0l0_dc_neon, export=1
+        add             r2,  r0,  r1,  lsl #2
+        sub             r2,  r2,  #1
+        ldcol.8         d1,  r2,  r1,  4
+        vpaddl.u8       d2,  d1
+        vpadd.u16       d2,  d2,  d2
+        vrshrn.u16      d1,  q1,  #2
+        vmov.i8         d0,  #128
+        vdup.8          d1,  d1[0]
+        b               .L_pred8x8_dc_end
+        .endfunc
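
The longest routines above are the two plane prediction functions. For
reference, this is a scalar sketch of the H.264 Intra_16x16 plane mode that
ff_pred16x16_plane_neon corresponds to, following the formula in the standard
(the 8x8 chroma version is analogous with 4-term sums and different scaling);
the names here are illustrative and this is not a transcription of the NEON
code:

    #include <stdint.h>

    static uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* Fit a plane through the edge pixels above and left of the block. */
    static void pred16x16_plane_ref(uint8_t *src, int stride)
    {
        const uint8_t *top  = src - stride;   /* row above the block      */
        const uint8_t *left = src - 1;        /* column left of the block */
        int i, x, y, a, b, c, H = 0, V = 0;

        /* i == 8 reaches the top-left corner pixel src[-stride - 1]. */
        for (i = 1; i <= 8; i++) {
            H += i * (top[7 + i]             - top[7 - i]);
            V += i * (left[(7 + i) * stride] - left[(7 - i) * stride]);
        }

        a = 16 * (left[15 * stride] + top[15]);
        b = (5 * H + 32) >> 6;
        c = (5 * V + 32) >> 6;

        for (y = 0; y < 16; y++)
            for (x = 0; x < 16; x++)
                src[y * stride + x] =
                    clip_u8((a + b * (x - 7) + c * (y - 7) + 16) >> 5);
    }
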
--- a/h264pred.c	Wed Dec 02 09:12:05 2009 +0000
+++ b/h264pred.c	Wed Dec 02 14:56:45 2009 +0000
@@ -1172,4 +1172,6 @@
     h->pred8x8_add  [ HOR_PRED8x8]= pred8x8_horizontal_add_c;
     h->pred16x16_add[VERT_PRED8x8]= pred16x16_vertical_add_c;
     h->pred16x16_add[ HOR_PRED8x8]= pred16x16_horizontal_add_c;
+
+    if (ARCH_ARM) ff_h264_pred_init_arm(h, codec_id);
 }
--- a/h264pred.h	Wed Dec 02 09:12:05 2009 +0000
+++ b/h264pred.h	Wed Dec 02 14:56:45 2009 +0000
@@ -84,5 +84,6 @@
 }H264PredContext;
 
 void ff_h264_pred_init(H264PredContext *h, int codec_id);
+void ff_h264_pred_init_arm(H264PredContext *h, int codec_id);
 
 #endif /* AVCODEC_H264PRED_H */