x86/dnxhd_mmx.c @ 12208:5d73c4b4cd37 (libavcodec)

Make ff_inverse stay in libavutil, and optionally copy it into libavcodec. The ff_inverse table is used by the FASTDIV macro, which is defined in libavutil, but until now the table itself was defined only in libavcodec. After this change the main copy of ff_inverse is part of libavutil (just like FASTDIV), and if CONFIG_SMALL is unset a separate copy is made available to libavcodec, to avoid the performance penalty of going through an external lookup table (a rough sketch of the FASTDIV/ff_inverse idea follows after this header). Dynamic linking works because the libraries are linked with -Bsymbolic, so the local copy of the symbol takes priority over the external one; static linking works because the table sits in a standalone object file in both libraries, so the linker can discard one of the two copies. Tested on Linux/x86-64 and Mac OS X/x86-64.
author flameeyes
date Wed, 21 Jul 2010 12:37:37 +0000
parents 7768bdfd4f7b
children 3fc4c625b6f3
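For context on the description above, here is a minimal, self-contained sketch of the reciprocal-multiplication trick that ff_inverse enables. It is an illustration only, not FFmpeg's exact FASTDIV macro or table layout (the real ff_inverse is a precomputed 257-entry table shipped with the library); the names init_inverse and fast_div are made up for this sketch.

    /* Illustrative only: reciprocal-multiply division in the style of FASTDIV.
     * inverse[b] holds ceil(2^32 / b), so a / b becomes a multiply and a
     * shift instead of a hardware division. Exact for 2 <= b <= 256 and
     * a up to about 2^24; the b == 1 case is left out here. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t inverse[257];

    static void init_inverse(void)
    {
        int i;
        for (i = 2; i <= 256; i++)
            inverse[i] = (uint32_t)(((1ULL << 32) + i - 1) / i);
    }

    static inline uint32_t fast_div(uint32_t a, uint32_t b)
    {
        return (uint32_t)(((uint64_t)a * inverse[b]) >> 32);
    }

    int main(void)
    {
        init_inverse();
        printf("%u\n", fast_div(1000, 7)); /* prints 142, i.e. 1000 / 7 */
        return 0;
    }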

/*
 * VC3/DNxHD SIMD functions
 * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
 *
 * VC-3 encoder funded by the British Broadcasting Corporation
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dnxhdenc.h"

/* Load four 8-pixel rows from "pixels" (stride "line_size"), zero-extend
 * them to 16 bits and store them into the 64-element "block" twice, the
 * second time in reverse row order, producing a vertically mirrored 8x8
 * block of DCTELEMs. */
static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "pxor %%xmm7,      %%xmm7       \n\t" // zero register for byte->word unpacking
        "movq (%0),        %%xmm0       \n\t" // row 0
        "add  %2,          %0           \n\t"
        "movq (%0),        %%xmm1       \n\t" // row 1
        "movq (%0, %2),    %%xmm2       \n\t" // row 2
        "movq (%0, %2,2),  %%xmm3       \n\t" // row 3
        "punpcklbw %%xmm7, %%xmm0       \n\t" // widen each row to 8x 16-bit
        "punpcklbw %%xmm7, %%xmm1       \n\t"
        "punpcklbw %%xmm7, %%xmm2       \n\t"
        "punpcklbw %%xmm7, %%xmm3       \n\t"
        "movdqa %%xmm0,      (%1)       \n\t" // rows 0..3 as read
        "movdqa %%xmm1,    16(%1)       \n\t"
        "movdqa %%xmm2,    32(%1)       \n\t"
        "movdqa %%xmm3,    48(%1)       \n\t"
        "movdqa %%xmm3,    64(%1)       \n\t" // rows 4..7: mirrored copies
        "movdqa %%xmm2,    80(%1)       \n\t"
        "movdqa %%xmm1,    96(%1)       \n\t"
        "movdqa %%xmm0,   112(%1)       \n\t"
        : "+r" (pixels)
        : "r" (block), "r" ((x86_reg)line_size)
    );
}
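
/* For reference only (not part of the original file): a hypothetical plain-C
 * equivalent of the SSE2 routine above, useful for seeing the data layout it
 * produces. It is never registered and exists purely as an illustration. */
static void get_pixels_8x4_sym_c(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 4; i++) {
        for (j = 0; j < 8; j++) {
            DCTELEM v = pixels[i * line_size + j];
            block[ i      * 8 + j] = v; /* rows 0..3 as read          */
            block[(7 - i) * 8 + j] = v; /* rows 7..4, mirrored copies */
        }
    }
}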

void ff_dnxhd_init_mmx(DNXHDEncContext *ctx)
{
    if (mm_flags & FF_MM_SSE2) {
        ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2;
    }
}