libavcodec: x86/vc1dsp_mmx.c @ changeset 11369:98970e51365a

Remove DECLARE_ALIGNED_{8,16} macros

These macros are redundant. All uses are replaced with the generic
DECLARE_ALIGNED macro instead.
author mru
date Sat, 06 Mar 2010 14:24:59 +0000
parents 34a65026fa06
children f5ccf2e590d6
--- a/x86/vc1dsp_mmx.c (11368:3d4f64b8fb10)
+++ b/x86/vc1dsp_mmx.c (11369:98970e51365a)
71 "psubw %%mm"#R3", %%mm"#R1" \n\t" \ 71 "psubw %%mm"#R3", %%mm"#R1" \n\t" \
72 "psraw %4, %%mm"#R1" \n\t" \ 72 "psraw %4, %%mm"#R1" \n\t" \
73 "movq %%mm"#R1", "#OFF"(%1) \n\t" \ 73 "movq %%mm"#R1", "#OFF"(%1) \n\t" \
74 "add %2, %0 \n\t" 74 "add %2, %0 \n\t"
75 75
76 DECLARE_ALIGNED_16(const uint64_t, ff_pw_9) = 0x0009000900090009ULL; 76 DECLARE_ALIGNED(16, const uint64_t, ff_pw_9) = 0x0009000900090009ULL;
77 77
78 /** Sacrifying mm6 allows to pipeline loads from src */ 78 /** Sacrifying mm6 allows to pipeline loads from src */
79 static void vc1_put_ver_16b_shift2_mmx(int16_t *dst, 79 static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
80 const uint8_t *src, x86_reg stride, 80 const uint8_t *src, x86_reg stride,
81 int rnd, int64_t shift) 81 int rnd, int64_t shift)
@@ -440,11 +440,11 @@
     if (vmode) { /* Vertical filter to apply */\
         if (hmode) { /* Horizontal filter to apply, output to tmp */\
             static const int shift_value[] = { 0, 5, 1, 5 };\
             int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
             int r;\
-            DECLARE_ALIGNED_16(int16_t, tmp)[12*8];\
+            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
             \
             r = (1<<(shift-1)) + rnd-1;\
             vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
             \
             vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
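
For readers unfamiliar with the macro being generalized: below is a minimal
sketch of how a generic DECLARE_ALIGNED(n, t, v) can subsume the removed
DECLARE_ALIGNED_8/DECLARE_ALIGNED_16 variants. The compiler guards and
expansions here are illustrative assumptions, not a copy of FFmpeg's actual
header; only the two call sites mirror the hunks above.

/* A minimal sketch, assuming GCC-style and MSVC-style alignment
 * attributes; the guards and expansions are illustrative only. */
#include <stdint.h>

#if defined(__GNUC__)
#   define DECLARE_ALIGNED(n, t, v) t __attribute__((aligned(n))) v
#elif defined(_MSC_VER)
#   define DECLARE_ALIGNED(n, t, v) __declspec(align(n)) t v
#else
#   define DECLARE_ALIGNED(n, t, v) t v
#endif

/* Old fixed-width call sites map one-to-one onto the generic macro:
 *   DECLARE_ALIGNED_8(t, v)  ->  DECLARE_ALIGNED(8, t, v)
 *   DECLARE_ALIGNED_16(t, v) ->  DECLARE_ALIGNED(16, t, v)
 */
DECLARE_ALIGNED(16, const uint64_t, ff_pw_9) = 0x0009000900090009ULL;

int main(void)
{
    /* Array declarators attach after the macro call, as in the second hunk. */
    DECLARE_ALIGNED(16, int16_t, tmp)[12*8];
    tmp[0] = (int16_t)(ff_pw_9 & 0xffff);  /* 9 */
    return tmp[0] == 9 ? 0 : 1;
}

A single generic macro keeps the compiler-specific attribute syntax in one
place instead of duplicating it for every alignment width.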