comparison: i386/dsputil_mmx.c @ 2754:a49f140179e9 (libavcodec)

sort H.264 mmx dsp functions into their own file

author:   lorenm
date:     Thu, 02 Jun 2005 20:45:35 +0000
parents:  ba8ecddf5598
children: 95bac7109ff0
41 static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
42 static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
43 static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
44 static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
45 static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
46 static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
46 static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;
47
48 static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
49 static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
50
690 "r" (src + 4*stride), 691 "r" (src + 4*stride),
691 "r" ((long) stride ), 692 "r" ((long) stride ),
692 "r" ((long)(3*stride)) 693 "r" ((long)(3*stride))
693 ); 694 );
694 } 695 }
695
696
697 // out: o = |x-y|>a
698 // clobbers: t
699 #define DIFF_GT_MMX(x,y,a,o,t)\
700 "movq "#y", "#t" \n\t"\
701 "movq "#x", "#o" \n\t"\
702 "psubusb "#x", "#t" \n\t"\
703 "psubusb "#y", "#o" \n\t"\
704 "por "#t", "#o" \n\t"\
705 "psubusb "#a", "#o" \n\t"
706
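DIFF_GT_MMX builds a per-byte |x-y| > a test without a byte absolute-value instruction: the two unsigned saturating differences OR together to give |x-y|, and a final saturating subtract of a leaves a nonzero byte exactly where the threshold is exceeded. A minimal scalar sketch of the same test (reference helper, not part of the file):

static inline uint8_t diff_gt_ref(uint8_t x, uint8_t y, uint8_t a)
{
    uint8_t d = x > y ? x - y : y - x; /* psubusb both ways, then por => |x-y| */
    return d > a ? d - a : 0;          /* psubusb a => nonzero iff |x-y| > a   */
}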
707 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
708 // out: mm5=beta-1, mm7=mask
709 // clobbers: mm4,mm6
710 #define H264_DEBLOCK_MASK(alpha1, beta1) \
711 "pshufw $0, "#alpha1", %%mm4 \n\t"\
712 "pshufw $0, "#beta1 ", %%mm5 \n\t"\
713 "packuswb %%mm4, %%mm4 \n\t"\
714 "packuswb %%mm5, %%mm5 \n\t"\
715 DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
716 DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
717 "por %%mm4, %%mm7 \n\t"\
718 DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
719 "por %%mm4, %%mm7 \n\t"\
720 "pxor %%mm6, %%mm6 \n\t"\
721 "pcmpeqb %%mm6, %%mm7 \n\t"
722
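With alpha1 = alpha-1 and beta1 = beta-1, the three DIFF_GT_MMX results OR into a per-byte "do not filter" flag, and the final pcmpeqb against zero inverts it, so mm7 ends up 0xFF exactly where the standard H.264 edge condition holds. A scalar sketch of that condition (reference only):

static inline int deblock_mask_ref(int p1, int p0, int q0, int q1, int alpha, int beta)
{
    int dpq = p0 > q0 ? p0 - q0 : q0 - p0;
    int dp  = p1 > p0 ? p1 - p0 : p0 - p1;
    int dq  = q1 > q0 ? q1 - q0 : q0 - q1;
    return dpq < alpha && dp < beta && dq < beta;  /* mm7 byte is 0xFF when true */
}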
723 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
724 // out: mm1=p0' mm2=q0'
725 // clobbers: mm0,3-6
726 #define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
727 /* a = q0^p0^((p1-q1)>>2) */\
728 "movq %%mm0, %%mm4 \n\t"\
729 "psubb %%mm3, %%mm4 \n\t"\
730 "psrlw $2, %%mm4 \n\t"\
731 "pxor %%mm1, %%mm4 \n\t"\
732 "pxor %%mm2, %%mm4 \n\t"\
733 /* b = p0^(q1>>2) */\
734 "psrlw $2, %%mm3 \n\t"\
735 "pand "#pb_3f", %%mm3 \n\t"\
736 "movq %%mm1, %%mm5 \n\t"\
737 "pxor %%mm3, %%mm5 \n\t"\
738 /* c = q0^(p1>>2) */\
739 "psrlw $2, %%mm0 \n\t"\
740 "pand "#pb_3f", %%mm0 \n\t"\
741 "movq %%mm2, %%mm6 \n\t"\
742 "pxor %%mm0, %%mm6 \n\t"\
743 /* d = (c^b) & ~(b^a) & 1 */\
744 "pxor %%mm5, %%mm6 \n\t"\
745 "pxor %%mm4, %%mm5 \n\t"\
746 "pandn %%mm6, %%mm5 \n\t"\
747 "pand "#pb_01", %%mm5 \n\t"\
748 /* delta = (avg(q0, p1>>2) + (d&a))
749 * - (avg(p0, q1>>2) + (d&~a)) */\
750 "pavgb %%mm2, %%mm0 \n\t"\
751 "movq %%mm5, %%mm6 \n\t"\
752 "pand %%mm4, %%mm6 \n\t"\
753 "paddusb %%mm6, %%mm0 \n\t"\
754 "pavgb %%mm1, %%mm3 \n\t"\
755 "pandn %%mm5, %%mm4 \n\t"\
756 "paddusb %%mm4, %%mm3 \n\t"\
757 /* p0 += clip(delta, -tc0, tc0)
758 * q0 -= clip(delta, -tc0, tc0) */\
759 "movq %%mm0, %%mm4 \n\t"\
760 "psubusb %%mm3, %%mm0 \n\t"\
761 "psubusb %%mm4, %%mm3 \n\t"\
762 "pminub %%mm7, %%mm0 \n\t"\
763 "pminub %%mm7, %%mm3 \n\t"\
764 "paddusb %%mm0, %%mm1 \n\t"\
765 "paddusb %%mm3, %%mm2 \n\t"\
766 "psubusb %%mm3, %%mm1 \n\t"\
767 "psubusb %%mm0, %%mm2 \n\t"
768
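H264_DEBLOCK_P0_Q0 evaluates the p0/q0 update entirely in unsigned bytes: the pavgb and parity steps rebuild the shifted p1/q1 terms, and the paddusb/psubusb pairs split the clipped delta into a non-negative "add to p0" part and a non-negative "add to q0" part, each limited to the per-pixel tc&mask in mm7 by pminub. A scalar reference of the arithmetic this corresponds to (the spec's delta formula; hypothetical helper, assuming tc is the already-masked per-pixel value):

static inline void deblock_p0_q0_ref(uint8_t *p0, uint8_t *q0,
                                     uint8_t p1, uint8_t q1, int tc)
{
    int delta, np0, nq0;
    delta = (((*q0 - *p0) << 2) + (p1 - q1) + 4) >> 3;
    if      (delta < -tc) delta = -tc;
    else if (delta >  tc) delta =  tc;
    np0 = *p0 + delta;
    nq0 = *q0 - delta;
    *p0 = np0 < 0 ? 0 : np0 > 255 ? 255 : np0;
    *q0 = nq0 < 0 ? 0 : nq0 > 255 ? 255 : nq0;
}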
769 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
770 // out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
771 // clobbers: q2, tmp, tc0
772 #define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
773 "movq %%mm1, "#tmp" \n\t"\
774 "pavgb %%mm2, "#tmp" \n\t"\
775 "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
776 "pxor "q2addr", "#tmp" \n\t"\
777 "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
778 "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
779 "movq "#p1", "#tmp" \n\t"\
780 "psubusb "#tc0", "#tmp" \n\t"\
781 "paddusb "#p1", "#tc0" \n\t"\
782 "pmaxub "#tmp", "#q2" \n\t"\
783 "pminub "#tc0", "#q2" \n\t"\
784 "movq "#q2", "q1addr" \n\t"
785
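H264_DEBLOCK_Q1 is the second-line (p1/q1) update: pavgb plus the &1 parity correction produce (q2 + ((p0+q0+1)>>1)) >> 1 without leaving 8 bits, and the psubusb/paddusb/pmaxub/pminub sequence clamps it to [q1-tc0, q1+tc0] with unsigned saturation. A scalar sketch of the value written to q1addr (reference only):

static inline uint8_t deblock_q1_ref(uint8_t p0, uint8_t q0, uint8_t q1,
                                     uint8_t q2, int tc0)
{
    int v  = (q2 + ((p0 + q0 + 1) >> 1)) >> 1;
    int lo = q1 > tc0       ? q1 - tc0 : 0;    /* psubusb saturates at 0   */
    int hi = q1 + tc0 > 255 ? 255 : q1 + tc0;  /* paddusb saturates at 255 */
    return v < lo ? lo : v > hi ? hi : v;
}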
786 static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
787 {
788 uint64_t tmp0;
789 uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
790 // with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
791 uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };
792
793 asm volatile(
794 "movq (%1,%3), %%mm0 \n\t" //p1
795 "movq (%1,%3,2), %%mm1 \n\t" //p0
796 "movq (%2), %%mm2 \n\t" //q0
797 "movq (%2,%3), %%mm3 \n\t" //q1
798 H264_DEBLOCK_MASK(%6, %7)
799 "pand %5, %%mm7 \n\t"
800 "movq %%mm7, %0 \n\t"
801
802 /* filter p1 */
803 "movq (%1), %%mm3 \n\t" //p2
804 DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
805 "pandn %%mm7, %%mm6 \n\t"
806 "pcmpeqb %%mm7, %%mm6 \n\t"
807 "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
808 "pshufw $80, %4, %%mm4 \n\t"
809 "pand %%mm7, %%mm4 \n\t" // mask & tc0
810 "movq %8, %%mm7 \n\t"
811 "pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
812 "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
813 "paddb %%mm4, %%mm7 \n\t" // tc++
814 H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)
815
816 /* filter q1 */
817 "movq (%2,%3,2), %%mm4 \n\t" //q2
818 DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
819 "pandn %0, %%mm6 \n\t"
820 "pcmpeqb %0, %%mm6 \n\t"
821 "pand %0, %%mm6 \n\t"
822 "pshufw $80, %4, %%mm5 \n\t"
823 "pand %%mm6, %%mm5 \n\t"
824 "pand %8, %%mm6 \n\t"
825 "paddb %%mm6, %%mm7 \n\t"
826 "movq (%2,%3), %%mm3 \n\t"
827 H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)
828
829 /* filter p0, q0 */
830 H264_DEBLOCK_P0_Q0(%8, %9)
831 "movq %%mm1, (%1,%3,2) \n\t"
832 "movq %%mm2, (%2) \n\t"
833
834 : "=m"(tmp0)
835 : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
836 "m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
837 "m"(mm_bone), "m"(ff_pb_3F)
838 );
839 }
840
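The operand packing above is worth spelling out: the 8 luma pixels split into two groups of 4, each governed by one tc0[] entry, so tc is built with each tc0 byte replicated across its group (pshufw $0x50 spreads it to all 8 bytes), and mask[] disables a whole group when its tc0 is negative while still letting the other group be filtered. A sketch of the resulting byte layout (illustrative loop, not in the source):

/* bytes 0-3 carry tc0[0], bytes 4-7 carry tc0[1] */
uint8_t tc_bytes[8];
int i;
for (i = 0; i < 8; i++)
    tc_bytes[i] = (uint8_t)tc0[i >> 2];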
841 static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
842 {
843 if((tc0[0] & tc0[1]) >= 0)
844 h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
845 if((tc0[2] & tc0[3]) >= 0)
846 h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
847 }
848 static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
849 {
850 //FIXME: could cut some load/stores by merging transpose with filter
851 // also, it only needs to transpose 6x8
852 uint8_t trans[8*8];
853 int i;
854 for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
855 if((tc0[0] & tc0[1]) < 0)
856 continue;
857 transpose4x4(trans, pix-4, 8, stride);
858 transpose4x4(trans +4*8, pix, 8, stride);
859 transpose4x4(trans+4, pix-4+4*stride, 8, stride);
860 transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
861 h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
862 transpose4x4(pix-2, trans +2*8, stride, 8);
863 transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
864 }
865 }
866
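The horizontal (h_) variants reuse the vertical filter by transposing 4x4 tiles through a stack buffer and writing back only the rows the filter can change. transpose4x4() is the MMX helper defined earlier in this file; a scalar equivalent, with the (dst, src, dst_stride, src_stride) signature implied by its call sites, would be:

static inline void transpose4x4_ref(uint8_t *dst, uint8_t *src,
                                    int dst_stride, int src_stride)
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            dst[i*dst_stride + j] = src[j*src_stride + i];
}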
867 static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
868 {
869 asm volatile(
870 "movq (%0), %%mm0 \n\t" //p1
871 "movq (%0,%2), %%mm1 \n\t" //p0
872 "movq (%1), %%mm2 \n\t" //q0
873 "movq (%1,%2), %%mm3 \n\t" //q1
874 H264_DEBLOCK_MASK(%4, %5)
875 "movd %3, %%mm6 \n\t"
876 "punpcklbw %%mm6, %%mm6 \n\t"
877 "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
878 H264_DEBLOCK_P0_Q0(%6, %7)
879 "movq %%mm1, (%0,%2) \n\t"
880 "movq %%mm2, (%1) \n\t"
881
882 :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
883 "r"(*(uint32_t*)tc0),
884 "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
885 );
886 }
887
888 static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
889 {
890 h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
891 }
892
893 static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
894 {
895 //FIXME: could cut some load/stores by merging transpose with filter
896 uint8_t trans[8*4];
897 transpose4x4(trans, pix-2, 8, stride);
898 transpose4x4(trans+4, pix-2+4*stride, 8, stride);
899 h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
900 transpose4x4(pix-2, trans, stride, 8);
901 transpose4x4(pix-2+4*stride, trans+4, stride, 8);
902 }
903
904 // p0 = (p0 + q1 + 2*p1 + 2) >> 2
905 #define H264_FILTER_CHROMA4(p0, p1, q1, one) \
906 "movq "#p0", %%mm4 \n\t"\
907 "pxor "#q1", %%mm4 \n\t"\
908 "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
909 "pavgb "#q1", "#p0" \n\t"\
910 "psubusb %%mm4, "#p0" \n\t"\
911 "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
912
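H264_FILTER_CHROMA4 reaches (p0 + q1 + 2*p1 + 2) >> 2 in pure byte arithmetic: pavgb rounds up, so subtracting the parity bit (p0^q1)&1 first turns avg(p0,q1) into a floor, and the second pavgb then lands on the same rounding as the direct expression. Scalar reference of the output value (not part of the file):

static inline uint8_t filter_chroma4_ref(uint8_t p0, uint8_t p1, uint8_t q1)
{
    return (p0 + q1 + 2*p1 + 2) >> 2;
}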
913 static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
914 {
915 asm volatile(
916 "movq (%0), %%mm0 \n\t"
917 "movq (%0,%2), %%mm1 \n\t"
918 "movq (%1), %%mm2 \n\t"
919 "movq (%1,%2), %%mm3 \n\t"
920 H264_DEBLOCK_MASK(%3, %4)
921 "movq %%mm1, %%mm5 \n\t"
922 "movq %%mm2, %%mm6 \n\t"
923 H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
924 H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
925 "psubb %%mm5, %%mm1 \n\t"
926 "psubb %%mm6, %%mm2 \n\t"
927 "pand %%mm7, %%mm1 \n\t"
928 "pand %%mm7, %%mm2 \n\t"
929 "paddb %%mm5, %%mm1 \n\t"
930 "paddb %%mm6, %%mm2 \n\t"
931 "movq %%mm1, (%0,%2) \n\t"
932 "movq %%mm2, (%1) \n\t"
933 :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
934 "m"(alpha1), "m"(beta1), "m"(mm_bone)
935 );
936 }
937
938 static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
939 {
940 h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
941 }
942
943 static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
944 {
945 //FIXME: could cut some load/stores by merging transpose with filter
946 uint8_t trans[8*4];
947 transpose4x4(trans, pix-2, 8, stride);
948 transpose4x4(trans+4, pix-2+4*stride, 8, stride);
949 h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
950 transpose4x4(pix-2, trans, stride, 8);
951 transpose4x4(pix-2+4*stride, trans+4, stride, 8);
952 }
953
954
955 #ifdef CONFIG_ENCODERS
956 static int pix_norm1_mmx(uint8_t *pix, int line_size) {
957 int tmp;
958 asm volatile (
2544 uint8_t * const halfH= ((uint8_t*)half);\
2545 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2546 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2547 }
2548
2549 #define QPEL_H264V(A,B,C,D,E,F,OP)\
2550 "movd (%0), "#F" \n\t"\
2551 "movq "#C", %%mm6 \n\t"\
2552 "paddw "#D", %%mm6 \n\t"\
2553 "psllw $2, %%mm6 \n\t"\
2554 "psubw "#B", %%mm6 \n\t"\
2555 "psubw "#E", %%mm6 \n\t"\
2556 "pmullw %4, %%mm6 \n\t"\
2557 "add %2, %0 \n\t"\
2558 "punpcklbw %%mm7, "#F" \n\t"\
2559 "paddw %5, "#A" \n\t"\
2560 "paddw "#F", "#A" \n\t"\
2561 "paddw "#A", %%mm6 \n\t"\
2562 "psraw $5, %%mm6 \n\t"\
2563 "packuswb %%mm6, %%mm6 \n\t"\
2564 OP(%%mm6, (%1), A, d)\
2565 "add %3, %1 \n\t"
2566
2567 #define QPEL_H264HV(A,B,C,D,E,F,OF)\
2568 "movd (%0), "#F" \n\t"\
2569 "movq "#C", %%mm6 \n\t"\
2570 "paddw "#D", %%mm6 \n\t"\
2571 "psllw $2, %%mm6 \n\t"\
2572 "psubw "#B", %%mm6 \n\t"\
2573 "psubw "#E", %%mm6 \n\t"\
2574 "pmullw %3, %%mm6 \n\t"\
2575 "add %2, %0 \n\t"\
2576 "punpcklbw %%mm7, "#F" \n\t"\
2577 "paddw "#F", "#A" \n\t"\
2578 "paddw "#A", %%mm6 \n\t"\
2579 "movq %%mm6, "#OF"(%1) \n\t"
2580
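Both QPEL_H264V and QPEL_H264HV evaluate the H.264 half-pel 6-tap filter (1, -5, 20, 20, -5, 1) on six neighbouring samples A..F, factored as ((C+D)<<2 - B - E)*5 + A + F so a single pmullw by ff_pw_5 covers both the 20 and the -5 weights; the V variant adds ff_pw_16 and shifts right by 5 to get rounded pixels, while the HV variant stores the raw 16-bit sum for a second pass. Scalar form of the tap (reference only):

static inline int h264_sixtap_ref(int a, int b, int c, int d, int e, int f)
{
    return 20*(c + d) - 5*(b + e) + a + f;   /* V output: (this + 16) >> 5, clipped to 0..255 */
}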
2581 #define QPEL_H264(OPNAME, OP, MMX)\
2582 static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2583 int h=4;\
2584 \
2585 asm volatile(\
2586 "pxor %%mm7, %%mm7 \n\t"\
2587 "movq %5, %%mm4 \n\t"\
2588 "movq %6, %%mm5 \n\t"\
2589 "1: \n\t"\
2590 "movd -1(%0), %%mm1 \n\t"\
2591 "movd (%0), %%mm2 \n\t"\
2592 "movd 1(%0), %%mm3 \n\t"\
2593 "movd 2(%0), %%mm0 \n\t"\
2594 "punpcklbw %%mm7, %%mm1 \n\t"\
2595 "punpcklbw %%mm7, %%mm2 \n\t"\
2596 "punpcklbw %%mm7, %%mm3 \n\t"\
2597 "punpcklbw %%mm7, %%mm0 \n\t"\
2598 "paddw %%mm0, %%mm1 \n\t"\
2599 "paddw %%mm3, %%mm2 \n\t"\
2600 "movd -2(%0), %%mm0 \n\t"\
2601 "movd 3(%0), %%mm3 \n\t"\
2602 "punpcklbw %%mm7, %%mm0 \n\t"\
2603 "punpcklbw %%mm7, %%mm3 \n\t"\
2604 "paddw %%mm3, %%mm0 \n\t"\
2605 "psllw $2, %%mm2 \n\t"\
2606 "psubw %%mm1, %%mm2 \n\t"\
2607 "pmullw %%mm4, %%mm2 \n\t"\
2608 "paddw %%mm5, %%mm0 \n\t"\
2609 "paddw %%mm2, %%mm0 \n\t"\
2610 "psraw $5, %%mm0 \n\t"\
2611 "packuswb %%mm0, %%mm0 \n\t"\
2612 OP(%%mm0, (%1),%%mm6, d)\
2613 "add %3, %0 \n\t"\
2614 "add %4, %1 \n\t"\
2615 "decl %2 \n\t"\
2616 " jnz 1b \n\t"\
2617 : "+a"(src), "+c"(dst), "+m"(h)\
2618 : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
2619 : "memory"\
2620 );\
2621 }\
2622 static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2623 src -= 2*srcStride;\
2624 asm volatile(\
2625 "pxor %%mm7, %%mm7 \n\t"\
2626 "movd (%0), %%mm0 \n\t"\
2627 "add %2, %0 \n\t"\
2628 "movd (%0), %%mm1 \n\t"\
2629 "add %2, %0 \n\t"\
2630 "movd (%0), %%mm2 \n\t"\
2631 "add %2, %0 \n\t"\
2632 "movd (%0), %%mm3 \n\t"\
2633 "add %2, %0 \n\t"\
2634 "movd (%0), %%mm4 \n\t"\
2635 "add %2, %0 \n\t"\
2636 "punpcklbw %%mm7, %%mm0 \n\t"\
2637 "punpcklbw %%mm7, %%mm1 \n\t"\
2638 "punpcklbw %%mm7, %%mm2 \n\t"\
2639 "punpcklbw %%mm7, %%mm3 \n\t"\
2640 "punpcklbw %%mm7, %%mm4 \n\t"\
2641 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
2642 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
2643 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
2644 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
2645 \
2646 : "+a"(src), "+c"(dst)\
2647 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
2648 : "memory"\
2649 );\
2650 }\
2651 static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2652 int h=4;\
2653 int w=3;\
2654 src -= 2*srcStride+2;\
2655 while(w--){\
2656 asm volatile(\
2657 "pxor %%mm7, %%mm7 \n\t"\
2658 "movd (%0), %%mm0 \n\t"\
2659 "add %2, %0 \n\t"\
2660 "movd (%0), %%mm1 \n\t"\
2661 "add %2, %0 \n\t"\
2662 "movd (%0), %%mm2 \n\t"\
2663 "add %2, %0 \n\t"\
2664 "movd (%0), %%mm3 \n\t"\
2665 "add %2, %0 \n\t"\
2666 "movd (%0), %%mm4 \n\t"\
2667 "add %2, %0 \n\t"\
2668 "punpcklbw %%mm7, %%mm0 \n\t"\
2669 "punpcklbw %%mm7, %%mm1 \n\t"\
2670 "punpcklbw %%mm7, %%mm2 \n\t"\
2671 "punpcklbw %%mm7, %%mm3 \n\t"\
2672 "punpcklbw %%mm7, %%mm4 \n\t"\
2673 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
2674 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
2675 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
2676 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
2677 \
2678 : "+a"(src)\
2679 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
2680 : "memory"\
2681 );\
2682 tmp += 4;\
2683 src += 4 - 9*srcStride;\
2684 }\
2685 tmp -= 3*4;\
2686 asm volatile(\
2687 "movq %4, %%mm6 \n\t"\
2688 "1: \n\t"\
2689 "movq (%0), %%mm0 \n\t"\
2690 "paddw 10(%0), %%mm0 \n\t"\
2691 "movq 2(%0), %%mm1 \n\t"\
2692 "paddw 8(%0), %%mm1 \n\t"\
2693 "movq 4(%0), %%mm2 \n\t"\
2694 "paddw 6(%0), %%mm2 \n\t"\
2695 "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
2696 "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
2697 "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
2698 "paddsw %%mm2, %%mm0 \n\t"\
2699 "psraw $2, %%mm0 \n\t"/*((a-b)/4-b)/4 */\
2700 "paddw %%mm6, %%mm2 \n\t"\
2701 "paddw %%mm2, %%mm0 \n\t"\
2702 "psraw $6, %%mm0 \n\t"\
2703 "packuswb %%mm0, %%mm0 \n\t"\
2704 OP(%%mm0, (%1),%%mm7, d)\
2705 "add $24, %0 \n\t"\
2706 "add %3, %1 \n\t"\
2707 "decl %2 \n\t"\
2708 " jnz 1b \n\t"\
2709 : "+a"(tmp), "+c"(dst), "+m"(h)\
2710 : "S"((long)dstStride), "m"(ff_pw_32)\
2711 : "memory"\
2712 );\
2713 }\
2714 \
2715 static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2716 int h=8;\
2717 asm volatile(\
2718 "pxor %%mm7, %%mm7 \n\t"\
2719 "movq %5, %%mm6 \n\t"\
2720 "1: \n\t"\
2721 "movq (%0), %%mm0 \n\t"\
2722 "movq 1(%0), %%mm2 \n\t"\
2723 "movq %%mm0, %%mm1 \n\t"\
2724 "movq %%mm2, %%mm3 \n\t"\
2725 "punpcklbw %%mm7, %%mm0 \n\t"\
2726 "punpckhbw %%mm7, %%mm1 \n\t"\
2727 "punpcklbw %%mm7, %%mm2 \n\t"\
2728 "punpckhbw %%mm7, %%mm3 \n\t"\
2729 "paddw %%mm2, %%mm0 \n\t"\
2730 "paddw %%mm3, %%mm1 \n\t"\
2731 "psllw $2, %%mm0 \n\t"\
2732 "psllw $2, %%mm1 \n\t"\
2733 "movq -1(%0), %%mm2 \n\t"\
2734 "movq 2(%0), %%mm4 \n\t"\
2735 "movq %%mm2, %%mm3 \n\t"\
2736 "movq %%mm4, %%mm5 \n\t"\
2737 "punpcklbw %%mm7, %%mm2 \n\t"\
2738 "punpckhbw %%mm7, %%mm3 \n\t"\
2739 "punpcklbw %%mm7, %%mm4 \n\t"\
2740 "punpckhbw %%mm7, %%mm5 \n\t"\
2741 "paddw %%mm4, %%mm2 \n\t"\
2742 "paddw %%mm3, %%mm5 \n\t"\
2743 "psubw %%mm2, %%mm0 \n\t"\
2744 "psubw %%mm5, %%mm1 \n\t"\
2745 "pmullw %%mm6, %%mm0 \n\t"\
2746 "pmullw %%mm6, %%mm1 \n\t"\
2747 "movd -2(%0), %%mm2 \n\t"\
2748 "movd 7(%0), %%mm5 \n\t"\
2749 "punpcklbw %%mm7, %%mm2 \n\t"\
2750 "punpcklbw %%mm7, %%mm5 \n\t"\
2751 "paddw %%mm3, %%mm2 \n\t"\
2752 "paddw %%mm5, %%mm4 \n\t"\
2753 "movq %6, %%mm5 \n\t"\
2754 "paddw %%mm5, %%mm2 \n\t"\
2755 "paddw %%mm5, %%mm4 \n\t"\
2756 "paddw %%mm2, %%mm0 \n\t"\
2757 "paddw %%mm4, %%mm1 \n\t"\
2758 "psraw $5, %%mm0 \n\t"\
2759 "psraw $5, %%mm1 \n\t"\
2760 "packuswb %%mm1, %%mm0 \n\t"\
2761 OP(%%mm0, (%1),%%mm5, q)\
2762 "add %3, %0 \n\t"\
2763 "add %4, %1 \n\t"\
2764 "decl %2 \n\t"\
2765 " jnz 1b \n\t"\
2766 : "+a"(src), "+c"(dst), "+m"(h)\
2767 : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
2768 : "memory"\
2769 );\
2770 }\
2771 \
2772 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2773 int h= 2;\
2774 src -= 2*srcStride;\
2775 \
2776 while(h--){\
2777 asm volatile(\
2778 "pxor %%mm7, %%mm7 \n\t"\
2779 "movd (%0), %%mm0 \n\t"\
2780 "add %2, %0 \n\t"\
2781 "movd (%0), %%mm1 \n\t"\
2782 "add %2, %0 \n\t"\
2783 "movd (%0), %%mm2 \n\t"\
2784 "add %2, %0 \n\t"\
2785 "movd (%0), %%mm3 \n\t"\
2786 "add %2, %0 \n\t"\
2787 "movd (%0), %%mm4 \n\t"\
2788 "add %2, %0 \n\t"\
2789 "punpcklbw %%mm7, %%mm0 \n\t"\
2790 "punpcklbw %%mm7, %%mm1 \n\t"\
2791 "punpcklbw %%mm7, %%mm2 \n\t"\
2792 "punpcklbw %%mm7, %%mm3 \n\t"\
2793 "punpcklbw %%mm7, %%mm4 \n\t"\
2794 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
2795 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
2796 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
2797 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
2798 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
2799 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
2800 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
2801 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
2802 \
2803 : "+a"(src), "+c"(dst)\
2804 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
2805 : "memory"\
2806 );\
2807 src += 4-13*srcStride;\
2808 dst += 4-8*dstStride;\
2809 }\
2810 }\
2811 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2812 int h=8;\
2813 int w=4;\
2814 src -= 2*srcStride+2;\
2815 while(w--){\
2816 asm volatile(\
2817 "pxor %%mm7, %%mm7 \n\t"\
2818 "movd (%0), %%mm0 \n\t"\
2819 "add %2, %0 \n\t"\
2820 "movd (%0), %%mm1 \n\t"\
2821 "add %2, %0 \n\t"\
2822 "movd (%0), %%mm2 \n\t"\
2823 "add %2, %0 \n\t"\
2824 "movd (%0), %%mm3 \n\t"\
2825 "add %2, %0 \n\t"\
2826 "movd (%0), %%mm4 \n\t"\
2827 "add %2, %0 \n\t"\
2828 "punpcklbw %%mm7, %%mm0 \n\t"\
2829 "punpcklbw %%mm7, %%mm1 \n\t"\
2830 "punpcklbw %%mm7, %%mm2 \n\t"\
2831 "punpcklbw %%mm7, %%mm3 \n\t"\
2832 "punpcklbw %%mm7, %%mm4 \n\t"\
2833 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*4)\
2834 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*4)\
2835 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*4)\
2836 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*4)\
2837 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*8*4)\
2838 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*8*4)\
2839 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*8*4)\
2840 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*8*4)\
2841 \
2842 : "+a"(src)\
2843 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
2844 : "memory"\
2845 );\
2846 tmp += 4;\
2847 src += 4 - 13*srcStride;\
2848 }\
2849 tmp -= 4*4;\
2850 asm volatile(\
2851 "movq %4, %%mm6 \n\t"\
2852 "1: \n\t"\
2853 "movq (%0), %%mm0 \n\t"\
2854 "movq 8(%0), %%mm3 \n\t"\
2855 "movq 2(%0), %%mm1 \n\t"\
2856 "movq 10(%0), %%mm4 \n\t"\
2857 "paddw %%mm4, %%mm0 \n\t"\
2858 "paddw %%mm3, %%mm1 \n\t"\
2859 "paddw 18(%0), %%mm3 \n\t"\
2860 "paddw 16(%0), %%mm4 \n\t"\
2861 "movq 4(%0), %%mm2 \n\t"\
2862 "movq 12(%0), %%mm5 \n\t"\
2863 "paddw 6(%0), %%mm2 \n\t"\
2864 "paddw 14(%0), %%mm5 \n\t"\
2865 "psubw %%mm1, %%mm0 \n\t"\
2866 "psubw %%mm4, %%mm3 \n\t"\
2867 "psraw $2, %%mm0 \n\t"\
2868 "psraw $2, %%mm3 \n\t"\
2869 "psubw %%mm1, %%mm0 \n\t"\
2870 "psubw %%mm4, %%mm3 \n\t"\
2871 "paddsw %%mm2, %%mm0 \n\t"\
2872 "paddsw %%mm5, %%mm3 \n\t"\
2873 "psraw $2, %%mm0 \n\t"\
2874 "psraw $2, %%mm3 \n\t"\
2875 "paddw %%mm6, %%mm2 \n\t"\
2876 "paddw %%mm6, %%mm5 \n\t"\
2877 "paddw %%mm2, %%mm0 \n\t"\
2878 "paddw %%mm5, %%mm3 \n\t"\
2879 "psraw $6, %%mm0 \n\t"\
2880 "psraw $6, %%mm3 \n\t"\
2881 "packuswb %%mm3, %%mm0 \n\t"\
2882 OP(%%mm0, (%1),%%mm7, q)\
2883 "add $32, %0 \n\t"\
2884 "add %3, %1 \n\t"\
2885 "decl %2 \n\t"\
2886 " jnz 1b \n\t"\
2887 : "+a"(tmp), "+c"(dst), "+m"(h)\
2888 : "S"((long)dstStride), "m"(ff_pw_32)\
2889 : "memory"\
2890 );\
2891 }\
2892 static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2893 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
2894 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
2895 src += 8*srcStride;\
2896 dst += 8*dstStride;\
2897 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
2898 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
2899 }\
2900 \
2901 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2902 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
2903 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
2904 src += 8*srcStride;\
2905 dst += 8*dstStride;\
2906 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
2907 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
2908 }\
2909 \
2910 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
2911 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\
2912 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\
2913 src += 8*srcStride;\
2914 dst += 8*dstStride;\
2915 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\
2916 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\
2917 }\
2918
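For reference, a plain-C version of what the put_ flavour of the 4x4 horizontal lowpass above computes (hypothetical helper name; the asm's OP macro additionally generates the avg_ flavour). The _hv_ functions apply the same tap a second time to the 16-bit intermediates from QPEL_H264HV, replacing the wide multiply with the shift decomposition annotated as "(a-b)/4" and "((a-b)/4-b)/4" in the asm before the final (+32) >> 6 rounding.

static void put_h264_qpel4_h_lowpass_ref(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride)
{
    int x, y, v;
    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++) {
            v = (  20*(src[x]   + src[x+1])
                 -  5*(src[x-1] + src[x+2])
                 +     src[x-2] + src[x+3] + 16 ) >> 5;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        dst += dstStride;
        src += srcStride;
    }
}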
2919 #define H264_MC(OPNAME, SIZE, MMX) \
2920 static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2921 OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
2922 }\
2923 \
2924 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2925 uint64_t temp[SIZE*SIZE/8];\
2926 uint8_t * const half= (uint8_t*)temp;\
2927 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
2928 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
2929 }\
2930 \
2931 static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2932 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
2933 }\
2934 \
2935 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2936 uint64_t temp[SIZE*SIZE/8];\
2937 uint8_t * const half= (uint8_t*)temp;\
2938 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
2939 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\
2940 }\
2941 \
2942 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2943 uint64_t temp[SIZE*SIZE/8];\
2944 uint8_t * const half= (uint8_t*)temp;\
2945 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
2946 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
2947 }\
2948 \
2949 static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2950 OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
2951 }\
2952 \
2953 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2954 uint64_t temp[SIZE*SIZE/8];\
2955 uint8_t * const half= (uint8_t*)temp;\
2956 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
2957 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
2958 }\
2959 \
2960 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2961 uint64_t temp[SIZE*SIZE/4];\
2962 uint8_t * const halfH= (uint8_t*)temp;\
2963 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
2964 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
2965 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
2966 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
2967 }\
2968 \
2969 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2970 uint64_t temp[SIZE*SIZE/4];\
2971 uint8_t * const halfH= (uint8_t*)temp;\
2972 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
2973 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
2974 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
2975 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
2976 }\
2977 \
2978 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2979 uint64_t temp[SIZE*SIZE/4];\
2980 uint8_t * const halfH= (uint8_t*)temp;\
2981 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
2982 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
2983 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
2984 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
2985 }\
2986 \
2987 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2988 uint64_t temp[SIZE*SIZE/4];\
2989 uint8_t * const halfH= (uint8_t*)temp;\
2990 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
2991 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
2992 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
2993 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
2994 }\
2995 \
2996 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2997 uint64_t temp[SIZE*(SIZE+8)/4];\
2998 int16_t * const tmp= (int16_t*)temp;\
2999 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
3000 }\
3001 \
3002 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
3003 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
3004 uint8_t * const halfH= (uint8_t*)temp;\
3005 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
3006 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
3007 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
3008 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
3009 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
3010 }\
3011 \
3012 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
3013 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
3014 uint8_t * const halfH= (uint8_t*)temp;\
3015 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
3016 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
3017 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
3018 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
3019 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
3020 }\
3021 \
3022 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
3023 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
3024 uint8_t * const halfV= (uint8_t*)temp;\
3025 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
3026 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
3027 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
3028 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
3029 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
3030 }\
3031 \
3032 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
3033 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
3034 uint8_t * const halfV= (uint8_t*)temp;\
3035 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
3036 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
3037 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
3038 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
3039 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
3040 }\
3041
3042
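H264_MC expands one function per quarter-pel position: in mcXY, X is the horizontal and Y the vertical quarter-sample offset, so mc00 is a plain copy, mc20/mc02 are the pure half-pel lowpasses, mc22 is the centre position through the hv path, and the remaining positions average two intermediate planes via the *_l2_* helpers. As a sketch, the mc10 case (full-pel averaged with the horizontal half-pel; rounding assumed to match the pavgb-based l2 helpers) amounts to:

static void h264_qpel_mc10_ref(uint8_t *dst, uint8_t *src, uint8_t *half,
                               int stride, int size)
{
    /* illustrative only: half[] holds the output of the size x size h_lowpass */
    int x, y;
    for (y = 0; y < size; y++)
        for (x = 0; x < size; x++)
            dst[y*stride + x] = (src[y*stride + x] + half[y*size + x] + 1) >> 1;
}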
3043 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
3044 #define AVG_3DNOW_OP(a,b,temp, size) \
3045 "mov" #size " " #b ", " #temp " \n\t"\
3046 "pavgusb " #temp ", " #a " \n\t"\
3047 "mov" #size " " #a ", " #b " \n\t"
3057 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
3058 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
3059 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
3060 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
3061 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
3062
3063 QPEL_H264(put_ , PUT_OP, 3dnow)
3064 QPEL_H264(avg_ , AVG_3DNOW_OP, 3dnow)
3065 QPEL_H264(put_ , PUT_OP, mmx2)
3066 QPEL_H264(avg_ , AVG_MMX2_OP, mmx2)
3067
3068 H264_MC(put_, 4, 3dnow)
3069 H264_MC(put_, 8, 3dnow)
3070 H264_MC(put_, 16,3dnow)
3071 H264_MC(avg_, 4, 3dnow)
3072 H264_MC(avg_, 8, 3dnow)
3073 H264_MC(avg_, 16,3dnow)
3074 H264_MC(put_, 4, mmx2)
3075 H264_MC(put_, 8, mmx2)
3076 H264_MC(put_, 16,mmx2)
3077 H264_MC(avg_, 4, mmx2)
3078 H264_MC(avg_, 8, mmx2)
3079 H264_MC(avg_, 16,mmx2)
3080
3081
3082 /** These are used by *_h264_chroma_mc8_* */
3083 static const uint64_t thirtytwo __align8 = 0x0020002000200020ULL;
3084 static const uint64_t sixtyfour __align8 = 0x0040004000400040ULL;
3085
3086 #define H264_CHROMA_OP(S,D)
3087 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
3088 #include "dsputil_h264_template_mmx.c"
3089 #undef H264_CHROMA_OP
3090 #undef H264_CHROMA_MC8_TMPL
3091
3092 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
3093 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
3094 #include "dsputil_h264_template_mmx.c"
3095 #undef H264_CHROMA_OP
3096 #undef H264_CHROMA_MC8_TMPL
3097
3098 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
3099 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
3100 #include "dsputil_h264_template_mmx.c"
3101 #undef H264_CHROMA_OP
3102 #undef H264_CHROMA_MC8_TMPL
3103
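The chroma MC functions are stamped out of one template: H264_CHROMA_MC8_TMPL names the generated function and H264_CHROMA_OP injects either nothing (put) or the per-pixel average (pavgb on MMX2, pavgusb on 3DNow!) before the store, then the template file is re-included once per variant. The same parameterize-and-include pattern in miniature (illustrative file and names only, not the real template):

/* op_template.c (hypothetical):
 *     static void OP_NAME(uint8_t *dst, const uint8_t *src, int n){
 *         int i;
 *         for(i=0; i<n; i++)
 *             OP(dst[i], src[i]);
 *     }
 * each variant is then generated with:
 *     #define OP(d, s)  (d) = (s)
 *     #define OP_NAME   put_bytes
 *     #include "op_template.c"
 *     #undef OP
 *     #undef OP_NAME
 */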
3104
3105 #if 0
3106 static void just_return() { return; }
3107 #endif
3108
3194 for(i=0; i<8*8; i++){
3195 rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
3196 }
3197 }
3198 }
2405
2406 #include "h264dsp_mmx.c"
3199
3200 /* external functions, from idct_mmx.c */
3201 void ff_mmx_idct(DCTELEM *block);
3202 void ff_mmxext_idct(DCTELEM *block);
3203
3204 void ff_vp3_idct_sse2(int16_t *input_data);
3205 void ff_vp3_idct_mmx(int16_t *data);
3206 void ff_vp3_dsp_init_mmx(void);
3207
3208 void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride);
3209
3210 /* XXX: those functions should be suppressed ASAP when all IDCTs are
3211 converted */
3212 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
3213 {