libavcodec: comparison of i386/dsputil_mmx.c @ 2899:d3a726717baf

sse2 16x16 sum squared diff (306=>268 cycles on a K8); faster 8x8 mmx ssd (77=>70 cycles)
author lorenm
date Fri, 30 Sep 2005 02:31:47 +0000
parents 41315d0120b3
children 3c79bc9f3aa9
2898:95f469274a1d → 2899:d3a726717baf
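Both routines in the hunks below implement the same contract: a sum of squared differences (SSD) over h rows of 8 or 16 pixels, with consecutive rows line_size bytes apart. The patch unrolls the row loop by two (the shr $1 on the row counter plus the lea ...,2 stride advance), so h is assumed to be even. As a point of reference, a scalar sketch of what either function computes could look like the following; sse_ref is a hypothetical name and is not part of this changeset:

    #include <stdint.h>

    /* Hypothetical scalar reference for the SSD routines below: sum of
     * squared differences over h rows of w pixels, rows line_size bytes apart. */
    static int sse_ref(const uint8_t *pix1, const uint8_t *pix2,
                       int line_size, int w, int h)
    {
        int sum = 0;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                int d = pix1[x] - pix2[x];
                sum += d * d;
            }
            pix1 += line_size;
            pix2 += line_size;
        }
        return sum;
    }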
@@ -742,35 +742,53 @@
 
 static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
     int tmp;
   asm volatile (
       "movl %4,%%ecx\n"
+      "shr $1,%%ecx\n"
       "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
       "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
       "1:\n"
-      "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
-      "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
+      "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
+      "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
+      "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
+      "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */
 
+      /* todo: mm1-mm2, mm3-mm4 */
+      /* algo: subtract mm1 from mm2 with saturation and vice versa */
+      /* OR the results to get absolute difference */
       "movq %%mm1,%%mm5\n"
+      "movq %%mm3,%%mm6\n"
       "psubusb %%mm2,%%mm1\n"
+      "psubusb %%mm4,%%mm3\n"
       "psubusb %%mm5,%%mm2\n"
+      "psubusb %%mm6,%%mm4\n"
 
       "por %%mm1,%%mm2\n"
+      "por %%mm3,%%mm4\n"
 
+      /* now convert to 16-bit vectors so we can square them */
       "movq %%mm2,%%mm1\n"
+      "movq %%mm4,%%mm3\n"
 
       "punpckhbw %%mm0,%%mm2\n"
+      "punpckhbw %%mm0,%%mm4\n"
       "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
+      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
 
       "pmaddwd %%mm2,%%mm2\n"
+      "pmaddwd %%mm4,%%mm4\n"
       "pmaddwd %%mm1,%%mm1\n"
+      "pmaddwd %%mm3,%%mm3\n"
 
-      "add %3,%0\n"
-      "add %3,%1\n"
+      "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
+      "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */
 
       "paddd %%mm2,%%mm1\n"
+      "paddd %%mm4,%%mm3\n"
       "paddd %%mm1,%%mm7\n"
+      "paddd %%mm3,%%mm7\n"
 
       "decl %%ecx\n"
       "jnz 1b\n"
 
       "movq %%mm7,%%mm1\n"
838 "paddd %%mm7,%%mm1\n" 856 "paddd %%mm7,%%mm1\n"
839 "movd %%mm1,%2\n" 857 "movd %%mm1,%2\n"
840 : "+r" (pix1), "+r" (pix2), "=r"(tmp) 858 : "+r" (pix1), "+r" (pix2), "=r"(tmp)
841 : "r" ((long)line_size) , "m" (h) 859 : "r" ((long)line_size) , "m" (h)
842 : "%ecx"); 860 : "%ecx");
861 return tmp;
862 }
863
864 static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
865 int tmp;
866 asm volatile (
867 "shr $1,%2\n"
868 "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
869 "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
870 "1:\n"
871 "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
872 "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
873 "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
874 "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
875
876 /* todo: mm1-mm2, mm3-mm4 */
877 /* algo: substract mm1 from mm2 with saturation and vice versa */
878 /* OR the results to get absolute difference */
879 "movdqa %%xmm1,%%xmm5\n"
880 "movdqa %%xmm3,%%xmm6\n"
881 "psubusb %%xmm2,%%xmm1\n"
882 "psubusb %%xmm4,%%xmm3\n"
883 "psubusb %%xmm5,%%xmm2\n"
884 "psubusb %%xmm6,%%xmm4\n"
885
886 "por %%xmm1,%%xmm2\n"
887 "por %%xmm3,%%xmm4\n"
888
889 /* now convert to 16-bit vectors so we can square them */
890 "movdqa %%xmm2,%%xmm1\n"
891 "movdqa %%xmm4,%%xmm3\n"
892
893 "punpckhbw %%xmm0,%%xmm2\n"
894 "punpckhbw %%xmm0,%%xmm4\n"
895 "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
896 "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
897
898 "pmaddwd %%xmm2,%%xmm2\n"
899 "pmaddwd %%xmm4,%%xmm4\n"
900 "pmaddwd %%xmm1,%%xmm1\n"
901 "pmaddwd %%xmm3,%%xmm3\n"
902
903 "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
904 "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
905
906 "paddd %%xmm2,%%xmm1\n"
907 "paddd %%xmm4,%%xmm3\n"
908 "paddd %%xmm1,%%xmm7\n"
909 "paddd %%xmm3,%%xmm7\n"
910
911 "decl %2\n"
912 "jnz 1b\n"
913
914 "movdqa %%xmm7,%%xmm1\n"
915 "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
916 "paddd %%xmm1,%%xmm7\n"
917 "movdqa %%xmm7,%%xmm1\n"
918 "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
919 "paddd %%xmm1,%%xmm7\n"
920 "movd %%xmm7,%3\n"
921 : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
922 : "r" ((long)line_size));
843 return tmp; 923 return tmp;
844 } 924 }
845 925
846 static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) { 926 static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
847 int tmp; 927 int tmp;
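At the end of sse16_sse2, the four 32-bit partial sums held in xmm7 are folded to a scalar by adding the high half onto the low half twice (psrldq $8, then psrldq $4) before movd extracts the result. The same reduction written with intrinsics, again only as an illustration, not part of the changeset:

    #include <emmintrin.h>

    /* Horizontal add of four 32-bit lanes, as the psrldq/paddd/movd tail does. */
    static inline int hsum_epi32(__m128i v)
    {
        v = _mm_add_epi32(v, _mm_srli_si128(v, 8));  /* high qword onto low */
        v = _mm_add_epi32(v, _mm_srli_si128(v, 4));  /* high dword onto low */
        return _mm_cvtsi128_si32(v);                 /* low 32-bit lane */
    }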
@@ -2624,11 +2704,11 @@
 
         c->hadamard8_diff[0]= hadamard8_diff16_mmx;
         c->hadamard8_diff[1]= hadamard8_diff_mmx;
 
         c->pix_norm1 = pix_norm1_mmx;
-        c->sse[0] = sse16_mmx;
+        c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
         c->sse[1] = sse8_mmx;
         c->vsad[4]= vsad_intra16_mmx;
 
         c->nsse[0] = nsse16_mmx;
         c->nsse[1] = nsse8_mmx;
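The only dispatch change is c->sse[0]: at init time it now points to sse16_sse2 whenever the CPU reports SSE2 (mm_flags & MM_SSE2) and falls back to sse16_mmx otherwise, while the 8x8 entry keeps sse8_mmx. Callers see the same function-pointer signature, and the void *v argument is never referenced by either routine. A call through the table would therefore look roughly like the sketch below, where c, a, b and stride are assumed to be an initialized DSPContext pointer, two pixel blocks and their common stride; none of these names come from the patch itself:

    /* illustrative calls through the dispatch table set up above */
    int ssd16 = c->sse[0](NULL, a, b, stride, 16);  /* 16x16 block */
    int ssd8  = c->sse[1](NULL, a, b, stride,  8);  /*  8x8  block */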