libavcodec: comparison of i386/dsputil_mmx.c @ 3574:f549d1e685f7

vorbis simd tweaks
author lorenm
date Fri, 11 Aug 2006 18:19:37 +0000
parents c42c03f3b402
children f7125bf10892
comparison of 3573:7c0a476d0fde and 3574:f549d1e685f7

--- a/i386/dsputil_mmx.c
+++ b/i386/dsputil_mmx.c
@@ -2771,37 +2771,43 @@
         );
     }
 }
 
 static void vector_fmul_3dnow(float *dst, const float *src, int len){
-    long i;
-    len >>= 1;
-    for(i=0; i<len; i++) {
-        asm volatile(
-            "movq %0, %%mm0 \n\t"
-            "pfmul %1, %%mm0 \n\t"
-            "movq %%mm0, %0 \n\t"
-            :"+m"(dst[i*2])
-            :"m"(src[i*2])
-            :"memory"
-        );
-    }
-    asm volatile("femms");
+    long i = (len-4)*4;
+    asm volatile(
+        "1: \n\t"
+        "movq (%1,%0), %%mm0 \n\t"
+        "movq 8(%1,%0), %%mm1 \n\t"
+        "pfmul (%2,%0), %%mm0 \n\t"
+        "pfmul 8(%2,%0), %%mm1 \n\t"
+        "movq %%mm0, (%1,%0) \n\t"
+        "movq %%mm1, 8(%1,%0) \n\t"
+        "sub $16, %0 \n\t"
+        "jge 1b \n\t"
+        "femms \n\t"
+        :"+r"(i)
+        :"r"(dst), "r"(src)
+        :"memory"
+    );
 }
 static void vector_fmul_sse(float *dst, const float *src, int len){
-    long i;
-    len >>= 2;
-    for(i=0; i<len; i++) {
-        asm volatile(
-            "movaps %0, %%xmm0 \n\t"
-            "mulps %1, %%xmm0 \n\t"
-            "movaps %%xmm0, %0 \n\t"
-            :"+m"(dst[i*4])
-            :"m"(src[i*4])
-            :"memory"
-        );
-    }
+    long i = (len-8)*4;
+    asm volatile(
+        "1: \n\t"
+        "movaps (%1,%0), %%xmm0 \n\t"
+        "movaps 16(%1,%0), %%xmm1 \n\t"
+        "mulps (%2,%0), %%xmm0 \n\t"
+        "mulps 16(%2,%0), %%xmm1 \n\t"
+        "movaps %%xmm0, (%1,%0) \n\t"
+        "movaps %%xmm1, 16(%1,%0) \n\t"
+        "sub $32, %0 \n\t"
+        "jge 1b \n\t"
+        :"+r"(i)
+        :"r"(dst), "r"(src)
+        :"memory"
+    );
 }
 
 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
     long i = len*4-16;
     asm volatile(
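
Both vector_fmul kernels drop the per-element asm statement inside a C loop in favour of one asm block that walks the arrays with a single byte offset, counted down from the last block to zero ("sub $16/$32, %0; jge 1b"), so the loop counter doubles as the index into both dst and src. A rough scalar picture of what these kernels compute, and of the descending byte-offset trick they use, is sketched below; it assumes len is a multiple of the block size (4 floats for the 3DNow! path, 8 for the SSE path) and suitably aligned buffers, and the names are illustrative rather than part of the patch.

    /* Hedged sketch, not the patch's code: scalar reference for vector_fmul,
     * i.e. dst[i] *= src[i] for 0 <= i < len. */
    static void vector_fmul_ref(float *dst, const float *src, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] *= src[i];
    }

    /* The same computation restructured the way the new 3DNow! loop is:
     * a byte offset starts at the last 16-byte block and runs down to 0,
     * handling 4 floats per iteration. */
    static void vector_fmul_blocked(float *dst, const float *src, int len)
    {
        for (long i = (long)(len - 4) * 4; i >= 0; i -= 16) {
            float       *d = (float *)((char *)dst + i);
            const float *s = (const float *)((const char *)src + i);
            d[0] *= s[0]; d[1] *= s[1]; d[2] *= s[2]; d[3] *= s[3];
        }
    }

Running the offset towards zero lets the asm test the loop condition with the flags already set by the sub, instead of comparing against a separate end value each iteration.
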
@@ -2840,13 +2846,12 @@
     );
 }
 
 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                       const float *src2, int src3, int len, int step){
-    long i;
+    long i = (len-4)*4;
     if(step == 2 && src3 == 0){
-        i = (len-4)*4;
         dst += (len-4)*2;
         asm volatile(
             "1: \n\t"
             "movq (%2,%0), %%mm0 \n\t"
             "movq 8(%2,%0), %%mm1 \n\t"
@@ -2867,30 +2872,35 @@
             :"r"(src0), "r"(src1), "r"(src2)
             :"memory"
         );
     }
     else if(step == 1 && src3 == 0){
-        for(i=0; i<len; i+=2){
-            asm volatile(
-                "movq %1, %%mm0 \n\t"
-                "pfmul %2, %%mm0 \n\t"
-                "pfadd %3, %%mm0 \n\t"
-                "movq %%mm0, %0 \n\t"
-                :"=m"(dst[i])
-                :"m"(src0[i]), "m"(src1[i]), "m"(src2[i])
-            );
-        }
+        asm volatile(
+            "1: \n\t"
+            "movq (%2,%0), %%mm0 \n\t"
+            "movq 8(%2,%0), %%mm1 \n\t"
+            "pfmul (%3,%0), %%mm0 \n\t"
+            "pfmul 8(%3,%0), %%mm1 \n\t"
+            "pfadd (%4,%0), %%mm0 \n\t"
+            "pfadd 8(%4,%0), %%mm1 \n\t"
+            "movq %%mm0, (%1,%0) \n\t"
+            "movq %%mm1, 8(%1,%0) \n\t"
+            "sub $16, %0 \n\t"
+            "jge 1b \n\t"
+            :"+r"(i)
+            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+            :"memory"
+        );
     }
     else
         ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
     asm volatile("femms");
 }
 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
-                                    const float *src2, float src3, int len, int step){
-    long i;
+                                    const float *src2, int src3, int len, int step){
+    long i = (len-8)*4;
     if(step == 2 && src3 == 0){
-        i = (len-8)*4;
         dst += (len-8)*2;
         asm volatile(
             "1: \n\t"
             "movaps (%2,%0), %%xmm0 \n\t"
             "movaps 16(%2,%0), %%xmm1 \n\t"
@@ -2919,20 +2929,26 @@
             :"r"(src0), "r"(src1), "r"(src2)
             :"memory"
         );
     }
     else if(step == 1 && src3 == 0){
-        for(i=0; i<len; i+=4){
-            asm volatile(
-                "movaps %1, %%xmm0 \n\t"
-                "mulps %2, %%xmm0 \n\t"
-                "addps %3, %%xmm0 \n\t"
-                "movaps %%xmm0, %0 \n\t"
-                :"=m"(dst[i])
-                :"m"(src0[i]), "m"(src1[i]), "m"(src2[i])
-            );
-        }
+        asm volatile(
+            "1: \n\t"
+            "movaps (%2,%0), %%xmm0 \n\t"
+            "movaps 16(%2,%0), %%xmm1 \n\t"
+            "mulps (%3,%0), %%xmm0 \n\t"
+            "mulps 16(%3,%0), %%xmm1 \n\t"
+            "addps (%4,%0), %%xmm0 \n\t"
+            "addps 16(%4,%0), %%xmm1 \n\t"
+            "movaps %%xmm0, (%1,%0) \n\t"
+            "movaps %%xmm1, 16(%1,%0) \n\t"
+            "sub $32, %0 \n\t"
+            "jge 1b \n\t"
+            :"+r"(i)
+            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+            :"memory"
+        );
     }
     else
         ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
 }
 
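
The two vector_fmul_add_add variants get the same treatment for their step == 1, src3 == 0 branch, while every other parameter combination still falls back to the C routine ff_vector_fmul_add_add_c. The fallback itself is not shown in these hunks; a plausible scalar reference that is consistent with what the src3 == 0 fast paths compute (dst[i*step] = src0[i]*src1[i] + src2[i]) would look roughly like the sketch below, which illustrates the contract the asm is written against rather than the actual dsputil.c code.

    /* Hedged sketch only: a scalar routine matching what the SIMD fast paths
     * above compute when src3 == 0.  The real ff_vector_fmul_add_add_c lives
     * elsewhere in dsputil and may differ in detail. */
    static void fmul_add_add_ref(float *dst, const float *src0, const float *src1,
                                 const float *src2, int src3, int len, int step)
    {
        for (int i = 0; i < len; i++)
            dst[i * step] = src0[i] * src1[i] + src2[i] + src3;
    }

Note that the 3DNow! version issues femms unconditionally at the end, so the MMX/3DNow! register state is cleared even when the work was done by the C fallback.
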
@@ -3407,11 +3423,11 @@
             c->float_to_int16 = float_to_int16_sse;
             c->vector_fmul_reverse = vector_fmul_reverse_sse;
             c->vector_fmul_add_add = vector_fmul_add_add_sse;
         }
         if(mm_flags & MM_3DNOW)
-            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse2
+            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
     }
 
 #ifdef CONFIG_ENCODERS
     dsputil_init_pix_mmx(c, avctx);
 #endif //CONFIG_ENCODERS
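
The last hunk only touches the comment on the init-time dispatch: vector_fmul_add_add_3dnow is preferred over the SSE version (not SSE2, as the old comment claimed) when 3DNow! is available, because its assignment comes later and overwrites the SSE one. For readers unfamiliar with this init pattern, a minimal self-contained sketch of the idea follows; the type and flag names are stand-ins, not the real DSPContext or MM_* constants.

    /* Hedged sketch of runtime CPU dispatch via a function-pointer table.
     * All names below are illustrative, not libavcodec's. */
    typedef void (*vec_fmul_fn)(float *dst, const float *src, int len);

    static void vec_fmul_c(float *dst, const float *src, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] *= src[i];
    }

    /* stand-ins for the asm implementations defined earlier in the diff */
    static void vec_fmul_sse_stub(float *dst, const float *src, int len)   { vec_fmul_c(dst, src, len); }
    static void vec_fmul_3dnow_stub(float *dst, const float *src, int len) { vec_fmul_c(dst, src, len); }

    enum { CPU_3DNOW = 1 << 0, CPU_SSE = 1 << 1 };

    static vec_fmul_fn select_vec_fmul(int cpu_flags)
    {
        vec_fmul_fn fn = vec_fmul_c;          /* portable fallback */
        if (cpu_flags & CPU_SSE)
            fn = vec_fmul_sse_stub;
        if (cpu_flags & CPU_3DNOW)
            fn = vec_fmul_3dnow_stub;         /* checked last, so it wins, as in the hunk above */
        return fn;
    }
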