diff mp3lib/dct64_k7.s @ 1173:3c53cbf53e7e

Better 3dnow! optimization
author nickols_k
date Wed, 20 Jun 2001 07:54:19 +0000
parents ee303142c2e0
children 03b7e2955a20
--- a/mp3lib/dct64_k7.s	Tue Jun 19 23:20:59 2001 +0000
+++ b/mp3lib/dct64_k7.s	Wed Jun 20 07:54:19 2001 +0000
@@ -9,6 +9,9 @@
 ///    (using memory references as instruction operands)
 ///  - Phase 6 was rewritten to mix CPU and MMX opcodes
 ///  - changed the function name to support automatic 3DNow!Ex detection
+///  - negation of a 3dnow reg was replaced with PXOR 0x80000000, MMi instead
+///    of PFMUL, as suggested by the Athlon manual. (Two back-to-back PFMULs
+///    cannot be paired, but PXORs can be.)
 ///
 /// note: because K7 processors are aggressive out-of-order three-way
 ///       superscalar ones, instruction order is not significant for them.
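
The trick the new comment describes is plain IEEE-754 sign-bit manipulation: flipping bit 31 of a single-precision value negates it, so an integer PXOR can stand in for a floating-point PFMUL by -1.0. A minimal C sketch of the idea (illustrative only, not part of this patch; negate_via_xor is a made-up helper name):

    /* negate_via_xor flips bit 31 of an IEEE-754 single, which is
     * exactly what the patch's "pxor plus_minus_3dnow, %mmN" does
     * to the high packed float of an MMX register. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static float negate_via_xor(float x)
    {
        uint32_t bits;
        memcpy(&bits, &x, sizeof bits);  /* well-defined type pun */
        bits ^= 0x80000000u;             /* flip the sign bit */
        memcpy(&x, &bits, sizeof x);
        return x;
    }

    int main(void)
    {
        float v = 3.5f;
        /* prints "-3.500000 -3.500000": XOR and multiply-by--1.0 agree */
        printf("%f %f\n", negate_via_xor(v), v * -1.0f);
        return 0;
    }
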
@@ -21,6 +24,11 @@
 /// this program. Use it at your own risk.
 ///
 
+.data
+        .align 8
+plus_minus_3dnow: .long 0x00000000, 0x80000000
+
+.text
         .globl dct64_3dnowex
         .type    dct64_3dnowex,@function
 
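The 8-byte plus_minus_3dnow constant above pairs a no-op mask for the low dword with a sign-flip mask for the high dword, so a single pxor negates only the high float of a packed MMX pair. A small C sketch of that effect (illustrative, not part of the patch):

    /* XOR a packed pair of floats with {0x00000000, 0x80000000}:
     * the low element is unchanged, the high element is negated,
     * mimicking "pxor plus_minus_3dnow, %mmN". */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        float pair[2] = { 1.25f, 2.5f };                /* low | high */
        const uint32_t mask[2] = { 0x00000000u, 0x80000000u };
        uint32_t bits[2];

        memcpy(bits, pair, sizeof bits);
        bits[0] ^= mask[0];              /* no-op on the low float */
        bits[1] ^= mask[1];              /* sign flip on the high float */
        memcpy(pair, bits, sizeof pair);

        printf("%f %f\n", pair[0], pair[1]);  /* 1.250000 -2.500000 */
        return 0;
    }
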
@@ -412,13 +420,8 @@
         movq   %mm5, 120(%esi)
 
         // 5
-        movl $-1,%eax
-        movd %eax,%mm1
+	movq plus_minus_3dnow, %mm0 /* mm0 = +0.0 | -0.0: pxor with it negates the high float */
         movl $1,%eax
-        movd %eax,%mm0
-        / L | H
-        punpckldq %mm1,%mm0
-        pi2fd %mm0,%mm0       /* mm0 = 1.0 | -1.0 */
         movd %eax,%mm1
         pi2fd %mm1,%mm1
         movl pnts+16,%eax
@@ -433,7 +436,7 @@
         movq 8(%esi),%mm4     /* mm4 = tmp2[2] | tmp2[3]*/
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4    /* mm4 = tmp2[2]+tmp2[3]|tmp2[2]-tmp2[3]*/
-        pfmul %mm0,%mm4       /* mm4 = tmp2[2]+tmp2[3]|tmp2[3]-tmp2[2]*/
+        pxor  %mm0,%mm4       /* mm4 = tmp2[2]+tmp2[3]|tmp2[3]-tmp2[2]*/
         pfmul %mm1,%mm4       /* mm4 = tmp2[2]+tmp2[3]|(tmp2[3]-tmp2[2])*cos0*/
         movq %mm4,%mm5
         psrlq $32,%mm5        /* mm5 = (tmp2[3]-tmp2[2])*cos0 */
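
For reference, the pfpnacc/pswapd/pxor/pfmul sequence above computes, per its comments, the sum in the low element and the scaled, sign-flipped difference in the high one. A scalar C sketch of that butterfly (illustrative; cos0 here is a stand-in value for the pnts[] coefficient loaded into %mm1):

    /* Scalar model of the commented sequence: given a = tmp2[2] and
     * b = tmp2[3], produce a+b (low element) and (b-a)*cos0 (high
     * element). cos0 is a placeholder, not the real table entry. */
    #include <stdio.h>

    int main(void)
    {
        float a = 0.5f, b = 0.25f;   /* tmp2[2], tmp2[3] */
        float cos0 = 0.7071f;        /* stand-in coefficient */

        float lo = a + b;            /* pfpnacc + pswapd: the sum */
        float hi = (b - a) * cos0;   /* pxor flips a-b to b-a; pfmul scales */

        printf("low=%f high=%f\n", lo, hi);
        return 0;
    }
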
@@ -449,7 +452,7 @@
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
 
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5
@@ -470,7 +473,7 @@
         movq 40(%esi),%mm4
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5
@@ -484,7 +487,7 @@
         movq 56(%esi),%mm4
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5
@@ -504,7 +507,7 @@
         movq 72(%esi),%mm4
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5
@@ -518,7 +521,7 @@
         movq 88(%esi),%mm4
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5
@@ -538,7 +541,7 @@
         movq 104(%esi),%mm4
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5
@@ -552,7 +555,7 @@
         movq 120(%esi),%mm4
 	pfpnacc %mm4, %mm4
 	pswapd  %mm4, %mm4
-        pfmul %mm0,%mm4
+        pxor  %mm0,%mm4
         pfmul %mm1,%mm4
         movq %mm4,%mm5
         psrlq $32,%mm5