diff x86/vp8dsp.asm @ 12209:9eef00a43280 libavcodec

Make mmx VP8 WHT faster

Avoid pextrw, since it's slow on many older CPUs. Now it doesn't require mmxext either.
author darkshikari
date Wed, 21 Jul 2010 20:51:01 +0000
parents d38e8565ba05
children baf13deed97e
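
A minimal sketch of the tradeoff described in the commit message, not part of the changeset itself; register names follow the x264asm conventions used in this file, and both fragments are illustrative rather than drop-in code. pextrw is an mmxext instruction and needs one MMX-to-GPR transfer per 16-bit word, while the replacement sequence sticks to baseline MMX: each movd carries two words across, and cheap shifts split them apart.

    ; mmxext path: one pextrw per word, four MMX-to-GPR transfers
    pextrw r1d, m0, 0       ; word 0 of m0
    pextrw r1d, m0, 1       ; word 1
    pextrw r1d, m0, 2       ; word 2
    pextrw r1d, m0, 3       ; word 3

    ; plain-mmx path: two movd transfers, shifts do the rest
    movd   r1d, m0          ; words 0-1
    shr    r1d, 16          ; word 1 now in r1w
    psrlq  m0, 32           ; bring words 2-3 into the low half
    movd   r1d, m0          ; words 2-3
    shr    r1d, 16          ; word 3 now in r1w
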
--- a/x86/vp8dsp.asm	Wed Jul 21 12:37:37 2010 +0000
+++ b/x86/vp8dsp.asm	Wed Jul 21 20:51:01 2010 +0000
@@ -1034,15 +1034,25 @@
 ; void vp8_luma_dc_wht_mmxext(DCTELEM block[4][4][16], DCTELEM dc[16])
 ;-----------------------------------------------------------------------------
 
-%macro SCATTER_WHT 1
-    pextrw r1d, m0, %1
-    pextrw r2d, m1, %1
-    mov [r0+2*16*0], r1w
-    mov [r0+2*16*1], r2w
-    pextrw r1d, m2, %1
-    pextrw r2d, m3, %1
-    mov [r0+2*16*2], r1w
-    mov [r0+2*16*3], r2w
+%macro SCATTER_WHT 3 ; two source mm registers, block-column offset
+    movd  r1d, m%1             ; words 0-1 of row %1
+    movd  r2d, m%2             ; words 0-1 of row %2
+    mov [r0+2*16*(0+%3)], r1w  ; word 0 -> DC of block 0+%3
+    mov [r0+2*16*(1+%3)], r2w  ; word 0 -> DC of block 1+%3
+    shr   r1d, 16              ; expose word 1
+    shr   r2d, 16
+    psrlq m%1, 32              ; bring words 2-3 into the low half
+    psrlq m%2, 32
+    mov [r0+2*16*(4+%3)], r1w  ; word 1 -> DC of block 4+%3
+    mov [r0+2*16*(5+%3)], r2w  ; word 1 -> DC of block 5+%3
+    movd  r1d, m%1             ; words 2-3 of row %1
+    movd  r2d, m%2             ; words 2-3 of row %2
+    mov [r0+2*16*(8+%3)], r1w  ; word 2 -> DC of block 8+%3
+    mov [r0+2*16*(9+%3)], r2w  ; word 2 -> DC of block 9+%3
+    shr   r1d, 16              ; expose word 3
+    shr   r2d, 16
+    mov [r0+2*16*(12+%3)], r1w ; word 3 -> DC of block 12+%3
+    mov [r0+2*16*(13+%3)], r2w ; word 3 -> DC of block 13+%3
 %endmacro
 
 %macro HADAMARD4_1D 4
@@ -1052,7 +1062,7 @@
 %endmacro
 
 INIT_MMX
-cglobal vp8_luma_dc_wht_mmxext, 2,3
+cglobal vp8_luma_dc_wht_mmx, 2,3
     movq          m0, [r1]
     movq          m1, [r1+8]
     movq          m2, [r1+16]
@@ -1065,13 +1075,8 @@
     psraw         m1, 3
     psraw         m2, 3
     psraw         m3, 3
-    SCATTER_WHT   0
-    add           r0, 2*16*4
-    SCATTER_WHT   1
-    add           r0, 2*16*4
-    SCATTER_WHT   2
-    add           r0, 2*16*4
-    SCATTER_WHT   3
+    SCATTER_WHT   0, 1, 0 ; rows 0-1 -> block columns 0-1
+    SCATTER_WHT   2, 3, 2 ; rows 2-3 -> block columns 2-3
     RET
 
 ;-----------------------------------------------------------------------------
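
For reference, the offset arithmetic in SCATTER_WHT performs a transpose as it scatters. Assuming the DCTELEM block[4][4][16] layout named in the prototype, block n's DC coefficient lives at r0 + 2*16*n, so word j of transform row i lands in block 4*j + i:

    ; SCATTER_WHT 0, 1, 0:  m0 -> blocks 0, 4,  8, 12
    ;                       m1 -> blocks 1, 5,  9, 13
    ; SCATTER_WHT 2, 3, 2:  m2 -> blocks 2, 6, 10, 14
    ;                       m3 -> blocks 3, 7, 11, 15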