/*
 This part of the code was taken from Linux-2.4.3 and slightly modified
 for the MMX2 instruction set. I did this because Linux uses page-aligned
 blocks, while MPlayer uses weakly ordered data, so the original sources
 could not speed it up. Only using prefetchnta and movntq together has an
 effect! If you have questions, please contact me: Nick Kurshev: nickols_k@mail.ru.
*/
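/*
 Why the combination matters: prefetchnta loads the source with a
 non-temporal hint, so it does not evict useful cache lines, while
 movntq/movntps store through write-combining buffers and bypass the
 cache on the destination side. A bulk copy touches each byte only once,
 so caching either side is pure pollution; this is why neither
 instruction helps much on its own.
*/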
#ifdef HAVE_MMX2
/* For small memory blocks (below the 512-byte threshold used in
   fast_memcpy) this plain rep-movsb version is faster. Note that
   rep movsb advances EDI/ESI and decrements ECX, so those registers
   are declared as outputs tied to the inputs; the macro therefore
   also advances its to/from arguments. */
#define small_memcpy(to,from,n)\
{\
register unsigned long int dummy;\
__asm__ __volatile__(\
	"rep ; movsb\n"\
	: "=&D" (to), "=&S" (from), "=&c" (dummy)\
	: "0" (to), "1" (from), "2" (n)\
	: "memory");\
}
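/*
 * Usage sketch (illustrative only, not part of the original file): the
 * macro advances its to/from arguments, so pass it scratch copies when
 * the originals are still needed afterwards.
 */
#if 0
static void small_memcpy_demo(void *dst, const void *src)
{
	void *d = dst;
	const void *s = src;
	small_memcpy(d, s, 16); /* copies 16 bytes; d and s end up advanced */
}
#endif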

inline static void * fast_memcpy(void * to, const void * from, unsigned len)
{
	void *p = to; /* memcpy semantics: always return the original destination */
	int i;

	if(len >= 0x200) /* 512-byte blocks */
	{
		i = len >> 6; /* len/64: number of whole 64-byte blocks */
		len &= 63;    /* tail, handled by small_memcpy below */

		/* Warm up the prefetch queue with the first five cache lines
		   before entering the copy loop. */
		__asm__ __volatile__ (
			"prefetchnta (%0)\n"
			"prefetchnta 64(%0)\n"
			"prefetchnta 128(%0)\n"
			"prefetchnta 192(%0)\n"
			"prefetchnta 256(%0)\n"
			: : "r" (from) );
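		/* Each loop iteration below prefetches 320 bytes (five cache
		   lines) ahead of the line being copied, matching the five
		   lines warmed up above. */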
		/*
		 * This algorithm is most effective when the code reads and
		 * writes consecutive blocks the size of a cache line. The
		 * cache-line size is processor-dependent, but it is at least
		 * 32 bytes on any processor. Ideally the number of read and
		 * write instructions would be a multiple of the number of the
		 * processor's decoders, but that is not always possible.
		 */
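		/*
		 * Worked example (not in the original source): for len = 1000,
		 * i = 1000 >> 6 = 15, so the loop moves 15 * 64 = 960 bytes,
		 * and len &= 63 leaves a 40-byte tail for small_memcpy.
		 */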
		for(; i>0; i--)
		{
			__asm__ __volatile__ (
			"prefetchnta 320(%0)\n"
#ifdef HAVE_SSE /* P3 only (possibly also VIA Cyrix III) */
			/* Four unaligned 16-byte loads, four non-temporal stores.
			   NOTE: movntps requires a 16-byte-aligned destination. */
			"movups (%0), %%xmm0\n"
			"movups 16(%0), %%xmm1\n"
			"movntps %%xmm0, (%1)\n"
			"movntps %%xmm1, 16(%1)\n"
			"movups 32(%0), %%xmm0\n"
			"movups 48(%0), %%xmm1\n"
			"movntps %%xmm0, 32(%1)\n"
			"movntps %%xmm1, 48(%1)\n"
#else /* K7 only (possibly other MMX2-capable CPUs) */
			/* Eight 8-byte MMX loads, eight non-temporal stores. */
			"movq (%0), %%mm0\n"
			"movq 8(%0), %%mm1\n"
			"movq 16(%0), %%mm2\n"
			"movq 24(%0), %%mm3\n"
			"movntq %%mm0, (%1)\n"
			"movntq %%mm1, 8(%1)\n"
			"movntq %%mm2, 16(%1)\n"
			"movntq %%mm3, 24(%1)\n"
			"movq 32(%0), %%mm0\n"
			"movq 40(%0), %%mm1\n"
			"movq 48(%0), %%mm2\n"
			"movq 56(%0), %%mm3\n"
			"movntq %%mm0, 32(%1)\n"
			"movntq %%mm1, 40(%1)\n"
			"movntq %%mm2, 48(%1)\n"
			"movntq %%mm3, 56(%1)\n"
#endif
			:: "r" (from), "r" (to) : "memory");
			from += 64; /* void* arithmetic is a GCC extension */
			to += 64;
		}
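		/* Note: non-temporal stores are weakly ordered; strictly, an
		   sfence should follow a stream of movnt stores to make them
		   globally visible to other observers. */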
		/* emms clears the MMX state, which aliases the x87 registers,
		   so that subsequent FPU code is safe. */
		__asm__ __volatile__ ("emms":::"memory");
	}
	/*
	 * Now do the tail of the block.
	 */
	small_memcpy(to, from, len);
	return p;
}
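/*
 * Minimal usage sketch (hypothetical, not part of the original file):
 * fast_memcpy keeps memcpy semantics and returns the original
 * destination pointer, so it can stand in for memcpy transparently.
 */
#if 0
static void fast_memcpy_demo(void)
{
	static unsigned char src[720 * 2]; /* e.g. one 720-pixel YUY2 line */
	static unsigned char dst[720 * 2];
	void *ret = fast_memcpy(dst, src, sizeof dst);
	/* ret == dst, as with the standard memcpy */
	(void)ret;
}
#endif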
/* Route every memcpy call in files that include this header through
   fast_memcpy. */
#define memcpy(a,b,c) fast_memcpy(a,b,c)
#endif