comparison i386/mmx.h @ 31:e4b7c3e5e527 libavcodec

updated mmx macros
author glantau
date Tue, 07 Aug 2001 22:40:52 +0000
parents 986e461dc072
children d34dde800cca
--- i386/mmx.h	30:b8a290072f26
+++ i386/mmx.h	31:e4b7c3e5e527
-/* mmx.h
-
-MultiMedia eXtensions GCC interface library for IA32.
-
-To use this library, simply include this header file
-and compile with GCC. You MUST have inlining enabled
-in order for mmx_ok() to work; this can be done by
-simply using -O on the GCC command line.
-
-Compiling with -DMMX_TRACE will cause detailed trace
-output to be sent to stderr for each mmx operation.
-This adds lots of code, and obviously slows execution to
-a crawl, but can be very useful for debugging.
-
-THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
-LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
-AND FITNESS FOR ANY PARTICULAR PURPOSE.
-
-1997-99 by H. Dietz and R. Fisher
-
-Notes:
-It appears that the latest gas has the pand problem fixed, therefore
-I'll undefine BROKEN_PAND by default.
-*/
-
-#ifndef _MMX_H
-#define _MMX_H
-
-
-/* Warning: at this writing, the version of GAS packaged
-with most Linux distributions does not handle the
-parallel AND operation mnemonic correctly. If the
-symbol BROKEN_PAND is defined, a slower alternative
-coding will be used. If execution of mmxtest results
-in an illegal instruction fault, define this symbol.
-*/
-#undef BROKEN_PAND
-
-
-/* The type of an value that fits in an MMX register
-(note that long long constant values MUST be suffixed
-by LL and unsigned long long values by ULL, lest
-they be truncated by the compiler)
-*/
+/*
+ * mmx.h
+ * Copyright (C) 1997-2001 H. Dietz and R. Fisher
+ */
+
+/*
+ * The type of an value that fits in an MMX register (note that long
+ * long constant values MUST be suffixed by LL and unsigned long long
+ * values by ULL, lest they be truncated by the compiler)
+ */
+
 typedef union {
 long long q; /* Quadword (64-bit) value */
 unsigned long long uq; /* Unsigned Quadword */
 int d[2]; /* 2 Doubleword (32-bit) values */
 unsigned int ud[2]; /* 2 Unsigned Doubleword */
 short w[4]; /* 4 Word (16-bit) values */
 unsigned short uw[4]; /* 4 Unsigned Word */
 char b[8]; /* 8 Byte (8-bit) values */
 unsigned char ub[8]; /* 8 Unsigned Byte */
 float s[2]; /* Single-precision (32-bit) value */
-} __attribute__ ((aligned (8))) mmx_t; /* On an 8-byte (64-bit) boundary */
+} mmx_t; /* On an 8-byte (64-bit) boundary */
 
 
-/* Helper functions for the instruction macros that follow...
-(note that memory-to-register, m2r, instructions are nearly
-as efficient as register-to-register, r2r, instructions;
-however, memory-to-memory instructions are really simulated
-as a convenience, and are only 1/3 as efficient)
-*/
-#ifdef MMX_TRACE
-
-/* Include the stuff for printing a trace to stderr...
-*/
-
-#include <stdio.h>
-
-#define mmx_i2r(op, imm, reg) \
-{ \
-mmx_t mmx_trace; \
-mmx_trace.uq = (imm); \
-fprintf(stderr, #op "_i2r(" #imm "=0x%08x%08x, ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ ("movq %%" #reg ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #reg "=0x%08x%08x) => ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ (#op " %0, %%" #reg \
-: /* nothing */ \
-: "X" (imm)); \
-__asm__ __volatile__ ("movq %%" #reg ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #reg "=0x%08x%08x\n", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-}
-
-#define mmx_m2r(op, mem, reg) \
-{ \
-mmx_t mmx_trace; \
-mmx_trace = (mem); \
-fprintf(stderr, #op "_m2r(" #mem "=0x%08x%08x, ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ ("movq %%" #reg ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #reg "=0x%08x%08x) => ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ (#op " %0, %%" #reg \
-: /* nothing */ \
-: "X" (mem)); \
-__asm__ __volatile__ ("movq %%" #reg ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #reg "=0x%08x%08x\n", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-}
-
-#define mmx_r2m(op, reg, mem) \
-{ \
-mmx_t mmx_trace; \
-__asm__ __volatile__ ("movq %%" #reg ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #op "_r2m(" #reg "=0x%08x%08x, ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-mmx_trace = (mem); \
-fprintf(stderr, #mem "=0x%08x%08x) => ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ (#op " %%" #reg ", %0" \
-: "=X" (mem) \
-: /* nothing */ ); \
-mmx_trace = (mem); \
-fprintf(stderr, #mem "=0x%08x%08x\n", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-}
-
-#define mmx_r2r(op, regs, regd) \
-{ \
-mmx_t mmx_trace; \
-__asm__ __volatile__ ("movq %%" #regs ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #op "_r2r(" #regs "=0x%08x%08x, ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ ("movq %%" #regd ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #regd "=0x%08x%08x) => ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ (#op " %" #regs ", %" #regd); \
-__asm__ __volatile__ ("movq %%" #regd ", %0" \
-: "=X" (mmx_trace) \
-: /* nothing */ ); \
-fprintf(stderr, #regd "=0x%08x%08x\n", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-}
-
-#define mmx_m2m(op, mems, memd) \
-{ \
-mmx_t mmx_trace; \
-mmx_trace = (mems); \
-fprintf(stderr, #op "_m2m(" #mems "=0x%08x%08x, ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-mmx_trace = (memd); \
-fprintf(stderr, #memd "=0x%08x%08x) => ", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-__asm__ __volatile__ ("movq %0, %%mm0\n\t" \
-#op " %1, %%mm0\n\t" \
-"movq %%mm0, %0" \
-: "=X" (memd) \
-: "X" (mems)); \
-mmx_trace = (memd); \
-fprintf(stderr, #memd "=0x%08x%08x\n", \
-mmx_trace.d[1], mmx_trace.d[0]); \
-}
-
-#else
-
-/* These macros are a lot simpler without the tracing...
-*/
-
-#define mmx_i2r(op, imm, reg) \
+#define mmx_i2r(op,imm,reg) \
 __asm__ __volatile__ (#op " %0, %%" #reg \
 : /* nothing */ \
 : "i" (imm) )
 
-#define mmx_m2r(op, mem, reg) \
+#define mmx_m2r(op,mem,reg) \
 __asm__ __volatile__ (#op " %0, %%" #reg \
 : /* nothing */ \
 : "m" (mem))
 
-#define mmx_r2m(op, reg, mem) \
+#define mmx_r2m(op,reg,mem) \
 __asm__ __volatile__ (#op " %%" #reg ", %0" \
 : "=m" (mem) \
 : /* nothing */ )
 
-#define mmx_r2r(op, regs, regd) \
+#define mmx_r2r(op,regs,regd) \
 __asm__ __volatile__ (#op " %" #regs ", %" #regd)
 
-#define mmx_m2m(op, mems, memd) \
-__asm__ __volatile__ ("movq %0, %%mm0\n\t" \
-#op " %1, %%mm0\n\t" \
-"movq %%mm0, %0" \
-: "=m" (memd) \
-: "m" (mems))
-
-#endif
-
-
-/* 1x64 MOVe Quadword
-(this is both a load and a store...
-in fact, it is the only way to store)
-*/
-#define movq_m2r(var, reg) mmx_m2r(movq, var, reg)
-#define movq_r2m(reg, var) mmx_r2m(movq, reg, var)
-#define movq_r2r(regs, regd) mmx_r2r(movq, regs, regd)
-#define movq(vars, vard) \
-__asm__ __volatile__ ("movq %1, %%mm0\n\t" \
-"movq %%mm0, %0" \
-: "=X" (vard) \
-: "X" (vars))
-
-
-/* 1x32 MOVe Doubleword
-(like movq, this is both load and store...
-but is most useful for moving things between
-mmx registers and ordinary registers)
-*/
-#define movd_m2r(var, reg) mmx_m2r(movd, var, reg)
-#define movd_r2m(reg, var) mmx_r2m(movd, reg, var)
-#define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd)
-#define movd(vars, vard) \
-__asm__ __volatile__ ("movd %1, %%mm0\n\t" \
-"movd %%mm0, %0" \
-: "=X" (vard) \
-: "X" (vars))
-
-
-/* 2x32, 4x16, and 8x8 Parallel ADDs
-*/
-#define paddd_m2r(var, reg) mmx_m2r(paddd, var, reg)
-#define paddd_r2r(regs, regd) mmx_r2r(paddd, regs, regd)
-#define paddd(vars, vard) mmx_m2m(paddd, vars, vard)
-
-#define paddw_m2r(var, reg) mmx_m2r(paddw, var, reg)
-#define paddw_r2r(regs, regd) mmx_r2r(paddw, regs, regd)
-#define paddw(vars, vard) mmx_m2m(paddw, vars, vard)
-
-#define paddb_m2r(var, reg) mmx_m2r(paddb, var, reg)
-#define paddb_r2r(regs, regd) mmx_r2r(paddb, regs, regd)
-#define paddb(vars, vard) mmx_m2m(paddb, vars, vard)
-
-
-/* 4x16 and 8x8 Parallel ADDs using Saturation arithmetic
+
+#define emms() __asm__ __volatile__ ("emms")
+
+#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
+#define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
+#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd)
+
+#define movq_m2r(var,reg) mmx_m2r (movq, var, reg)
+#define movq_r2m(reg,var) mmx_r2m (movq, reg, var)
+#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd)
+
+#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg)
+#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd)
+#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg)
+#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd)
+
+#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg)
+#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd)
+
+#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg)
+#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd)
+#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg)
+#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd)
+#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg)
+#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd)
+
+#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg)
+#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd)
+#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg)
+#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd)
+
+#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg)
+#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd)
+#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg)
+#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd)
+
+#define pand_m2r(var,reg) mmx_m2r (pand, var, reg)
+#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd)
+
+#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg)
+#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd)
+
+#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg)
+#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd)
+#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg)
+#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd)
+#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg)
+#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd)
+
+#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg)
+#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd)
+#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg)
+#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd)
+#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg)
+#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd)
-*/
-#define paddsw_m2r(var, reg) mmx_m2r(paddsw, var, reg)
-#define paddsw_r2r(regs, regd) mmx_r2r(paddsw, regs, regd)
-#define paddsw(vars, vard) mmx_m2m(paddsw, vars, vard)
-
-#define paddsb_m2r(var, reg) mmx_m2r(paddsb, var, reg)
-#define paddsb_r2r(regs, regd) mmx_r2r(paddsb, regs, regd)
-#define paddsb(vars, vard) mmx_m2m(paddsb, vars, vard)
-
-
-/* 4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
-*/
-#define paddusw_m2r(var, reg) mmx_m2r(paddusw, var, reg)
-#define paddusw_r2r(regs, regd) mmx_r2r(paddusw, regs, regd)
-#define paddusw(vars, vard) mmx_m2m(paddusw, vars, vard)
-
-#define paddusb_m2r(var, reg) mmx_m2r(paddusb, var, reg)
-#define paddusb_r2r(regs, regd) mmx_r2r(paddusb, regs, regd)
-#define paddusb(vars, vard) mmx_m2m(paddusb, vars, vard)
-
-
-/* 2x32, 4x16, and 8x8 Parallel SUBs
-*/
-#define psubd_m2r(var, reg) mmx_m2r(psubd, var, reg)
-#define psubd_r2r(regs, regd) mmx_r2r(psubd, regs, regd)
-#define psubd(vars, vard) mmx_m2m(psubd, vars, vard)
-
-#define psubw_m2r(var, reg) mmx_m2r(psubw, var, reg)
-#define psubw_r2r(regs, regd) mmx_r2r(psubw, regs, regd)
-#define psubw(vars, vard) mmx_m2m(psubw, vars, vard)
-
-#define psubb_m2r(var, reg) mmx_m2r(psubb, var, reg)
-#define psubb_r2r(regs, regd) mmx_r2r(psubb, regs, regd)
-#define psubb(vars, vard) mmx_m2m(psubb, vars, vard)
-
-
-/* 4x16 and 8x8 Parallel SUBs using Saturation arithmetic
-*/
-#define psubsw_m2r(var, reg) mmx_m2r(psubsw, var, reg)
-#define psubsw_r2r(regs, regd) mmx_r2r(psubsw, regs, regd)
-#define psubsw(vars, vard) mmx_m2m(psubsw, vars, vard)
-
-#define psubsb_m2r(var, reg) mmx_m2r(psubsb, var, reg)
-#define psubsb_r2r(regs, regd) mmx_r2r(psubsb, regs, regd)
-#define psubsb(vars, vard) mmx_m2m(psubsb, vars, vard)
-
-
-/* 4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
-*/
-#define psubusw_m2r(var, reg) mmx_m2r(psubusw, var, reg)
-#define psubusw_r2r(regs, regd) mmx_r2r(psubusw, regs, regd)
-#define psubusw(vars, vard) mmx_m2m(psubusw, vars, vard)
-
-#define psubusb_m2r(var, reg) mmx_m2r(psubusb, var, reg)
-#define psubusb_r2r(regs, regd) mmx_r2r(psubusb, regs, regd)
-#define psubusb(vars, vard) mmx_m2m(psubusb, vars, vard)
+
+#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg)
+#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd)
+
+#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg)
+#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd)
+
+#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg)
+#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd)
+
+#define por_m2r(var,reg) mmx_m2r (por, var, reg)
+#define por_r2r(regs,regd) mmx_r2r (por, regs, regd)
+
+#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg)
+#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg)
+#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd)
+#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg)
+#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg)
+#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd)
+#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg)
+#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg)
+#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd)
+
+#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg)
+#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg)
+#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd)
+#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg)
+#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg)
+#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd)
+
+#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg)
+#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg)
+#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd)
+#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg)
+#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg)
+#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd)
+#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg)
+#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg)
+#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd)
+
+#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg)
+#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd)
+#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg)
+#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd)
+#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg)
+#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd)
+
+#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg)
+#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd)
+#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg)
+#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd)
+
+#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg)
+#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd)
+#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg)
+#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd)
-
-
-/* 4x16 Parallel MULs giving Low 4x16 portions of results
-*/
-#define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
-#define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
-#define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard)
-
-
-/* 4x16 Parallel MULs giving High 4x16 portions of results
-*/
-#define pmulhw_m2r(var, reg) mmx_m2r(pmulhw, var, reg)
-#define pmulhw_r2r(regs, regd) mmx_r2r(pmulhw, regs, regd)
-#define pmulhw(vars, vard) mmx_m2m(pmulhw, vars, vard)
-
-
-/* 4x16->2x32 Parallel Mul-ADD
-(muls like pmullw, then adds adjacent 16-bit fields
-in the multiply result to make the final 2x32 result)
-*/
-#define pmaddwd_m2r(var, reg) mmx_m2r(pmaddwd, var, reg)
-#define pmaddwd_r2r(regs, regd) mmx_r2r(pmaddwd, regs, regd)
-#define pmaddwd(vars, vard) mmx_m2m(pmaddwd, vars, vard)
-
-
-/* 1x64 bitwise AND
-*/
-#ifdef BROKEN_PAND
-#define pand_m2r(var, reg) \
-{ \
-mmx_m2r(pandn, (mmx_t) -1LL, reg); \
-mmx_m2r(pandn, var, reg); \
-}
-#define pand_r2r(regs, regd) \
-{ \
-mmx_m2r(pandn, (mmx_t) -1LL, regd); \
-mmx_r2r(pandn, regs, regd) \
-}
-#define pand(vars, vard) \
-{ \
-movq_m2r(vard, mm0); \
-mmx_m2r(pandn, (mmx_t) -1LL, mm0); \
-mmx_m2r(pandn, vars, mm0); \
-movq_r2m(mm0, vard); \
-}
-#else
-#define pand_m2r(var, reg) mmx_m2r(pand, var, reg)
-#define pand_r2r(regs, regd) mmx_r2r(pand, regs, regd)
-#define pand(vars, vard) mmx_m2m(pand, vars, vard)
-#endif
+
+#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg)
+#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd)
+#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg)
+#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd)
+#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg)
+#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd)
+
+#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg)
+#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd)
+#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg)
+#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd)
+#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg)
+#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd)
+
+#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
+#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
+
+
+/* 3DNOW extensions */
+
+#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
+#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
+
+
+/* AMD MMX extensions - also available in intel SSE */
+
+
+#define mmx_m2ri(op,mem,reg,imm) \
+__asm__ __volatile__ (#op " %1, %0, %%" #reg \
+: /* nothing */ \
+: "X" (mem), "X" (imm))
+#define mmx_r2ri(op,regs,regd,imm) \
+__asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
+: /* nothing */ \
+: "X" (imm) )
+
+#define mmx_fetch(mem,hint) \
+__asm__ __volatile__ ("prefetch" #hint " %0" \
+: /* nothing */ \
+: "X" (mem))
+
+
+#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
+
+#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
+
+#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg)
+#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd)
+#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg)
-
-
-/* 1x64 bitwise AND with Not the destination
-*/
-#define pandn_m2r(var, reg) mmx_m2r(pandn, var, reg)
-#define pandn_r2r(regs, regd) mmx_r2r(pandn, regs, regd)
-#define pandn(vars, vard) mmx_m2m(pandn, vars, vard)
-
-
-/* 1x64 bitwise OR
-*/
-#define por_m2r(var, reg) mmx_m2r(por, var, reg)
-#define por_r2r(regs, regd) mmx_r2r(por, regs, regd)
-#define por(vars, vard) mmx_m2m(por, vars, vard)
-
-
-/* 1x64 bitwise eXclusive OR
-*/
-#define pxor_m2r(var, reg) mmx_m2r(pxor, var, reg)
-#define pxor_r2r(regs, regd) mmx_r2r(pxor, regs, regd)
-#define pxor(vars, vard) mmx_m2m(pxor, vars, vard)
-
-
-/* 2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
-(resulting fields are either 0 or -1)
-*/
-#define pcmpeqd_m2r(var, reg) mmx_m2r(pcmpeqd, var, reg)
-#define pcmpeqd_r2r(regs, regd) mmx_r2r(pcmpeqd, regs, regd)
-#define pcmpeqd(vars, vard) mmx_m2m(pcmpeqd, vars, vard)
-
-#define pcmpeqw_m2r(var, reg) mmx_m2r(pcmpeqw, var, reg)
-#define pcmpeqw_r2r(regs, regd) mmx_r2r(pcmpeqw, regs, regd)
-#define pcmpeqw(vars, vard) mmx_m2m(pcmpeqw, vars, vard)
-
-#define pcmpeqb_m2r(var, reg) mmx_m2r(pcmpeqb, var, reg)
-#define pcmpeqb_r2r(regs, regd) mmx_r2r(pcmpeqb, regs, regd)
+#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd)
+
+#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm)
+
+#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm)
+
+#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg)
+#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd)
+
+#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg)
+#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd)
+
+#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg)
+#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd)
+
+#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg)
+#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)
+
+#define pmovmskb(mmreg,reg) \
+__asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg)
+
+#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
+#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)
+
+#define prefetcht0(mem) mmx_fetch (mem, t0)
+#define prefetcht1(mem) mmx_fetch (mem, t1)
+#define prefetcht2(mem) mmx_fetch (mem, t2)
+#define prefetchnta(mem) mmx_fetch (mem, nta)
+
+#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg)
+#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd)
+
+#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
+#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
+
+#define sfence() __asm__ __volatile__ ("sfence\n\t")
-#define pcmpeqb(vars, vard) mmx_m2m(pcmpeqb, vars, vard)
-
-
-/* 2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
-(resulting fields are either 0 or -1)
-*/
-#define pcmpgtd_m2r(var, reg) mmx_m2r(pcmpgtd, var, reg)
-#define pcmpgtd_r2r(regs, regd) mmx_r2r(pcmpgtd, regs, regd)
-#define pcmpgtd(vars, vard) mmx_m2m(pcmpgtd, vars, vard)
-
-#define pcmpgtw_m2r(var, reg) mmx_m2r(pcmpgtw, var, reg)
-#define pcmpgtw_r2r(regs, regd) mmx_r2r(pcmpgtw, regs, regd)
-#define pcmpgtw(vars, vard) mmx_m2m(pcmpgtw, vars, vard)
-
-#define pcmpgtb_m2r(var, reg) mmx_m2r(pcmpgtb, var, reg)
-#define pcmpgtb_r2r(regs, regd) mmx_r2r(pcmpgtb, regs, regd)
-#define pcmpgtb(vars, vard) mmx_m2m(pcmpgtb, vars, vard)
-
-
-/* 1x64, 2x32, and 4x16 Parallel Shift Left Logical
-*/
-#define psllq_i2r(imm, reg) mmx_i2r(psllq, imm, reg)
-#define psllq_m2r(var, reg) mmx_m2r(psllq, var, reg)
-#define psllq_r2r(regs, regd) mmx_r2r(psllq, regs, regd)
-#define psllq(vars, vard) mmx_m2m(psllq, vars, vard)
-
-#define pslld_i2r(imm, reg) mmx_i2r(pslld, imm, reg)
-#define pslld_m2r(var, reg) mmx_m2r(pslld, var, reg)
-#define pslld_r2r(regs, regd) mmx_r2r(pslld, regs, regd)
-#define pslld(vars, vard) mmx_m2m(pslld, vars, vard)
-
-#define psllw_i2r(imm, reg) mmx_i2r(psllw, imm, reg)
-#define psllw_m2r(var, reg) mmx_m2r(psllw, var, reg)
-#define psllw_r2r(regs, regd) mmx_r2r(psllw, regs, regd)
-#define psllw(vars, vard) mmx_m2m(psllw, vars, vard)
-
-
-/* 1x64, 2x32, and 4x16 Parallel Shift Right Logical
-*/
-#define psrlq_i2r(imm, reg) mmx_i2r(psrlq, imm, reg)
-#define psrlq_m2r(var, reg) mmx_m2r(psrlq, var, reg)
-#define psrlq_r2r(regs, regd) mmx_r2r(psrlq, regs, regd)
-#define psrlq(vars, vard) mmx_m2m(psrlq, vars, vard)
-
-#define psrld_i2r(imm, reg) mmx_i2r(psrld, imm, reg)
-#define psrld_m2r(var, reg) mmx_m2r(psrld, var, reg)
-#define psrld_r2r(regs, regd) mmx_r2r(psrld, regs, regd)
-#define psrld(vars, vard) mmx_m2m(psrld, vars, vard)
-
-#define psrlw_i2r(imm, reg) mmx_i2r(psrlw, imm, reg)
-#define psrlw_m2r(var, reg) mmx_m2r(psrlw, var, reg)
-#define psrlw_r2r(regs, regd) mmx_r2r(psrlw, regs, regd)
-#define psrlw(vars, vard) mmx_m2m(psrlw, vars, vard)
-
-
-/* 2x32 and 4x16 Parallel Shift Right Arithmetic
-*/
-#define psrad_i2r(imm, reg) mmx_i2r(psrad, imm, reg)
-#define psrad_m2r(var, reg) mmx_m2r(psrad, var, reg)
-#define psrad_r2r(regs, regd) mmx_r2r(psrad, regs, regd)
-#define psrad(vars, vard) mmx_m2m(psrad, vars, vard)
-
-#define psraw_i2r(imm, reg) mmx_i2r(psraw, imm, reg)
-#define psraw_m2r(var, reg) mmx_m2r(psraw, var, reg)
-#define psraw_r2r(regs, regd) mmx_r2r(psraw, regs, regd)
-#define psraw(vars, vard) mmx_m2m(psraw, vars, vard)
-
-
-/* 2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
-(packs source and dest fields into dest in that order)
-*/
-#define packssdw_m2r(var, reg) mmx_m2r(packssdw, var, reg)
-#define packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
-#define packssdw(vars, vard) mmx_m2m(packssdw, vars, vard)
-
-#define packsswb_m2r(var, reg) mmx_m2r(packsswb, var, reg)
-#define packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
-#define packsswb(vars, vard) mmx_m2m(packsswb, vars, vard)
-
-
-/* 4x16->8x8 PACK and Unsigned Saturate
-(packs source and dest fields into dest in that order)
-*/
-#define packuswb_m2r(var, reg) mmx_m2r(packuswb, var, reg)
-#define packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
-#define packuswb(vars, vard) mmx_m2m(packuswb, vars, vard)
-
-
-/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
-(interleaves low half of dest with low half of source
-as padding in each result field)
-*/
-#define punpckldq_m2r(var, reg) mmx_m2r(punpckldq, var, reg)
-#define punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
-#define punpckldq(vars, vard) mmx_m2m(punpckldq, vars, vard)
-
-#define punpcklwd_m2r(var, reg) mmx_m2r(punpcklwd, var, reg)
-#define punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
-#define punpcklwd(vars, vard) mmx_m2m(punpcklwd, vars, vard)
-
-#define punpcklbw_m2r(var, reg) mmx_m2r(punpcklbw, var, reg)
-#define punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
-#define punpcklbw(vars, vard) mmx_m2m(punpcklbw, vars, vard)
-
-
-/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
-(interleaves high half of dest with high half of source
-as padding in each result field)
-*/
-#define punpckhdq_m2r(var, reg) mmx_m2r(punpckhdq, var, reg)
-#define punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
-#define punpckhdq(vars, vard) mmx_m2m(punpckhdq, vars, vard)
-
-#define punpckhwd_m2r(var, reg) mmx_m2r(punpckhwd, var, reg)
-#define punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
-#define punpckhwd(vars, vard) mmx_m2m(punpckhwd, vars, vard)
-
-#define punpckhbw_m2r(var, reg) mmx_m2r(punpckhbw, var, reg)
-#define punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
-#define punpckhbw(vars, vard) mmx_m2m(punpckhbw, vars, vard)
-
-
-/* Empty MMx State
-(used to clean-up when going from mmx to float use
-of the registers that are shared by both; note that
-there is no float-to-mmx operation needed, because
-only the float tag word info is corruptible)
-*/
-#ifdef MMX_TRACE
-
-#define emms() \
-{ \
-fprintf(stderr, "emms()\n"); \
-__asm__ __volatile__ ("emms"); \
-}
-
-#else
-
-#define emms() __asm__ __volatile__ ("emms")
-
-#endif
-
-#endif
-
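
Usage note: each wrapper above expands to a single inline-asm instruction, with the suffix naming the operand direction (_i2r immediate to register, _m2r memory to register, _r2r register to register, _r2m register to memory), and emms() must be issued before returning to floating-point code. A minimal sketch of how the macros are typically driven, assuming mmx.h is included from the same directory and the buffers are 8-byte wide and 8-byte aligned; the function and variable names below are illustrative, not part of this changeset:

    #include "mmx.h"

    /* Illustrative only: add one 8-byte block to another with
     * unsigned byte-wise saturation (paddusb), then leave MMX state. */
    static void add_block_u8(unsigned char *dst, const unsigned char *src)
    {
        movq_m2r(*(const mmx_t *) src, mm0);  /* load 8 source bytes into mm0 */
        movq_m2r(*(const mmx_t *) dst, mm1);  /* load 8 destination bytes into mm1 */
        paddusb_r2r(mm0, mm1);                /* mm1 = saturated byte-wise sum */
        movq_r2m(mm1, *(mmx_t *) dst);        /* store the packed result */
        emms();                               /* reset the FPU tag word before float code */
    }

The _i2r forms take an immediate operand instead, e.g. psrlq_i2r(32, mm1) shifts the 64-bit value in mm1 right by 32 bits; the new prefetch*, pshufw_* and sfence() wrappers follow the same pattern.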