annotate i386/mmx.h @ 2440:cf97353f94c6 (libavcodec)

description: div by zero aspect fix
author:      michael
date:        Wed, 19 Jan 2005 13:24:43 +0000
parents:     15cfba1b97b5
children:    e04773e8b253

/*
 * mmx.h
 * Copyright (C) 1997-2001 H. Dietz and R. Fisher
 */
#ifndef AVCODEC_I386MMX_H
#define AVCODEC_I386MMX_H

#ifdef ARCH_X86_64
# define REG_a "rax"
#else
# define REG_a "eax"
#endif

/*
 * The type of a value that fits in an MMX register (note that long
 * long constant values MUST be suffixed by LL and unsigned long long
 * values by ULL, lest they be truncated by the compiler)
 */

typedef union {
        long long               q;      /* Quadword (64-bit) value */
        unsigned long long      uq;     /* Unsigned Quadword */
        int                     d[2];   /* 2 Doubleword (32-bit) values */
        unsigned int            ud[2];  /* 2 Unsigned Doubleword */
        short                   w[4];   /* 4 Word (16-bit) values */
        unsigned short          uw[4];  /* 4 Unsigned Word */
        char                    b[8];   /* 8 Byte (8-bit) values */
        unsigned char           ub[8];  /* 8 Unsigned Byte */
        float                   s[2];   /* 2 Single-precision (32-bit) values */
} mmx_t;        /* On an 8-byte (64-bit) boundary */
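
/*
 * Usage sketch (hypothetical example): an mmx_t constant carrying the LL
 * suffix required by the note above.  The name and value are made up.
 *
 *     static const mmx_t hypothetical_ones = { 0x0001000100010001LL };
 */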


#define mmx_i2r(op,imm,reg) \
        __asm__ __volatile__ (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "i" (imm) )

#define mmx_m2r(op,mem,reg) \
        __asm__ __volatile__ (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "m" (mem))

#define mmx_r2m(op,reg,mem) \
        __asm__ __volatile__ (#op " %%" #reg ", %0" \
                              : "=m" (mem) \
                              : /* nothing */ )

#define mmx_r2r(op,regs,regd) \
        __asm__ __volatile__ (#op " %" #regs ", %" #regd)


#define emms() __asm__ __volatile__ ("emms")

#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
#define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd)

#define movq_m2r(var,reg) mmx_m2r (movq, var, reg)
#define movq_r2m(reg,var) mmx_r2m (movq, reg, var)
#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd)

#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg)
#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd)
#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg)
#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd)

#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg)
#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd)

#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg)
#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd)
#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg)
#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd)
#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg)
#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd)

#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg)
#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd)
#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg)
#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd)

#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg)
#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd)
#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg)
#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd)

#define pand_m2r(var,reg) mmx_m2r (pand, var, reg)
#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd)

#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg)
#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd)

#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg)
#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd)
#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg)
#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd)
#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg)
#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd)

#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg)
#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd)
#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg)
#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd)
#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg)
#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd)

#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg)
#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd)

#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg)
#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd)

#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg)
#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd)

#define por_m2r(var,reg) mmx_m2r (por, var, reg)
#define por_r2r(regs,regd) mmx_r2r (por, regs, regd)

#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg)
#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg)
#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd)
#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg)
#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg)
#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd)
#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg)
#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg)
#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd)

#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg)
#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg)
#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd)
#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg)
#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg)
#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd)

#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg)
#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg)
#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd)
#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg)
#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg)
#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd)
#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg)
#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg)
#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd)

#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg)
#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd)
#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg)
#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd)
#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg)
#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd)

#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg)
#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd)
#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg)
#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd)

#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg)
#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd)
#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg)
#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd)

#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg)
#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd)
#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg)
#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd)
#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg)
#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd)

#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg)
#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd)
#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg)
#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd)
#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg)
#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd)

#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)

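/*
 * Usage sketch (hypothetical example): a minimal routine built from the
 * macros above.  The function name and operands are made up; code like
 * this must be compiled with MMX support and should issue emms() before
 * returning to floating-point code.
 *
 *     static void add_bytes_mmx(unsigned char *dst, const unsigned char *src)
 *     {
 *         movq_m2r(*src, mm0);     // load 8 bytes from src into mm0
 *         paddusb_m2r(*dst, mm0);  // add 8 bytes from dst with unsigned saturation
 *         movq_r2m(mm0, *dst);     // store the 8-byte result back to dst
 *         emms();                  // clear MMX state before any FPU use
 *     }
 */
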
/* 3DNOW extensions */

#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)


/* AMD MMX extensions - also available in Intel SSE */


#define mmx_m2ri(op,mem,reg,imm) \
        __asm__ __volatile__ (#op " %1, %0, %%" #reg \
                              : /* nothing */ \
                              : "X" (mem), "X" (imm))
#define mmx_r2ri(op,regs,regd,imm) \
        __asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
                              : /* nothing */ \
                              : "X" (imm) )

#define mmx_fetch(mem,hint) \
        __asm__ __volatile__ ("prefetch" #hint " %0" \
                              : /* nothing */ \
                              : "X" (mem))


#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)

#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)

#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg)
#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd)
#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg)
#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd)

#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm)

#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm)

#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg)
#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd)

#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg)
#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd)

#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg)
#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd)

#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg)
#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)

#define pmovmskb(mmreg,reg) \
        __asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg)

#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)

#define prefetcht0(mem) mmx_fetch (mem, t0)
#define prefetcht1(mem) mmx_fetch (mem, t1)
#define prefetcht2(mem) mmx_fetch (mem, t2)
#define prefetchnta(mem) mmx_fetch (mem, nta)

#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg)
#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd)

#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
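
/*
 * Usage sketch (hypothetical example) for the *_r2ri forms: the last
 * argument is an 8-bit immediate selector.  A selector of 0x1b reverses
 * the four words of mm1 into mm0.
 *
 *     pshufw_r2r(mm1, mm0, 0x1b);  // mm0.w[0..3] = mm1.w[3..0]
 */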

#define sfence() __asm__ __volatile__ ("sfence\n\t")

/* SSE2 */
#define pshufhw_m2r(var,reg,imm) mmx_m2ri(pshufhw, var, reg, imm)
#define pshufhw_r2r(regs,regd,imm) mmx_r2ri(pshufhw, regs, regd, imm)
#define pshuflw_m2r(var,reg,imm) mmx_m2ri(pshuflw, var, reg, imm)
#define pshuflw_r2r(regs,regd,imm) mmx_r2ri(pshuflw, regs, regd, imm)

#define pshufd_r2r(regs,regd,imm) mmx_r2ri(pshufd, regs, regd, imm)

#define movdqa_m2r(var,reg) mmx_m2r (movdqa, var, reg)
#define movdqa_r2m(reg,var) mmx_r2m (movdqa, reg, var)
#define movdqa_r2r(regs,regd) mmx_r2r (movdqa, regs, regd)
#define movdqu_m2r(var,reg) mmx_m2r (movdqu, var, reg)
#define movdqu_r2m(reg,var) mmx_r2m (movdqu, reg, var)
#define movdqu_r2r(regs,regd) mmx_r2r (movdqu, regs, regd)
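
/*
 * Alignment note (hypothetical example): movdqa faults if its memory
 * operand is not 16-byte aligned, while movdqu accepts any address.
 * The buffer and pointer names below are made up.
 *
 *     static unsigned char aligned_buf[16] __attribute__ ((aligned (16)));
 *
 *     movdqa_m2r(*aligned_buf, xmm0);    // OK: aligned_buf is 16-byte aligned
 *     movdqu_m2r(*unaligned_ptr, xmm1);  // use movdqu when alignment is unknown
 */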

#define pmullw_r2m(reg,var) mmx_r2m (pmullw, reg, var)

#define pslldq_i2r(imm,reg) mmx_i2r (pslldq, imm, reg)
#define psrldq_i2r(imm,reg) mmx_i2r (psrldq, imm, reg)

#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)


#endif /* AVCODEC_I386MMX_H */