/*
 * mmx.h
 * Copyright (C) 1997-2001 H. Dietz and R. Fisher
 */
#ifndef AVCODEC_I386MMX_H
#define AVCODEC_I386MMX_H

#ifdef ARCH_X86_64
# define REG_a "rax"
# define REG_b "rbx"
# define REG_c "rcx"
# define REG_d "rdx"
# define REG_D "rdi"
# define REG_S "rsi"
# define PTR_SIZE "8"
#else
# define REG_a "eax"
# define REG_b "ebx"
# define REG_c "ecx"
# define REG_d "edx"
# define REG_D "edi"
# define REG_S "esi"
# define PTR_SIZE "4"
#endif

/*
 * The type of a value that fits in an MMX register (note that long
 * long constant values MUST be suffixed by LL and unsigned long long
 * values by ULL, lest they be truncated by the compiler)
 */

typedef union {
    long long           q;      /* Quadword (64-bit) value */
    unsigned long long  uq;     /* Unsigned Quadword */
    int                 d[2];   /* 2 Doubleword (32-bit) values */
    unsigned int        ud[2];  /* 2 Unsigned Doubleword */
    short               w[4];   /* 4 Word (16-bit) values */
    unsigned short      uw[4];  /* 4 Unsigned Word */
    char                b[8];   /* 8 Byte (8-bit) values */
    unsigned char       ub[8];  /* 8 Unsigned Byte */
    float               s[2];   /* 2 Single-precision (32-bit) values */
} mmx_t;        /* On an 8-byte (64-bit) boundary */
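
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * an 8-byte MMX constant built through one member of the union, here four
 * packed words of 0x0002 written via the quadword member, with the LL
 * suffix the comment above insists on. The name is hypothetical.
 */
static const mmx_t mmx_example_rounder = { 0x0002000200020002LL };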


#define         mmx_i2r(op,imm,reg) \
        __asm__ __volatile__ (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "i" (imm) )

#define         mmx_m2r(op,mem,reg) \
        __asm__ __volatile__ (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "m" (mem))

#define         mmx_r2m(op,reg,mem) \
        __asm__ __volatile__ (#op " %%" #reg ", %0" \
                              : "=m" (mem) \
                              : /* nothing */ )

#define         mmx_r2r(op,regs,regd) \
        __asm__ __volatile__ (#op " %" #regs ", %" #regd)
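
/*
 * Expansion sketch (editorial note, not part of the original header): the
 * # operator stringizes the opcode and register tokens, so
 *     mmx_m2r (movq, src, mm0)
 * becomes
 *     __asm__ __volatile__ ("movq %0, %%mm0" : : "m" (src));
 * mmx_r2r writes a single % because an asm statement without operands is
 * not subject to %-escaping.
 */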


#define         emms() __asm__ __volatile__ ("emms")

#define         movd_m2r(var,reg)           mmx_m2r (movd, var, reg)
#define         movd_r2m(reg,var)           mmx_r2m (movd, reg, var)
#define         movd_r2r(regs,regd)         mmx_r2r (movd, regs, regd)

#define         movq_m2r(var,reg)           mmx_m2r (movq, var, reg)
#define         movq_r2m(reg,var)           mmx_r2m (movq, reg, var)
#define         movq_r2r(regs,regd)         mmx_r2r (movq, regs, regd)

#define         packssdw_m2r(var,reg)       mmx_m2r (packssdw, var, reg)
#define         packssdw_r2r(regs,regd)     mmx_r2r (packssdw, regs, regd)
#define         packsswb_m2r(var,reg)       mmx_m2r (packsswb, var, reg)
#define         packsswb_r2r(regs,regd)     mmx_r2r (packsswb, regs, regd)

#define         packuswb_m2r(var,reg)       mmx_m2r (packuswb, var, reg)
#define         packuswb_r2r(regs,regd)     mmx_r2r (packuswb, regs, regd)

#define         paddb_m2r(var,reg)          mmx_m2r (paddb, var, reg)
#define         paddb_r2r(regs,regd)        mmx_r2r (paddb, regs, regd)
#define         paddd_m2r(var,reg)          mmx_m2r (paddd, var, reg)
#define         paddd_r2r(regs,regd)        mmx_r2r (paddd, regs, regd)
#define         paddw_m2r(var,reg)          mmx_m2r (paddw, var, reg)
#define         paddw_r2r(regs,regd)        mmx_r2r (paddw, regs, regd)

#define         paddsb_m2r(var,reg)         mmx_m2r (paddsb, var, reg)
#define         paddsb_r2r(regs,regd)       mmx_r2r (paddsb, regs, regd)
#define         paddsw_m2r(var,reg)         mmx_m2r (paddsw, var, reg)
#define         paddsw_r2r(regs,regd)       mmx_r2r (paddsw, regs, regd)

#define         paddusb_m2r(var,reg)        mmx_m2r (paddusb, var, reg)
#define         paddusb_r2r(regs,regd)      mmx_r2r (paddusb, regs, regd)
#define         paddusw_m2r(var,reg)        mmx_m2r (paddusw, var, reg)
#define         paddusw_r2r(regs,regd)      mmx_r2r (paddusw, regs, regd)

#define         pand_m2r(var,reg)           mmx_m2r (pand, var, reg)
#define         pand_r2r(regs,regd)         mmx_r2r (pand, regs, regd)

#define         pandn_m2r(var,reg)          mmx_m2r (pandn, var, reg)
#define         pandn_r2r(regs,regd)        mmx_r2r (pandn, regs, regd)

#define         pcmpeqb_m2r(var,reg)        mmx_m2r (pcmpeqb, var, reg)
#define         pcmpeqb_r2r(regs,regd)      mmx_r2r (pcmpeqb, regs, regd)
#define         pcmpeqd_m2r(var,reg)        mmx_m2r (pcmpeqd, var, reg)
#define         pcmpeqd_r2r(regs,regd)      mmx_r2r (pcmpeqd, regs, regd)
#define         pcmpeqw_m2r(var,reg)        mmx_m2r (pcmpeqw, var, reg)
#define         pcmpeqw_r2r(regs,regd)      mmx_r2r (pcmpeqw, regs, regd)

#define         pcmpgtb_m2r(var,reg)        mmx_m2r (pcmpgtb, var, reg)
#define         pcmpgtb_r2r(regs,regd)      mmx_r2r (pcmpgtb, regs, regd)
#define         pcmpgtd_m2r(var,reg)        mmx_m2r (pcmpgtd, var, reg)
#define         pcmpgtd_r2r(regs,regd)      mmx_r2r (pcmpgtd, regs, regd)
#define         pcmpgtw_m2r(var,reg)        mmx_m2r (pcmpgtw, var, reg)
#define         pcmpgtw_r2r(regs,regd)      mmx_r2r (pcmpgtw, regs, regd)

#define         pmaddwd_m2r(var,reg)        mmx_m2r (pmaddwd, var, reg)
#define         pmaddwd_r2r(regs,regd)      mmx_r2r (pmaddwd, regs, regd)

#define         pmulhw_m2r(var,reg)         mmx_m2r (pmulhw, var, reg)
#define         pmulhw_r2r(regs,regd)       mmx_r2r (pmulhw, regs, regd)

#define         pmullw_m2r(var,reg)         mmx_m2r (pmullw, var, reg)
#define         pmullw_r2r(regs,regd)       mmx_r2r (pmullw, regs, regd)

#define         por_m2r(var,reg)            mmx_m2r (por, var, reg)
#define         por_r2r(regs,regd)          mmx_r2r (por, regs, regd)

#define         pslld_i2r(imm,reg)          mmx_i2r (pslld, imm, reg)
#define         pslld_m2r(var,reg)          mmx_m2r (pslld, var, reg)
#define         pslld_r2r(regs,regd)        mmx_r2r (pslld, regs, regd)
#define         psllq_i2r(imm,reg)          mmx_i2r (psllq, imm, reg)
#define         psllq_m2r(var,reg)          mmx_m2r (psllq, var, reg)
#define         psllq_r2r(regs,regd)        mmx_r2r (psllq, regs, regd)
#define         psllw_i2r(imm,reg)          mmx_i2r (psllw, imm, reg)
#define         psllw_m2r(var,reg)          mmx_m2r (psllw, var, reg)
#define         psllw_r2r(regs,regd)        mmx_r2r (psllw, regs, regd)

#define         psrad_i2r(imm,reg)          mmx_i2r (psrad, imm, reg)
#define         psrad_m2r(var,reg)          mmx_m2r (psrad, var, reg)
#define         psrad_r2r(regs,regd)        mmx_r2r (psrad, regs, regd)
#define         psraw_i2r(imm,reg)          mmx_i2r (psraw, imm, reg)
#define         psraw_m2r(var,reg)          mmx_m2r (psraw, var, reg)
#define         psraw_r2r(regs,regd)        mmx_r2r (psraw, regs, regd)

#define         psrld_i2r(imm,reg)          mmx_i2r (psrld, imm, reg)
#define         psrld_m2r(var,reg)          mmx_m2r (psrld, var, reg)
#define         psrld_r2r(regs,regd)        mmx_r2r (psrld, regs, regd)
#define         psrlq_i2r(imm,reg)          mmx_i2r (psrlq, imm, reg)
#define         psrlq_m2r(var,reg)          mmx_m2r (psrlq, var, reg)
#define         psrlq_r2r(regs,regd)        mmx_r2r (psrlq, regs, regd)
#define         psrlw_i2r(imm,reg)          mmx_i2r (psrlw, imm, reg)
#define         psrlw_m2r(var,reg)          mmx_m2r (psrlw, var, reg)
#define         psrlw_r2r(regs,regd)        mmx_r2r (psrlw, regs, regd)

#define         psubb_m2r(var,reg)          mmx_m2r (psubb, var, reg)
#define         psubb_r2r(regs,regd)        mmx_r2r (psubb, regs, regd)
#define         psubd_m2r(var,reg)          mmx_m2r (psubd, var, reg)
#define         psubd_r2r(regs,regd)        mmx_r2r (psubd, regs, regd)
#define         psubw_m2r(var,reg)          mmx_m2r (psubw, var, reg)
#define         psubw_r2r(regs,regd)        mmx_r2r (psubw, regs, regd)

#define         psubsb_m2r(var,reg)         mmx_m2r (psubsb, var, reg)
#define         psubsb_r2r(regs,regd)       mmx_r2r (psubsb, regs, regd)
#define         psubsw_m2r(var,reg)         mmx_m2r (psubsw, var, reg)
#define         psubsw_r2r(regs,regd)       mmx_r2r (psubsw, regs, regd)

#define         psubusb_m2r(var,reg)        mmx_m2r (psubusb, var, reg)
#define         psubusb_r2r(regs,regd)      mmx_r2r (psubusb, regs, regd)
#define         psubusw_m2r(var,reg)        mmx_m2r (psubusw, var, reg)
#define         psubusw_r2r(regs,regd)      mmx_r2r (psubusw, regs, regd)

#define         punpckhbw_m2r(var,reg)      mmx_m2r (punpckhbw, var, reg)
#define         punpckhbw_r2r(regs,regd)    mmx_r2r (punpckhbw, regs, regd)
#define         punpckhdq_m2r(var,reg)      mmx_m2r (punpckhdq, var, reg)
#define         punpckhdq_r2r(regs,regd)    mmx_r2r (punpckhdq, regs, regd)
#define         punpckhwd_m2r(var,reg)      mmx_m2r (punpckhwd, var, reg)
#define         punpckhwd_r2r(regs,regd)    mmx_r2r (punpckhwd, regs, regd)

#define         punpcklbw_m2r(var,reg)      mmx_m2r (punpcklbw, var, reg)
#define         punpcklbw_r2r(regs,regd)    mmx_r2r (punpcklbw, regs, regd)
#define         punpckldq_m2r(var,reg)      mmx_m2r (punpckldq, var, reg)
#define         punpckldq_r2r(regs,regd)    mmx_r2r (punpckldq, regs, regd)
#define         punpcklwd_m2r(var,reg)      mmx_m2r (punpcklwd, var, reg)
#define         punpcklwd_r2r(regs,regd)    mmx_r2r (punpcklwd, regs, regd)

#define         pxor_m2r(var,reg)           mmx_m2r (pxor, var, reg)
#define         pxor_r2r(regs,regd)         mmx_r2r (pxor, regs, regd)
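
/*
 * Usage sketch (illustrative addition, not part of the original header): an
 * unsigned saturated add of two 8-byte blocks through the wrappers above.
 * A function that touches MMX registers must execute emms() before any
 * floating-point code runs again, because MMX state aliases the x87 stack.
 * The function name is hypothetical.
 */
static inline void mmx_example_addusb(mmx_t *dst, const mmx_t *src)
{
    movq_m2r(*dst, mm0);        /* load 8 bytes from dst into mm0       */
    paddusb_m2r(*src, mm0);     /* bytewise add src, unsigned saturated */
    movq_r2m(mm0, *dst);        /* store the result back to dst         */
    emms();                     /* hand the register file back to x87   */
}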


/* 3DNow! extensions */

#define         pavgusb_m2r(var,reg)        mmx_m2r (pavgusb, var, reg)
#define         pavgusb_r2r(regs,regd)      mmx_r2r (pavgusb, regs, regd)


/* AMD MMX extensions - also available in Intel SSE */


#define         mmx_m2ri(op,mem,reg,imm) \
        __asm__ __volatile__ (#op " %1, %0, %%" #reg \
                              : /* nothing */ \
                              : "X" (mem), "X" (imm))
#define         mmx_r2ri(op,regs,regd,imm) \
        __asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
                              : /* nothing */ \
                              : "X" (imm) )
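
/*
 * Expansion sketch (editorial note, not part of the original header), using
 * the pshufw wrapper defined further below:
 *     mmx_r2ri (pshufw, mm1, mm0, 0x1B)
 * becomes
 *     __asm__ __volatile__ ("pshufw %0, %%mm1, %%mm0" : : "X" (0x1B));
 * the "X" constraint lets the immediate pass straight through to the
 * assembler as $27.
 */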

#define         mmx_fetch(mem,hint) \
        __asm__ __volatile__ ("prefetch" #hint " %0" \
                              : /* nothing */ \
                              : "X" (mem))


#define         maskmovq(regs,maskreg)      mmx_r2r (maskmovq, regs, maskreg)

#define         movntq_r2m(mmreg,var)       mmx_r2m (movntq, mmreg, var)

#define         pavgb_m2r(var,reg)          mmx_m2r (pavgb, var, reg)
#define         pavgb_r2r(regs,regd)        mmx_r2r (pavgb, regs, regd)
#define         pavgw_m2r(var,reg)          mmx_m2r (pavgw, var, reg)
#define         pavgw_r2r(regs,regd)        mmx_r2r (pavgw, regs, regd)

#define         pextrw_r2r(mmreg,reg,imm)   mmx_r2ri (pextrw, mmreg, reg, imm)

#define         pinsrw_r2r(reg,mmreg,imm)   mmx_r2ri (pinsrw, reg, mmreg, imm)

#define         pmaxsw_m2r(var,reg)         mmx_m2r (pmaxsw, var, reg)
#define         pmaxsw_r2r(regs,regd)       mmx_r2r (pmaxsw, regs, regd)

#define         pmaxub_m2r(var,reg)         mmx_m2r (pmaxub, var, reg)
#define         pmaxub_r2r(regs,regd)       mmx_r2r (pmaxub, regs, regd)

#define         pminsw_m2r(var,reg)         mmx_m2r (pminsw, var, reg)
#define         pminsw_r2r(regs,regd)       mmx_r2r (pminsw, regs, regd)

#define         pminub_m2r(var,reg)         mmx_m2r (pminub, var, reg)
#define         pminub_r2r(regs,regd)       mmx_r2r (pminub, regs, regd)

#define         pmovmskb(mmreg,reg) \
        __asm__ __volatile__ ("pmovmskb %" #mmreg ", %" #reg)

#define         pmulhuw_m2r(var,reg)        mmx_m2r (pmulhuw, var, reg)
#define         pmulhuw_r2r(regs,regd)      mmx_r2r (pmulhuw, regs, regd)

#define         prefetcht0(mem)             mmx_fetch (mem, t0)
#define         prefetcht1(mem)             mmx_fetch (mem, t1)
#define         prefetcht2(mem)             mmx_fetch (mem, t2)
#define         prefetchnta(mem)            mmx_fetch (mem, nta)

#define         psadbw_m2r(var,reg)         mmx_m2r (psadbw, var, reg)
#define         psadbw_r2r(regs,regd)       mmx_r2r (psadbw, regs, regd)

#define         pshufw_m2r(var,reg,imm)     mmx_m2ri(pshufw, var, reg, imm)
#define         pshufw_r2r(regs,regd,imm)   mmx_r2ri(pshufw, regs, regd, imm)

#define         sfence() __asm__ __volatile__ ("sfence\n\t")
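
/*
 * Usage sketch (illustrative addition, not part of the original header): a
 * cache-bypassing 8-byte store with a prefetch hint for the following block.
 * Non-temporal stores are weakly ordered, so sfence() is needed before the
 * data can safely be observed by another agent. The name is hypothetical.
 */
static inline void mmx_example_store_nt(mmx_t *dst, const mmx_t *src)
{
    prefetchnta(src[1]);        /* pull the next block, minimal cache pollution */
    movq_m2r(src[0], mm0);      /* load 8 bytes                                 */
    movntq_r2m(mm0, dst[0]);    /* store around the cache                       */
    sfence();                   /* order the non-temporal store                 */
    emms();
}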

/* SSE2 */
#define         pshufhw_m2r(var,reg,imm)    mmx_m2ri(pshufhw, var, reg, imm)
#define         pshufhw_r2r(regs,regd,imm)  mmx_r2ri(pshufhw, regs, regd, imm)
#define         pshuflw_m2r(var,reg,imm)    mmx_m2ri(pshuflw, var, reg, imm)
#define         pshuflw_r2r(regs,regd,imm)  mmx_r2ri(pshuflw, regs, regd, imm)

#define         pshufd_r2r(regs,regd,imm)   mmx_r2ri(pshufd, regs, regd, imm)

#define         movdqa_m2r(var,reg)         mmx_m2r (movdqa, var, reg)
#define         movdqa_r2m(reg,var)         mmx_r2m (movdqa, reg, var)
#define         movdqa_r2r(regs,regd)       mmx_r2r (movdqa, regs, regd)
#define         movdqu_m2r(var,reg)         mmx_m2r (movdqu, var, reg)
#define         movdqu_r2m(reg,var)         mmx_r2m (movdqu, reg, var)
#define         movdqu_r2r(regs,regd)       mmx_r2r (movdqu, regs, regd)

#define         pmullw_r2m(reg,var)         mmx_r2m (pmullw, reg, var)

#define         pslldq_i2r(imm,reg)         mmx_i2r (pslldq, imm, reg)
#define         psrldq_i2r(imm,reg)         mmx_i2r (psrldq, imm, reg)

#define         punpcklqdq_r2r(regs,regd)   mmx_r2r (punpcklqdq, regs, regd)
#define         punpckhqdq_r2r(regs,regd)   mmx_r2r (punpckhqdq, regs, regd)
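
/*
 * Usage sketch (illustrative addition, not part of the original header): an
 * unaligned 16-byte copy through xmm0. The xmm_example_t type exists only
 * for this sketch; movdqu tolerates any alignment, while movdqa would fault
 * on addresses that are not 16-byte aligned. No emms() is needed here, as
 * the xmm registers do not alias the x87 stack.
 */
typedef struct { mmx_t lo, hi; } xmm_example_t;

static inline void sse2_example_copy16(xmm_example_t *dst,
                                       const xmm_example_t *src)
{
    movdqu_m2r(*src, xmm0);     /* unaligned 16-byte load  */
    movdqu_r2m(xmm0, *dst);     /* unaligned 16-byte store */
}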


#endif /* AVCODEC_I386MMX_H */