Mercurial repository: libavcodec.hg
File: sh4/dsputil_align.c, changeset 2979:bfabfdf9ce55
Commit message: COSMETICS: tabs --> spaces, some prettyprinting
Author: diego
Date: Thu, 22 Dec 2005 01:10:11 +0000
Parent: ef2149182f1c (2978:403183bbb505)  Child: 0b546eab515d
(Comparison view: old and new columns are identical except for whitespace.)
#include "../avcodec.h"
#include "../dsputil.h"
24 | 24 |
25 | 25 |
/* Load a 32-bit word through a plain pointer cast.
 * NOTE(review): assumes the target tolerates this access pattern and the
 * build disables strict-aliasing optimizations -- confirm compiler flags. */
#define LP(p) *(uint32_t*)(p)


/* Split two packed-pixel words into partial sums that cannot carry
 * between bytes: ph accumulates the upper 6 bits of each byte (>>2),
 * pl accumulates the low 2 bits.  Used by the xy (2D half-pel) loops. */
#define UNPACK(ph,pl,tt0,tt1) do { \
    uint32_t t0,t1; t0=tt0;t1=tt1; \
    ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
    pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)

/* Recombine two UNPACKed pairs (4-pixel average) with rounding (+2)
 * or truncation (+1) applied per byte before the carry-free re-merge. */
#define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))
36 | 36 |
/* Reassemble an aligned 32-bit word from two consecutive aligned loads
 * around an unaligned address (little-endian byte order).
 * MERGE1(a,b,ofs) yields the word starting ofs bytes into a;
 * MERGE2 yields the word one byte further (for x half-pel averaging).
 * Outer parentheses added so the ternary cannot be re-associated when
 * the macro expands inside a larger expression. */
#define MERGE1(a,b,ofs) ((ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) ))
#define MERGE2(a,b,ofs) ((ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) ))
/* big-endian variants:
#define MERGE1(a,b,ofs) ((ofs==0)?a:( ((a)<<(8*ofs))|((b)>>(32-8*ofs)) ))
#define MERGE2(a,b,ofs) ((ofs==3)?b:( ((a)<<(8+8*ofs))|((b)>>(32-8-8*ofs)) ))
*/
44 | 44 |
45 | 45 |
/* Store operations plugged into the OP_* loop macros as OP:
 * put overwrites dest, avg rounds-averages with the existing dest.
 * rnd_avg32() is provided by ../dsputil.h. */
#define put(d,s) d = s
#define avg(d,s) d = rnd_avg32(s,d)
48 | 48 |
/* Width-4 row loop for an unaligned source: ref is aligned down by ofs
 * (its low 2 address bits) and each row is rebuilt from two aligned
 * loads via MERGE1.  Expects ref/dest/stride/height in scope; OP is
 * put or avg.  dest is assumed 32-bit aligned. */
#define OP_C4(ofs) \
    ref-=ofs; \
    do { \
        OP(LP(dest),MERGE1(LP(ref),LP(ref+4),ofs)); \
        ref+=stride; \
        dest+=stride; \
    } while(--height)

/* Width-4 row loop for a 32-bit-aligned source: one load per row. */
#define OP_C40() \
    do { \
        OP(LP(dest),LP(ref)); \
        ref+=stride; \
        dest+=stride; \
    } while(--height)
63 | 63 |
64 | 64 |
#define OP put

/* Copy a 4-pixel-wide block of `height` rows from ref to dest.
 * dest must be 32-bit aligned; ref may have any alignment, dispatched
 * on its low 2 address bits. */
static void put_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
{
    /* NOTE(review): pointer->int truncation; fine on 32-bit SH4 but
     * uintptr_t would be the portable spelling -- confirm target width. */
    switch((int)ref&3){
    case 0: OP_C40(); return;
    case 1: OP_C4(1); return;
    case 2: OP_C4(2); return;
    case 3: OP_C4(3); return;
    }
}
76 | 76 |
#undef OP
#define OP avg

/* Rounded-average a 4-pixel-wide block of `height` rows into dest.
 * Same alignment contract and dispatch as put_pixels4_c above. */
static void avg_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
{
    switch((int)ref&3){
    case 0: OP_C40(); return;
    case 1: OP_C4(1); return;
    case 2: OP_C4(2); return;
    case 3: OP_C4(3); return;
    }
}

#undef OP
91 | 91 |
/* Width-8/16 plain copy from an unaligned source (ofs = original
 * ref & 3).  avg2 is unused here but kept so DEFFUNC can instantiate
 * all loop macros with one signature.  sz is a compile-time 8 or 16,
 * so the `if (sz==16)` branch folds away. */
#define OP_C(ofs,sz,avg2) \
{ \
    ref-=ofs; \
    do { \
        uint32_t t0,t1; \
        t0 = LP(ref+0); \
        t1 = LP(ref+4); \
        OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
        t0 = LP(ref+8); \
        OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
        if (sz==16) { \
            t1 = LP(ref+12); \
            OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
            t0 = LP(ref+16); \
            OP(LP(dest+12), MERGE1(t1,t0,ofs)); \
        } \
        ref+=stride; \
        dest+= stride; \
    } while(--height); \
}

/* Same, for a 32-bit-aligned source: straight word copies. */
#define OP_C0(sz,avg2) \
{ \
    do { \
        OP(LP(dest+0), LP(ref+0)); \
        OP(LP(dest+4), LP(ref+4)); \
        if (sz==16) { \
            OP(LP(dest+8), LP(ref+8)); \
            OP(LP(dest+12), LP(ref+12)); \
        } \
        ref+=stride; \
        dest+= stride; \
    } while(--height); \
}
127 | 127 |
/* Horizontal half-pel interpolation: each output word is avg2() of the
 * word at ofs and the word one byte further (MERGE1/MERGE2), i.e. the
 * average of each pixel with its right neighbour.  avg2 is rnd_avg32
 * or no_rnd_avg32, selected by DEFFUNC. */
#define OP_X(ofs,sz,avg2) \
{ \
    ref-=ofs; \
    do { \
        uint32_t t0,t1; \
        t0 = LP(ref+0); \
        t1 = LP(ref+4); \
        OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
        t0 = LP(ref+8); \
        OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
        if (sz==16) { \
            t1 = LP(ref+12); \
            OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
            t0 = LP(ref+16); \
            OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
        } \
        ref+=stride; \
        dest+= stride; \
    } while(--height); \
}
148 | 148 |
/* Vertical half-pel interpolation, 32-bit-aligned source: the previous
 * row is carried in t0..t3 so each row is loaded only once; each output
 * is avg2(previous row, current row). */
#define OP_Y0(sz,avg2) \
{ \
    uint32_t t0,t1,t2,t3,t; \
\
    t0 = LP(ref+0); \
    t1 = LP(ref+4); \
    if (sz==16) { \
        t2 = LP(ref+8); \
        t3 = LP(ref+12); \
    } \
    do { \
        ref += stride; \
\
        t = LP(ref+0); \
        OP(LP(dest+0), avg2(t0,t)); t0 = t; \
        t = LP(ref+4); \
        OP(LP(dest+4), avg2(t1,t)); t1 = t; \
        if (sz==16) { \
            t = LP(ref+8); \
            OP(LP(dest+8), avg2(t2,t)); t2 = t; \
            t = LP(ref+12); \
            OP(LP(dest+12), avg2(t3,t)); t3 = t; \
        } \
        dest+= stride; \
    } while(--height); \
}
176 | 176 |
/* Vertical half-pel interpolation, unaligned source: like OP_Y0 but
 * each row's words are first rebuilt with MERGE1 from aligned loads
 * (w0/w1 ping-pong across the row); previous row carried in t0..t3. */
#define OP_Y(ofs,sz,avg2) \
{ \
    uint32_t t0,t1,t2,t3,t,w0,w1; \
\
    ref-=ofs; \
    w0 = LP(ref+0); \
    w1 = LP(ref+4); \
    t0 = MERGE1(w0,w1,ofs); \
    w0 = LP(ref+8); \
    t1 = MERGE1(w1,w0,ofs); \
    if (sz==16) { \
        w1 = LP(ref+12); \
        t2 = MERGE1(w0,w1,ofs); \
        w0 = LP(ref+16); \
        t3 = MERGE1(w1,w0,ofs); \
    } \
    do { \
        ref += stride; \
\
        w0 = LP(ref+0); \
        w1 = LP(ref+4); \
        t = MERGE1(w0,w1,ofs); \
        OP(LP(dest+0), avg2(t0,t)); t0 = t; \
        w0 = LP(ref+8); \
        t = MERGE1(w1,w0,ofs); \
        OP(LP(dest+4), avg2(t1,t)); t1 = t; \
        if (sz==16) { \
            w1 = LP(ref+12); \
            t = MERGE1(w0,w1,ofs); \
            OP(LP(dest+8), avg2(t2,t)); t2 = t; \
            w0 = LP(ref+16); \
            t = MERGE1(w1,w0,ofs); \
            OP(LP(dest+12), avg2(t3,t)); t3 = t; \
        } \
        dest+=stride; \
    } while(--height); \
}
214 | 214 |
/* Aligned entry points reuse the general macros with ofs == 0. */
#define OP_X0(sz,avg2) OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)

/* 2D (x+1/2, y+1/2) half-pel interpolation: each row's horizontal pair
 * sums are kept in UNPACKed form (a0..a7 = previous row's high/low
 * partial sums), then combined with the current row via PACK
 * (rnd_PACK or no_rnd_PACK) to average four source pixels per output. */
#define OP_XY(ofs,sz,PACK) \
{ \
    uint32_t t2,t3,w0,w1; \
    uint32_t a0,a1,a2,a3,a4,a5,a6,a7; \
\
    ref -= ofs; \
    w0 = LP(ref+0); \
    w1 = LP(ref+4); \
    UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
    w0 = LP(ref+8); \
    UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    if (sz==16) { \
        w1 = LP(ref+12); \
        UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        w0 = LP(ref+16); \
        UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
    } \
    do { \
        ref+=stride; \
        w0 = LP(ref+0); \
        w1 = LP(ref+4); \
        UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
        OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
        a0 = t2; a1 = t3; \
        w0 = LP(ref+8); \
        UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
        OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
        a2 = t2; a3 = t3; \
        if (sz==16) { \
            w1 = LP(ref+12); \
            UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
            OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
            a4 = t2; a5 = t3; \
            w0 = LP(ref+16); \
            UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
            OP(LP(dest+12),PACK(a6,a7,t2,t3)); \
            a6 = t2; a7 = t3; \
        } \
        dest+=stride; \
    } while(--height); \
}
258 | 258 |
/* Instantiate one op_rnd_pixelsSZ_xy() function: dispatch on the low
 * 2 bits of ref to the aligned loop (OP_N##0) or the offset loop
 * (OP_N(ofs,...)), plugging in the rnd/no_rnd averaging helper. */
#define DEFFUNC(op,rnd,xy,sz,OP_N,avgfunc) \
static void op##_##rnd##_pixels##sz##_##xy (uint8_t * dest, const uint8_t * ref, \
                                            const int stride, int height) \
{ \
    switch((int)ref&3) { \
    case 0:OP_N##0(sz,rnd##_##avgfunc); return; \
    case 1:OP_N(1,sz,rnd##_##avgfunc); return; \
    case 2:OP_N(2,sz,rnd##_##avgfunc); return; \
    case 3:OP_N(3,sz,rnd##_##avgfunc); return; \
    } \
}
270 | 270 |
271 #define OP put | 271 #define OP put |
272 | 272 |
273 DEFFUNC(put, rnd,o,8,OP_C,avg2) | 273 DEFFUNC(put, rnd,o,8,OP_C,avg2) |
303 DEFFUNC(avg, rnd,xy,16,OP_XY,PACK) | 303 DEFFUNC(avg, rnd,xy,16,OP_XY,PACK) |
304 DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK) | 304 DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK) |
305 | 305 |
306 #undef OP | 306 #undef OP |
307 | 307 |
/* For the o (no interpolation) case, rounding never applies, so the
 * no_rnd variants alias the rnd functions; the plain _c names used by
 * dsputil tables alias them as well. */
#define put_no_rnd_pixels8_o     put_rnd_pixels8_o
#define put_no_rnd_pixels16_o    put_rnd_pixels16_o
#define avg_no_rnd_pixels8_o     avg_rnd_pixels8_o
#define avg_no_rnd_pixels16_o    avg_rnd_pixels16_o

#define put_pixels8_c            put_rnd_pixels8_o
#define put_pixels16_c           put_rnd_pixels16_o
#define avg_pixels8_c            avg_rnd_pixels8_o
#define avg_pixels16_c           avg_rnd_pixels16_o
#define put_no_rnd_pixels8_c     put_rnd_pixels8_o
#define put_no_rnd_pixels16_c    put_rnd_pixels16_o
#define avg_no_rnd_pixels8_c     avg_rnd_pixels8_o
#define avg_no_rnd_pixels16_c    avg_rnd_pixels16_o
321 | 321 |
322 #define QPEL | 322 #define QPEL |
323 | 323 |
324 #ifdef QPEL | 324 #ifdef QPEL |
325 | 325 |
326 #include "qpel.c" | 326 #include "qpel.c" |
327 | 327 |
328 #endif | 328 #endif |
329 | 329 |
330 void dsputil_init_align(DSPContext* c, AVCodecContext *avctx) | 330 void dsputil_init_align(DSPContext* c, AVCodecContext *avctx) |
331 { | 331 { |
332 c->put_pixels_tab[0][0] = put_rnd_pixels16_o; | 332 c->put_pixels_tab[0][0] = put_rnd_pixels16_o; |
333 c->put_pixels_tab[0][1] = put_rnd_pixels16_x; | 333 c->put_pixels_tab[0][1] = put_rnd_pixels16_x; |
334 c->put_pixels_tab[0][2] = put_rnd_pixels16_y; | 334 c->put_pixels_tab[0][2] = put_rnd_pixels16_y; |
335 c->put_pixels_tab[0][3] = put_rnd_pixels16_xy; | 335 c->put_pixels_tab[0][3] = put_rnd_pixels16_xy; |
336 c->put_pixels_tab[1][0] = put_rnd_pixels8_o; | 336 c->put_pixels_tab[1][0] = put_rnd_pixels8_o; |
337 c->put_pixels_tab[1][1] = put_rnd_pixels8_x; | 337 c->put_pixels_tab[1][1] = put_rnd_pixels8_x; |
338 c->put_pixels_tab[1][2] = put_rnd_pixels8_y; | 338 c->put_pixels_tab[1][2] = put_rnd_pixels8_y; |
339 c->put_pixels_tab[1][3] = put_rnd_pixels8_xy; | 339 c->put_pixels_tab[1][3] = put_rnd_pixels8_xy; |
340 | 340 |
341 c->put_no_rnd_pixels_tab[0][0] = put_no_rnd_pixels16_o; | 341 c->put_no_rnd_pixels_tab[0][0] = put_no_rnd_pixels16_o; |
342 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x; | 342 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x; |
343 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y; | 343 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y; |
344 c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy; | 344 c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy; |
345 c->put_no_rnd_pixels_tab[1][0] = put_no_rnd_pixels8_o; | 345 c->put_no_rnd_pixels_tab[1][0] = put_no_rnd_pixels8_o; |
346 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x; | 346 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x; |
347 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y; | 347 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y; |
348 c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy; | 348 c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy; |
349 | 349 |
350 c->avg_pixels_tab[0][0] = avg_rnd_pixels16_o; | 350 c->avg_pixels_tab[0][0] = avg_rnd_pixels16_o; |
351 c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x; | 351 c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x; |
352 c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y; | 352 c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y; |
353 c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy; | 353 c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy; |
354 c->avg_pixels_tab[1][0] = avg_rnd_pixels8_o; | 354 c->avg_pixels_tab[1][0] = avg_rnd_pixels8_o; |
355 c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x; | 355 c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x; |
356 c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y; | 356 c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y; |
357 c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy; | 357 c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy; |
358 | 358 |
359 c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_o; | 359 c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_o; |
360 c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x; | 360 c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x; |
361 c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y; | 361 c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y; |
362 c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy; | 362 c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy; |
363 c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_o; | 363 c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_o; |
364 c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x; | 364 c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x; |
365 c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y; | 365 c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y; |
366 c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy; | 366 c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy; |
367 | 367 |
368 #ifdef QPEL | 368 #ifdef QPEL |
369 | 369 |
370 #define dspfunc(PFX, IDX, NUM) \ | 370 #define dspfunc(PFX, IDX, NUM) \ |
371 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \ | 371 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \ |