Mercurial > libavcodec.hg
annotate mpegvideo_common.h @ 9830:bd0879f752e6 libavcodec
Express the H.264 parser dependency on the golomb code in configure instead of
in the Makefile as it is done for all other parts that depend on golomb.
author | diego |
---|---|
date | Tue, 09 Jun 2009 20:29:52 +0000 |
parents | 1ff6eb1d7d14 |
children | f2fa6cbb01ce |
rev | line source |
---|---|
5204 | 1 /* |
2 * The simplest mpeg encoder (well, it was the simplest!) | |
8629
04423b2f6e0b
cosmetics: Remove pointless period after copyright statement non-sentences.
diego
parents:
8596
diff
changeset
|
3 * Copyright (c) 2000,2001 Fabrice Bellard |
5204 | 4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> |
5 * | |
5214 | 6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at> |
7 * | |
5204 | 8 * This file is part of FFmpeg. |
9 * | |
10 * FFmpeg is free software; you can redistribute it and/or | |
11 * modify it under the terms of the GNU Lesser General Public | |
12 * License as published by the Free Software Foundation; either | |
13 * version 2.1 of the License, or (at your option) any later version. | |
14 * | |
15 * FFmpeg is distributed in the hope that it will be useful, | |
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
18 * Lesser General Public License for more details. | |
19 * | |
20 * You should have received a copy of the GNU Lesser General Public | |
21 * License along with FFmpeg; if not, write to the Free Software | |
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
23 */ | |
24 | |
25 /** | |
8718
e9d9d946f213
Use full internal pathname in doxygen @file directives.
diego
parents:
8667
diff
changeset
|
26 * @file libavcodec/mpegvideo_common.h |
5204 | 27 * The simplest mpeg encoder (well, it was the simplest!). |
28 */ | |
29 | |
7760 | 30 #ifndef AVCODEC_MPEGVIDEO_COMMON_H |
31 #define AVCODEC_MPEGVIDEO_COMMON_H | |
5204 | 32 |
8667 | 33 #include <string.h> |
5204 | 34 #include "avcodec.h" |
35 #include "dsputil.h" | |
36 #include "mpegvideo.h" | |
37 #include "mjpegenc.h" | |
38 #include "msmpeg4.h" | |
39 #include "faandct.h" | |
40 #include <limits.h> | |
41 | |
42 int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); | |
43 int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); | |
44 void denoise_dct_c(MpegEncContext *s, DCTELEM *block); | |
45 | |
46 /** | |
47 * allocates a Picture | |
48 * The pixels are allocated/set by calling get_buffer() if shared=0 | |
49 */ | |
50 int alloc_picture(MpegEncContext *s, Picture *pic, int shared); | |
51 | |
52 /** | |
53 * sets the given MpegEncContext to common defaults (same for encoding and decoding). | |
54 * the changed fields will not depend upon the prior state of the MpegEncContext. | |
55 */ | |
56 void MPV_common_defaults(MpegEncContext *s); | |
57 | |
/**
 * 1-warp-point global motion compensation (MPEG-4 "GMC1") for one macroblock.
 * Predicts the 16x16 luma block and both 8x8 chroma blocks of the current
 * macroblock from ref_picture using the single warp vector in
 * s->sprite_offset, writing into dest_y/dest_cb/dest_cr.
 */
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    /* integer-pel source position for this MB */
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    /* rescale the vector so the fractional part is in 1/16-pel units */
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    /* clip to the picture; at the clamped border drop the subpel part */
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* the 17x17 source area (incl. interpolation margin) may reach
         * outside the padded edges -> go through the emulation buffer */
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        /* true sub-halfpel position -> dedicated gmc1 interpolation */
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        /* halfpel-aligned vector -> a plain halfpel copy suffices */
        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    /* chroma: same procedure at half resolution using warp point [1] */
    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = av_clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        /* Cr uses the same coordinates as Cb, so it needs emulation iff Cb did */
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    return;
}
143 | |
/**
 * Global motion compensation with a full affine transform for one macroblock.
 * Used for the multi-warp-point sprite case: the per-pixel displacement is
 * described by s->sprite_offset plus the s->sprite_delta matrix and is
 * evaluated by the dsp.gmc() primitive for luma and both chroma planes.
 */
static inline void gmc_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    /* affine origin for this macroblock (luma, 16x16) */
    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    /* luma is processed as two 8-pixel-wide halves */
    s->dsp.gmc(dest_y, ptr, linesize, 16,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
           ox + s->sprite_delta[0][0]*8,
           oy + s->sprite_delta[1][0]*8,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);

    if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    /* chroma origin (half resolution, warp point [1]) */
    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);
}
199 | |
200 static inline int hpel_motion(MpegEncContext *s, | |
201 uint8_t *dest, uint8_t *src, | |
202 int field_based, int field_select, | |
203 int src_x, int src_y, | |
204 int width, int height, int stride, | |
205 int h_edge_pos, int v_edge_pos, | |
206 int w, int h, op_pixels_func *pix_op, | |
207 int motion_x, int motion_y) | |
208 { | |
209 int dxy; | |
210 int emu=0; | |
211 | |
212 dxy = ((motion_y & 1) << 1) | (motion_x & 1); | |
213 src_x += motion_x >> 1; | |
214 src_y += motion_y >> 1; | |
215 | |
216 /* WARNING: do no forget half pels */ | |
217 src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu? | |
218 if (src_x == width) | |
219 dxy &= ~1; | |
220 src_y = av_clip(src_y, -16, height); | |
221 if (src_y == height) | |
222 dxy &= ~2; | |
223 src += src_y * stride + src_x; | |
224 | |
225 if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){ | |
226 if( (unsigned)src_x > h_edge_pos - (motion_x&1) - w | |
227 || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){ | |
228 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based, | |
229 src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos); | |
230 src= s->edge_emu_buffer; | |
231 emu=1; | |
232 } | |
233 } | |
234 if(field_select) | |
235 src += s->linesize; | |
236 pix_op[dxy](dest, src, stride, h); | |
237 return emu; | |
238 } | |
239 | |
/**
 * Half-pel motion compensation of one (possibly field-based) macroblock for
 * all MPEG-style codecs.  Derives the chroma vector from the luma vector
 * according to the output format and chroma subsampling, handles edge
 * emulation, and applies pix_op to luma and both chroma planes.
 * is_mpeg12 is expected to be a compile-time constant so the non-MPEG-1/2
 * branches can be removed by the compiler.
 */
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                 int field_based, int bottom_field, int field_select,
                 uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                 int motion_x, int motion_y, int h, int is_mpeg12)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;

#if 0
if(s->quarter_sample)
{
    motion_x>>=1;
    motion_y>>=1;
}
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    /* luma source position and half-pel index */
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16               + (motion_x >> 1);
    src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);

    /* derive the chroma vector / half-pel index per output format */
    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        }else{
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    }else if(!is_mpeg12 && s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x*8 + mx;
        uvsrc_y = s->mb_y*8 + my;
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x* 8           + (mx >> 1);
                uvsrc_y = src_y;
            } else {
            //Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* source block outside the padded edges -> edge emulation */
    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
            if(is_mpeg12 || s->codec_id == CODEC_ID_MPEG2VIDEO ||
               s->codec_id == CODEC_ID_MPEG1VIDEO){
                av_log(s->avctx,AV_LOG_DEBUG,
                        "MPEG motion vector out of boundary\n");
                /* only honor such invalid vectors for 4:2:0 */
                if(!s->chroma_y_shift)
                    return;
            }
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
                                17, 17+field_based,
                                src_x, src_y<<field_based,
                                s->h_edge_pos, s->v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf ,
                                    ptr_cb, s->uvlinesize,
                                    9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based,
                                    s->h_edge_pos>>1, s->v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16,
                                    ptr_cr, s->uvlinesize,
                                    9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based,
                                    s->h_edge_pos>>1, s->v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[s->chroma_x_shift][uvdxy]
                (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
                (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if(!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
         s->out_format == FMT_H261){
        ff_h261_loop_filter(s);
    }
}
6658 | 368 /* apply one mpeg motion vector to the three components */ |
369 static av_always_inline | |
370 void mpeg_motion(MpegEncContext *s, | |
371 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, | |
372 int field_based, int bottom_field, int field_select, | |
373 uint8_t **ref_picture, op_pixels_func (*pix_op)[4], | |
374 int motion_x, int motion_y, int h) | |
375 { | |
8590 | 376 #if !CONFIG_SMALL |
6658 | 377 if(s->out_format == FMT_MPEG1) |
378 mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based, | |
379 bottom_field, field_select, ref_picture, pix_op, | |
380 motion_x, motion_y, h, 1); | |
381 else | |
382 #endif | |
383 mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based, | |
384 bottom_field, field_select, ref_picture, pix_op, | |
385 motion_x, motion_y, h, 0); | |
386 } | |
5204 | 387 |
//FIXME move to dsputil, avg variant, 16x16 version
/**
 * Blend the five 8x8 predictions (mid, top, left, right, bottom neighbour
 * vectors) into one 8x8 block using the OBMC weighting window.
 * For every pixel the five weights sum to 8; the result is rounded with
 * a +4 bias and shifted down by 3.
 */
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
/* weighted blend of one pixel: t/l/m/r/b are the window weights */
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
/* same weights applied to a 2x2 group of pixels */
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    /* rows 0-1 (the FILTER4 calls also cover the row below) */
    x=0;
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    /* row 1: only the outer columns remain (middle done by FILTER4 above) */
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    /* rows 2-3 */
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    /* rows 4-5 */
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    /* rows 6-7 */
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
439 | |
440 /* obmc for 1 8x8 luma block */ | |
441 static inline void obmc_motion(MpegEncContext *s, | |
442 uint8_t *dest, uint8_t *src, | |
443 int src_x, int src_y, | |
444 op_pixels_func *pix_op, | |
445 int16_t mv[5][2]/* mid top left right bottom*/) | |
446 #define MID 0 | |
447 { | |
448 int i; | |
449 uint8_t *ptr[5]; | |
450 | |
451 assert(s->quarter_sample==0); | |
452 | |
453 for(i=0; i<5; i++){ | |
454 if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){ | |
455 ptr[i]= ptr[MID]; | |
456 }else{ | |
457 ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1); | |
458 hpel_motion(s, ptr[i], src, 0, 0, | |
459 src_x, src_y, | |
460 s->width, s->height, s->linesize, | |
461 s->h_edge_pos, s->v_edge_pos, | |
462 8, 8, pix_op, | |
463 mv[i][0], mv[i][1]); | |
464 } | |
465 } | |
466 | |
467 put_obmc(dest, ptr, s->linesize); | |
468 } | |
469 | |
/**
 * Quarter-pel (MPEG-4 qpel) motion compensation of one macroblock or field.
 * Luma uses quarter-pel interpolation via qpix_op; chroma is derived at
 * half-pel accuracy (with optional encoder-bug workarounds) and uses pix_op.
 */
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;

    /* luma quarter-pel index and integer source position */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    /* derive the chroma vector, honoring known encoder bugs */
    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);

    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* source area outside the padded edges -> edge emulation */
    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
                            17, 17+field_based, src_x, src_y<<field_based,
                            s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize,
                                9, 9 + field_based,
                                uvsrc_x, uvsrc_y<<field_based,
                                s->h_edge_pos>>1, s->v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize,
                                9, 9 + field_based,
                                uvsrc_x, uvsrc_y<<field_based,
                                s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }

    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }

        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
561 | |
562 /** | |
5428 | 563 * h263 chroma 4mv motion compensation. |
5204 | 564 */ |
565 static inline void chroma_4mv_motion(MpegEncContext *s, | |
566 uint8_t *dest_cb, uint8_t *dest_cr, | |
567 uint8_t **ref_picture, | |
568 op_pixels_func *pix_op, | |
569 int mx, int my){ | |
570 int dxy, emu=0, src_x, src_y, offset; | |
571 uint8_t *ptr; | |
572 | |
573 /* In case of 8X8, we construct a single chroma motion vector | |
574 with a special rounding */ | |
575 mx= ff_h263_round_chroma(mx); | |
576 my= ff_h263_round_chroma(my); | |
577 | |
578 dxy = ((my & 1) << 1) | (mx & 1); | |
579 mx >>= 1; | |
580 my >>= 1; | |
581 | |
582 src_x = s->mb_x * 8 + mx; | |
583 src_y = s->mb_y * 8 + my; | |
584 src_x = av_clip(src_x, -8, s->width/2); | |
585 if (src_x == s->width/2) | |
586 dxy &= ~1; | |
587 src_y = av_clip(src_y, -8, s->height/2); | |
588 if (src_y == s->height/2) | |
589 dxy &= ~2; | |
590 | |
591 offset = (src_y * (s->uvlinesize)) + src_x; | |
592 ptr = ref_picture[1] + offset; | |
593 if(s->flags&CODEC_FLAG_EMU_EDGE){ | |
594 if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8 | |
595 || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){ | |
6579 | 596 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, |
597 9, 9, src_x, src_y, | |
598 s->h_edge_pos>>1, s->v_edge_pos>>1); | |
5204 | 599 ptr= s->edge_emu_buffer; |
600 emu=1; | |
601 } | |
602 } | |
603 pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8); | |
604 | |
605 ptr = ref_picture[2] + offset; | |
606 if(emu){ | |
6579 | 607 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, |
608 9, 9, src_x, src_y, | |
609 s->h_edge_pos>>1, s->v_edge_pos>>1); | |
5204 | 610 ptr= s->edge_emu_buffer; |
611 } | |
612 pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8); | |
613 } | |
614 | |
615 static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){ | |
616 /* fetch pixels for estimated mv 4 macroblocks ahead | |
617 * optimized for 64byte cache lines */ | |
618 const int shift = s->quarter_sample ? 2 : 1; | |
619 const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8; | |
620 const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y; | |
621 int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64; | |
622 s->dsp.prefetch(pix[0]+off, s->linesize, 4); | |
623 off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64; | |
624 s->dsp.prefetch(pix[1]+off, pix[2]-pix[1], 2); | |
625 } | |
626 | |
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * @param is_mpeg12 expected to be a compile-time constant selecting the
 *                  MPEG-1/2-only code path
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb,
                              uint8_t *dest_cr, int dir,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              qpel_mc_func (*qpix_op)[16], int is_mpeg12)
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if(!is_mpeg12 && s->obmc && s->pict_type != FF_B_TYPE){
        /* H.263 overlapped block motion compensation: build a 4x4 cache of
         * the 8x8 motion vectors of this MB and its neighbours */
        int16_t mv_cache[4][4][2];
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->b8_stride;
        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;

        assert(!s->mb_skipped);

        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
        /* the row below is not decoded yet -> reuse the second row */
        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);

        /* top neighbour (fall back to own vectors at the border / intra) */
        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
        }

        /* left neighbour */
        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
            *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
        }else{
            *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
        }

        /* right neighbour */
        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
            *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
        }else{
            *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
        }

        mx = 0;
        my = 0;
        /* OBMC-predict the four 8x8 luma blocks and accumulate the vector
         * sum for the single chroma vector */
        for(i=0;i<4;i++) {
            const int x= (i&1)+1;
            const int y= (i>>1)+1;
            int16_t mv[5][2]= {
                {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
                {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
                {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
                {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
                {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
            //FIXME cleanup
            obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                        pix_op[1],
                        mv);

            mx += mv[0][0];
            my += mv[0][1];
        }
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);

        return;
    }

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        if(s->mcsel){
            /* MPEG-4 global motion compensation */
            if(s->real_sprite_warping_points==1){
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }else{
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }
        }else if(!is_mpeg12 && s->quarter_sample){
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else if(!is_mpeg12 && CONFIG_WMV2 && s->mspel){
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else
        {
            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_8X8:
    if (!is_mpeg12) {
        mx = 0;
        my = 0;
        if(s->quarter_sample){
            for(i=0;i<4;i++) {
                motion_x = s->mv[dir][i][0];
                motion_y = s->mv[dir][i][1];

                dxy = ((motion_y & 3) << 2) | (motion_x & 3);
                src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
                src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

                /* WARNING: do not forget half pels */
                src_x = av_clip(src_x, -16, s->width);
                if (src_x == s->width)
                    dxy &= ~3;
                src_y = av_clip(src_y, -16, s->height);
                if (src_y == s->height)
                    dxy &= ~12;

                ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
                if(s->flags&CODEC_FLAG_EMU_EDGE){
                    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
                       || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
                        ff_emulated_edge_mc(s->edge_emu_buffer, ptr,
                                            s->linesize, 9, 9,
                                            src_x, src_y,
                                            s->h_edge_pos, s->v_edge_pos);
                        ptr= s->edge_emu_buffer;
                    }
                }
                dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
                qpix_op[1][dxy](dest, ptr, s->linesize);

                mx += s->mv[dir][i][0]/2;
                my += s->mv[dir][i][1]/2;
            }
        }else{
            for(i=0;i<4;i++) {
                hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                            ref_picture[0], 0, 0,
                            mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos, s->v_edge_pos,
                            8, 8, pix_op[1],
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }
        }

        /* single chroma vector built from the four luma vectors */
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
    }
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if(!is_mpeg12 && s->quarter_sample){
                for(i=0; i<2; i++){
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
                }
            }else{
                /* top field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 0, s->field_select[dir][0],
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 8);
                /* bottom field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 1, s->field_select[dir][1],
                            ref_picture, pix_op,
                            s->mv[dir][1][0], s->mv[dir][1][1], 8);
            }
        } else {
            /* a field of the same parity in the current frame may be the
             * actual reference */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1
               || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8);

            dest_y += 16*s->linesize;
            dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8);
                }
                pix_op = s->dsp.avg_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],16);

                // after put we make avg of the same block
                pix_op=s->dsp.avg_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
885 | |
6658 | 886 static inline void MPV_motion(MpegEncContext *s, |
887 uint8_t *dest_y, uint8_t *dest_cb, | |
888 uint8_t *dest_cr, int dir, | |
889 uint8_t **ref_picture, | |
890 op_pixels_func (*pix_op)[4], | |
891 qpel_mc_func (*qpix_op)[16]) | |
892 { | |
8590 | 893 #if !CONFIG_SMALL |
6658 | 894 if(s->out_format == FMT_MPEG1) |
895 MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir, | |
896 ref_picture, pix_op, qpix_op, 1); | |
897 else | |
898 #endif | |
899 MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir, | |
900 ref_picture, pix_op, qpix_op, 0); | |
901 } | |
7760 | 902 #endif /* AVCODEC_MPEGVIDEO_COMMON_H */ |