Mercurial > audlegacy-plugins
comparison src/wma/libffwma/futils.c @ 12:3da1b8942b8b trunk
[svn] - remove src/Input src/Output src/Effect src/General src/Visualization src/Container
author | nenolod |
---|---|
date | Mon, 18 Sep 2006 03:14:20 -0700 |
parents | src/Input/wma/libffwma/futils.c@13389e613d67 |
children | 1e5df88b631d |
comparison
equal
deleted
inserted
replaced
11:cff1d04026ae | 12:3da1b8942b8b |
---|---|
1 /* | |
2 * Various utilities for ffmpeg system | |
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard | |
4 * | |
5 * This library is free software; you can redistribute it and/or | |
6 * modify it under the terms of the GNU Lesser General Public | |
7 * License as published by the Free Software Foundation; either | |
8 * version 2 of the License, or (at your option) any later version. | |
9 * | |
10 * This library is distributed in the hope that it will be useful, | |
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 * Lesser General Public License for more details. | |
14 * | |
15 * You should have received a copy of the GNU Lesser General Public | |
16 * License along with this library; if not, write to the Free Software | |
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
18 */ | |
19 #include "avformat.h" | |
20 #include "avcodec.h" | |
21 #include "cutils.h" | |
22 #include "utils.h" | |
23 | |
24 #undef NDEBUG | |
25 #include <assert.h> | |
26 | |
/* Heads of the global singly linked lists of registered input, output
   and image formats (appended to by the av_register_* functions). */
AVInputFormat *first_iformat;
AVOutputFormat *first_oformat;
AVImageFormat *first_image_format;
30 | |
31 void av_register_input_format(AVInputFormat *format) | |
32 { | |
33 AVInputFormat **p; | |
34 p = &first_iformat; | |
35 while (*p != NULL) p = &(*p)->next; | |
36 *p = format; | |
37 format->next = NULL; | |
38 } | |
39 | |
/**
 * Test whether the extension of 'filename' matches one of the entries
 * of the comma-separated list 'extensions' (case-insensitive).
 *
 * @param filename path whose extension (text after the last '.') is tested
 * @param extensions comma-separated list, e.g. "mp3,ogg,wav"
 * @return 1 on a match, 0 otherwise (also when 'filename' has no '.')
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for(;;) {
            q = ext1;
            /* BUG FIX: bound the copy so an over-long list entry cannot
               overflow ext1[] */
            while (*p != '\0' && *p != ',' && q < ext1 + sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            /* skip the remainder of an entry that did not fit; it cannot
               match a <32-char extension anyway */
            while (*p != '\0' && *p != ',')
                p++;
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++; /* step over the ',' */
        }
    }
    return 0;
}
63 | |
64 AVOutputFormat *guess_format(const char *short_name, const char *filename, | |
65 const char *mime_type) | |
66 { | |
67 AVOutputFormat *fmt, *fmt_found; | |
68 int score_max, score; | |
69 | |
70 /* specific test for image sequences */ | |
71 if (!short_name && filename && | |
72 filename_number_test(filename) >= 0 && | |
73 guess_image_format(filename)) { | |
74 return guess_format("image", NULL, NULL); | |
75 } | |
76 | |
77 /* find the proper file type */ | |
78 fmt_found = NULL; | |
79 score_max = 0; | |
80 fmt = first_oformat; | |
81 while (fmt != NULL) { | |
82 score = 0; | |
83 if (fmt->name && short_name && !strcmp(fmt->name, short_name)) | |
84 score += 100; | |
85 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) | |
86 score += 10; | |
87 if (filename && fmt->extensions && | |
88 match_ext(filename, fmt->extensions)) { | |
89 score += 5; | |
90 } | |
91 if (score > score_max) { | |
92 score_max = score; | |
93 fmt_found = fmt; | |
94 } | |
95 fmt = fmt->next; | |
96 } | |
97 return fmt_found; | |
98 } | |
99 | |
100 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename, | |
101 const char *mime_type) | |
102 { | |
103 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type); | |
104 | |
105 if (fmt) { | |
106 AVOutputFormat *stream_fmt; | |
107 char stream_format_name[64]; | |
108 | |
109 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name); | |
110 stream_fmt = guess_format(stream_format_name, NULL, NULL); | |
111 | |
112 if (stream_fmt) | |
113 fmt = stream_fmt; | |
114 } | |
115 | |
116 return fmt; | |
117 } | |
118 | |
119 AVInputFormat *av_find_input_format(const char *short_name) | |
120 { | |
121 AVInputFormat *fmt; | |
122 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) { | |
123 if (!strcmp(fmt->name, short_name)) | |
124 return fmt; | |
125 } | |
126 return NULL; | |
127 } | |
128 | |
129 /* memory handling */ | |
130 | |
131 /** | |
132 * Default packet destructor | |
133 */ | |
134 static void av_destruct_packet(AVPacket *pkt) | |
135 { | |
136 free(pkt->data); | |
137 pkt->data = NULL; pkt->size = 0; | |
138 } | |
139 | |
140 /** | |
141 * Allocate the payload of a packet and intialized its fields to default values. | |
142 * | |
143 * @param pkt packet | |
144 * @param size wanted payload size | |
145 * @return 0 if OK. AVERROR_xxx otherwise. | |
146 */ | |
147 int av_new_packet(AVPacket *pkt, int size) | |
148 { | |
149 unsigned char *data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); | |
150 if (!data) | |
151 return AVERROR_NOMEM; | |
152 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); | |
153 | |
154 av_init_packet(pkt); | |
155 pkt->data = data; | |
156 pkt->size = size; | |
157 pkt->destruct = av_destruct_packet; | |
158 return 0; | |
159 } | |
160 | |
161 /* This is a hack - the packet memory allocation stuff is broken. The | |
162 packet is allocated if it was not really allocated */ | |
163 int av_dup_packet(AVPacket *pkt) | |
164 { | |
165 if (pkt->destruct != av_destruct_packet) { | |
166 uint8_t *data; | |
167 /* we duplicate the packet and don't forget to put the padding | |
168 again */ | |
169 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE); | |
170 if (!data) { | |
171 return AVERROR_NOMEM; | |
172 } | |
173 memcpy(data, pkt->data, pkt->size); | |
174 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE); | |
175 pkt->data = data; | |
176 pkt->destruct = av_destruct_packet; | |
177 } | |
178 return 0; | |
179 } | |
180 | |
181 /* fifo handling */ | |
182 | |
183 int fifo_init(FifoBuffer *f, int size) | |
184 { | |
185 f->buffer = av_malloc(size); | |
186 if (!f->buffer) | |
187 return -1; | |
188 f->end = f->buffer + size; | |
189 f->wptr = f->rptr = f->buffer; | |
190 return 0; | |
191 } | |
192 | |
193 void fifo_free(FifoBuffer *f) | |
194 { | |
195 free(f->buffer); | |
196 } | |
197 | |
198 int fifo_size(FifoBuffer *f, uint8_t *rptr) | |
199 { | |
200 int size; | |
201 | |
202 if (f->wptr >= rptr) { | |
203 size = f->wptr - rptr; | |
204 } else { | |
205 size = (f->end - rptr) + (f->wptr - f->buffer); | |
206 } | |
207 return size; | |
208 } | |
209 | |
210 /* get data from the fifo (return -1 if not enough data) */ | |
211 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr) | |
212 { | |
213 uint8_t *rptr = *rptr_ptr; | |
214 int size, len; | |
215 | |
216 if (f->wptr >= rptr) { | |
217 size = f->wptr - rptr; | |
218 } else { | |
219 size = (f->end - rptr) + (f->wptr - f->buffer); | |
220 } | |
221 | |
222 if (size < buf_size) | |
223 return -1; | |
224 while (buf_size > 0) { | |
225 len = f->end - rptr; | |
226 if (len > buf_size) | |
227 len = buf_size; | |
228 memcpy(buf, rptr, len); | |
229 buf += len; | |
230 rptr += len; | |
231 if (rptr >= f->end) | |
232 rptr = f->buffer; | |
233 buf_size -= len; | |
234 } | |
235 *rptr_ptr = rptr; | |
236 return 0; | |
237 } | |
238 | |
239 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr) | |
240 { | |
241 int len; | |
242 uint8_t *wptr; | |
243 wptr = *wptr_ptr; | |
244 while (size > 0) { | |
245 len = f->end - wptr; | |
246 if (len > size) | |
247 len = size; | |
248 memcpy(wptr, buf, len); | |
249 wptr += len; | |
250 if (wptr >= f->end) | |
251 wptr = f->buffer; | |
252 buf += len; | |
253 size -= len; | |
254 } | |
255 *wptr_ptr = wptr; | |
256 } | |
257 | |
/* Return >= 0 iff 'filename' contains a usable frame-number pattern
   (delegates to get_frame_filename with a dummy output buffer). */
int filename_number_test(const char *filename)
{
    char scratch[1024];

    return get_frame_filename(scratch, sizeof(scratch), filename, 1);
}
263 | |
264 /* guess file format */ | |
265 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened) | |
266 { | |
267 AVInputFormat *fmt1, *fmt; | |
268 int score, score_max; | |
269 | |
270 fmt = NULL; | |
271 score_max = 0; | |
272 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) { | |
273 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE)) | |
274 continue; | |
275 score = 0; | |
276 if (fmt1->read_probe) { | |
277 score = fmt1->read_probe(pd); | |
278 } else if (fmt1->extensions) { | |
279 if (match_ext(pd->filename, fmt1->extensions)) { | |
280 score = 50; | |
281 } | |
282 } | |
283 if (score > score_max) { | |
284 score_max = score; | |
285 fmt = fmt1; | |
286 } | |
287 } | |
288 return fmt; | |
289 } | |
290 | |
291 /************************************************************/ | |
292 /* input media file */ | |
293 | |
/**
 * Open a media file from an already-initialized IO stream. 'fmt' must
 * be specified by the caller.
 *
 * @param ic_ptr receives the new AVFormatContext (NULL on failure)
 * @param pb byte IO context, copied by value into the context; may be
 *           NULL for formats that read no file
 * @param filename stored in the context for later reference
 * @param fmt input format to use (must not be NULL)
 * @param ap extra parameters forwarded to the demuxer's read_header
 * @return 0 if OK, a negative AVERROR code otherwise
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
{
    int err;
    AVFormatContext *ic;

    ic = av_mallocz(sizeof(AVFormatContext));
    if (!ic) {
        err = AVERROR_NOMEM;
        goto fail;
    }
    ic->iformat = fmt;
    /* the IO context is copied by value, not referenced */
    if (pb)
        ic->pb = *pb;
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    pstrcpy(ic->filename, sizeof(ic->filename), filename);

    /* allocate demuxer-private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR_NOMEM;
            goto fail;
        }
    } else {
        ic->priv_data = NULL;
    }

    /* default pts settings is MPEG like: 33-bit counter at 90 kHz */
    av_set_pts_info(ic, 33, 1, 90000);
    ic->last_pkt_pts = AV_NOPTS_VALUE;
    ic->last_pkt_dts = AV_NOPTS_VALUE;
    ic->last_pkt_stream_pts = AV_NOPTS_VALUE;
    ic->last_pkt_stream_dts = AV_NOPTS_VALUE;

    err = ic->iformat->read_header(ic, ap);
    if (err < 0)
        goto fail;

    /* remember where the payload starts, after the header */
    if (pb)
        ic->data_offset = url_ftell(&ic->pb);

    *ic_ptr = ic;
    return 0;
 fail:
    /* release everything acquired so far; free(NULL) is a no-op */
    if (ic) {
        av_freep(&ic->priv_data);
    }
    free(ic);
    *ic_ptr = NULL;
    return err;
}
351 | |
352 #define PROBE_BUF_SIZE 2048 | |
353 | |
354 /** | |
355 * Open a media file as input. The codec are not opened. Only the file | |
356 * header (if present) is read. | |
357 * | |
358 * @param ic_ptr the opened media file handle is put here | |
359 * @param filename filename to open. | |
360 * @param fmt if non NULL, force the file format to use | |
361 * @param buf_size optional buffer size (zero if default is OK) | |
362 * @param ap additionnal parameters needed when opening the file (NULL if default) | |
363 * @return 0 if OK. AVERROR_xxx otherwise. | |
364 */ | |
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVInputFormat *fmt,
                       int buf_size,
                       AVFormatParameters *ap)
{
    int err, must_open_file, file_opened;
    uint8_t buf[PROBE_BUF_SIZE];
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext pb1, *pb = &pb1;

    file_opened = 0;
    pd->filename = "";
    if (filename)
        pd->filename = filename;
    pd->buf = buf;
    pd->buf_size = 0;

    if (!fmt) {
        /* first probe pass: filename only, no file contents yet */
        fmt = av_probe_input_format(pd, 0);
    }

    /* do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    must_open_file = 1;
    if (fmt && (fmt->flags & AVFMT_NOFILE)) {
        must_open_file = 0;
    }

    if (!fmt || must_open_file) {
        /* if no file needed do not try to open one */
        if (url_fopen(pb, filename, URL_RDONLY) < 0) {
            err = AVERROR_IO;
            goto fail;
        }
        file_opened = 1;
        if (buf_size > 0) {
            url_setbufsize(pb, buf_size);
        }
        if (!fmt) {
            /* read probe data, then rewind so the demuxer starts at 0 */
            pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
            url_fseek(pb, 0, SEEK_SET);
        }
    }

    /* second probe pass, now with file contents available */
    if (!fmt) {
        fmt = av_probe_input_format(pd, 1);
    }

    /* if still no format found, error */
    if (!fmt) {
        err = AVERROR_NOFMT;
        goto fail;
    }

    /* check filename in case of an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (filename_number_test(filename) < 0) {
            err = AVERROR_NUMEXPECTED;
            goto fail;
        }
    }
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
    if (err)
        goto fail;
    return 0;
 fail:
    /* close the file only if this function opened it */
    if (file_opened)
        url_fclose(pb);
    *ic_ptr = NULL;
    return err;

}
440 | |
441 /*******************************************************/ | |
442 | |
443 /** | |
444 * Read a transport packet from a media file. This function is | |
445 * absolete and should never be used. Use av_read_frame() instead. | |
446 * | |
447 * @param s media file handle | |
448 * @param pkt is filled | |
449 * @return 0 if OK. AVERROR_xxx if error. | |
450 */ | |
451 int av_read_packet(AVFormatContext *s, AVPacket *pkt) | |
452 { | |
453 return s->iformat->read_packet(s, pkt); | |
454 } | |
455 | |
456 /**********************************************************/ | |
457 | |
/* Convert a packet time stamp from stream units to AV_TIME_BASE units
   and handle wrapping. The wrapping is handled by considering the next
   PTS/DTS as a delta to the previous value. We handle the delta as an
   integer quotient plus a fractional remainder to avoid any rounding
   errors. The plast_* pointers carry the per-context conversion state;
   AV_NOPTS_VALUE is passed through unchanged. */
static inline int64_t convert_timestamp_units(AVFormatContext *s,
                                              int64_t *plast_pkt_pts,
                                              int *plast_pkt_pts_frac,
                                              int64_t *plast_pkt_stream_pts,
                                              int64_t pts)
{
    int64_t stream_pts;
    int64_t delta_pts;
    int shift, pts_frac;

    if (pts != (int64_t)AV_NOPTS_VALUE) {
        stream_pts = pts;
        if (*plast_pkt_stream_pts != (int64_t)AV_NOPTS_VALUE) {
            /* sign-extend the delta modulo 2^pts_wrap_bits: a wrap
               shows up as a small signed step */
            shift = 64 - s->pts_wrap_bits;
            delta_pts = ((stream_pts - *plast_pkt_stream_pts) << shift) >> shift;
            /* XXX: overflow possible but very unlikely as it is a delta */
            delta_pts = delta_pts * AV_TIME_BASE * s->pts_num;
            pts = *plast_pkt_pts + (delta_pts / s->pts_den);
            pts_frac = *plast_pkt_pts_frac + (delta_pts % s->pts_den);
            /* carry the accumulated fraction into the integer part */
            if (pts_frac >= s->pts_den) {
                pts_frac -= s->pts_den;
                pts++;
            }
        } else {
            /* no previous pts, so no wrapping possible */
            pts = (int64_t)(((double)stream_pts * AV_TIME_BASE * s->pts_num) /
                            (double)s->pts_den);
            pts_frac = 0;
        }
        *plast_pkt_stream_pts = stream_pts;
        *plast_pkt_pts = pts;
        *plast_pkt_pts_frac = pts_frac;
    }
    return pts;
}
497 | |
498 /* get the number of samples of an audio frame. Return (-1) if error */ | |
499 static int get_audio_frame_size(AVCodecContext *enc, int size) | |
500 { | |
501 int frame_size; | |
502 | |
503 if (enc->frame_size <= 1) { | |
504 /* specific hack for pcm codecs because no frame size is | |
505 provided */ | |
506 switch(enc->codec_id) { | |
507 case CODEC_ID_PCM_S16LE: | |
508 case CODEC_ID_PCM_S16BE: | |
509 case CODEC_ID_PCM_U16LE: | |
510 case CODEC_ID_PCM_U16BE: | |
511 if (enc->channels == 0) | |
512 return -1; | |
513 frame_size = size / (2 * enc->channels); | |
514 break; | |
515 case CODEC_ID_PCM_S8: | |
516 case CODEC_ID_PCM_U8: | |
517 case CODEC_ID_PCM_MULAW: | |
518 case CODEC_ID_PCM_ALAW: | |
519 if (enc->channels == 0) | |
520 return -1; | |
521 frame_size = size / (enc->channels); | |
522 break; | |
523 default: | |
524 /* used for example by ADPCM codecs */ | |
525 if (enc->bit_rate == 0) | |
526 return -1; | |
527 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate; | |
528 break; | |
529 } | |
530 } else { | |
531 frame_size = enc->frame_size; | |
532 } | |
533 return frame_size; | |
534 } | |
535 | |
536 | |
537 /* return the frame duration in seconds, return 0 if not available */ | |
538 static void compute_frame_duration(int *pnum, int *pden, | |
539 AVFormatContext *s, AVStream *st, | |
540 AVCodecParserContext *pc, AVPacket *pkt) | |
541 { | |
542 int frame_size; | |
543 | |
544 *pnum = 0; | |
545 *pden = 0; | |
546 switch(st->codec.codec_type) { | |
547 case CODEC_TYPE_AUDIO: | |
548 frame_size = get_audio_frame_size(&st->codec, pkt->size); | |
549 if (frame_size < 0) | |
550 break; | |
551 *pnum = frame_size; | |
552 *pden = st->codec.sample_rate; | |
553 break; | |
554 default: | |
555 break; | |
556 } | |
557 } | |
558 | |
/* Fill in missing duration, pts and dts fields of 'pkt' and advance
   the stream decode clock st->cur_dts accordingly. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed;

    /* derive a duration from the codec parameters when none is set */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, s, st, pc, pkt);
        if (den && num) {
            pkt->duration = (num * (int64_t)AV_TIME_BASE) / den;
        }
    }

    /* do we have a video B frame ? */
    /* NOTE(review): hard-coded to 0 here -- the B-frame detection was
       stripped from this copy, so the delayed branch below is dead */
    presentation_delayed = 0;

    /* interpolate PTS and DTS if they are not present */
    if (presentation_delayed) {
        /* DTS = decompression time stamp */
        /* PTS = presentation time stamp */
        if (pkt->dts == (int64_t)AV_NOPTS_VALUE) {
            pkt->dts = st->cur_dts;
        } else {
            st->cur_dts = pkt->dts;
        }
        /* this is tricky: the dts must be incremented by the duration
           of the frame we are displaying, i.e. the last I or P frame */
        if (st->last_IP_duration == 0)
            st->cur_dts += pkt->duration;
        else
            st->cur_dts += st->last_IP_duration;
        st->last_IP_duration = pkt->duration;
        /* cannot compute PTS if not present (we can compute it only
           by knowing the futur */
    } else {
        /* presentation is not delayed : PTS and DTS are the same */
        if (pkt->pts == (int64_t)AV_NOPTS_VALUE) {
            pkt->pts = st->cur_dts;
            pkt->dts = st->cur_dts;
        } else {
            st->cur_dts = pkt->pts;
            pkt->dts = pkt->pts;
        }
        st->cur_dts += pkt->duration;
    }

    /* update flags */
    if (pc) {
        pkt->flags = 0;
        /* XXX: that's odd, fix it later */
        switch(st->codec.codec_type) {
        case CODEC_TYPE_AUDIO:
            /* every audio frame is treated as a key frame */
            pkt->flags |= PKT_FLAG_KEY;
            break;
        default:
            break;
        }
    }

}
618 | |
619 static void av_destruct_packet_nofree(AVPacket *pkt) | |
620 { | |
621 pkt->data = NULL; pkt->size = 0; | |
622 } | |
623 | |
/* Demux/parse loop behind av_read_frame(): pulls raw packets with
   av_read_packet() and, when the stream needs it, feeds them through
   the stream's parser until a complete frame can be returned in 'pkt'.
   The partially consumed packet is kept in s->cur_pkt/cur_ptr/cur_len
   across calls. Returns 0 on success, negative on error or EOF. */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = s->cur_pkt;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                return 0;
            } else if (s->cur_len > 0) {
                /* feed the remaining bytes of the current packet to the
                   parser; it may or may not emit a complete frame */
                len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
                                      s->cur_ptr, s->cur_len,
                                      s->cur_pkt.pts, s->cur_pkt.dts);
                /* the packet timestamps apply only to the first parsed
                   frame: consume them */
                s->cur_pkt.pts = AV_NOPTS_VALUE;
                s->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                s->cur_ptr += len;
                s->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);
                    return 0;
                }
            } else {
                /* current packet fully consumed: free it and read on */
                av_free_packet(&s->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            /* read next packet */
            ret = av_read_packet(s, &s->cur_pkt);
            if (ret < 0) {
                if (ret == -EAGAIN)
                    return ret;
                /* on EOF, flush the parsers to return any buffered
                   frames */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser) {
                        av_parser_parse(st->parser, &st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminates parsing */
                return ret;
            }

            /* convert the packet time stamp units and handle wrapping */
            s->cur_pkt.pts = convert_timestamp_units(s,
                                                     &s->last_pkt_pts, &s->last_pkt_pts_frac,
                                                     &s->last_pkt_stream_pts,
                                                     s->cur_pkt.pts);
            s->cur_pkt.dts = convert_timestamp_units(s,
                                                     &s->last_pkt_dts, &s->last_pkt_dts_frac,
                                                     &s->last_pkt_stream_dts,
                                                     s->cur_pkt.dts);
#if 0
            if (s->cur_pkt.stream_index == 0) {
                if (s->cur_pkt.pts != AV_NOPTS_VALUE)
                    printf("PACKET pts=%0.3f\n",
                           (double)s->cur_pkt.pts / AV_TIME_BASE);
                if (s->cur_pkt.dts != AV_NOPTS_VALUE)
                    printf("PACKET dts=%0.3f\n",
                           (double)s->cur_pkt.dts / AV_TIME_BASE);
            }
#endif

            /* rescale the duration field to AV_TIME_BASE units */
            if (s->cur_pkt.duration != 0) {
                s->cur_pkt.duration = ((int64_t)s->cur_pkt.duration * AV_TIME_BASE * s->pts_num) /
                    s->pts_den;
            }

            st = s->streams[s->cur_pkt.stream_index];
            s->cur_st = st;
            s->cur_ptr = s->cur_pkt.data;
            s->cur_len = s->cur_pkt.size;
            /* lazily create a parser the first time this stream needs one */
            if (st->need_parsing && !st->parser) {
                st->parser = av_parser_init(st->codec.codec_id);
                if (!st->parser) {
                    /* no parser available : just output the raw packets */
                    st->need_parsing = 0;
                }
            }
        }
    }
}
728 | |
729 /** | |
730 * Return the next frame of a stream. The returned packet is valid | |
731 * until the next av_read_frame() or until av_close_input_file() and | |
732 * must be freed with av_free_packet. For video, the packet contains | |
733 * exactly one frame. For audio, it contains an integer number of | |
734 * frames if each frame has a known fixed size (e.g. PCM or ADPCM | |
735 * data). If the audio frames have a variable size (e.g. MPEG audio), | |
736 * then it contains one frame. | |
737 * | |
738 * pkt->pts, pkt->dts and pkt->duration are always set to correct | |
739 * values in AV_TIME_BASE unit (and guessed if the format cannot | |
740 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format | |
741 * has B frames, so it is better to rely on pkt->dts if you do not | |
742 * decompress the payload. | |
743 * | |
744 * Return 0 if OK, < 0 if error or end of file. | |
745 */ | |
746 int av_read_frame(AVFormatContext *s, AVPacket *pkt) | |
747 { | |
748 AVPacketList *pktl; | |
749 | |
750 pktl = s->packet_buffer; | |
751 if (pktl) { | |
752 /* read packet from packet buffer, if there is data */ | |
753 *pkt = pktl->pkt; | |
754 s->packet_buffer = pktl->next; | |
755 free(pktl); | |
756 return 0; | |
757 } else { | |
758 return av_read_frame_internal(s, pkt); | |
759 } | |
760 } | |
761 | |
762 /* XXX: suppress the packet queue */ | |
763 static void flush_packet_queue(AVFormatContext *s) | |
764 { | |
765 AVPacketList *pktl; | |
766 | |
767 for(;;) { | |
768 pktl = s->packet_buffer; | |
769 if (!pktl) | |
770 break; | |
771 s->packet_buffer = pktl->next; | |
772 av_free_packet(&pktl->pkt); | |
773 free(pktl); | |
774 } | |
775 } | |
776 | |
777 /*******************************************************/ | |
778 /* seek support */ | |
779 | |
/* Return the index of the default stream, or -1 when the context has
   no streams. */
int av_find_default_stream_index(AVFormatContext *s)
{
    int i;
    AVStream *st;

    if (s->nb_streams <= 0)
        return -1;
    /* NOTE(review): the loop body is empty -- the original selection
       logic appears to have been stripped from this copy, so stream 0
       is always returned when any stream exists */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];

    }
    return 0;
}
793 | |
794 /* flush the frame reader */ | |
795 static void av_read_frame_flush(AVFormatContext *s) | |
796 { | |
797 AVStream *st; | |
798 int i; | |
799 | |
800 flush_packet_queue(s); | |
801 | |
802 /* free previous packet */ | |
803 if (s->cur_st) { | |
804 if (s->cur_st->parser) | |
805 av_free_packet(&s->cur_pkt); | |
806 s->cur_st = NULL; | |
807 } | |
808 /* fail safe */ | |
809 s->cur_ptr = NULL; | |
810 s->cur_len = 0; | |
811 | |
812 /* for each stream, reset read state */ | |
813 for(i = 0; i < s->nb_streams; i++) { | |
814 st = s->streams[i]; | |
815 | |
816 if (st->parser) { | |
817 av_parser_close(st->parser); | |
818 st->parser = NULL; | |
819 } | |
820 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */ | |
821 } | |
822 } | |
823 | |
/* Add an index entry into a sorted list, updating the entry if it is
   already there. Returns the position of the (new or updated) entry. */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* NOTE(review): the av_fast_realloc result is not checked for NULL
       before use */
    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    st->index_entries= entries;

    if(st->nb_index_entries){
        /* find the insertion point: last entry with timestamp <= ours */
        index= av_index_search_timestamp(st, timestamp);
        ie= &entries[index];

        if(ie->timestamp != timestamp){
            if(ie->timestamp < timestamp){
                index++; //index points to next instead of previous entry, maybe nonexistant
                ie= &st->index_entries[index];
            }else
                assert(index==0);

            /* shift later entries up to keep the list sorted */
            if(index != st->nb_index_entries){
                assert(index < st->nb_index_entries);
                memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            }
            st->nb_index_entries++;
        }
    }else{
        /* empty list: append the first entry */
        index= st->nb_index_entries++;
        ie= &entries[index];
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->flags = flags;

    return index;
}
866 | |
867 /* build an index for raw streams using a parser */ | |
868 static void av_build_index_raw(AVFormatContext *s) | |
869 { | |
870 AVPacket pkt1, *pkt = &pkt1; | |
871 int ret; | |
872 AVStream *st; | |
873 | |
874 st = s->streams[0]; | |
875 av_read_frame_flush(s); | |
876 url_fseek(&s->pb, s->data_offset, SEEK_SET); | |
877 | |
878 for(;;) { | |
879 ret = av_read_frame(s, pkt); | |
880 if (ret < 0) | |
881 break; | |
882 if (pkt->stream_index == 0 && st->parser && | |
883 (pkt->flags & PKT_FLAG_KEY)) { | |
884 av_add_index_entry(st, st->parser->frame_offset, pkt->dts, | |
885 0, AVINDEX_KEYFRAME); | |
886 } | |
887 av_free_packet(pkt); | |
888 } | |
889 } | |
890 | |
891 /* return TRUE if we deal with a raw stream (raw codec data and | |
892 parsing needed) */ | |
893 static int is_raw_stream(AVFormatContext *s) | |
894 { | |
895 AVStream *st; | |
896 | |
897 if (s->nb_streams != 1) | |
898 return 0; | |
899 st = s->streams[0]; | |
900 if (!st->need_parsing) | |
901 return 0; | |
902 return 1; | |
903 } | |
904 | |
905 /* return the largest index entry whose timestamp is <= | |
906 wanted_timestamp */ | |
907 int av_index_search_timestamp(AVStream *st, int wanted_timestamp) | |
908 { | |
909 AVIndexEntry *entries= st->index_entries; | |
910 int nb_entries= st->nb_index_entries; | |
911 int a, b, m; | |
912 int64_t timestamp; | |
913 | |
914 if (nb_entries <= 0) | |
915 return -1; | |
916 | |
917 a = 0; | |
918 b = nb_entries - 1; | |
919 | |
920 while (a < b) { | |
921 m = (a + b + 1) >> 1; | |
922 timestamp = entries[m].timestamp; | |
923 if (timestamp > wanted_timestamp) { | |
924 b = m - 1; | |
925 } else { | |
926 a = m; | |
927 } | |
928 } | |
929 return a; | |
930 } | |
931 | |
/* Generic seek fallback: lazily build an index (raw streams only),
   look up the last entry with timestamp <= 'timestamp' and reposition
   the byte stream there. Returns 0 on success, -1 when no index can
   be built or no entry matches. */
static int av_seek_frame_generic(AVFormatContext *s,
                                 int stream_index, int64_t timestamp)
{
    int index;
    AVStream *st;
    AVIndexEntry *ie;

    if (!s->index_built) {
        if (is_raw_stream(s)) {
            av_build_index_raw(s);
        } else {
            return -1;
        }
        s->index_built = 1;
    }

    /* a negative stream index selects the default stream (0) */
    if (stream_index < 0)
        stream_index = 0;
    st = s->streams[stream_index];
    index = av_index_search_timestamp(st, timestamp);
    if (index < 0)
        return -1;

    /* now we have found the index, we can seek */
    ie = &st->index_entries[index];
    av_read_frame_flush(s);
    url_fseek(&s->pb, ie->pos, SEEK_SET);
    st->cur_dts = ie->timestamp;
    return 0;
}
962 | |
963 /** | |
964 * Seek to the key frame just before the frame at timestamp | |
965 * 'timestamp' in 'stream_index'. If stream_index is (-1), a default | |
966 * stream is selected | |
967 */ | |
968 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp) | |
969 { | |
970 int ret; | |
971 | |
972 av_read_frame_flush(s); | |
973 | |
974 /* first, we try the format specific seek */ | |
975 if (s->iformat->read_seek) | |
976 ret = s->iformat->read_seek(s, stream_index, timestamp); | |
977 else | |
978 ret = -1; | |
979 if (ret >= 0) { | |
980 return 0; | |
981 } | |
982 | |
983 return av_seek_frame_generic(s, stream_index, timestamp); | |
984 } | |
985 | |
986 /*******************************************************/ | |
987 | |
988 /* return TRUE if the stream has accurate timings for at least one component */ | |
989 //#if 0 McMCC | |
990 static int av_has_timings(AVFormatContext *ic) | |
991 { | |
992 int i; | |
993 AVStream *st; | |
994 | |
995 for(i = 0;i < ic->nb_streams; i++) { | |
996 st = ic->streams[i]; | |
997 if (st->start_time != (int64_t)AV_NOPTS_VALUE && | |
998 st->duration != (int64_t)AV_NOPTS_VALUE) | |
999 return 1; | |
1000 } | |
1001 return 0; | |
1002 } | |
1003 | |
1004 /* estimate the stream timings from the one of each components. Also | |
1005 compute the global bitrate if possible */ | |
1006 static void av_update_stream_timings(AVFormatContext *ic) | |
1007 { | |
1008 int64_t start_time, end_time, end_time1; | |
1009 int i; | |
1010 AVStream *st; | |
1011 | |
1012 start_time = MAXINT64; | |
1013 end_time = MININT64; | |
1014 for(i = 0;i < ic->nb_streams; i++) { | |
1015 st = ic->streams[i]; | |
1016 if (st->start_time != (int64_t)AV_NOPTS_VALUE) { | |
1017 if (st->start_time < start_time) | |
1018 start_time = st->start_time; | |
1019 if (st->duration != (int64_t)AV_NOPTS_VALUE) { | |
1020 end_time1 = st->start_time + st->duration; | |
1021 if (end_time1 > end_time) | |
1022 end_time = end_time1; | |
1023 } | |
1024 } | |
1025 } | |
1026 if (start_time != MAXINT64) { | |
1027 ic->start_time = start_time; | |
1028 if (end_time != MAXINT64) { | |
1029 ic->duration = end_time - start_time; | |
1030 if (ic->file_size > 0) { | |
1031 /* compute the bit rate */ | |
1032 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / | |
1033 (double)ic->duration; | |
1034 } | |
1035 } | |
1036 } | |
1037 | |
1038 } | |
1039 | |
1040 static void fill_all_stream_timings(AVFormatContext *ic) | |
1041 { | |
1042 int i; | |
1043 AVStream *st; | |
1044 | |
1045 av_update_stream_timings(ic); | |
1046 for(i = 0;i < ic->nb_streams; i++) { | |
1047 st = ic->streams[i]; | |
1048 if (st->start_time == (int64_t)AV_NOPTS_VALUE) { | |
1049 st->start_time = ic->start_time; | |
1050 st->duration = ic->duration; | |
1051 } | |
1052 } | |
1053 } | |
1054 | |
1055 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic) | |
1056 { | |
1057 int64_t filesize, duration; | |
1058 int bit_rate, i; | |
1059 AVStream *st; | |
1060 | |
1061 /* if bit_rate is already set, we believe it */ | |
1062 if (ic->bit_rate == 0) { | |
1063 bit_rate = 0; | |
1064 for(i=0;i<ic->nb_streams;i++) { | |
1065 st = ic->streams[i]; | |
1066 bit_rate += st->codec.bit_rate; | |
1067 } | |
1068 ic->bit_rate = bit_rate; | |
1069 } | |
1070 | |
1071 /* if duration is already set, we believe it */ | |
1072 if (ic->duration == (int64_t)AV_NOPTS_VALUE && | |
1073 ic->bit_rate != 0 && | |
1074 ic->file_size != 0) { | |
1075 filesize = ic->file_size; | |
1076 if (filesize > 0) { | |
1077 duration = (int64_t)((8 * AV_TIME_BASE * (double)filesize) / (double)ic->bit_rate); | |
1078 for(i = 0; i < ic->nb_streams; i++) { | |
1079 st = ic->streams[i]; | |
1080 if (st->start_time == (int64_t)AV_NOPTS_VALUE || | |
1081 st->duration == (int64_t)AV_NOPTS_VALUE) { | |
1082 st->start_time = 0; | |
1083 st->duration = duration; | |
1084 } | |
1085 } | |
1086 } | |
1087 } | |
1088 } | |
1089 | |
/* number of bytes inspected at each end of the file when estimating
   timings from PTS values */
#define DURATION_MAX_READ_SIZE 250000
#if 0
/* only usable for MPEG-PS streams */
/* NOTE(review): dead code, disabled in this fork; kept verbatim for
   reference.  Estimates per-stream start times from packets at the
   beginning of the file and durations from packets near the end. */
static void av_estimate_timings_from_pts(AVFormatContext *ic)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int read_size, i, ret;
    int64_t start_time, end_time, end_time1;
    int64_t filesize, offset, duration;

    /* free previous packet */
    if (ic->cur_st && ic->cur_st->parser)
        av_free_packet(&ic->cur_pkt);
    ic->cur_st = NULL;

    /* flush packet queue */
    flush_packet_queue(ic);


    /* we read the first packets to get the first PTS (not fully
       accurate, but it is enough now) */
    url_fseek(&ic->pb, 0, SEEK_SET);
    read_size = 0;
    for(;;) {
        if (read_size >= DURATION_MAX_READ_SIZE)
            break;
        /* if all info is available, we can stop */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams)
            break;

        ret = av_read_packet(ic, pkt);
        if (ret != 0)
            break;
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE) {
            if (st->start_time == AV_NOPTS_VALUE)
                st->start_time = (int64_t)((double)pkt->pts * ic->pts_num * (double)AV_TIME_BASE / ic->pts_den);
        }
        av_free_packet(pkt);
    }

    /* we compute the minimum start_time and use it as default */
    start_time = MAXINT64;
    for(i = 0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE &&
            st->start_time < start_time)
            start_time = st->start_time;
    }
    if (start_time != MAXINT64)
        ic->start_time = start_time;

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->file_size;
    offset = filesize - DURATION_MAX_READ_SIZE;
    if (offset < 0)
        offset = 0;

    url_fseek(&ic->pb, offset, SEEK_SET);
    read_size = 0;
    for(;;) {
        if (read_size >= DURATION_MAX_READ_SIZE)
            break;
        /* if all info is available, we can stop */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->duration == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams)
            break;

        ret = av_read_packet(ic, pkt);
        if (ret != 0)
            break;
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE) {
            end_time = (int64_t)((double)pkt->pts * ic->pts_num * (double)AV_TIME_BASE / ic->pts_den);
            duration = end_time - st->start_time;
            if (duration > 0) {
                if (st->duration == AV_NOPTS_VALUE ||
                    st->duration < duration)
                    st->duration = duration;
            }
        }
        av_free_packet(pkt);
    }

    /* estimate total duration */
    end_time = MININT64;
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->duration != AV_NOPTS_VALUE) {
            end_time1 = st->start_time + st->duration;
            if (end_time1 > end_time)
                end_time = end_time1;
        }
    }

    /* update start_time (new stream may have been created, so we do
       it at the end */
    if (ic->start_time != AV_NOPTS_VALUE) {
        for(i = 0; i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE)
                st->start_time = ic->start_time;
        }
    }

    if (end_time != MININT64) {
        /* put dummy values for duration if needed */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->duration == AV_NOPTS_VALUE &&
                st->start_time != AV_NOPTS_VALUE)
                st->duration = end_time - st->start_time;
        }
        ic->duration = end_time - ic->start_time;
    }

    url_fseek(&ic->pb, 0, SEEK_SET);
}
#endif
/* Fill in ic->file_size and estimate the stream timings: prefer the
   per-stream timings when at least one stream has them, otherwise fall
   back to a bitrate-based guess. */
static void av_estimate_timings(AVFormatContext *ic)
{
    URLContext *h;
    int64_t file_size;

    /* get the file size, if possible */
    if (ic->iformat->flags & AVFMT_NOFILE) {
        /* format does not read from a regular file: size unknown */
        file_size = 0;
    } else {
        h = url_fileno(&ic->pb);
        file_size = url_filesize(h);
        if (file_size < 0)
            file_size = 0;
    }
    ic->file_size = file_size;

#if 0
    /* disabled: accurate PTS-based estimation for MPEG-PS streams */
    if (ic->iformat == &mpegps_demux) {
        /* get accurate estimate from the PTSes */
        av_estimate_timings_from_pts(ic);
    } else
#endif
    if (av_has_timings(ic)) {
        /* at least one components has timings - we use them for all
           the components */
        fill_all_stream_timings(ic);
    } else {
        /* less precise: use bit rate info */
        av_estimate_timings_from_bit_rate(ic);
    }
    av_update_stream_timings(ic);

#if 0
    /* debug dump of the computed timings */
    {
        int i;
        AVStream *st;
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            printf("%d: start_time: %0.3f duration: %0.3f\n",
                   i, (double)st->start_time / AV_TIME_BASE,
                   (double)st->duration / AV_TIME_BASE);
        }
        printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
               (double)ic->start_time / AV_TIME_BASE,
               (double)ic->duration / AV_TIME_BASE,
               ic->bit_rate / 1000);
    }
#endif
}
1271 | |
1272 | |
1273 static int has_codec_parameters(AVCodecContext *enc) | |
1274 { | |
1275 int val; | |
1276 switch(enc->codec_type) { | |
1277 case CODEC_TYPE_AUDIO: | |
1278 val = enc->sample_rate; | |
1279 break; | |
1280 default: | |
1281 val = 1; | |
1282 break; | |
1283 } | |
1284 return (val != 0); | |
1285 } | |
1286 | |
#if 0 /* Dead code; compiler assures me it isn't used anywhere */
/* NOTE(review): disabled helper, kept for reference. Opens the stream
   decoder and decodes one buffer, used historically to discover codec
   parameters when headers are missing. */
static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
{
    int16_t *samples;
    AVCodec *codec;
    int got_picture, ret;
    AVFrame picture;

    codec = avcodec_find_decoder(st->codec.codec_id);
    if (!codec)
        return -1;
    ret = avcodec_open(&st->codec, codec);
    if (ret < 0)
        return ret;
    switch(st->codec.codec_type) {
    case CODEC_TYPE_AUDIO:
        samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
        if (!samples)
            goto fail;

        ret = avcodec_decode_audio(&st->codec, samples,
                                   &got_picture, (uint8_t *)data, size);
        free(samples);

        break;
    default:
        break;
    }
fail:
    avcodec_close(&st->codec);
    return ret;
}
#endif
1320 | |
/* absolute maximum number of packet bytes read while probing in
   av_find_stream_info() before we abort */
#define MAX_READ_SIZE 5000000

/* maximum per-stream duration (in AV_TIME_BASE units, i.e. one
   second) analysed before we stop probing that stream */
#define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1326 | |
1327 /** | |
1328 * Read the beginning of a media file to get stream information. This | |
1329 * is useful for file formats with no headers such as MPEG. This | |
1330 * function also compute the real frame rate in case of mpeg2 repeat | |
1331 * frame mode. | |
1332 * | |
1333 * @param ic media file handle | |
1334 * @return >=0 if OK. AVERROR_xxx if error. | |
1335 */ | |
1336 int av_find_stream_info(AVFormatContext *ic) | |
1337 { | |
1338 int i, count, ret, read_size; | |
1339 AVStream *st; | |
1340 AVPacket pkt1, *pkt; | |
1341 AVPacketList *pktl=NULL, **ppktl; | |
1342 | |
1343 count = 0; | |
1344 read_size = 0; | |
1345 ppktl = &ic->packet_buffer; | |
1346 for(;;) { | |
1347 /* check if one codec still needs to be handled */ | |
1348 for(i=0;i<ic->nb_streams;i++) { | |
1349 st = ic->streams[i]; | |
1350 if (!has_codec_parameters(&st->codec)) | |
1351 break; | |
1352 } | |
1353 if (i == ic->nb_streams) { | |
1354 /* NOTE: if the format has no header, then we need to read | |
1355 some packets to get most of the streams, so we cannot | |
1356 stop here */ | |
1357 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { | |
1358 /* if we found the info for all the codecs, we can stop */ | |
1359 ret = count; | |
1360 break; | |
1361 } | |
1362 } else { | |
1363 /* we did not get all the codec info, but we read too much data */ | |
1364 if (read_size >= MAX_READ_SIZE) { | |
1365 ret = count; | |
1366 break; | |
1367 } | |
1368 } | |
1369 | |
1370 /* NOTE: a new stream can be added there if no header in file | |
1371 (AVFMTCTX_NOHEADER) */ | |
1372 ret = av_read_frame_internal(ic, &pkt1); | |
1373 if (ret < 0) { | |
1374 /* EOF or error */ | |
1375 ret = -1; /* we could not have all the codec parameters before EOF */ | |
1376 if ((ic->ctx_flags & AVFMTCTX_NOHEADER) && | |
1377 i == ic->nb_streams) | |
1378 ret = 0; | |
1379 break; | |
1380 } | |
1381 | |
1382 pktl = av_mallocz(sizeof(AVPacketList)); | |
1383 if (!pktl) { | |
1384 ret = AVERROR_NOMEM; | |
1385 break; | |
1386 } | |
1387 | |
1388 /* add the packet in the buffered packet list */ | |
1389 *ppktl = pktl; | |
1390 ppktl = &pktl->next; | |
1391 | |
1392 pkt = &pktl->pkt; | |
1393 *pkt = pkt1; | |
1394 | |
1395 /* duplicate the packet */ | |
1396 if (av_dup_packet(pkt) < 0) { | |
1397 ret = AVERROR_NOMEM; | |
1398 break; | |
1399 } | |
1400 | |
1401 read_size += pkt->size; | |
1402 | |
1403 st = ic->streams[pkt->stream_index]; | |
1404 st->codec_info_duration += pkt->duration; | |
1405 if (pkt->duration != 0) | |
1406 st->codec_info_nb_frames++; | |
1407 | |
1408 /* if still no information, we try to open the codec and to | |
1409 decompress the frame. We try to avoid that in most cases as | |
1410 it takes longer and uses more memory. For MPEG4, we need to | |
1411 decompress for Quicktime. */ | |
1412 if (st->codec_info_duration >= MAX_STREAM_DURATION) { | |
1413 break; | |
1414 } | |
1415 count++; | |
1416 } | |
1417 | |
1418 /* set real frame rate info */ | |
1419 for(i=0;i<ic->nb_streams;i++) { | |
1420 st = ic->streams[i]; | |
1421 } | |
1422 | |
1423 av_estimate_timings(ic); | |
1424 return ret; | |
1425 } | |
1426 //McMCC | |
1427 /*******************************************************/ | |
1428 | |
1429 /** | |
1430 * start playing a network based stream (e.g. RTSP stream) at the | |
1431 * current position | |
1432 */ | |
1433 int av_read_play(AVFormatContext *s) | |
1434 { | |
1435 if (!s->iformat->read_play) | |
1436 return AVERROR_NOTSUPP; | |
1437 return s->iformat->read_play(s); | |
1438 } | |
1439 | |
1440 /** | |
1441 * pause a network based stream (e.g. RTSP stream). Use av_read_play() | |
1442 * to resume it. | |
1443 */ | |
1444 int av_read_pause(AVFormatContext *s) | |
1445 { | |
1446 if (!s->iformat->read_pause) | |
1447 return AVERROR_NOTSUPP; | |
1448 return s->iformat->read_pause(s); | |
1449 } | |
1450 | |
1451 /** | |
1452 * Close a media file (but not its codecs) | |
1453 * | |
1454 * @param s media file handle | |
1455 */ | |
1456 void av_close_input_file(AVFormatContext *s) | |
1457 { | |
1458 int i, must_open_file; | |
1459 AVStream *st; | |
1460 | |
1461 /* free previous packet */ | |
1462 if (s->cur_st && s->cur_st->parser) | |
1463 av_free_packet(&s->cur_pkt); | |
1464 | |
1465 if (s->iformat->read_close) | |
1466 s->iformat->read_close(s); | |
1467 for(i=0;i<s->nb_streams;i++) { | |
1468 /* free all data in a stream component */ | |
1469 st = s->streams[i]; | |
1470 if (st->parser) { | |
1471 av_parser_close(st->parser); | |
1472 } | |
1473 free(st->index_entries); | |
1474 free(st); | |
1475 } | |
1476 flush_packet_queue(s); | |
1477 must_open_file = 1; | |
1478 if (s->iformat->flags & AVFMT_NOFILE) { | |
1479 must_open_file = 0; | |
1480 } | |
1481 if (must_open_file) { | |
1482 url_fclose(&s->pb); | |
1483 } | |
1484 av_freep(&s->priv_data); | |
1485 free(s); | |
1486 } | |
1487 | |
1488 /** | |
1489 * Add a new stream to a media file. Can only be called in the | |
1490 * read_header function. If the flag AVFMTCTX_NOHEADER is in the | |
1491 * format context, then new streams can be added in read_packet too. | |
1492 * | |
1493 * | |
1494 * @param s media file handle | |
1495 * @param id file format dependent stream id | |
1496 */ | |
1497 AVStream *av_new_stream(AVFormatContext *s, int id) | |
1498 { | |
1499 AVStream *st; | |
1500 | |
1501 if (s->nb_streams >= MAX_STREAMS) | |
1502 return NULL; | |
1503 | |
1504 st = av_mallocz(sizeof(AVStream)); | |
1505 if (!st) | |
1506 return NULL; | |
1507 avcodec_get_context_defaults(&st->codec); | |
1508 if (s->iformat) { | |
1509 /* no default bitrate if decoding */ | |
1510 st->codec.bit_rate = 0; | |
1511 } | |
1512 st->index = s->nb_streams; | |
1513 st->id = id; | |
1514 st->start_time = AV_NOPTS_VALUE; | |
1515 st->duration = AV_NOPTS_VALUE; | |
1516 s->streams[s->nb_streams++] = st; | |
1517 return st; | |
1518 } | |
1519 | |
1520 /************************************************************/ | |
1521 /* output media file */ | |
1522 | |
1523 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) | |
1524 { | |
1525 int ret; | |
1526 | |
1527 if (s->oformat->priv_data_size > 0) { | |
1528 s->priv_data = av_mallocz(s->oformat->priv_data_size); | |
1529 if (!s->priv_data) | |
1530 return AVERROR_NOMEM; | |
1531 } else | |
1532 s->priv_data = NULL; | |
1533 | |
1534 if (s->oformat->set_parameters) { | |
1535 ret = s->oformat->set_parameters(s, ap); | |
1536 if (ret < 0) | |
1537 return ret; | |
1538 } | |
1539 return 0; | |
1540 } | |
1541 | |
1542 /** | |
1543 * allocate the stream private data and write the stream header to an | |
1544 * output media file | |
1545 * | |
1546 * @param s media file handle | |
1547 * @return 0 if OK. AVERROR_xxx if error. | |
1548 */ | |
1549 int av_write_header(AVFormatContext *s) | |
1550 { | |
1551 int ret, i; | |
1552 AVStream *st; | |
1553 | |
1554 /* default pts settings is MPEG like */ | |
1555 av_set_pts_info(s, 33, 1, 90000); | |
1556 ret = s->oformat->write_header(s); | |
1557 if (ret < 0) | |
1558 return ret; | |
1559 | |
1560 /* init PTS generation */ | |
1561 for(i=0;i<s->nb_streams;i++) { | |
1562 st = s->streams[i]; | |
1563 | |
1564 switch (st->codec.codec_type) { | |
1565 case CODEC_TYPE_AUDIO: | |
1566 av_frac_init(&st->pts, 0, 0, | |
1567 (int64_t)s->pts_num * st->codec.sample_rate); | |
1568 break; | |
1569 default: | |
1570 break; | |
1571 } | |
1572 } | |
1573 return 0; | |
1574 } | |
1575 | |
1576 /** | |
1577 * Write a packet to an output media file. The packet shall contain | |
1578 * one audio or video frame. | |
1579 * | |
1580 * @param s media file handle | |
1581 * @param stream_index stream index | |
1582 * @param buf buffer containing the frame data | |
1583 * @param size size of buffer | |
1584 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted. | |
1585 */ | |
1586 int av_write_frame(AVFormatContext *s, int stream_index, const uint8_t *buf, | |
1587 int size) | |
1588 { | |
1589 AVStream *st; | |
1590 int64_t pts_mask; | |
1591 int ret, frame_size; | |
1592 | |
1593 st = s->streams[stream_index]; | |
1594 pts_mask = (1LL << s->pts_wrap_bits) - 1; | |
1595 ret = s->oformat->write_packet(s, stream_index, buf, size, | |
1596 st->pts.val & pts_mask); | |
1597 if (ret < 0) | |
1598 return ret; | |
1599 | |
1600 /* update pts */ | |
1601 switch (st->codec.codec_type) { | |
1602 case CODEC_TYPE_AUDIO: | |
1603 frame_size = get_audio_frame_size(&st->codec, size); | |
1604 if (frame_size >= 0) { | |
1605 av_frac_add(&st->pts, | |
1606 (int64_t)s->pts_den * frame_size); | |
1607 } | |
1608 break; | |
1609 default: | |
1610 break; | |
1611 } | |
1612 return ret; | |
1613 } | |
1614 | |
1615 /** | |
1616 * write the stream trailer to an output media file and and free the | |
1617 * file private data. | |
1618 * | |
1619 * @param s media file handle | |
1620 * @return 0 if OK. AVERROR_xxx if error. */ | |
1621 int av_write_trailer(AVFormatContext *s) | |
1622 { | |
1623 int ret; | |
1624 ret = s->oformat->write_trailer(s); | |
1625 av_freep(&s->priv_data); | |
1626 return ret; | |
1627 } | |
1628 | |
1629 /* "user interface" functions */ | |
1630 | |
1631 void dump_format(AVFormatContext *ic, | |
1632 int index, | |
1633 const char *url, | |
1634 int is_output) | |
1635 { | |
1636 int i, flags; | |
1637 char buf[256]; | |
1638 | |
1639 fprintf(stderr, "%s #%d, %s, %s '%s':\n", | |
1640 is_output ? "Output" : "Input", | |
1641 index, | |
1642 is_output ? ic->oformat->name : ic->iformat->name, | |
1643 is_output ? "to" : "from", url); | |
1644 if (!is_output) { | |
1645 fprintf(stderr, " Duration: "); | |
1646 if (ic->duration != (int64_t)AV_NOPTS_VALUE) { | |
1647 int hours, mins, secs, us; | |
1648 secs = ic->duration / AV_TIME_BASE; | |
1649 us = ic->duration % AV_TIME_BASE; | |
1650 mins = secs / 60; | |
1651 secs %= 60; | |
1652 hours = mins / 60; | |
1653 mins %= 60; | |
1654 fprintf(stderr, "%02d:%02d:%02d.%01d", hours, mins, secs, | |
1655 (10 * us) / AV_TIME_BASE); | |
1656 } else { | |
1657 fprintf(stderr, "N/A"); | |
1658 } | |
1659 fprintf(stderr, ", bitrate: "); | |
1660 if (ic->bit_rate) { | |
1661 fprintf(stderr,"%d kb/s", ic->bit_rate / 1000); | |
1662 } else { | |
1663 fprintf(stderr, "N/A"); | |
1664 } | |
1665 fprintf(stderr, "\n"); | |
1666 } | |
1667 for(i=0;i<ic->nb_streams;i++) { | |
1668 AVStream *st = ic->streams[i]; | |
1669 avcodec_string(buf, sizeof(buf), &st->codec, is_output); | |
1670 fprintf(stderr, " Stream #%d.%d", index, i); | |
1671 /* the pid is an important information, so we display it */ | |
1672 /* XXX: add a generic system */ | |
1673 if (is_output) | |
1674 flags = ic->oformat->flags; | |
1675 else | |
1676 flags = ic->iformat->flags; | |
1677 if (flags & AVFMT_SHOW_IDS) { | |
1678 fprintf(stderr, "[0x%x]", st->id); | |
1679 } | |
1680 fprintf(stderr, ": %s\n", buf); | |
1681 } | |
1682 } | |
1683 | |
/* Entry in the frame size / frame rate abbreviation table. */
typedef struct {
    const char *abv;                  /* abbreviation, e.g. "pal" */
    int width, height;                /* frame size in pixels */
    int frame_rate, frame_rate_base;  /* rate = frame_rate/frame_rate_base; 0/0 when size-only */
} AbvEntry;

/* Known abbreviations, looked up by parse_image_size() and
   parse_frame_rate(). */
static AbvEntry frame_abvs[] = {
    { "ntsc", 720, 480, 30000, 1001 },
    { "pal", 720, 576, 25, 1 },
    { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
    { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
    { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
    { "spal", 768, 576, 25, 1 }, /* square pixel pal */
    { "film", 352, 240, 24, 1 },
    { "ntsc-film", 352, 240, 24000, 1001 },
    { "sqcif", 128, 96, 0, 0 },
    { "qcif", 176, 144, 0, 0 },
    { "cif", 352, 288, 0, 0 },
    { "4cif", 704, 576, 0, 0 },
};
1704 | |
1705 int parse_image_size(int *width_ptr, int *height_ptr, const char *str) | |
1706 { | |
1707 int i; | |
1708 int n = sizeof(frame_abvs) / sizeof(AbvEntry); | |
1709 const char *p; | |
1710 int frame_width = 0, frame_height = 0; | |
1711 | |
1712 for(i=0;i<n;i++) { | |
1713 if (!strcmp(frame_abvs[i].abv, str)) { | |
1714 frame_width = frame_abvs[i].width; | |
1715 frame_height = frame_abvs[i].height; | |
1716 break; | |
1717 } | |
1718 } | |
1719 if (i == n) { | |
1720 p = str; | |
1721 frame_width = strtol(p, (char **)&p, 10); | |
1722 if (*p) | |
1723 p++; | |
1724 frame_height = strtol(p, (char **)&p, 10); | |
1725 } | |
1726 if (frame_width <= 0 || frame_height <= 0) | |
1727 return -1; | |
1728 *width_ptr = frame_width; | |
1729 *height_ptr = frame_height; | |
1730 return 0; | |
1731 } | |
1732 | |
1733 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg) | |
1734 { | |
1735 size_t i; | |
1736 char* cp; | |
1737 | |
1738 /* First, we check our abbreviation table */ | |
1739 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i) | |
1740 if (!strcmp(frame_abvs[i].abv, arg)) { | |
1741 *frame_rate = frame_abvs[i].frame_rate; | |
1742 *frame_rate_base = frame_abvs[i].frame_rate_base; | |
1743 return 0; | |
1744 } | |
1745 | |
1746 /* Then, we try to parse it as fraction */ | |
1747 cp = strchr(arg, '/'); | |
1748 if (cp) { | |
1749 char* cpp; | |
1750 *frame_rate = strtol(arg, &cpp, 10); | |
1751 if (cpp != arg || cpp == cp) | |
1752 *frame_rate_base = strtol(cp+1, &cpp, 10); | |
1753 else | |
1754 *frame_rate = 0; | |
1755 } | |
1756 else { | |
1757 /* Finally we give up and parse it as double */ | |
1758 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; | |
1759 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5); | |
1760 } | |
1761 if (!*frame_rate || !*frame_rate_base) | |
1762 return -1; | |
1763 else | |
1764 return 0; | |
1765 } | |
1766 | |
/* Syntax:
 * - If not a duration:
 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
 * Time is localtime unless Z is suffixed to the end. In this case GMT
 * Return the date in micro seconds since 1970
 * - If duration:
 * HH[:MM[:SS[.m...]]]
 * S+[.m...]
 */
int64_t parse_date(const char *datestr, int duration)
{
    const char *p;
    int64_t t;
    struct tm dt;
    size_t i;
    static const char *date_fmt[] = {
        "%Y-%m-%d",
        "%Y%m%d",
    };
    static const char *time_fmt[] = {
        "%H:%M:%S",
        "%H%M%S",
    };
    const char *q;
    int is_utc, len;
    char lastch;
    time_t now = time(0);

    /* a trailing 'z'/'Z' marks the timestamp as UTC */
    len = strlen(datestr);
    if (len > 0)
        lastch = datestr[len - 1];
    else
        lastch = '\0';
    is_utc = (lastch == 'z' || lastch == 'Z');

    memset(&dt, 0, sizeof(dt));

    p = datestr;
    q = NULL;
    if (!duration) {
        /* try each supported date format in turn */
        for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
            q = small_strptime(p, date_fmt[i], &dt);
            if (q) {
                break;
            }
        }

        if (!q) {
            /* no date part: default to today at midnight */
            if (is_utc) {
                dt = *gmtime(&now);
            } else {
                dt = *localtime(&now);
            }
            dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
        } else {
            p = q;
        }

        /* skip the date/time separator */
        if (*p == 'T' || *p == 't' || *p == ' ')
            p++;

        /* try each supported time format in turn */
        for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
            q = small_strptime(p, time_fmt[i], &dt);
            if (q) {
                break;
            }
        }
    } else {
        /* duration: "HH:MM:SS" or a plain number of seconds */
        q = small_strptime(p, time_fmt[0], &dt);
        if (!q) {
            dt.tm_sec = strtol(p, (char **)&q, 10);
            dt.tm_min = 0;
            dt.tm_hour = 0;
        }
    }

    /* Now we have all the fields that we can get */
    if (!q) {
        /* nothing parsed: 0 for a duration, the current time otherwise */
        if (duration)
            return 0;
        else
            return now * int64_t_C(1000000);
    }

    if (duration) {
        t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
    } else {
        dt.tm_isdst = -1; /* unknown */
        if (is_utc) {
            t = mktimegm(&dt);
        } else {
            t = mktime(&dt);
        }
    }

    /* seconds -> microseconds */
    t *= 1000000;

    /* optional fractional part ".m..." up to microsecond precision */
    if (*q == '.') {
        int val, n;
        q++;
        for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
            if (!isdigit((int) *q))
                break;
            val += n * (*q - '0');
        }
        t += val;
    }
    return t;
}
1876 | |
/* Search 'info' ("?tag1=val1&tag2=val2...") for tag 'tag1'. On a
   match, the (lightly URL-decoded: '+' -> ' ') value is copied into
   'arg' and 1 is returned; otherwise 0. */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    const char *src = info;
    char name[128], *dst;

    if (*src == '?')
        src++;
    for (;;) {
        /* collect the tag name up to '=', '&' or end of string */
        dst = name;
        while (*src != '\0' && *src != '=' && *src != '&') {
            if ((size_t)(dst - name) < sizeof(name) - 1)
                *dst++ = *src;
            src++;
        }
        *dst = '\0';

        /* collect and decode the value, when one is present */
        dst = arg;
        if (*src == '=') {
            src++;
            while (*src != '&' && *src != '\0') {
                if ((dst - arg) < arg_size - 1)
                    *dst++ = (*src == '+') ? ' ' : *src;
                src++;
            }
            *dst = '\0';
        }
        if (!strcmp(name, tag1))
            return 1;
        if (*src != '&')
            break;
        src++;
    }
    return 0;
}
1917 | |
/* Return in 'buf' the path with '%d' replaced by 'number'. Also
   handles the '%0nd' form where 'n' is the total number of digits,
   and '%%' as a literal percent. Returns 0 if OK, -1 on format error
   (no '%d', more than one '%d', unknown conversion, or overflow). */
int get_frame_filename(char *buf, int buf_size,
                       const char *path, int number)
{
    const char *src = path;
    char *dst = buf;
    char num_buf[20];
    int width, num_len, seen_percent_d = 0;
    char ch;

    for (;;) {
        ch = *src++;
        if (ch == '\0')
            break;
        if (ch == '%') {
            /* optional zero-padding width, e.g. "%05d" */
            width = 0;
            while (isdigit((int)*src))
                width = width * 10 + *src++ - '0';
            ch = *src++;
            switch (ch) {
            case '%':
                goto copy_char;
            case 'd':
                if (seen_percent_d)
                    goto fail;   /* at most one %d allowed */
                seen_percent_d = 1;
                snprintf(num_buf, sizeof(num_buf), "%0*d", width, number);
                num_len = strlen(num_buf);
                if ((dst - buf + num_len) > buf_size - 1)
                    goto fail;
                memcpy(dst, num_buf, num_len);
                dst += num_len;
                break;
            default:
                goto fail;
            }
        } else {
        copy_char:
            if ((dst - buf) < buf_size - 1)
                *dst++ = ch;
        }
    }
    if (!seen_percent_d)
        goto fail;
    *dst = '\0';
    return 0;
fail:
    *dst = '\0';
    return -1;
}
1975 | |
/**
 * Print a nice hex dump of a buffer, 16 bytes per row: offset, hex
 * bytes, then a printable-ASCII rendering (non-printables as '.').
 *
 * @param f stream for output
 * @param buf buffer
 * @param size buffer size
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    int row, col, row_len, ch;

    for (row = 0; row < size; row += 16) {
        row_len = size - row;
        if (row_len > 16)
            row_len = 16;
        /* offset column */
        fprintf(f, "%08x ", row);
        /* hex columns, padded so the ASCII part aligns */
        for (col = 0; col < 16; col++) {
            if (col < row_len)
                fprintf(f, " %02x", buf[row + col]);
            else
                fprintf(f, " ");
        }
        fprintf(f, " ");
        /* ASCII rendering */
        for (col = 0; col < row_len; col++) {
            ch = buf[row + col];
            if (ch < ' ' || ch > '~')
                ch = '.';
            fprintf(f, "%c", ch);
        }
        fprintf(f, "\n");
    }
}
2007 | |
2008 /** | |
2009 * Print on 'f' a nice dump of a packet | |
2010 * @param f stream for output | |
2011 * @param pkt packet to dump | |
2012 * @param dump_payload true if the payload must be displayed too | |
2013 */ | |
2014 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload) | |
2015 { | |
2016 fprintf(f, "stream #%d:\n", pkt->stream_index); | |
2017 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0)); | |
2018 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE); | |
2019 /* DTS is _always_ valid after av_read_frame() */ | |
2020 fprintf(f, " dts="); | |
2021 if (pkt->dts == (int64_t)AV_NOPTS_VALUE) | |
2022 fprintf(f, "N/A"); | |
2023 else | |
2024 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE); | |
2025 /* PTS may be not known if B frames are present */ | |
2026 fprintf(f, " pts="); | |
2027 if (pkt->pts == (int64_t)AV_NOPTS_VALUE) | |
2028 fprintf(f, "N/A"); | |
2029 else | |
2030 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE); | |
2031 fprintf(f, "\n"); | |
2032 fprintf(f, " size=%d\n", pkt->size); | |
2033 if (dump_payload) | |
2034 av_hex_dump(f, pkt->data, pkt->size); | |
2035 } | |
2036 | |
/* Split 'url' into its protocol, hostname, port and path components.
   Missing components are returned as empty strings; a missing port is
   returned as -1. Input with no "proto:" prefix is treated as a bare
   path. */
void url_split(char *proto, int proto_size,
               char *hostname, int hostname_size,
               int *port_ptr,
               char *path, int path_size,
               const char *url)
{
    const char *src;
    char *dst;
    int port = -1;

    /* copy the scheme (everything before ':') */
    src = url;
    dst = proto;
    while (*src != ':' && *src != '\0') {
        if ((dst - proto) < proto_size - 1)
            *dst++ = *src;
        src++;
    }
    if (proto_size > 0)
        *dst = '\0';

    if (*src == '\0') {
        /* no scheme found: the whole input is a path */
        if (proto_size > 0)
            proto[0] = '\0';
        if (hostname_size > 0)
            hostname[0] = '\0';
        src = url;
    } else {
        /* skip ':' and up to two '/' characters ("proto://") */
        src++;
        if (*src == '/')
            src++;
        if (*src == '/')
            src++;
        /* hostname runs until ':', '/', '?' or end of string */
        dst = hostname;
        while (*src != ':' && *src != '/' && *src != '?' && *src != '\0') {
            if ((dst - hostname) < hostname_size - 1)
                *dst++ = *src;
            src++;
        }
        if (hostname_size > 0)
            *dst = '\0';
        /* optional ":port" */
        if (*src == ':') {
            src++;
            port = strtoul(src, (char **)&src, 10);
        }
    }
    if (port_ptr)
        *port_ptr = port;
    pstrcpy(path, path_size, src);
}
2087 | |
2088 /** | |
2089 * Set the pts for a given stream | |
2090 * @param s stream | |
2091 * @param pts_wrap_bits number of bits effectively used by the pts | |
2092 * (used for wrap control, 33 is the value for MPEG) | |
2093 * @param pts_num numerator to convert to seconds (MPEG: 1) | |
2094 * @param pts_den denominator to convert to seconds (MPEG: 90000) | |
2095 */ | |
2096 void av_set_pts_info(AVFormatContext *s, int pts_wrap_bits, | |
2097 int pts_num, int pts_den) | |
2098 { | |
2099 s->pts_wrap_bits = pts_wrap_bits; | |
2100 s->pts_num = pts_num; | |
2101 s->pts_den = pts_den; | |
2102 } | |
2103 | |
2104 /* fraction handling */ | |
2105 | |
2106 /** | |
2107 * f = val + (num / den) + 0.5. 'num' is normalized so that it is such | |
2108 * as 0 <= num < den. | |
2109 * | |
2110 * @param f fractional number | |
2111 * @param val integer value | |
2112 * @param num must be >= 0 | |
2113 * @param den must be >= 1 | |
2114 */ | |
2115 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) | |
2116 { | |
2117 num += (den >> 1); | |
2118 if (num >= den) { | |
2119 val += num / den; | |
2120 num = num % den; | |
2121 } | |
2122 f->val = val; | |
2123 f->num = num; | |
2124 f->den = den; | |
2125 } | |
2126 | |
2127 /* set f to (val + 0.5) */ | |
2128 void av_frac_set(AVFrac *f, int64_t val) | |
2129 { | |
2130 f->val = val; | |
2131 f->num = f->den >> 1; | |
2132 } | |
2133 | |
2134 /** | |
2135 * Fractionnal addition to f: f = f + (incr / f->den) | |
2136 * | |
2137 * @param f fractional number | |
2138 * @param incr increment, can be positive or negative | |
2139 */ | |
2140 void av_frac_add(AVFrac *f, int64_t incr) | |
2141 { | |
2142 int64_t num, den; | |
2143 | |
2144 num = f->num + incr; | |
2145 den = f->den; | |
2146 if (num < 0) { | |
2147 f->val += num / den; | |
2148 num = num % den; | |
2149 if (num < 0) { | |
2150 num += den; | |
2151 f->val--; | |
2152 } | |
2153 } else if (num >= den) { | |
2154 f->val += num / den; | |
2155 num = num % den; | |
2156 } | |
2157 f->num = num; | |
2158 } | |
2159 | |
2160 /** | |
2161 * register a new image format | |
2162 * @param img_fmt Image format descriptor | |
2163 */ | |
2164 void av_register_image_format(AVImageFormat *img_fmt) | |
2165 { | |
2166 AVImageFormat **p; | |
2167 | |
2168 p = &first_image_format; | |
2169 while (*p != NULL) p = &(*p)->next; | |
2170 *p = img_fmt; | |
2171 img_fmt->next = NULL; | |
2172 } | |
2173 | |
2174 /* guess image format */ | |
2175 AVImageFormat *av_probe_image_format(AVProbeData *pd) | |
2176 { | |
2177 AVImageFormat *fmt1, *fmt; | |
2178 int score, score_max; | |
2179 | |
2180 fmt = NULL; | |
2181 score_max = 0; | |
2182 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) { | |
2183 if (fmt1->img_probe) { | |
2184 score = fmt1->img_probe(pd); | |
2185 if (score > score_max) { | |
2186 score_max = score; | |
2187 fmt = fmt1; | |
2188 } | |
2189 } | |
2190 } | |
2191 return fmt; | |
2192 } | |
2193 | |
2194 AVImageFormat *guess_image_format(const char *filename) | |
2195 { | |
2196 AVImageFormat *fmt1; | |
2197 | |
2198 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) { | |
2199 if (fmt1->extensions && match_ext(filename, fmt1->extensions)) | |
2200 return fmt1; | |
2201 } | |
2202 return NULL; | |
2203 } | |
2204 | |
2205 /** | |
2206 * Read an image from a stream. | |
2207 * @param gb byte stream containing the image | |
2208 * @param fmt image format, NULL if probing is required | |
2209 */ | |
2210 int av_read_image(ByteIOContext *pb, const char *filename, | |
2211 AVImageFormat *fmt, | |
2212 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque) | |
2213 { | |
2214 unsigned char buf[PROBE_BUF_SIZE]; | |
2215 AVProbeData probe_data, *pd = &probe_data; | |
2216 offset_t pos; | |
2217 int ret; | |
2218 | |
2219 if (!fmt) { | |
2220 pd->filename = filename; | |
2221 pd->buf = buf; | |
2222 pos = url_ftell(pb); | |
2223 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE); | |
2224 url_fseek(pb, pos, SEEK_SET); | |
2225 fmt = av_probe_image_format(pd); | |
2226 } | |
2227 if (!fmt) | |
2228 return AVERROR_NOFMT; | |
2229 ret = fmt->img_read(pb, alloc_cb, opaque); | |
2230 return ret; | |
2231 } | |
2232 | |
2233 /** | |
2234 * Write an image to a stream. | |
2235 * @param pb byte stream for the image output | |
2236 * @param fmt image format | |
2237 * @param img image data and informations | |
2238 */ | |
2239 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img) | |
2240 { | |
2241 return fmt->img_write(pb, img); | |
2242 } | |
2243 |