Mercurial > libavformat.hg
comparison utils.c @ 885:da1d5db0ce5c libavformat
COSMETICS: Remove all trailing whitespace.
author | diego |
---|---|
date | Sat, 17 Dec 2005 18:14:38 +0000 |
parents | 7a7a6eda769d |
children | d70e50f1495f |
comparison
equal
deleted
inserted
replaced
884:2ece9c9dd94c | 885:da1d5db0ce5c |
---|---|
56 const char *ext, *p; | 56 const char *ext, *p; |
57 char ext1[32], *q; | 57 char ext1[32], *q; |
58 | 58 |
59 if(!filename) | 59 if(!filename) |
60 return 0; | 60 return 0; |
61 | 61 |
62 ext = strrchr(filename, '.'); | 62 ext = strrchr(filename, '.'); |
63 if (ext) { | 63 if (ext) { |
64 ext++; | 64 ext++; |
65 p = extensions; | 65 p = extensions; |
66 for(;;) { | 66 for(;;) { |
67 q = ext1; | 67 q = ext1; |
68 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1) | 68 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1) |
69 *q++ = *p++; | 69 *q++ = *p++; |
70 *q = '\0'; | 70 *q = '\0'; |
71 if (!strcasecmp(ext1, ext)) | 71 if (!strcasecmp(ext1, ext)) |
72 return 1; | 72 return 1; |
73 if (*p == '\0') | 73 if (*p == '\0') |
74 break; | 74 break; |
75 p++; | 75 p++; |
76 } | 76 } |
77 } | 77 } |
78 return 0; | 78 return 0; |
79 } | 79 } |
80 | 80 |
81 AVOutputFormat *guess_format(const char *short_name, const char *filename, | 81 AVOutputFormat *guess_format(const char *short_name, const char *filename, |
82 const char *mime_type) | 82 const char *mime_type) |
83 { | 83 { |
84 AVOutputFormat *fmt, *fmt_found; | 84 AVOutputFormat *fmt, *fmt_found; |
85 int score_max, score; | 85 int score_max, score; |
86 | 86 |
87 /* specific test for image sequences */ | 87 /* specific test for image sequences */ |
88 if (!short_name && filename && | 88 if (!short_name && filename && |
89 filename_number_test(filename) >= 0 && | 89 filename_number_test(filename) >= 0 && |
90 av_guess_image2_codec(filename) != CODEC_ID_NONE) { | 90 av_guess_image2_codec(filename) != CODEC_ID_NONE) { |
91 return guess_format("image2", NULL, NULL); | 91 return guess_format("image2", NULL, NULL); |
92 } | 92 } |
93 if (!short_name && filename && | 93 if (!short_name && filename && |
94 filename_number_test(filename) >= 0 && | 94 filename_number_test(filename) >= 0 && |
95 guess_image_format(filename)) { | 95 guess_image_format(filename)) { |
96 return guess_format("image", NULL, NULL); | 96 return guess_format("image", NULL, NULL); |
97 } | 97 } |
98 | 98 |
104 score = 0; | 104 score = 0; |
105 if (fmt->name && short_name && !strcmp(fmt->name, short_name)) | 105 if (fmt->name && short_name && !strcmp(fmt->name, short_name)) |
106 score += 100; | 106 score += 100; |
107 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) | 107 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) |
108 score += 10; | 108 score += 10; |
109 if (filename && fmt->extensions && | 109 if (filename && fmt->extensions && |
110 match_ext(filename, fmt->extensions)) { | 110 match_ext(filename, fmt->extensions)) { |
111 score += 5; | 111 score += 5; |
112 } | 112 } |
113 if (score > score_max) { | 113 if (score > score_max) { |
114 score_max = score; | 114 score_max = score; |
115 fmt_found = fmt; | 115 fmt_found = fmt; |
116 } | 116 } |
117 fmt = fmt->next; | 117 fmt = fmt->next; |
118 } | 118 } |
119 return fmt_found; | 119 return fmt_found; |
120 } | 120 } |
121 | 121 |
122 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename, | 122 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename, |
123 const char *mime_type) | 123 const char *mime_type) |
124 { | 124 { |
125 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type); | 125 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type); |
126 | 126 |
127 if (fmt) { | 127 if (fmt) { |
139 } | 139 } |
140 | 140 |
141 /** | 141 /** |
142 * Guesses the codec id based upon muxer and filename. | 142 * Guesses the codec id based upon muxer and filename. |
143 */ | 143 */ |
144 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, | 144 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, |
145 const char *filename, const char *mime_type, enum CodecType type){ | 145 const char *filename, const char *mime_type, enum CodecType type){ |
146 if(type == CODEC_TYPE_VIDEO){ | 146 if(type == CODEC_TYPE_VIDEO){ |
147 enum CodecID codec_id= CODEC_ID_NONE; | 147 enum CodecID codec_id= CODEC_ID_NONE; |
148 | 148 |
149 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){ | 149 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){ |
191 */ | 191 */ |
192 int av_new_packet(AVPacket *pkt, int size) | 192 int av_new_packet(AVPacket *pkt, int size) |
193 { | 193 { |
194 void *data; | 194 void *data; |
195 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE) | 195 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE) |
196 return AVERROR_NOMEM; | 196 return AVERROR_NOMEM; |
197 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); | 197 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); |
198 if (!data) | 198 if (!data) |
199 return AVERROR_NOMEM; | 199 return AVERROR_NOMEM; |
200 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); | 200 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); |
201 | 201 |
202 av_init_packet(pkt); | 202 av_init_packet(pkt); |
203 pkt->data = data; | 203 pkt->data = data; |
204 pkt->size = size; | 204 pkt->size = size; |
205 pkt->destruct = av_destruct_packet; | 205 pkt->destruct = av_destruct_packet; |
206 return 0; | 206 return 0; |
207 } | 207 } |
208 | 208 |
238 if (pkt->destruct != av_destruct_packet) { | 238 if (pkt->destruct != av_destruct_packet) { |
239 uint8_t *data; | 239 uint8_t *data; |
240 /* we duplicate the packet and don't forget to put the padding | 240 /* we duplicate the packet and don't forget to put the padding |
241 again */ | 241 again */ |
242 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE) | 242 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE) |
243 return AVERROR_NOMEM; | 243 return AVERROR_NOMEM; |
244 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE); | 244 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE); |
245 if (!data) { | 245 if (!data) { |
246 return AVERROR_NOMEM; | 246 return AVERROR_NOMEM; |
247 } | 247 } |
248 memcpy(data, pkt->data, pkt->size); | 248 memcpy(data, pkt->data, pkt->size); |
271 } | 271 } |
272 | 272 |
273 int fifo_size(FifoBuffer *f, uint8_t *rptr) | 273 int fifo_size(FifoBuffer *f, uint8_t *rptr) |
274 { | 274 { |
275 int size; | 275 int size; |
276 | 276 |
277 if(!rptr) | 277 if(!rptr) |
278 rptr= f->rptr; | 278 rptr= f->rptr; |
279 | 279 |
280 if (f->wptr >= rptr) { | 280 if (f->wptr >= rptr) { |
281 size = f->wptr - rptr; | 281 size = f->wptr - rptr; |
300 if (f->wptr >= rptr) { | 300 if (f->wptr >= rptr) { |
301 size = f->wptr - rptr; | 301 size = f->wptr - rptr; |
302 } else { | 302 } else { |
303 size = (f->end - rptr) + (f->wptr - f->buffer); | 303 size = (f->end - rptr) + (f->wptr - f->buffer); |
304 } | 304 } |
305 | 305 |
306 if (size < buf_size) | 306 if (size < buf_size) |
307 return -1; | 307 return -1; |
308 while (buf_size > 0) { | 308 while (buf_size > 0) { |
309 len = f->end - rptr; | 309 len = f->end - rptr; |
310 if (len > buf_size) | 310 if (len > buf_size) |
323 /** | 323 /** |
324 * Resizes a FIFO. | 324 * Resizes a FIFO. |
325 */ | 325 */ |
326 void fifo_realloc(FifoBuffer *f, unsigned int new_size){ | 326 void fifo_realloc(FifoBuffer *f, unsigned int new_size){ |
327 unsigned int old_size= f->end - f->buffer; | 327 unsigned int old_size= f->end - f->buffer; |
328 | 328 |
329 if(old_size < new_size){ | 329 if(old_size < new_size){ |
330 uint8_t *old= f->buffer; | 330 uint8_t *old= f->buffer; |
331 | 331 |
332 f->buffer= av_realloc(f->buffer, new_size); | 332 f->buffer= av_realloc(f->buffer, new_size); |
333 | 333 |
374 if (f->wptr >= rptr) { | 374 if (f->wptr >= rptr) { |
375 size = f->wptr - rptr; | 375 size = f->wptr - rptr; |
376 } else { | 376 } else { |
377 size = (f->end - rptr) + (f->wptr - f->buffer); | 377 size = (f->end - rptr) + (f->wptr - f->buffer); |
378 } | 378 } |
379 | 379 |
380 if (size < buf_size) | 380 if (size < buf_size) |
381 return -1; | 381 return -1; |
382 while (buf_size > 0) { | 382 while (buf_size > 0) { |
383 len = f->end - rptr; | 383 len = f->end - rptr; |
384 if (len > buf_size) | 384 if (len > buf_size) |
419 score = fmt1->read_probe(pd); | 419 score = fmt1->read_probe(pd); |
420 } else if (fmt1->extensions) { | 420 } else if (fmt1->extensions) { |
421 if (match_ext(pd->filename, fmt1->extensions)) { | 421 if (match_ext(pd->filename, fmt1->extensions)) { |
422 score = 50; | 422 score = 50; |
423 } | 423 } |
424 } | 424 } |
425 if (score > score_max) { | 425 if (score > score_max) { |
426 score_max = score; | 426 score_max = score; |
427 fmt = fmt1; | 427 fmt = fmt1; |
428 } | 428 } |
429 } | 429 } |
457 | 457 |
458 /** | 458 /** |
459 * Allocates all the structures needed to read an input stream. | 459 * Allocates all the structures needed to read an input stream. |
460 * This does not open the needed codecs for decoding the stream[s]. | 460 * This does not open the needed codecs for decoding the stream[s]. |
461 */ | 461 */ |
462 int av_open_input_stream(AVFormatContext **ic_ptr, | 462 int av_open_input_stream(AVFormatContext **ic_ptr, |
463 ByteIOContext *pb, const char *filename, | 463 ByteIOContext *pb, const char *filename, |
464 AVInputFormat *fmt, AVFormatParameters *ap) | 464 AVInputFormat *fmt, AVFormatParameters *ap) |
465 { | 465 { |
466 int err; | 466 int err; |
467 AVFormatContext *ic; | 467 AVFormatContext *ic; |
468 | 468 |
519 * @param fmt if non NULL, force the file format to use | 519 * @param fmt if non NULL, force the file format to use |
520 * @param buf_size optional buffer size (zero if default is OK) | 520 * @param buf_size optional buffer size (zero if default is OK) |
521 * @param ap additionnal parameters needed when opening the file (NULL if default) | 521 * @param ap additionnal parameters needed when opening the file (NULL if default) |
522 * @return 0 if OK. AVERROR_xxx otherwise. | 522 * @return 0 if OK. AVERROR_xxx otherwise. |
523 */ | 523 */ |
524 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, | 524 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, |
525 AVInputFormat *fmt, | 525 AVInputFormat *fmt, |
526 int buf_size, | 526 int buf_size, |
527 AVFormatParameters *ap) | 527 AVFormatParameters *ap) |
528 { | 528 { |
529 int err, must_open_file, file_opened; | 529 int err, must_open_file, file_opened; |
530 uint8_t buf[PROBE_BUF_SIZE]; | 530 uint8_t buf[PROBE_BUF_SIZE]; |
531 AVProbeData probe_data, *pd = &probe_data; | 531 AVProbeData probe_data, *pd = &probe_data; |
532 ByteIOContext pb1, *pb = &pb1; | 532 ByteIOContext pb1, *pb = &pb1; |
533 | 533 |
534 file_opened = 0; | 534 file_opened = 0; |
535 pd->filename = ""; | 535 pd->filename = ""; |
536 if (filename) | 536 if (filename) |
537 pd->filename = filename; | 537 pd->filename = filename; |
538 pd->buf = buf; | 538 pd->buf = buf; |
571 goto fail; | 571 goto fail; |
572 } | 572 } |
573 } | 573 } |
574 } | 574 } |
575 } | 575 } |
576 | 576 |
577 /* guess file format */ | 577 /* guess file format */ |
578 if (!fmt) { | 578 if (!fmt) { |
579 fmt = av_probe_input_format(pd, 1); | 579 fmt = av_probe_input_format(pd, 1); |
580 } | 580 } |
581 | 581 |
582 /* if still no format found, error */ | 582 /* if still no format found, error */ |
583 if (!fmt) { | 583 if (!fmt) { |
584 err = AVERROR_NOFMT; | 584 err = AVERROR_NOFMT; |
585 goto fail; | 585 goto fail; |
586 } | 586 } |
587 | 587 |
588 /* XXX: suppress this hack for redirectors */ | 588 /* XXX: suppress this hack for redirectors */ |
589 #ifdef CONFIG_NETWORK | 589 #ifdef CONFIG_NETWORK |
590 if (fmt == &redir_demux) { | 590 if (fmt == &redir_demux) { |
591 err = redir_open(ic_ptr, pb); | 591 err = redir_open(ic_ptr, pb); |
592 url_fclose(pb); | 592 url_fclose(pb); |
594 } | 594 } |
595 #endif | 595 #endif |
596 | 596 |
597 /* check filename in case of an image number is expected */ | 597 /* check filename in case of an image number is expected */ |
598 if (fmt->flags & AVFMT_NEEDNUMBER) { | 598 if (fmt->flags & AVFMT_NEEDNUMBER) { |
599 if (filename_number_test(filename) < 0) { | 599 if (filename_number_test(filename) < 0) { |
600 err = AVERROR_NUMEXPECTED; | 600 err = AVERROR_NUMEXPECTED; |
601 goto fail; | 601 goto fail; |
602 } | 602 } |
603 } | 603 } |
604 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap); | 604 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap); |
608 fail: | 608 fail: |
609 if (file_opened) | 609 if (file_opened) |
610 url_fclose(pb); | 610 url_fclose(pb); |
611 *ic_ptr = NULL; | 611 *ic_ptr = NULL; |
612 return err; | 612 return err; |
613 | 613 |
614 } | 614 } |
615 | 615 |
616 /*******************************************************/ | 616 /*******************************************************/ |
617 | 617 |
618 /** | 618 /** |
619 * Read a transport packet from a media file. | 619 * Read a transport packet from a media file. |
620 * | 620 * |
621 * This function is absolete and should never be used. | 621 * This function is absolete and should never be used. |
622 * Use av_read_frame() instead. | 622 * Use av_read_frame() instead. |
623 * | 623 * |
624 * @param s media file handle | 624 * @param s media file handle |
625 * @param pkt is filled | 625 * @param pkt is filled |
626 * @return 0 if OK. AVERROR_xxx if error. | 626 * @return 0 if OK. AVERROR_xxx if error. |
627 */ | 627 */ |
628 int av_read_packet(AVFormatContext *s, AVPacket *pkt) | 628 int av_read_packet(AVFormatContext *s, AVPacket *pkt) |
629 { | 629 { |
630 return s->iformat->read_packet(s, pkt); | 630 return s->iformat->read_packet(s, pkt); |
631 } | 631 } |
691 | 691 |
692 | 692 |
693 /** | 693 /** |
694 * Return the frame duration in seconds, return 0 if not available. | 694 * Return the frame duration in seconds, return 0 if not available. |
695 */ | 695 */ |
696 static void compute_frame_duration(int *pnum, int *pden, AVStream *st, | 696 static void compute_frame_duration(int *pnum, int *pden, AVStream *st, |
697 AVCodecParserContext *pc, AVPacket *pkt) | 697 AVCodecParserContext *pc, AVPacket *pkt) |
698 { | 698 { |
699 int frame_size; | 699 int frame_size; |
700 | 700 |
701 *pnum = 0; | 701 *pnum = 0; |
752 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL; | 752 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL; |
753 int64_t delta= last_ts - mask/2; | 753 int64_t delta= last_ts - mask/2; |
754 return ((lsb - delta)&mask) + delta; | 754 return ((lsb - delta)&mask) + delta; |
755 } | 755 } |
756 | 756 |
757 static void compute_pkt_fields(AVFormatContext *s, AVStream *st, | 757 static void compute_pkt_fields(AVFormatContext *s, AVStream *st, |
758 AVCodecParserContext *pc, AVPacket *pkt) | 758 AVCodecParserContext *pc, AVPacket *pkt) |
759 { | 759 { |
760 int num, den, presentation_delayed; | 760 int num, den, presentation_delayed; |
761 /* handle wrapping */ | 761 /* handle wrapping */ |
762 if(st->cur_dts != AV_NOPTS_VALUE){ | 762 if(st->cur_dts != AV_NOPTS_VALUE){ |
763 if(pkt->pts != AV_NOPTS_VALUE) | 763 if(pkt->pts != AV_NOPTS_VALUE) |
764 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits); | 764 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits); |
765 if(pkt->dts != AV_NOPTS_VALUE) | 765 if(pkt->dts != AV_NOPTS_VALUE) |
766 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits); | 766 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits); |
767 } | 767 } |
768 | 768 |
769 if (pkt->duration == 0) { | 769 if (pkt->duration == 0) { |
770 compute_frame_duration(&num, &den, st, pc, pkt); | 770 compute_frame_duration(&num, &den, st, pc, pkt); |
771 if (den && num) { | 771 if (den && num) { |
772 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num); | 772 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num); |
773 } | 773 } |
779 /* do we have a video B frame ? */ | 779 /* do we have a video B frame ? */ |
780 presentation_delayed = 0; | 780 presentation_delayed = 0; |
781 if (st->codec->codec_type == CODEC_TYPE_VIDEO) { | 781 if (st->codec->codec_type == CODEC_TYPE_VIDEO) { |
782 /* XXX: need has_b_frame, but cannot get it if the codec is | 782 /* XXX: need has_b_frame, but cannot get it if the codec is |
783 not initialized */ | 783 not initialized */ |
784 if (( st->codec->codec_id == CODEC_ID_H264 | 784 if (( st->codec->codec_id == CODEC_ID_H264 |
785 || st->codec->has_b_frames) && | 785 || st->codec->has_b_frames) && |
786 pc && pc->pict_type != FF_B_TYPE) | 786 pc && pc->pict_type != FF_B_TYPE) |
787 presentation_delayed = 1; | 787 presentation_delayed = 1; |
788 /* this may be redundant, but it shouldnt hurt */ | 788 /* this may be redundant, but it shouldnt hurt */ |
789 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) | 789 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) |
790 presentation_delayed = 1; | 790 presentation_delayed = 1; |
791 } | 791 } |
792 | 792 |
793 if(st->cur_dts == AV_NOPTS_VALUE){ | 793 if(st->cur_dts == AV_NOPTS_VALUE){ |
794 if(presentation_delayed) st->cur_dts = -pkt->duration; | 794 if(presentation_delayed) st->cur_dts = -pkt->duration; |
795 else st->cur_dts = 0; | 795 else st->cur_dts = 0; |
796 } | 796 } |
797 | 797 |
826 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){ | 826 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){ |
827 pkt->pts += pkt->duration; | 827 pkt->pts += pkt->duration; |
828 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size); | 828 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size); |
829 } | 829 } |
830 } | 830 } |
831 | 831 |
832 /* presentation is not delayed : PTS and DTS are the same */ | 832 /* presentation is not delayed : PTS and DTS are the same */ |
833 if (pkt->pts == AV_NOPTS_VALUE) { | 833 if (pkt->pts == AV_NOPTS_VALUE) { |
834 if (pkt->dts == AV_NOPTS_VALUE) { | 834 if (pkt->dts == AV_NOPTS_VALUE) { |
835 pkt->pts = st->cur_dts; | 835 pkt->pts = st->cur_dts; |
836 pkt->dts = st->cur_dts; | 836 pkt->dts = st->cur_dts; |
844 pkt->dts = pkt->pts; | 844 pkt->dts = pkt->pts; |
845 } | 845 } |
846 st->cur_dts += pkt->duration; | 846 st->cur_dts += pkt->duration; |
847 } | 847 } |
848 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts); | 848 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts); |
849 | 849 |
850 /* update flags */ | 850 /* update flags */ |
851 if (pc) { | 851 if (pc) { |
852 pkt->flags = 0; | 852 pkt->flags = 0; |
853 /* key frame computation */ | 853 /* key frame computation */ |
854 switch(st->codec->codec_type) { | 854 switch(st->codec->codec_type) { |
885 *pkt = s->cur_pkt; | 885 *pkt = s->cur_pkt; |
886 compute_pkt_fields(s, st, NULL, pkt); | 886 compute_pkt_fields(s, st, NULL, pkt); |
887 s->cur_st = NULL; | 887 s->cur_st = NULL; |
888 return 0; | 888 return 0; |
889 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) { | 889 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) { |
890 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size, | 890 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size, |
891 s->cur_ptr, s->cur_len, | 891 s->cur_ptr, s->cur_len, |
892 s->cur_pkt.pts, s->cur_pkt.dts); | 892 s->cur_pkt.pts, s->cur_pkt.dts); |
893 s->cur_pkt.pts = AV_NOPTS_VALUE; | 893 s->cur_pkt.pts = AV_NOPTS_VALUE; |
894 s->cur_pkt.dts = AV_NOPTS_VALUE; | 894 s->cur_pkt.dts = AV_NOPTS_VALUE; |
895 /* increment read pointer */ | 895 /* increment read pointer */ |
896 s->cur_ptr += len; | 896 s->cur_ptr += len; |
897 s->cur_len -= len; | 897 s->cur_len -= len; |
898 | 898 |
899 /* return packet if any */ | 899 /* return packet if any */ |
900 if (pkt->size) { | 900 if (pkt->size) { |
901 got_packet: | 901 got_packet: |
902 pkt->duration = 0; | 902 pkt->duration = 0; |
903 pkt->stream_index = st->index; | 903 pkt->stream_index = st->index; |
907 compute_pkt_fields(s, st, st->parser, pkt); | 907 compute_pkt_fields(s, st, st->parser, pkt); |
908 return 0; | 908 return 0; |
909 } | 909 } |
910 } else { | 910 } else { |
911 /* free packet */ | 911 /* free packet */ |
912 av_free_packet(&s->cur_pkt); | 912 av_free_packet(&s->cur_pkt); |
913 s->cur_st = NULL; | 913 s->cur_st = NULL; |
914 } | 914 } |
915 } else { | 915 } else { |
916 /* read next packet */ | 916 /* read next packet */ |
917 ret = av_read_packet(s, &s->cur_pkt); | 917 ret = av_read_packet(s, &s->cur_pkt); |
920 return ret; | 920 return ret; |
921 /* return the last frames, if any */ | 921 /* return the last frames, if any */ |
922 for(i = 0; i < s->nb_streams; i++) { | 922 for(i = 0; i < s->nb_streams; i++) { |
923 st = s->streams[i]; | 923 st = s->streams[i]; |
924 if (st->parser && st->need_parsing) { | 924 if (st->parser && st->need_parsing) { |
925 av_parser_parse(st->parser, st->codec, | 925 av_parser_parse(st->parser, st->codec, |
926 &pkt->data, &pkt->size, | 926 &pkt->data, &pkt->size, |
927 NULL, 0, | 927 NULL, 0, |
928 AV_NOPTS_VALUE, AV_NOPTS_VALUE); | 928 AV_NOPTS_VALUE, AV_NOPTS_VALUE); |
929 if (pkt->size) | 929 if (pkt->size) |
930 goto got_packet; | 930 goto got_packet; |
931 } | 931 } |
932 } | 932 } |
933 /* no more packets: really terminates parsing */ | 933 /* no more packets: really terminates parsing */ |
934 return ret; | 934 return ret; |
935 } | 935 } |
936 | 936 |
937 st = s->streams[s->cur_pkt.stream_index]; | 937 st = s->streams[s->cur_pkt.stream_index]; |
938 | 938 |
939 s->cur_st = st; | 939 s->cur_st = st; |
940 s->cur_ptr = s->cur_pkt.data; | 940 s->cur_ptr = s->cur_pkt.data; |
941 s->cur_len = s->cur_pkt.size; | 941 s->cur_len = s->cur_pkt.size; |
960 * must be freed with av_free_packet. For video, the packet contains | 960 * must be freed with av_free_packet. For video, the packet contains |
961 * exactly one frame. For audio, it contains an integer number of | 961 * exactly one frame. For audio, it contains an integer number of |
962 * frames if each frame has a known fixed size (e.g. PCM or ADPCM | 962 * frames if each frame has a known fixed size (e.g. PCM or ADPCM |
963 * data). If the audio frames have a variable size (e.g. MPEG audio), | 963 * data). If the audio frames have a variable size (e.g. MPEG audio), |
964 * then it contains one frame. | 964 * then it contains one frame. |
965 * | 965 * |
966 * pkt->pts, pkt->dts and pkt->duration are always set to correct | 966 * pkt->pts, pkt->dts and pkt->duration are always set to correct |
967 * values in AV_TIME_BASE unit (and guessed if the format cannot | 967 * values in AV_TIME_BASE unit (and guessed if the format cannot |
968 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format | 968 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format |
969 * has B frames, so it is better to rely on pkt->dts if you do not | 969 * has B frames, so it is better to rely on pkt->dts if you do not |
970 * decompress the payload. | 970 * decompress the payload. |
971 * | 971 * |
972 * @return 0 if OK, < 0 if error or end of file. | 972 * @return 0 if OK, < 0 if error or end of file. |
973 */ | 973 */ |
974 int av_read_frame(AVFormatContext *s, AVPacket *pkt) | 974 int av_read_frame(AVFormatContext *s, AVPacket *pkt) |
975 { | 975 { |
976 AVPacketList *pktl; | 976 AVPacketList *pktl; |
982 if (pktl) { | 982 if (pktl) { |
983 AVPacket *next_pkt= &pktl->pkt; | 983 AVPacket *next_pkt= &pktl->pkt; |
984 | 984 |
985 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){ | 985 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){ |
986 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){ | 986 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){ |
987 if( pktl->pkt.stream_index == next_pkt->stream_index | 987 if( pktl->pkt.stream_index == next_pkt->stream_index |
988 && next_pkt->dts < pktl->pkt.dts | 988 && next_pkt->dts < pktl->pkt.dts |
989 && pktl->pkt.pts != pktl->pkt.dts //not b frame | 989 && pktl->pkt.pts != pktl->pkt.dts //not b frame |
990 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){ | 990 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){ |
991 next_pkt->pts= pktl->pkt.dts; | 991 next_pkt->pts= pktl->pkt.dts; |
992 } | 992 } |
993 pktl= pktl->next; | 993 pktl= pktl->next; |
994 } | 994 } |
995 pktl = s->packet_buffer; | 995 pktl = s->packet_buffer; |
996 } | 996 } |
997 | 997 |
998 if( next_pkt->pts != AV_NOPTS_VALUE | 998 if( next_pkt->pts != AV_NOPTS_VALUE |
999 || next_pkt->dts == AV_NOPTS_VALUE | 999 || next_pkt->dts == AV_NOPTS_VALUE |
1000 || !genpts || eof){ | 1000 || !genpts || eof){ |
1001 /* read packet from packet buffer, if there is data */ | 1001 /* read packet from packet buffer, if there is data */ |
1002 *pkt = *next_pkt; | 1002 *pkt = *next_pkt; |
1003 s->packet_buffer = pktl->next; | 1003 s->packet_buffer = pktl->next; |
1004 av_free(pktl); | 1004 av_free(pktl); |
1013 eof=1; | 1013 eof=1; |
1014 continue; | 1014 continue; |
1015 }else | 1015 }else |
1016 return ret; | 1016 return ret; |
1017 } | 1017 } |
1018 | 1018 |
1019 /* duplicate the packet */ | 1019 /* duplicate the packet */ |
1020 if (av_dup_packet(pkt) < 0) | 1020 if (av_dup_packet(pkt) < 0) |
1021 return AVERROR_NOMEM; | 1021 return AVERROR_NOMEM; |
1022 | 1022 |
1023 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last? | 1023 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last? |
1024 | 1024 |
1025 pktl = av_mallocz(sizeof(AVPacketList)); | 1025 pktl = av_mallocz(sizeof(AVPacketList)); |
1026 if (!pktl) | 1026 if (!pktl) |
1027 return AVERROR_NOMEM; | 1027 return AVERROR_NOMEM; |
1028 | 1028 |
1029 /* add the packet in the buffered packet list */ | 1029 /* add the packet in the buffered packet list */ |
1030 *plast_pktl = pktl; | 1030 *plast_pktl = pktl; |
1031 pktl->pkt= *pkt; | 1031 pktl->pkt= *pkt; |
1032 }else{ | 1032 }else{ |
1033 assert(!s->packet_buffer); | 1033 assert(!s->packet_buffer); |
1034 return av_read_frame_internal(s, pkt); | 1034 return av_read_frame_internal(s, pkt); |
1035 } | 1035 } |
1036 } | 1036 } |
1041 { | 1041 { |
1042 AVPacketList *pktl; | 1042 AVPacketList *pktl; |
1043 | 1043 |
1044 for(;;) { | 1044 for(;;) { |
1045 pktl = s->packet_buffer; | 1045 pktl = s->packet_buffer; |
1046 if (!pktl) | 1046 if (!pktl) |
1047 break; | 1047 break; |
1048 s->packet_buffer = pktl->next; | 1048 s->packet_buffer = pktl->next; |
1049 av_free_packet(&pktl->pkt); | 1049 av_free_packet(&pktl->pkt); |
1050 av_free(pktl); | 1050 av_free(pktl); |
1051 } | 1051 } |
1087 s->cur_st = NULL; | 1087 s->cur_st = NULL; |
1088 } | 1088 } |
1089 /* fail safe */ | 1089 /* fail safe */ |
1090 s->cur_ptr = NULL; | 1090 s->cur_ptr = NULL; |
1091 s->cur_len = 0; | 1091 s->cur_len = 0; |
1092 | 1092 |
1093 /* for each stream, reset read state */ | 1093 /* for each stream, reset read state */ |
1094 for(i = 0; i < s->nb_streams; i++) { | 1094 for(i = 0; i < s->nb_streams; i++) { |
1095 st = s->streams[i]; | 1095 st = s->streams[i]; |
1096 | 1096 |
1097 if (st->parser) { | 1097 if (st->parser) { |
1098 av_parser_close(st->parser); | 1098 av_parser_close(st->parser); |
1099 st->parser = NULL; | 1099 st->parser = NULL; |
1100 } | 1100 } |
1101 st->last_IP_pts = AV_NOPTS_VALUE; | 1101 st->last_IP_pts = AV_NOPTS_VALUE; |
1115 int i; | 1115 int i; |
1116 | 1116 |
1117 for(i = 0; i < s->nb_streams; i++) { | 1117 for(i = 0; i < s->nb_streams; i++) { |
1118 AVStream *st = s->streams[i]; | 1118 AVStream *st = s->streams[i]; |
1119 | 1119 |
1120 st->cur_dts = av_rescale(timestamp, | 1120 st->cur_dts = av_rescale(timestamp, |
1121 st->time_base.den * (int64_t)ref_st->time_base.num, | 1121 st->time_base.den * (int64_t)ref_st->time_base.num, |
1122 st->time_base.num * (int64_t)ref_st->time_base.den); | 1122 st->time_base.num * (int64_t)ref_st->time_base.den); |
1123 } | 1123 } |
1124 } | 1124 } |
1125 | 1125 |
1131 int av_add_index_entry(AVStream *st, | 1131 int av_add_index_entry(AVStream *st, |
1132 int64_t pos, int64_t timestamp, int distance, int flags) | 1132 int64_t pos, int64_t timestamp, int distance, int flags) |
1133 { | 1133 { |
1134 AVIndexEntry *entries, *ie; | 1134 AVIndexEntry *entries, *ie; |
1135 int index; | 1135 int index; |
1136 | 1136 |
1137 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) | 1137 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) |
1138 return -1; | 1138 return -1; |
1139 | 1139 |
1140 entries = av_fast_realloc(st->index_entries, | 1140 entries = av_fast_realloc(st->index_entries, |
1141 &st->index_entries_allocated_size, | 1141 &st->index_entries_allocated_size, |
1142 (st->nb_index_entries + 1) * | 1142 (st->nb_index_entries + 1) * |
1143 sizeof(AVIndexEntry)); | 1143 sizeof(AVIndexEntry)); |
1144 if(!entries) | 1144 if(!entries) |
1145 return -1; | 1145 return -1; |
1146 | 1146 |
1147 st->index_entries= entries; | 1147 st->index_entries= entries; |
1165 | 1165 |
1166 ie->pos = pos; | 1166 ie->pos = pos; |
1167 ie->timestamp = timestamp; | 1167 ie->timestamp = timestamp; |
1168 ie->min_distance= distance; | 1168 ie->min_distance= distance; |
1169 ie->flags = flags; | 1169 ie->flags = flags; |
1170 | 1170 |
1171 return index; | 1171 return index; |
1172 } | 1172 } |
1173 | 1173 |
1174 /** | 1174 /** |
1175 * build an index for raw streams using a parser. | 1175 * build an index for raw streams using a parser. |
1188 ret = av_read_frame(s, pkt); | 1188 ret = av_read_frame(s, pkt); |
1189 if (ret < 0) | 1189 if (ret < 0) |
1190 break; | 1190 break; |
1191 if (pkt->stream_index == 0 && st->parser && | 1191 if (pkt->stream_index == 0 && st->parser && |
1192 (pkt->flags & PKT_FLAG_KEY)) { | 1192 (pkt->flags & PKT_FLAG_KEY)) { |
1193 av_add_index_entry(st, st->parser->frame_offset, pkt->dts, | 1193 av_add_index_entry(st, st->parser->frame_offset, pkt->dts, |
1194 0, AVINDEX_KEYFRAME); | 1194 0, AVINDEX_KEYFRAME); |
1195 } | 1195 } |
1196 av_free_packet(pkt); | 1196 av_free_packet(pkt); |
1197 } | 1197 } |
1198 } | 1198 } |
1214 return 1; | 1214 return 1; |
1215 } | 1215 } |
1216 | 1216 |
1217 /** | 1217 /** |
1218 * Gets the index for a specific timestamp. | 1218 * Gets the index for a specific timestamp. |
1219 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to | 1219 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to |
1220 * the timestamp which is <= the requested one, if backward is 0 | 1220 * the timestamp which is <= the requested one, if backward is 0 |
1221 * then it will be >= | 1221 * then it will be >= |
1222 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise | 1222 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise |
1223 * @return < 0 if no such timestamp could be found | 1223 * @return < 0 if no such timestamp could be found |
1224 */ | 1224 */ |
1225 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, | 1225 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, |
1240 b = m; | 1240 b = m; |
1241 if(timestamp <= wanted_timestamp) | 1241 if(timestamp <= wanted_timestamp) |
1242 a = m; | 1242 a = m; |
1243 } | 1243 } |
1244 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b; | 1244 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b; |
1245 | 1245 |
1246 if(!(flags & AVSEEK_FLAG_ANY)){ | 1246 if(!(flags & AVSEEK_FLAG_ANY)){ |
1247 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){ | 1247 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){ |
1248 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; | 1248 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; |
1249 } | 1249 } |
1250 } | 1250 } |
1251 | 1251 |
1252 if(m == nb_entries) | 1252 if(m == nb_entries) |
1253 return -1; | 1253 return -1; |
1254 return m; | 1254 return m; |
1255 } | 1255 } |
1256 | 1256 |
1257 #define DEBUG_SEEK | 1257 #define DEBUG_SEEK |
1270 int index, no_change; | 1270 int index, no_change; |
1271 AVStream *st; | 1271 AVStream *st; |
1272 | 1272 |
1273 if (stream_index < 0) | 1273 if (stream_index < 0) |
1274 return -1; | 1274 return -1; |
1275 | 1275 |
1276 #ifdef DEBUG_SEEK | 1276 #ifdef DEBUG_SEEK |
1277 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts); | 1277 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts); |
1278 #endif | 1278 #endif |
1279 | 1279 |
1280 ts_max= | 1280 ts_max= |
1291 | 1291 |
1292 if(e->timestamp <= target_ts || e->pos == e->min_distance){ | 1292 if(e->timestamp <= target_ts || e->pos == e->min_distance){ |
1293 pos_min= e->pos; | 1293 pos_min= e->pos; |
1294 ts_min= e->timestamp; | 1294 ts_min= e->timestamp; |
1295 #ifdef DEBUG_SEEK | 1295 #ifdef DEBUG_SEEK |
1296 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n", | 1296 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n", |
1297 pos_min,ts_min); | 1297 pos_min,ts_min); |
1298 #endif | 1298 #endif |
1299 }else{ | 1299 }else{ |
1300 assert(index==0); | 1300 assert(index==0); |
1301 } | 1301 } |
1302 | 1302 |
1303 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD); | 1303 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD); |
1304 assert(index < st->nb_index_entries); | 1304 assert(index < st->nb_index_entries); |
1305 if(index >= 0){ | 1305 if(index >= 0){ |
1306 e= &st->index_entries[index]; | 1306 e= &st->index_entries[index]; |
1307 assert(e->timestamp >= target_ts); | 1307 assert(e->timestamp >= target_ts); |
1308 pos_max= e->pos; | 1308 pos_max= e->pos; |
1309 ts_max= e->timestamp; | 1309 ts_max= e->timestamp; |
1310 pos_limit= pos_max - e->min_distance; | 1310 pos_limit= pos_max - e->min_distance; |
1311 #ifdef DEBUG_SEEK | 1311 #ifdef DEBUG_SEEK |
1312 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n", | 1312 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n", |
1313 pos_max,pos_limit, ts_max); | 1313 pos_max,pos_limit, ts_max); |
1314 #endif | 1314 #endif |
1315 } | 1315 } |
1316 } | 1316 } |
1317 | 1317 |
1331 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step); | 1331 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step); |
1332 step += step; | 1332 step += step; |
1333 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step); | 1333 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step); |
1334 if (ts_max == AV_NOPTS_VALUE) | 1334 if (ts_max == AV_NOPTS_VALUE) |
1335 return -1; | 1335 return -1; |
1336 | 1336 |
1337 for(;;){ | 1337 for(;;){ |
1338 int64_t tmp_pos= pos_max + 1; | 1338 int64_t tmp_pos= pos_max + 1; |
1339 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX); | 1339 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX); |
1340 if(tmp_ts == AV_NOPTS_VALUE) | 1340 if(tmp_ts == AV_NOPTS_VALUE) |
1341 break; | 1341 break; |
1348 } | 1348 } |
1349 | 1349 |
1350 no_change=0; | 1350 no_change=0; |
1351 while (pos_min < pos_limit) { | 1351 while (pos_min < pos_limit) { |
1352 #ifdef DEBUG_SEEK | 1352 #ifdef DEBUG_SEEK |
1353 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n", | 1353 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n", |
1354 pos_min, pos_max, | 1354 pos_min, pos_max, |
1355 ts_min, ts_max); | 1355 ts_min, ts_max); |
1356 #endif | 1356 #endif |
1357 assert(pos_limit <= pos_max); | 1357 assert(pos_limit <= pos_max); |
1358 | 1358 |
1391 if (target_ts >= ts) { | 1391 if (target_ts >= ts) { |
1392 pos_min = pos; | 1392 pos_min = pos; |
1393 ts_min = ts; | 1393 ts_min = ts; |
1394 } | 1394 } |
1395 } | 1395 } |
1396 | 1396 |
1397 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; | 1397 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; |
1398 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max; | 1398 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max; |
1399 #ifdef DEBUG_SEEK | 1399 #ifdef DEBUG_SEEK |
1400 pos_min = pos; | 1400 pos_min = pos; |
1401 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX); | 1401 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX); |
1402 pos_min++; | 1402 pos_min++; |
1403 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX); | 1403 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX); |
1404 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n", | 1404 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n", |
1405 pos, ts_min, target_ts, ts_max); | 1405 pos, ts_min, target_ts, ts_max); |
1406 #endif | 1406 #endif |
1407 /* do the seek */ | 1407 /* do the seek */ |
1408 url_fseek(&s->pb, pos, SEEK_SET); | 1408 url_fseek(&s->pb, pos, SEEK_SET); |
1409 | 1409 |
1435 av_update_cur_dts(s, st, ts); | 1435 av_update_cur_dts(s, st, ts); |
1436 #endif | 1436 #endif |
1437 return 0; | 1437 return 0; |
1438 } | 1438 } |
1439 | 1439 |
1440 static int av_seek_frame_generic(AVFormatContext *s, | 1440 static int av_seek_frame_generic(AVFormatContext *s, |
1441 int stream_index, int64_t timestamp, int flags) | 1441 int stream_index, int64_t timestamp, int flags) |
1442 { | 1442 { |
1443 int index; | 1443 int index; |
1444 AVStream *st; | 1444 AVStream *st; |
1445 AVIndexEntry *ie; | 1445 AVIndexEntry *ie; |
1470 | 1470 |
1471 /** | 1471 /** |
1472 * Seek to the key frame at timestamp. | 1472 * Seek to the key frame at timestamp. |
1473 * 'timestamp' in 'stream_index'. | 1473 * 'timestamp' in 'stream_index'. |
1474 * @param stream_index If stream_index is (-1), a default | 1474 * @param stream_index If stream_index is (-1), a default |
1475 * stream is selected, and timestamp is automatically converted | 1475 * stream is selected, and timestamp is automatically converted |
1476 * from AV_TIME_BASE units to the stream specific time_base. | 1476 * from AV_TIME_BASE units to the stream specific time_base. |
1477 * @param timestamp timestamp in AVStream.time_base units | 1477 * @param timestamp timestamp in AVStream.time_base units |
1478 * or if there is no stream specified then in AV_TIME_BASE units | 1478 * or if there is no stream specified then in AV_TIME_BASE units |
1479 * @param flags flags which select direction and seeking mode | 1479 * @param flags flags which select direction and seeking mode |
1480 * @return >= 0 on success | 1480 * @return >= 0 on success |
1481 */ | 1481 */ |
1482 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) | 1482 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) |
1483 { | 1483 { |
1484 int ret; | 1484 int ret; |
1485 AVStream *st; | 1485 AVStream *st; |
1486 | 1486 |
1487 av_read_frame_flush(s); | 1487 av_read_frame_flush(s); |
1488 | 1488 |
1489 if(flags & AVSEEK_FLAG_BYTE) | 1489 if(flags & AVSEEK_FLAG_BYTE) |
1490 return av_seek_frame_byte(s, stream_index, timestamp, flags); | 1490 return av_seek_frame_byte(s, stream_index, timestamp, flags); |
1491 | 1491 |
1492 if(stream_index < 0){ | 1492 if(stream_index < 0){ |
1493 stream_index= av_find_default_stream_index(s); | 1493 stream_index= av_find_default_stream_index(s); |
1494 if(stream_index < 0) | 1494 if(stream_index < 0) |
1495 return -1; | 1495 return -1; |
1496 | 1496 |
1497 st= s->streams[stream_index]; | 1497 st= s->streams[stream_index]; |
1498 /* timestamp for default must be expressed in AV_TIME_BASE units */ | 1498 /* timestamp for default must be expressed in AV_TIME_BASE units */ |
1499 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); | 1499 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); |
1500 } | 1500 } |
1501 st= s->streams[stream_index]; | 1501 st= s->streams[stream_index]; |
1567 ic->start_time = start_time; | 1567 ic->start_time = start_time; |
1568 if (end_time != MININT64) { | 1568 if (end_time != MININT64) { |
1569 ic->duration = end_time - start_time; | 1569 ic->duration = end_time - start_time; |
1570 if (ic->file_size > 0) { | 1570 if (ic->file_size > 0) { |
1571 /* compute the bit rate */ | 1571 /* compute the bit rate */ |
1572 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / | 1572 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / |
1573 (double)ic->duration; | 1573 (double)ic->duration; |
1574 } | 1574 } |
1575 } | 1575 } |
1576 } | 1576 } |
1577 | 1577 |
1609 } | 1609 } |
1610 ic->bit_rate = bit_rate; | 1610 ic->bit_rate = bit_rate; |
1611 } | 1611 } |
1612 | 1612 |
1613 /* if duration is already set, we believe it */ | 1613 /* if duration is already set, we believe it */ |
1614 if (ic->duration == AV_NOPTS_VALUE && | 1614 if (ic->duration == AV_NOPTS_VALUE && |
1615 ic->bit_rate != 0 && | 1615 ic->bit_rate != 0 && |
1616 ic->file_size != 0) { | 1616 ic->file_size != 0) { |
1617 filesize = ic->file_size; | 1617 filesize = ic->file_size; |
1618 if (filesize > 0) { | 1618 if (filesize > 0) { |
1619 for(i = 0; i < ic->nb_streams; i++) { | 1619 for(i = 0; i < ic->nb_streams; i++) { |
1620 st = ic->streams[i]; | 1620 st = ic->streams[i]; |
1637 AVPacket pkt1, *pkt = &pkt1; | 1637 AVPacket pkt1, *pkt = &pkt1; |
1638 AVStream *st; | 1638 AVStream *st; |
1639 int read_size, i, ret; | 1639 int read_size, i, ret; |
1640 int64_t end_time; | 1640 int64_t end_time; |
1641 int64_t filesize, offset, duration; | 1641 int64_t filesize, offset, duration; |
1642 | 1642 |
1643 /* free previous packet */ | 1643 /* free previous packet */ |
1644 if (ic->cur_st && ic->cur_st->parser) | 1644 if (ic->cur_st && ic->cur_st->parser) |
1645 av_free_packet(&ic->cur_pkt); | 1645 av_free_packet(&ic->cur_pkt); |
1646 ic->cur_st = NULL; | 1646 ic->cur_st = NULL; |
1647 | 1647 |
1648 /* flush packet queue */ | 1648 /* flush packet queue */ |
1649 flush_packet_queue(ic); | 1649 flush_packet_queue(ic); |
1650 | 1650 |
1653 if (st->parser) { | 1653 if (st->parser) { |
1654 av_parser_close(st->parser); | 1654 av_parser_close(st->parser); |
1655 st->parser= NULL; | 1655 st->parser= NULL; |
1656 } | 1656 } |
1657 } | 1657 } |
1658 | 1658 |
1659 /* we read the first packets to get the first PTS (not fully | 1659 /* we read the first packets to get the first PTS (not fully |
1660 accurate, but it is enough now) */ | 1660 accurate, but it is enough now) */ |
1661 url_fseek(&ic->pb, 0, SEEK_SET); | 1661 url_fseek(&ic->pb, 0, SEEK_SET); |
1662 read_size = 0; | 1662 read_size = 0; |
1663 for(;;) { | 1663 for(;;) { |
1702 if (st->duration == AV_NOPTS_VALUE) | 1702 if (st->duration == AV_NOPTS_VALUE) |
1703 break; | 1703 break; |
1704 } | 1704 } |
1705 if (i == ic->nb_streams) | 1705 if (i == ic->nb_streams) |
1706 break; | 1706 break; |
1707 | 1707 |
1708 ret = av_read_packet(ic, pkt); | 1708 ret = av_read_packet(ic, pkt); |
1709 if (ret != 0) | 1709 if (ret != 0) |
1710 break; | 1710 break; |
1711 read_size += pkt->size; | 1711 read_size += pkt->size; |
1712 st = ic->streams[pkt->stream_index]; | 1712 st = ic->streams[pkt->stream_index]; |
1719 st->duration = duration; | 1719 st->duration = duration; |
1720 } | 1720 } |
1721 } | 1721 } |
1722 av_free_packet(pkt); | 1722 av_free_packet(pkt); |
1723 } | 1723 } |
1724 | 1724 |
1725 fill_all_stream_timings(ic); | 1725 fill_all_stream_timings(ic); |
1726 | 1726 |
1727 url_fseek(&ic->pb, 0, SEEK_SET); | 1727 url_fseek(&ic->pb, 0, SEEK_SET); |
1728 } | 1728 } |
1729 | 1729 |
1758 { | 1758 { |
1759 int i; | 1759 int i; |
1760 AVStream *st; | 1760 AVStream *st; |
1761 for(i = 0;i < ic->nb_streams; i++) { | 1761 for(i = 0;i < ic->nb_streams; i++) { |
1762 st = ic->streams[i]; | 1762 st = ic->streams[i]; |
1763 printf("%d: start_time: %0.3f duration: %0.3f\n", | 1763 printf("%d: start_time: %0.3f duration: %0.3f\n", |
1764 i, (double)st->start_time / AV_TIME_BASE, | 1764 i, (double)st->start_time / AV_TIME_BASE, |
1765 (double)st->duration / AV_TIME_BASE); | 1765 (double)st->duration / AV_TIME_BASE); |
1766 } | 1766 } |
1767 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", | 1767 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", |
1768 (double)ic->start_time / AV_TIME_BASE, | 1768 (double)ic->start_time / AV_TIME_BASE, |
1769 (double)ic->duration / AV_TIME_BASE, | 1769 (double)ic->duration / AV_TIME_BASE, |
1770 ic->bit_rate / 1000); | 1770 ic->bit_rate / 1000); |
1771 } | 1771 } |
1772 #endif | 1772 #endif |
1773 } | 1773 } |
1793 { | 1793 { |
1794 int16_t *samples; | 1794 int16_t *samples; |
1795 AVCodec *codec; | 1795 AVCodec *codec; |
1796 int got_picture, ret=0; | 1796 int got_picture, ret=0; |
1797 AVFrame picture; | 1797 AVFrame picture; |
1798 | 1798 |
1799 if(!st->codec->codec){ | 1799 if(!st->codec->codec){ |
1800 codec = avcodec_find_decoder(st->codec->codec_id); | 1800 codec = avcodec_find_decoder(st->codec->codec_id); |
1801 if (!codec) | 1801 if (!codec) |
1802 return -1; | 1802 return -1; |
1803 ret = avcodec_open(st->codec, codec); | 1803 ret = avcodec_open(st->codec, codec); |
1806 } | 1806 } |
1807 | 1807 |
1808 if(!has_codec_parameters(st->codec)){ | 1808 if(!has_codec_parameters(st->codec)){ |
1809 switch(st->codec->codec_type) { | 1809 switch(st->codec->codec_type) { |
1810 case CODEC_TYPE_VIDEO: | 1810 case CODEC_TYPE_VIDEO: |
1811 ret = avcodec_decode_video(st->codec, &picture, | 1811 ret = avcodec_decode_video(st->codec, &picture, |
1812 &got_picture, (uint8_t *)data, size); | 1812 &got_picture, (uint8_t *)data, size); |
1813 break; | 1813 break; |
1814 case CODEC_TYPE_AUDIO: | 1814 case CODEC_TYPE_AUDIO: |
1815 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); | 1815 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
1816 if (!samples) | 1816 if (!samples) |
1817 goto fail; | 1817 goto fail; |
1818 ret = avcodec_decode_audio(st->codec, samples, | 1818 ret = avcodec_decode_audio(st->codec, samples, |
1819 &got_picture, (uint8_t *)data, size); | 1819 &got_picture, (uint8_t *)data, size); |
1820 av_free(samples); | 1820 av_free(samples); |
1821 break; | 1821 break; |
1822 default: | 1822 default: |
1823 break; | 1823 break; |
1838 * is useful for file formats with no headers such as MPEG. This | 1838 * is useful for file formats with no headers such as MPEG. This |
1839 * function also computes the real frame rate in case of mpeg2 repeat | 1839 * function also computes the real frame rate in case of mpeg2 repeat |
1840 * frame mode. | 1840 * frame mode. |
1841 * | 1841 * |
1842 * @param ic media file handle | 1842 * @param ic media file handle |
1843 * @return >=0 if OK. AVERROR_xxx if error. | 1843 * @return >=0 if OK. AVERROR_xxx if error. |
1844 * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need | 1844 * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need |
1845 */ | 1845 */ |
1846 int av_find_stream_info(AVFormatContext *ic) | 1846 int av_find_stream_info(AVFormatContext *ic) |
1847 { | 1847 { |
1848 int i, count, ret, read_size; | 1848 int i, count, ret, read_size; |
1872 | 1872 |
1873 for(i=0;i<MAX_STREAMS;i++){ | 1873 for(i=0;i<MAX_STREAMS;i++){ |
1874 last_dts[i]= AV_NOPTS_VALUE; | 1874 last_dts[i]= AV_NOPTS_VALUE; |
1875 duration_sum[i]= INT64_MAX; | 1875 duration_sum[i]= INT64_MAX; |
1876 } | 1876 } |
1877 | 1877 |
1878 count = 0; | 1878 count = 0; |
1879 read_size = 0; | 1879 read_size = 0; |
1880 ppktl = &ic->packet_buffer; | 1880 ppktl = &ic->packet_buffer; |
1881 for(;;) { | 1881 for(;;) { |
1882 /* check if one codec still needs to be handled */ | 1882 /* check if one codec still needs to be handled */ |
1934 *ppktl = pktl; | 1934 *ppktl = pktl; |
1935 ppktl = &pktl->next; | 1935 ppktl = &pktl->next; |
1936 | 1936 |
1937 pkt = &pktl->pkt; | 1937 pkt = &pktl->pkt; |
1938 *pkt = pkt1; | 1938 *pkt = pkt1; |
1939 | 1939 |
1940 /* duplicate the packet */ | 1940 /* duplicate the packet */ |
1941 if (av_dup_packet(pkt) < 0) { | 1941 if (av_dup_packet(pkt) < 0) { |
1942 ret = AVERROR_NOMEM; | 1942 ret = AVERROR_NOMEM; |
1943 break; | 1943 break; |
1944 } | 1944 } |
1975 st->codec->extradata_size= i; | 1975 st->codec->extradata_size= i; |
1976 st->codec->extradata= av_malloc(st->codec->extradata_size); | 1976 st->codec->extradata= av_malloc(st->codec->extradata_size); |
1977 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); | 1977 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); |
1978 } | 1978 } |
1979 } | 1979 } |
1980 | 1980 |
1981 /* if still no information, we try to open the codec and to | 1981 /* if still no information, we try to open the codec and to |
1982 decompress the frame. We try to avoid that in most cases as | 1982 decompress the frame. We try to avoid that in most cases as |
1983 it takes longer and uses more memory. For MPEG4, we need to | 1983 it takes longer and uses more memory. For MPEG4, we need to |
1984 decompress for Quicktime. */ | 1984 decompress for Quicktime. */ |
1985 if (!has_codec_parameters(st->codec) /*&& | 1985 if (!has_codec_parameters(st->codec) /*&& |
1996 st->codec->codec_id == CODEC_ID_PBM || | 1996 st->codec->codec_id == CODEC_ID_PBM || |
1997 st->codec->codec_id == CODEC_ID_PPM || | 1997 st->codec->codec_id == CODEC_ID_PPM || |
1998 st->codec->codec_id == CODEC_ID_SHORTEN || | 1998 st->codec->codec_id == CODEC_ID_SHORTEN || |
1999 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/) | 1999 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/) |
2000 try_decode_frame(st, pkt->data, pkt->size); | 2000 try_decode_frame(st, pkt->data, pkt->size); |
2001 | 2001 |
2002 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) { | 2002 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) { |
2003 break; | 2003 break; |
2004 } | 2004 } |
2005 count++; | 2005 count++; |
2006 } | 2006 } |
2022 AVRational fps1; | 2022 AVRational fps1; |
2023 int64_t num, den; | 2023 int64_t num, den; |
2024 | 2024 |
2025 num= st->time_base.den*duration_count[i]; | 2025 num= st->time_base.den*duration_count[i]; |
2026 den= st->time_base.num*duration_sum[i]; | 2026 den= st->time_base.num*duration_sum[i]; |
2027 | 2027 |
2028 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4); | 2028 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4); |
2029 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4); | 2029 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4); |
2030 if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decission | 2030 if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decission |
2031 st->r_frame_rate.num= fps1.num*1000; | 2031 st->r_frame_rate.num= fps1.num*1000; |
2032 st->r_frame_rate.den= fps1.den*1001; | 2032 st->r_frame_rate.den= fps1.den*1001; |
2038 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO || | 2038 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO || |
2039 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) && | 2039 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) && |
2040 st->codec->sub_id == 2) { | 2040 st->codec->sub_id == 2) { |
2041 if (st->codec_info_nb_frames >= 20) { | 2041 if (st->codec_info_nb_frames >= 20) { |
2042 float coded_frame_rate, est_frame_rate; | 2042 float coded_frame_rate, est_frame_rate; |
2043 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) / | 2043 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) / |
2044 (double)st->codec_info_duration ; | 2044 (double)st->codec_info_duration ; |
2045 coded_frame_rate = 1.0/av_q2d(st->codec->time_base); | 2045 coded_frame_rate = 1.0/av_q2d(st->codec->time_base); |
2046 #if 0 | 2046 #if 0 |
2047 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n", | 2047 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n", |
2048 coded_frame_rate, est_frame_rate); | 2048 coded_frame_rate, est_frame_rate); |
2049 #endif | 2049 #endif |
2050 /* if we detect that it could be a telecine, we | 2050 /* if we detect that it could be a telecine, we |
2051 signal it. It would be better to do it at a | 2051 signal it. It would be better to do it at a |
2052 higher level as it can change in a film */ | 2052 higher level as it can change in a film */ |
2053 if (coded_frame_rate >= 24.97 && | 2053 if (coded_frame_rate >= 24.97 && |
2054 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) { | 2054 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) { |
2055 st->r_frame_rate = (AVRational){24000, 1001}; | 2055 st->r_frame_rate = (AVRational){24000, 1001}; |
2056 } | 2056 } |
2057 } | 2057 } |
2058 } | 2058 } |
2094 | 2094 |
2095 /*******************************************************/ | 2095 /*******************************************************/ |
2096 | 2096 |
2097 /** | 2097 /** |
2098 * start playing a network based stream (e.g. RTSP stream) at the | 2098 * start playing a network based stream (e.g. RTSP stream) at the |
2099 * current position | 2099 * current position |
2100 */ | 2100 */ |
2101 int av_read_play(AVFormatContext *s) | 2101 int av_read_play(AVFormatContext *s) |
2102 { | 2102 { |
2103 if (!s->iformat->read_play) | 2103 if (!s->iformat->read_play) |
2104 return AVERROR_NOTSUPP; | 2104 return AVERROR_NOTSUPP; |
2127 int i, must_open_file; | 2127 int i, must_open_file; |
2128 AVStream *st; | 2128 AVStream *st; |
2129 | 2129 |
2130 /* free previous packet */ | 2130 /* free previous packet */ |
2131 if (s->cur_st && s->cur_st->parser) | 2131 if (s->cur_st && s->cur_st->parser) |
2132 av_free_packet(&s->cur_pkt); | 2132 av_free_packet(&s->cur_pkt); |
2133 | 2133 |
2134 if (s->iformat->read_close) | 2134 if (s->iformat->read_close) |
2135 s->iformat->read_close(s); | 2135 s->iformat->read_close(s); |
2136 for(i=0;i<s->nb_streams;i++) { | 2136 for(i=0;i<s->nb_streams;i++) { |
2137 /* free all data in a stream component */ | 2137 /* free all data in a stream component */ |
2161 * Can only be called in the read_header() function. If the flag | 2161 * Can only be called in the read_header() function. If the flag |
2162 * AVFMTCTX_NOHEADER is in the format context, then new streams | 2162 * AVFMTCTX_NOHEADER is in the format context, then new streams |
2163 * can be added in read_packet too. | 2163 * can be added in read_packet too. |
2164 * | 2164 * |
2165 * @param s media file handle | 2165 * @param s media file handle |
2166 * @param id file format dependent stream id | 2166 * @param id file format dependent stream id |
2167 */ | 2167 */ |
2168 AVStream *av_new_stream(AVFormatContext *s, int id) | 2168 AVStream *av_new_stream(AVFormatContext *s, int id) |
2169 { | 2169 { |
2170 AVStream *st; | 2170 AVStream *st; |
2171 | 2171 |
2173 return NULL; | 2173 return NULL; |
2174 | 2174 |
2175 st = av_mallocz(sizeof(AVStream)); | 2175 st = av_mallocz(sizeof(AVStream)); |
2176 if (!st) | 2176 if (!st) |
2177 return NULL; | 2177 return NULL; |
2178 | 2178 |
2179 st->codec= avcodec_alloc_context(); | 2179 st->codec= avcodec_alloc_context(); |
2180 if (s->iformat) { | 2180 if (s->iformat) { |
2181 /* no default bitrate if decoding */ | 2181 /* no default bitrate if decoding */ |
2182 st->codec->bit_rate = 0; | 2182 st->codec->bit_rate = 0; |
2183 } | 2183 } |
2199 /* output media file */ | 2199 /* output media file */ |
2200 | 2200 |
2201 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) | 2201 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) |
2202 { | 2202 { |
2203 int ret; | 2203 int ret; |
2204 | 2204 |
2205 if (s->oformat->priv_data_size > 0) { | 2205 if (s->oformat->priv_data_size > 0) { |
2206 s->priv_data = av_mallocz(s->oformat->priv_data_size); | 2206 s->priv_data = av_mallocz(s->oformat->priv_data_size); |
2207 if (!s->priv_data) | 2207 if (!s->priv_data) |
2208 return AVERROR_NOMEM; | 2208 return AVERROR_NOMEM; |
2209 } else | 2209 } else |
2210 s->priv_data = NULL; | 2210 s->priv_data = NULL; |
2211 | 2211 |
2212 if (s->oformat->set_parameters) { | 2212 if (s->oformat->set_parameters) { |
2213 ret = s->oformat->set_parameters(s, ap); | 2213 ret = s->oformat->set_parameters(s, ap); |
2214 if (ret < 0) | 2214 if (ret < 0) |
2215 return ret; | 2215 return ret; |
2216 } | 2216 } |
2220 /** | 2220 /** |
2221 * allocate the stream private data and write the stream header to an | 2221 * allocate the stream private data and write the stream header to an |
2222 * output media file | 2222 * output media file |
2223 * | 2223 * |
2224 * @param s media file handle | 2224 * @param s media file handle |
2225 * @return 0 if OK. AVERROR_xxx if error. | 2225 * @return 0 if OK. AVERROR_xxx if error. |
2226 */ | 2226 */ |
2227 int av_write_header(AVFormatContext *s) | 2227 int av_write_header(AVFormatContext *s) |
2228 { | 2228 { |
2229 int ret, i; | 2229 int ret, i; |
2230 AVStream *st; | 2230 AVStream *st; |
2287 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){ | 2287 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){ |
2288 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames); | 2288 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames); |
2289 int num, den, frame_size; | 2289 int num, den, frame_size; |
2290 | 2290 |
2291 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index); | 2291 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index); |
2292 | 2292 |
2293 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) | 2293 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) |
2294 return -1;*/ | 2294 return -1;*/ |
2295 | 2295 |
2296 /* duration field */ | 2296 /* duration field */ |
2297 if (pkt->duration == 0) { | 2297 if (pkt->duration == 0) { |
2298 compute_frame_duration(&num, &den, st, NULL, pkt); | 2298 compute_frame_duration(&num, &den, st, NULL, pkt); |
2299 if (den && num) { | 2299 if (den && num) { |
2300 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num); | 2300 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num); |
2306 pkt->dts= | 2306 pkt->dts= |
2307 // pkt->pts= st->cur_dts; | 2307 // pkt->pts= st->cur_dts; |
2308 pkt->pts= st->pts.val; | 2308 pkt->pts= st->pts.val; |
2309 } | 2309 } |
2310 | 2310 |
2311 //calculate dts from pts | 2311 //calculate dts from pts |
2312 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){ | 2312 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){ |
2313 if(b_frames){ | 2313 if(b_frames){ |
2314 if(st->last_IP_pts == AV_NOPTS_VALUE){ | 2314 if(st->last_IP_pts == AV_NOPTS_VALUE){ |
2315 st->last_IP_pts= -pkt->duration; | 2315 st->last_IP_pts= -pkt->duration; |
2316 } | 2316 } |
2320 }else | 2320 }else |
2321 pkt->dts= pkt->pts; | 2321 pkt->dts= pkt->pts; |
2322 }else | 2322 }else |
2323 pkt->dts= pkt->pts; | 2323 pkt->dts= pkt->pts; |
2324 } | 2324 } |
2325 | 2325 |
2326 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ | 2326 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ |
2327 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts); | 2327 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts); |
2328 return -1; | 2328 return -1; |
2329 } | 2329 } |
2330 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ | 2330 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ |
2356 return 0; | 2356 return 0; |
2357 } | 2357 } |
2358 | 2358 |
2359 static void truncate_ts(AVStream *st, AVPacket *pkt){ | 2359 static void truncate_ts(AVStream *st, AVPacket *pkt){ |
2360 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1; | 2360 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1; |
2361 | 2361 |
2362 // if(pkt->dts < 0) | 2362 // if(pkt->dts < 0) |
2363 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here | 2363 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here |
2364 | 2364 |
2365 pkt->pts &= pts_mask; | 2365 pkt->pts &= pts_mask; |
2366 pkt->dts &= pts_mask; | 2366 pkt->dts &= pts_mask; |
2367 } | 2367 } |
2368 | 2368 |
2369 /** | 2369 /** |
2380 int ret; | 2380 int ret; |
2381 | 2381 |
2382 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt); | 2382 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt); |
2383 if(ret<0) | 2383 if(ret<0) |
2384 return ret; | 2384 return ret; |
2385 | 2385 |
2386 truncate_ts(s->streams[pkt->stream_index], pkt); | 2386 truncate_ts(s->streams[pkt->stream_index], pkt); |
2387 | 2387 |
2388 ret= s->oformat->write_packet(s, pkt); | 2388 ret= s->oformat->write_packet(s, pkt); |
2389 if(!ret) | 2389 if(!ret) |
2390 ret= url_ferror(&s->pb); | 2390 ret= url_ferror(&s->pb); |
2391 return ret; | 2391 return ret; |
2392 } | 2392 } |
2393 | 2393 |
2394 /** | 2394 /** |
2395 * interleave_packet implementation which will interleave per DTS. | 2395 * interleave_packet implementation which will interleave per DTS. |
2396 * packets with pkt->destruct == av_destruct_packet will be freed inside this function. | 2396 * packets with pkt->destruct == av_destruct_packet will be freed inside this function. |
2397 * so they cannot be used after it, note calling av_free_packet() on them is still safe | 2397 * so they cannot be used after it, note calling av_free_packet() on them is still safe |
2398 */ | 2398 */ |
2399 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ | 2399 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ |
2400 AVPacketList *pktl, **next_point, *this_pktl; | 2400 AVPacketList *pktl, **next_point, *this_pktl; |
2401 int stream_count=0; | 2401 int stream_count=0; |
2423 next_point= &(*next_point)->next; | 2423 next_point= &(*next_point)->next; |
2424 } | 2424 } |
2425 this_pktl->next= *next_point; | 2425 this_pktl->next= *next_point; |
2426 *next_point= this_pktl; | 2426 *next_point= this_pktl; |
2427 } | 2427 } |
2428 | 2428 |
2429 memset(streams, 0, sizeof(streams)); | 2429 memset(streams, 0, sizeof(streams)); |
2430 pktl= s->packet_buffer; | 2430 pktl= s->packet_buffer; |
2431 while(pktl){ | 2431 while(pktl){ |
2432 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts); | 2432 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts); |
2433 if(streams[ pktl->pkt.stream_index ] == 0) | 2433 if(streams[ pktl->pkt.stream_index ] == 0) |
2434 stream_count++; | 2434 stream_count++; |
2435 streams[ pktl->pkt.stream_index ]++; | 2435 streams[ pktl->pkt.stream_index ]++; |
2436 pktl= pktl->next; | 2436 pktl= pktl->next; |
2437 } | 2437 } |
2438 | 2438 |
2439 if(s->nb_streams == stream_count || (flush && stream_count)){ | 2439 if(s->nb_streams == stream_count || (flush && stream_count)){ |
2440 pktl= s->packet_buffer; | 2440 pktl= s->packet_buffer; |
2441 *out= pktl->pkt; | 2441 *out= pktl->pkt; |
2442 | 2442 |
2443 s->packet_buffer= pktl->next; | 2443 s->packet_buffer= pktl->next; |
2444 av_freep(&pktl); | 2444 av_freep(&pktl); |
2445 return 1; | 2445 return 1; |
2446 }else{ | 2446 }else{ |
2447 av_init_packet(out); | 2447 av_init_packet(out); |
2448 return 0; | 2448 return 0; |
2453 * Interleaves an AVPacket correctly so it can be muxed. | 2453 * Interleaves an AVPacket correctly so it can be muxed. |
2454 * @param out the interleaved packet will be output here | 2454 * @param out the interleaved packet will be output here |
2455 * @param in the input packet | 2455 * @param in the input packet |
2456 * @param flush 1 if no further packets are available as input and all | 2456 * @param flush 1 if no further packets are available as input and all |
2457 * remaining packets should be output | 2457 * remaining packets should be output |
2458 * @return 1 if a packet was output, 0 if no packet could be output, | 2458 * @return 1 if a packet was output, 0 if no packet could be output, |
2459 * < 0 if an error occurred | 2459 * < 0 if an error occurred |
2460 */ | 2460 */ |
2461 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ | 2461 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ |
2462 if(s->oformat->interleave_packet) | 2462 if(s->oformat->interleave_packet) |
2463 return s->oformat->interleave_packet(s, out, in, flush); | 2463 return s->oformat->interleave_packet(s, out, in, flush); |
2487 return 0; | 2487 return 0; |
2488 | 2488 |
2489 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts); | 2489 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts); |
2490 if(compute_pkt_fields2(st, pkt) < 0) | 2490 if(compute_pkt_fields2(st, pkt) < 0) |
2491 return -1; | 2491 return -1; |
2492 | 2492 |
2493 if(pkt->dts == AV_NOPTS_VALUE) | 2493 if(pkt->dts == AV_NOPTS_VALUE) |
2494 return -1; | 2494 return -1; |
2495 | 2495 |
2496 for(;;){ | 2496 for(;;){ |
2497 AVPacket opkt; | 2497 AVPacket opkt; |
2498 int ret= av_interleave_packet(s, &opkt, pkt, 0); | 2498 int ret= av_interleave_packet(s, &opkt, pkt, 0); |
2499 if(ret<=0) //FIXME cleanup needed for ret<0 ? | 2499 if(ret<=0) //FIXME cleanup needed for ret<0 ? |
2500 return ret; | 2500 return ret; |
2501 | 2501 |
2502 truncate_ts(s->streams[opkt.stream_index], &opkt); | 2502 truncate_ts(s->streams[opkt.stream_index], &opkt); |
2503 ret= s->oformat->write_packet(s, &opkt); | 2503 ret= s->oformat->write_packet(s, &opkt); |
2504 | 2504 |
2505 av_free_packet(&opkt); | 2505 av_free_packet(&opkt); |
2506 pkt= NULL; | 2506 pkt= NULL; |
2507 | 2507 |
2508 if(ret<0) | 2508 if(ret<0) |
2509 return ret; | 2509 return ret; |
2510 if(url_ferror(&s->pb)) | 2510 if(url_ferror(&s->pb)) |
2511 return url_ferror(&s->pb); | 2511 return url_ferror(&s->pb); |
2512 } | 2512 } |
2520 * @return 0 if OK. AVERROR_xxx if error. | 2520 * @return 0 if OK. AVERROR_xxx if error. |
2521 */ | 2521 */ |
2522 int av_write_trailer(AVFormatContext *s) | 2522 int av_write_trailer(AVFormatContext *s) |
2523 { | 2523 { |
2524 int ret, i; | 2524 int ret, i; |
2525 | 2525 |
2526 for(;;){ | 2526 for(;;){ |
2527 AVPacket pkt; | 2527 AVPacket pkt; |
2528 ret= av_interleave_packet(s, &pkt, NULL, 1); | 2528 ret= av_interleave_packet(s, &pkt, NULL, 1); |
2529 if(ret<0) //FIXME cleanup needed for ret<0 ? | 2529 if(ret<0) //FIXME cleanup needed for ret<0 ? |
2530 goto fail; | 2530 goto fail; |
2531 if(!ret) | 2531 if(!ret) |
2532 break; | 2532 break; |
2533 | 2533 |
2534 truncate_ts(s->streams[pkt.stream_index], &pkt); | 2534 truncate_ts(s->streams[pkt.stream_index], &pkt); |
2535 ret= s->oformat->write_packet(s, &pkt); | 2535 ret= s->oformat->write_packet(s, &pkt); |
2536 | 2536 |
2537 av_free_packet(&pkt); | 2537 av_free_packet(&pkt); |
2538 | 2538 |
2539 if(ret<0) | 2539 if(ret<0) |
2540 goto fail; | 2540 goto fail; |
2541 if(url_ferror(&s->pb)) | 2541 if(url_ferror(&s->pb)) |
2542 goto fail; | 2542 goto fail; |
2543 } | 2543 } |
2554 } | 2554 } |
2555 | 2555 |
2556 /* "user interface" functions */ | 2556 /* "user interface" functions */ |
2557 | 2557 |
2558 void dump_format(AVFormatContext *ic, | 2558 void dump_format(AVFormatContext *ic, |
2559 int index, | 2559 int index, |
2560 const char *url, | 2560 const char *url, |
2561 int is_output) | 2561 int is_output) |
2562 { | 2562 { |
2563 int i, flags; | 2563 int i, flags; |
2564 char buf[256]; | 2564 char buf[256]; |
2565 | 2565 |
2566 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", | 2566 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", |
2567 is_output ? "Output" : "Input", | 2567 is_output ? "Output" : "Input", |
2568 index, | 2568 index, |
2569 is_output ? ic->oformat->name : ic->iformat->name, | 2569 is_output ? ic->oformat->name : ic->iformat->name, |
2570 is_output ? "to" : "from", url); | 2570 is_output ? "to" : "from", url); |
2571 if (!is_output) { | 2571 if (!is_output) { |
2572 av_log(NULL, AV_LOG_INFO, " Duration: "); | 2572 av_log(NULL, AV_LOG_INFO, " Duration: "); |
2573 if (ic->duration != AV_NOPTS_VALUE) { | 2573 if (ic->duration != AV_NOPTS_VALUE) { |
2574 int hours, mins, secs, us; | 2574 int hours, mins, secs, us; |
2576 us = ic->duration % AV_TIME_BASE; | 2576 us = ic->duration % AV_TIME_BASE; |
2577 mins = secs / 60; | 2577 mins = secs / 60; |
2578 secs %= 60; | 2578 secs %= 60; |
2579 hours = mins / 60; | 2579 hours = mins / 60; |
2580 mins %= 60; | 2580 mins %= 60; |
2581 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs, | 2581 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs, |
2582 (10 * us) / AV_TIME_BASE); | 2582 (10 * us) / AV_TIME_BASE); |
2583 } else { | 2583 } else { |
2584 av_log(NULL, AV_LOG_INFO, "N/A"); | 2584 av_log(NULL, AV_LOG_INFO, "N/A"); |
2585 } | 2585 } |
2586 if (ic->start_time != AV_NOPTS_VALUE) { | 2586 if (ic->start_time != AV_NOPTS_VALUE) { |
2690 */ | 2690 */ |
2691 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg) | 2691 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg) |
2692 { | 2692 { |
2693 int i; | 2693 int i; |
2694 char* cp; | 2694 char* cp; |
2695 | 2695 |
2696 /* First, we check our abbreviation table */ | 2696 /* First, we check our abbreviation table */ |
2697 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i) | 2697 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i) |
2698 if (!strcmp(frame_abvs[i].abv, arg)) { | 2698 if (!strcmp(frame_abvs[i].abv, arg)) { |
2699 *frame_rate = frame_abvs[i].frame_rate; | 2699 *frame_rate = frame_abvs[i].frame_rate; |
2700 *frame_rate_base = frame_abvs[i].frame_rate_base; | 2700 *frame_rate_base = frame_abvs[i].frame_rate_base; |
2706 if (!cp) | 2706 if (!cp) |
2707 cp = strchr(arg, ':'); | 2707 cp = strchr(arg, ':'); |
2708 if (cp) { | 2708 if (cp) { |
2709 char* cpp; | 2709 char* cpp; |
2710 *frame_rate = strtol(arg, &cpp, 10); | 2710 *frame_rate = strtol(arg, &cpp, 10); |
2711 if (cpp != arg || cpp == cp) | 2711 if (cpp != arg || cpp == cp) |
2712 *frame_rate_base = strtol(cp+1, &cpp, 10); | 2712 *frame_rate_base = strtol(cp+1, &cpp, 10); |
2713 else | 2713 else |
2714 *frame_rate = 0; | 2714 *frame_rate = 0; |
2715 } | 2715 } |
2716 else { | 2716 else { |
2717 /* Finally we give up and parse it as double */ | 2717 /* Finally we give up and parse it as double */ |
2718 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q() | 2718 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q() |
2719 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5); | 2719 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5); |
2720 } | 2720 } |
2730 * @code | 2730 * @code |
2731 * Syntax: | 2731 * Syntax: |
2732 * - If not a duration: | 2732 * - If not a duration: |
2733 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]} | 2733 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]} |
2734 * Time is local time unless Z is suffixed to the end, in which case it is GMT. | 2734 * Time is local time unless Z is suffixed to the end, in which case it is GMT. |
2735 * Returns the date in microseconds since 1970. | 2735 * Returns the date in microseconds since 1970. |
2736 * | 2736 * |
2737 * - If a duration: | 2737 * - If a duration: |
2738 * HH[:MM[:SS[.m...]]] | 2738 * HH[:MM[:SS[.m...]]] |
2739 * S+[.m...] | 2739 * S+[.m...] |
2740 * @endcode | 2740 * @endcode |
2836 | 2836 |
2837 if (*q == '.') { | 2837 if (*q == '.') { |
2838 int val, n; | 2838 int val, n; |
2839 q++; | 2839 q++; |
2840 for (val = 0, n = 100000; n >= 1; n /= 10, q++) { | 2840 for (val = 0, n = 100000; n >= 1; n /= 10, q++) { |
2841 if (!isdigit(*q)) | 2841 if (!isdigit(*q)) |
2842 break; | 2842 break; |
2843 val += n * (*q - '0'); | 2843 val += n * (*q - '0'); |
2844 } | 2844 } |
2845 t += val; | 2845 t += val; |
2846 } | 2846 } |
2881 } | 2881 } |
2882 p++; | 2882 p++; |
2883 } | 2883 } |
2884 *q = '\0'; | 2884 *q = '\0'; |
2885 } | 2885 } |
2886 if (!strcmp(tag, tag1)) | 2886 if (!strcmp(tag, tag1)) |
2887 return 1; | 2887 return 1; |
2888 if (*p != '&') | 2888 if (*p != '&') |
2889 break; | 2889 break; |
2890 p++; | 2890 p++; |
2891 } | 2891 } |
3087 } | 3087 } |
3088 | 3088 |
3089 /** | 3089 /** |
3090 * Set the pts for a given stream. | 3090 * Set the pts for a given stream. |
3091 * | 3091 * |
3092 * @param s stream | 3092 * @param s stream |
3093 * @param pts_wrap_bits number of bits effectively used by the pts | 3093 * @param pts_wrap_bits number of bits effectively used by the pts |
3094 * (used for wrap control, 33 is the value for MPEG) | 3094 * (used for wrap control, 33 is the value for MPEG) |
3095 * @param pts_num numerator to convert to seconds (MPEG: 1) | 3095 * @param pts_num numerator to convert to seconds (MPEG: 1) |
3096 * @param pts_den denominator to convert to seconds (MPEG: 90000) | 3096 * @param pts_den denominator to convert to seconds (MPEG: 90000) |
3097 */ | 3097 */ |
3098 void av_set_pts_info(AVStream *s, int pts_wrap_bits, | 3098 void av_set_pts_info(AVStream *s, int pts_wrap_bits, |
3099 int pts_num, int pts_den) | 3099 int pts_num, int pts_den) |
3100 { | 3100 { |
3111 * 'num' is normalized so that it is such as 0 <= num < den. | 3111 * 'num' is normalized so that it is such as 0 <= num < den. |
3112 * | 3112 * |
3113 * @param f fractional number | 3113 * @param f fractional number |
3114 * @param val integer value | 3114 * @param val integer value |
3115 * @param num must be >= 0 | 3115 * @param num must be >= 0 |
3116 * @param den must be >= 1 | 3116 * @param den must be >= 1 |
3117 */ | 3117 */ |
3118 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) | 3118 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) |
3119 { | 3119 { |
3120 num += (den >> 1); | 3120 num += (den >> 1); |
3121 if (num >= den) { | 3121 if (num >= den) { |
3211 } | 3211 } |
3212 return NULL; | 3212 return NULL; |
3213 } | 3213 } |
3214 | 3214 |
3215 /** | 3215 /** |
3216 * Read an image from a stream. | 3216 * Read an image from a stream. |
3217 * @param gb byte stream containing the image | 3217 * @param gb byte stream containing the image |
3218 * @param fmt image format, NULL if probing is required | 3218 * @param fmt image format, NULL if probing is required |
3219 */ | 3219 */ |
3220 int av_read_image(ByteIOContext *pb, const char *filename, | 3220 int av_read_image(ByteIOContext *pb, const char *filename, |
3221 AVImageFormat *fmt, | 3221 AVImageFormat *fmt, |