comparison mxfenc.c @ 4400:65adb9e5214f libavformat

extract audio interleaving code from mxf muxer, will be used by gxf and dv
author bcoudurier
date Sun, 08 Feb 2009 04:31:44 +0000
parents 6d1626886974
children 671d415e1786
comparison: 4399:530e55405feb -> 4400:65adb9e5214f
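
A note on the interface being factored out: the declarations that this changeset presumably moves into the new audiointerleave.h can be read off the code removed from mxfenc.c below together with the single ff_audio_interleave() call added in mxf_interleave(). The sketch that follows is inferred from this diff only, not copied from the new header; the callback parameter types of ff_audio_interleave() in particular are an assumption based on its call site and on the existing ff_interleave_add_packet() usage.

/* audiointerleave.h interface as implied by this changeset (inferred sketch) */
#include "avformat.h"
#include "libavutil/fifo.h"

typedef struct {
    AVFifoBuffer fifo;
    unsigned fifo_size;           ///< current fifo size allocated
    uint64_t dts;                 ///< current dts
    int sample_size;              ///< size of one sample all channels included
    const int *samples_per_frame; ///< must be 0 terminated
    const int *samples;           ///< current samples per frame, pointer to samples_per_frame
    AVRational time_base;         ///< time base of output audio packets
} AudioInterleaveContext;

/* lifted verbatim from the functions removed below */
int  ff_audio_interleave_init(AVFormatContext *s,
                              const int *samples_per_frame,
                              AVRational time_base);
void ff_audio_interleave_close(AVFormatContext *s);

/* prototype inferred from the new call in mxf_interleave(): buffers audio into
 * per-stream FIFOs, re-cuts it into frame-aligned packets, then delegates to
 * the muxer's own get_packet callback */
int ff_audio_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush,
                        int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int),
                        int (*compare_ts)(AVFormatContext *, AVPacket *, AVPacket *));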

@@ -34,27 +34,18 @@
 
 #include <math.h>
 #include <time.h>
 
 #include "libavutil/fifo.h"
+#include "audiointerleave.h"
 #include "mxf.h"
 
 static const int NTSC_samples_per_frame[] = { 1602, 1601, 1602, 1601, 1602, 0 };
 static const int PAL_samples_per_frame[] = { 1920, 0 };
 
 #define MXF_INDEX_CLUSTER_SIZE 4096
 #define KAG_SIZE 512
-
-typedef struct {
-    AVFifoBuffer fifo;
-    unsigned fifo_size;           ///< current fifo size allocated
-    uint64_t dts;                 ///< current dts
-    int sample_size;              ///< size of one sample all channels included
-    const int *samples_per_frame; ///< must be 0 terminated
-    const int *samples;           ///< current samples per frame, pointer to samples_per_frame
-    AVRational time_base;         ///< time base of output audio packets
-} AudioInterleaveContext;
 
 typedef struct {
     int local_tag;
     UID uid;
 } MXFLocalTagPair;
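
On the cadence tables kept above: at 48 kHz audio, 25 fps PAL gives exactly 48000 / 25 = 1920 samples per video frame, while 30000/1001 fps NTSC gives a non-integer average, which is why the NTSC table alternates 1602 and 1601 and why both tables are 0-terminated so the reader can wrap around. The arithmetic, for reference:

    48000 / 25                        = 1920     (PAL, single entry)
    48000 * 1001 / 30000              = 1601.6   (NTSC, average per frame)
    1602 + 1601 + 1602 + 1601 + 1602  = 8008     (exactly 5 * 1601.6 per 5-frame cycle)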

@@ -1108,53 +1099,10 @@
     }
     sc->codec_ul = mxf_get_mpeg2_codec_ul(st->codec);
     return !!sc->codec_ul;
 }
 
-static int ff_audio_interleave_init(AVFormatContext *s,
-                                    const int *samples_per_frame,
-                                    AVRational time_base)
-{
-    int i;
-
-    if (!samples_per_frame)
-        return -1;
-
-    for (i = 0; i < s->nb_streams; i++) {
-        AVStream *st = s->streams[i];
-        AudioInterleaveContext *aic = st->priv_data;
-
-        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
-            aic->sample_size = (st->codec->channels *
-                                av_get_bits_per_sample(st->codec->codec_id)) / 8;
-            if (!aic->sample_size) {
-                av_log(s, AV_LOG_ERROR, "could not compute sample size\n");
-                return -1;
-            }
-            aic->samples_per_frame = samples_per_frame;
-            aic->samples = aic->samples_per_frame;
-            aic->time_base = time_base;
-
-            av_fifo_init(&aic->fifo, 100 * *aic->samples);
-        }
-    }
-
-    return 0;
-}
-
-static void ff_audio_interleave_close(AVFormatContext *s)
-{
-    int i;
-    for (i = 0; i < s->nb_streams; i++) {
-        AVStream *st = s->streams[i];
-        AudioInterleaveContext *aic = st->priv_data;
-
-        if (st->codec->codec_type == CODEC_TYPE_AUDIO)
-            av_fifo_free(&aic->fifo);
-    }
-}
-
 static uint64_t mxf_parse_timestamp(time_t timestamp)
 {
     struct tm *time = localtime(&timestamp);
     return (uint64_t)(time->tm_year+1900) << 48 |
            (uint64_t)(time->tm_mon+1) << 40 |
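
The init/close pair removed above is what the gxf and dv muxers mentioned in the commit message would now call around their own header/trailer writing. The pairing below is a hypothetical usage sketch, not code from this changeset: the xxx_ function names, the PAL/NTSC selection test and the 1/48000 packet time base are illustrative assumptions.

/* hypothetical caller of the extracted init/close (usage sketch only) */
static int xxx_write_header(AVFormatContext *s)
{
    /* pick the audio cadence matching the video frame rate; this particular
     * test is illustrative, not taken from any muxer in this changeset */
    const int *samples_per_frame =
        s->streams[0]->codec->time_base.den == 25 ? PAL_samples_per_frame
                                                  : NTSC_samples_per_frame;

    /* assumed: audio packets are timed on a 1/48000 time base */
    if (ff_audio_interleave_init(s, samples_per_frame, (AVRational){1, 48000}) < 0)
        return -1;

    /* ... muxer-specific header writing ... */
    return 0;
}

static int xxx_write_trailer(AVFormatContext *s)
{
    /* ... muxer-specific trailer writing ... */
    ff_audio_interleave_close(s);
    return 0;
}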

@@ -1426,35 +1374,10 @@
 
     mxf_free(s);
     return 0;
 }
 
-static int mxf_interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt,
-                                           int stream_index, int flush)
-{
-    AVStream *st = s->streams[stream_index];
-    AudioInterleaveContext *aic = st->priv_data;
-
-    int size = FFMIN(av_fifo_size(&aic->fifo), *aic->samples * aic->sample_size);
-    if (!size || (!flush && size == av_fifo_size(&aic->fifo)))
-        return 0;
-
-    av_new_packet(pkt, size);
-    av_fifo_read(&aic->fifo, pkt->data, size);
-
-    pkt->dts = pkt->pts = aic->dts;
-    pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base);
-    pkt->stream_index = stream_index;
-    aic->dts += pkt->duration;
-
-    aic->samples++;
-    if (!*aic->samples)
-        aic->samples = aic->samples_per_frame;
-
-    return size;
-}
-
 static int mxf_interleave_get_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
 {
     AVPacketList *pktl;
     int stream_count = 0;
     int streams[MAX_STREAMS];

@@ -1515,36 +1438,12 @@
            (next->dts == pkt->dts && sc->order < sc2->order);
 }
 
 static int mxf_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
 {
-    int i;
-
-    if (pkt) {
-        AVStream *st = s->streams[pkt->stream_index];
-        AudioInterleaveContext *aic = st->priv_data;
-        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
-            av_fifo_generic_write(&aic->fifo, pkt->data, pkt->size, NULL);
-        } else {
-            // rewrite pts and dts to be decoded time line position
-            pkt->pts = pkt->dts = aic->dts;
-            aic->dts += pkt->duration;
-            ff_interleave_add_packet(s, pkt, mxf_compare_timestamps);
-        }
-        pkt = NULL;
-    }
-
-    for (i = 0; i < s->nb_streams; i++) {
-        AVStream *st = s->streams[i];
-        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
-            AVPacket new_pkt;
-            while (mxf_interleave_new_audio_packet(s, &new_pkt, i, flush))
-                ff_interleave_add_packet(s, &new_pkt, mxf_compare_timestamps);
-        }
-    }
-
-    return mxf_interleave_get_packet(s, out, pkt, flush);
+    return ff_audio_interleave(s, out, pkt, flush,
+                               mxf_interleave_get_packet, mxf_compare_timestamps);
 }
 
 AVOutputFormat mxf_muxer = {
     "mxf",
     NULL_IF_CONFIG_SMALL("Material eXchange Format"),
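
Finally, a reconstruction of what the extracted generic side plausibly looks like: it is the mxf_interleave() / mxf_interleave_new_audio_packet() pair deleted in this changeset, with the MXF-specific pieces turned into the get_packet/compare_ts callbacks. This is a sketch derived from the removed code, not the contents of the new audiointerleave.c, and the generic function names are assumptions.

/* reconstruction sketch; bodies mirror the code removed from mxfenc.c above */
#include "avformat.h"
#include "audiointerleave.h"

static int ff_interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt,
                                          int stream_index, int flush)
{
    AVStream *st = s->streams[stream_index];
    AudioInterleaveContext *aic = st->priv_data;

    /* take at most one frame's worth of samples; unless flushing, keep a
     * partial frame buffered for the next call */
    int size = FFMIN(av_fifo_size(&aic->fifo), *aic->samples * aic->sample_size);
    if (!size || (!flush && size == av_fifo_size(&aic->fifo)))
        return 0;

    av_new_packet(pkt, size);
    av_fifo_read(&aic->fifo, pkt->data, size);

    pkt->dts = pkt->pts = aic->dts;
    pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base);
    pkt->stream_index = stream_index;
    aic->dts += pkt->duration;

    /* walk the 0-terminated cadence table, wrapping at the end */
    aic->samples++;
    if (!*aic->samples)
        aic->samples = aic->samples_per_frame;

    return size;
}

int ff_audio_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush,
                        int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int),
                        int (*compare_ts)(AVFormatContext *, AVPacket *, AVPacket *))
{
    int i;

    if (pkt) {
        AVStream *st = s->streams[pkt->stream_index];
        AudioInterleaveContext *aic = st->priv_data;
        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
            /* audio is only buffered here and re-emitted in frame-aligned
             * chunks by the loop below */
            av_fifo_generic_write(&aic->fifo, pkt->data, pkt->size, NULL);
        } else {
            // rewrite pts and dts to be decoded time line position
            pkt->pts = pkt->dts = aic->dts;
            aic->dts += pkt->duration;
            ff_interleave_add_packet(s, pkt, compare_ts);
        }
        pkt = NULL;
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
            AVPacket new_pkt;
            while (ff_interleave_new_audio_packet(s, &new_pkt, i, flush))
                ff_interleave_add_packet(s, &new_pkt, compare_ts);
        }
    }

    return get_packet(s, out, pkt, flush);
}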