/*
 * Libavformat API example: Output a media file in any supported
 * libavformat format. The default codecs are used.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

#undef exit

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */
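/* With these defaults, STREAM_NB_FRAMES evaluates to (int)(5.0 * 25) = 125,
   i.e. five seconds of generated video at 25 frames per second. */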

static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

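/* Swept-sine generator state: each sample, t advances by tincr (the phase step
   of the current tone) and tincr itself grows by tincr2, so the tone's
   frequency rises by 110 Hz every second (see open_audio()). */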
float t, tincr, tincr2;
int16_t *samples;
uint8_t *audio_outbuf;
int audio_outbuf_size;
int audio_input_frame_size;

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 1);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = CODEC_TYPE_AUDIO;

    /* put sample parameters */
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;
    return st;
}

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM support)
       to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
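    /* In the 16-bit PCM branch above each sample is 2 bytes, so the frame size
       works out to audio_outbuf_size / (2 * channels), e.g.
       10000 / (2 * 2) = 2500 samples per channel per frame. */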
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
}

/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
   'nb_channels' channels */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for(j=0;j<frame_size;j++) {
        v = (int)(sin(t) * 10000);
        for(i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

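    /* the encoder stamps coded_frame->pts in the codec time base; rescale it
       to the stream time base before handing the packet to the muxer */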
    if (c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
    av_free(audio_outbuf);
}

/**************************************************************/
/* video output */

AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = CODEC_TYPE_VIDEO;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. For fixed-fps content,
       the time base should be 1/framerate and the timestamp increments
       should be identically 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
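    /* For example, with a 1/25 time base a frame whose pts is 50 is presented
       at 50 * 1/25 = 2 seconds into the stream. */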
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision=2;
    }
    // some formats want stream headers to be separate
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
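    /* avpicture_fill() only points picture->data[] and picture->linesize[]
       into picture_buf for the given pixel format; the frame does not own the
       buffer, which is why close_video() frees data[0] and the frame separately */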
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
        }
    }

    /* Cb and Cr */
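    /* in YUV420P the chroma planes are subsampled by 2 in both directions,
       hence the half-size loops */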
    for(y=0;y<height/2;y++) {
        for(x=0;x<width/2;x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frames to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        exit(1);
    }

    filename = argv[1];

    /* auto detect the output format from the name. default is
       mpeg. */
    fmt = guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

    for(;;) {
        /* compute current audio and video time */
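        /* st->pts.val is the stream's current pts expressed in stream
           time-base units; multiplying by num/den converts it to seconds so
           it can be compared against STREAM_DURATION */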
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
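        /* the stream that is furthest behind (the smaller pts, in seconds)
           gets the next packet, so audio and video advance together */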
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* write the trailer, if any. The trailer must be written before you
     * close the CodecContexts opened when you wrote the header; otherwise
     * write_trailer may try to use memory that was freed in avcodec_close() */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(oc->pb);
    }

    /* free the format context */
    av_free(oc);

    return 0;
}