Mercurial > libavcodec.hg
comparison dv.c @ 1489:337d13aee605 libavcodec
* DV handling was streamlined for both muxing/demuxing and
decoding. All muxing/demuxing functionality is now available
in libavformat/dv.[ch].
* dv1394.c and avidec.c were hooked up with general DV demuxer.
* DVAUDIO is dead! Long live pcm_s16le!
* DV audio is now always recognized -- which means we can
now hear all those ducks quacking in pond.dv.
author | romansh |
---|---|
date | Mon, 29 Sep 2003 17:54:07 +0000 |
parents | 8edad1e372d1 |
children | ad7e62df9962 |
comparison
equal
deleted
inserted
replaced
1488:766a2f4edbea | 1489:337d13aee605 |
---|---|
23 */ | 23 */ |
24 #include "avcodec.h" | 24 #include "avcodec.h" |
25 #include "dsputil.h" | 25 #include "dsputil.h" |
26 #include "mpegvideo.h" | 26 #include "mpegvideo.h" |
27 #include "simple_idct.h" | 27 #include "simple_idct.h" |
28 | 28 #include "dvdata.h" |
29 #define NTSC_FRAME_SIZE 120000 | |
30 #define PAL_FRAME_SIZE 144000 | |
31 | |
32 #define TEX_VLC_BITS 9 | |
33 | 29 |
34 typedef struct DVVideoDecodeContext { | 30 typedef struct DVVideoDecodeContext { |
35 AVCodecContext *avctx; | 31 const DVprofile* sys; |
36 GetBitContext gb; | 32 GetBitContext gb; |
37 VLC *vlc; | |
38 int sampling_411; /* 0 = 420, 1 = 411 */ | |
39 int width, height; | |
40 uint8_t *current_picture[3]; /* picture structure */ | |
41 AVFrame picture; | 33 AVFrame picture; |
42 int linesize[3]; | |
43 DCTELEM block[5*6][64] __align8; | 34 DCTELEM block[5*6][64] __align8; |
35 | |
36 /* FIXME: the following is extracted from DSP */ | |
44 uint8_t dv_zigzag[2][64]; | 37 uint8_t dv_zigzag[2][64]; |
45 uint8_t idct_permutation[64]; | 38 uint8_t idct_permutation[64]; |
39 void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size); | |
40 void (*fdct)(DCTELEM *block); | |
41 | |
46 /* XXX: move it to static storage ? */ | 42 /* XXX: move it to static storage ? */ |
47 uint8_t dv_shift[2][22][64]; | 43 uint8_t dv_shift[2][22][64]; |
48 void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block); | 44 void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block); |
49 } DVVideoDecodeContext; | 45 } DVVideoDecodeContext; |
50 | 46 |
51 #include "dvdata.h" | 47 #define TEX_VLC_BITS 9 |
52 | |
53 static VLC dv_vlc; | |
54 /* XXX: also include quantization */ | 48 /* XXX: also include quantization */ |
55 static RL_VLC_ELEM *dv_rl_vlc[1]; | 49 static RL_VLC_ELEM *dv_rl_vlc[1]; |
50 static VLC_TYPE dv_vlc_codes[15][23]; | |
56 | 51 |
57 static void dv_build_unquantize_tables(DVVideoDecodeContext *s) | 52 static void dv_build_unquantize_tables(DVVideoDecodeContext *s) |
58 { | 53 { |
59 int i, q, j; | 54 int i, q, j; |
60 | 55 |
83 MpegEncContext s2; | 78 MpegEncContext s2; |
84 static int done=0; | 79 static int done=0; |
85 | 80 |
86 if (!done) { | 81 if (!done) { |
87 int i; | 82 int i; |
83 VLC dv_vlc; | |
88 | 84 |
89 done = 1; | 85 done = 1; |
90 | 86 |
 91 /* NOTE: as a trick, we use the fact that no codes are unused | 87 /* NOTE: as a trick, we use the fact that no codes are unused |
 92 to accelerate the parsing of partial codes */ | 88 to accelerate the parsing of partial codes */ |
112 } | 108 } |
113 dv_rl_vlc[0][i].len = len; | 109 dv_rl_vlc[0][i].len = len; |
114 dv_rl_vlc[0][i].level = level; | 110 dv_rl_vlc[0][i].level = level; |
115 dv_rl_vlc[0][i].run = run; | 111 dv_rl_vlc[0][i].run = run; |
116 } | 112 } |
113 | |
114 memset(dv_vlc_codes, 0xff, sizeof(dv_vlc_codes)); | |
115 for (i = 0; i < NB_DV_VLC - 1; i++) { | |
116 if (dv_vlc_run[i] < 15 && dv_vlc_level[i] < 23 && dv_vlc_len[i] < 15) | |
117 dv_vlc_codes[dv_vlc_run[i]][dv_vlc_level[i]] = i; | |
118 } | |
117 } | 119 } |
118 | 120 |
119 /* ugly way to get the idct & scantable */ | 121 /* ugly way to get the idct & scantable */ |
120 /* XXX: fix it */ | 122 /* XXX: fix it */ |
121 memset(&s2, 0, sizeof(MpegEncContext)); | 123 memset(&s2, 0, sizeof(MpegEncContext)); |
122 s2.avctx = avctx; | 124 s2.avctx = avctx; |
123 dsputil_init(&s2.dsp, avctx); | 125 dsputil_init(&s2.dsp, avctx); |
124 if (DCT_common_init(&s2) < 0) | 126 if (DCT_common_init(&s2) < 0) |
125 return -1; | 127 return -1; |
126 | 128 |
129 s->get_pixels = s2.dsp.get_pixels; | |
130 s->fdct = s2.dsp.fdct; | |
131 | |
127 s->idct_put[0] = s2.dsp.idct_put; | 132 s->idct_put[0] = s2.dsp.idct_put; |
128 memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64); | 133 memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64); |
129 memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64); | 134 memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64); |
130 | 135 |
131 /* XXX: use MMX also for idct248 */ | 136 /* XXX: use MMX also for idct248 */ |
132 s->idct_put[1] = simple_idct248_put; | 137 s->idct_put[1] = simple_idct248_put; |
133 memcpy(s->dv_zigzag[1], dv_248_zigzag, 64); | 138 memcpy(s->dv_zigzag[1], dv_248_zigzag, 64); |
134 | 139 |
135 /* XXX: do it only for constant case */ | 140 /* XXX: do it only for constant case */ |
136 dv_build_unquantize_tables(s); | 141 dv_build_unquantize_tables(s); |
137 | 142 |
138 return 0; | 143 return 0; |
139 } | 144 } |
140 | 145 |
141 //#define VLC_DEBUG | 146 // #define VLC_DEBUG |
142 | 147 |
143 typedef struct BlockInfo { | 148 typedef struct BlockInfo { |
144 const uint8_t *shift_table; | 149 const uint8_t *shift_table; |
145 const uint8_t *scan_table; | 150 const uint8_t *scan_table; |
146 uint8_t pos; /* position in block */ | 151 uint8_t pos; /* position in block */ |
448 mb = mb_data; | 453 mb = mb_data; |
449 for(mb_index = 0; mb_index < 5; mb_index++) { | 454 for(mb_index = 0; mb_index < 5; mb_index++) { |
450 v = *mb_pos_ptr++; | 455 v = *mb_pos_ptr++; |
451 mb_x = v & 0xff; | 456 mb_x = v & 0xff; |
452 mb_y = v >> 8; | 457 mb_y = v >> 8; |
453 y_ptr = s->current_picture[0] + (mb_y * s->linesize[0] * 8) + (mb_x * 8); | 458 y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8); |
454 if (s->sampling_411) | 459 if (s->sys->pix_fmt == PIX_FMT_YUV411P) |
455 c_offset = (mb_y * s->linesize[1] * 8) + ((mb_x >> 2) * 8); | 460 c_offset = (mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8); |
456 else | 461 else |
457 c_offset = ((mb_y >> 1) * s->linesize[1] * 8) + ((mb_x >> 1) * 8); | 462 c_offset = ((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8); |
458 for(j = 0;j < 6; j++) { | 463 for(j = 0;j < 6; j++) { |
459 idct_put = s->idct_put[mb->dct_mode]; | 464 idct_put = s->idct_put[mb->dct_mode]; |
460 if (j < 4) { | 465 if (j < 4) { |
461 if (s->sampling_411 && mb_x < (704 / 8)) { | 466 if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) { |
462 /* NOTE: at end of line, the macroblock is handled as 420 */ | 467 /* NOTE: at end of line, the macroblock is handled as 420 */ |
463 idct_put(y_ptr + (j * 8), s->linesize[0], block); | 468 idct_put(y_ptr + (j * 8), s->picture.linesize[0], block); |
464 } else { | 469 } else { |
465 idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->linesize[0]), | 470 idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]), |
466 s->linesize[0], block); | 471 s->picture.linesize[0], block); |
467 } | 472 } |
468 } else { | 473 } else { |
469 if (s->sampling_411 && mb_x >= (704 / 8)) { | 474 if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) { |
470 uint8_t pixels[64], *c_ptr, *c_ptr1, *ptr; | 475 uint8_t pixels[64], *c_ptr, *c_ptr1, *ptr; |
471 int y, linesize; | 476 int y, linesize; |
472 /* NOTE: at end of line, the macroblock is handled as 420 */ | 477 /* NOTE: at end of line, the macroblock is handled as 420 */ |
473 idct_put(pixels, 8, block); | 478 idct_put(pixels, 8, block); |
474 linesize = s->linesize[6 - j]; | 479 linesize = s->picture.linesize[6 - j]; |
475 c_ptr = s->current_picture[6 - j] + c_offset; | 480 c_ptr = s->picture.data[6 - j] + c_offset; |
476 ptr = pixels; | 481 ptr = pixels; |
477 for(y = 0;y < 8; y++) { | 482 for(y = 0;y < 8; y++) { |
478 /* convert to 411P */ | 483 /* convert to 411P */ |
479 c_ptr1 = c_ptr + 8*linesize; | 484 c_ptr1 = c_ptr + 8*linesize; |
480 c_ptr[0]= ptr[0]; c_ptr1[0]= ptr[4]; | 485 c_ptr[0]= ptr[0]; c_ptr1[0]= ptr[4]; |
484 c_ptr += linesize; | 489 c_ptr += linesize; |
485 ptr += 8; | 490 ptr += 8; |
486 } | 491 } |
487 } else { | 492 } else { |
488 /* don't ask me why they inverted Cb and Cr ! */ | 493 /* don't ask me why they inverted Cb and Cr ! */ |
489 idct_put(s->current_picture[6 - j] + c_offset, | 494 idct_put(s->picture.data[6 - j] + c_offset, |
490 s->linesize[6 - j], block); | 495 s->picture.linesize[6 - j], block); |
491 } | 496 } |
492 } | 497 } |
493 block += 64; | 498 block += 64; |
494 mb++; | 499 mb++; |
495 } | 500 } |
496 } | 501 } |
497 } | 502 } |
498 | |
499 | 503 |
500 /* NOTE: exactly one frame must be given (120000 bytes for NTSC, | 504 /* NOTE: exactly one frame must be given (120000 bytes for NTSC, |
501 144000 bytes for PAL) */ | 505 144000 bytes for PAL) */ |
502 static int dvvideo_decode_frame(AVCodecContext *avctx, | 506 static int dvvideo_decode_frame(AVCodecContext *avctx, |
503 void *data, int *data_size, | 507 void *data, int *data_size, |
504 uint8_t *buf, int buf_size) | 508 uint8_t *buf, int buf_size) |
505 { | 509 { |
506 DVVideoDecodeContext *s = avctx->priv_data; | 510 DVVideoDecodeContext *s = avctx->priv_data; |
507 int sct, dsf, apt, ds, nb_dif_segs, vs, width, height, i, packet_size; | 511 int ds, vs; |
508 uint8_t *buf_ptr; | |
509 const uint16_t *mb_pos_ptr; | 512 const uint16_t *mb_pos_ptr; |
510 | 513 |
511 /* parse id */ | 514 s->sys = dv_frame_profile(buf); |
512 init_get_bits(&s->gb, buf, buf_size*8); | 515 if (!s->sys || buf_size < s->sys->frame_size) |
513 sct = get_bits(&s->gb, 3); | 516 return -1; /* NOTE: we only accept several full frames */ |
514 if (sct != 0) | 517 |
515 return -1; | 518 |
516 skip_bits(&s->gb, 5); | |
517 get_bits(&s->gb, 4); /* dsn (sequence number */ | |
518 get_bits(&s->gb, 1); /* fsc (channel number) */ | |
519 skip_bits(&s->gb, 3); | |
520 get_bits(&s->gb, 8); /* dbn (diff block number 0-134) */ | |
521 | |
522 dsf = get_bits(&s->gb, 1); /* 0 = NTSC 1 = PAL */ | |
523 if (get_bits(&s->gb, 1) != 0) | |
524 return -1; | |
525 skip_bits(&s->gb, 11); | |
526 apt = get_bits(&s->gb, 3); /* apt */ | |
527 | |
528 get_bits(&s->gb, 1); /* tf1 */ | |
529 skip_bits(&s->gb, 4); | |
530 get_bits(&s->gb, 3); /* ap1 */ | |
531 | |
532 get_bits(&s->gb, 1); /* tf2 */ | |
533 skip_bits(&s->gb, 4); | |
534 get_bits(&s->gb, 3); /* ap2 */ | |
535 | |
536 get_bits(&s->gb, 1); /* tf3 */ | |
537 skip_bits(&s->gb, 4); | |
538 get_bits(&s->gb, 3); /* ap3 */ | |
539 | |
540 /* init size */ | |
541 width = 720; | |
542 if (dsf) { | |
543 avctx->frame_rate = 25; | |
544 avctx->frame_rate_base = 1; | |
545 packet_size = PAL_FRAME_SIZE; | |
546 height = 576; | |
547 nb_dif_segs = 12; | |
548 } else { | |
549 avctx->frame_rate = 30000; | |
550 avctx->frame_rate_base = 1001; | |
551 packet_size = NTSC_FRAME_SIZE; | |
552 height = 480; | |
553 nb_dif_segs = 10; | |
554 } | |
555 /* NOTE: we only accept several full frames */ | |
556 if (buf_size < packet_size) | |
557 return -1; | |
558 | |
559 /* NTSC[dsf == 0] is always 720x480, 4:1:1 | |
560 * PAL[dsf == 1] is always 720x576, 4:2:0 for IEC 68134[apt == 0] | |
561 * but for the SMPTE 314M[apt == 1] it is 720x576, 4:1:1 | |
562 */ | |
563 s->sampling_411 = !dsf || apt; | |
564 if (s->sampling_411) { | |
565 mb_pos_ptr = dsf ? dv_place_411P : dv_place_411; | |
566 avctx->pix_fmt = PIX_FMT_YUV411P; | |
567 } else { | |
568 mb_pos_ptr = dv_place_420; | |
569 avctx->pix_fmt = PIX_FMT_YUV420P; | |
570 } | |
571 | |
572 avctx->width = width; | |
573 avctx->height = height; | |
574 | |
575 /* Once again, this is pretty complicated by the fact that the same | |
576 * field is used differently by IEC 68134[apt == 0] and | |
577 * SMPTE 314M[apt == 1]. | |
578 */ | |
579 if (buf[VAUX_TC61_OFFSET] == 0x61 && | |
580 ((apt == 0 && (buf[VAUX_TC61_OFFSET + 2] & 0x07) == 0x07) || | |
581 (apt == 1 && (buf[VAUX_TC61_OFFSET + 2] & 0x07) == 0x02))) | |
582 avctx->aspect_ratio = 16.0 / 9.0; | |
583 else | |
584 avctx->aspect_ratio = 4.0 / 3.0; | |
585 | |
586 if(s->picture.data[0]) | 519 if(s->picture.data[0]) |
587 avctx->release_buffer(avctx, &s->picture); | 520 avctx->release_buffer(avctx, &s->picture); |
588 | 521 |
589 s->picture.reference= 0; | 522 s->picture.reference = 0; |
523 avctx->pix_fmt = s->sys->pix_fmt; | |
590 if(avctx->get_buffer(avctx, &s->picture) < 0) { | 524 if(avctx->get_buffer(avctx, &s->picture) < 0) { |
591 fprintf(stderr, "get_buffer() failed\n"); | 525 fprintf(stderr, "get_buffer() failed\n"); |
592 return -1; | 526 return -1; |
593 } | 527 } |
594 | 528 |
595 for(i=0;i<3;i++) { | |
596 s->current_picture[i] = s->picture.data[i]; | |
597 s->linesize[i] = s->picture.linesize[i]; | |
598 if (!s->current_picture[i]) | |
599 return -1; | |
600 } | |
601 s->width = width; | |
602 s->height = height; | |
603 | |
604 /* for each DIF segment */ | 529 /* for each DIF segment */ |
605 buf_ptr = buf; | 530 mb_pos_ptr = s->sys->video_place; |
606 for (ds = 0; ds < nb_dif_segs; ds++) { | 531 for (ds = 0; ds < s->sys->difseg_size; ds++) { |
607 buf_ptr += 6 * 80; /* skip DIF segment header */ | 532 buf += 6 * 80; /* skip DIF segment header */ |
608 | 533 |
609 for(vs = 0; vs < 27; vs++) { | 534 for(vs = 0; vs < 27; vs++) { |
610 if ((vs % 3) == 0) { | 535 if ((vs % 3) == 0) |
611 /* skip audio block */ | 536 buf += 80; /* skip audio block */ |
612 buf_ptr += 80; | 537 |
613 } | 538 dv_decode_video_segment(s, buf, mb_pos_ptr); |
614 dv_decode_video_segment(s, buf_ptr, mb_pos_ptr); | 539 buf += 5 * 80; |
615 buf_ptr += 5 * 80; | |
616 mb_pos_ptr += 5; | 540 mb_pos_ptr += 5; |
617 } | 541 } |
618 } | 542 } |
619 | 543 |
620 emms_c(); | 544 emms_c(); |
621 | 545 |
622 /* return image */ | 546 /* return image */ |
623 *data_size = sizeof(AVFrame); | 547 *data_size = sizeof(AVFrame); |
624 *(AVFrame*)data= s->picture; | 548 *(AVFrame*)data= s->picture; |
625 | 549 |
626 return packet_size; | 550 return s->sys->frame_size; |
627 } | 551 } |
628 | 552 |
629 static int dvvideo_decode_end(AVCodecContext *avctx) | 553 static int dvvideo_decode_end(AVCodecContext *avctx) |
630 { | 554 { |
631 avcodec_default_free_buffers(avctx); | 555 avcodec_default_free_buffers(avctx); |
643 dvvideo_decode_end, | 567 dvvideo_decode_end, |
644 dvvideo_decode_frame, | 568 dvvideo_decode_frame, |
645 CODEC_CAP_DR1, | 569 CODEC_CAP_DR1, |
646 NULL | 570 NULL |
647 }; | 571 }; |
648 | |
649 typedef struct DVAudioDecodeContext { | |
650 AVCodecContext *avctx; | |
651 GetBitContext gb; | |
652 } DVAudioDecodeContext; | |
653 | |
654 static int dvaudio_decode_init(AVCodecContext *avctx) | |
655 { | |
656 // DVAudioDecodeContext *s = avctx->priv_data; | |
657 return 0; | |
658 } | |
659 | |
660 static uint16_t dv_audio_12to16(uint16_t sample) | |
661 { | |
662 uint16_t shift, result; | |
663 | |
664 sample = (sample < 0x800) ? sample : sample | 0xf000; | |
665 shift = (sample & 0xf00) >> 8; | |
666 | |
667 if (shift < 0x2 || shift > 0xd) { | |
668 result = sample; | |
669 } else if (shift < 0x8) { | |
670 shift--; | |
671 result = (sample - (256 * shift)) << shift; | |
672 } else { | |
673 shift = 0xe - shift; | |
674 result = ((sample + ((256 * shift) + 1)) << shift) - 1; | |
675 } | |
676 | |
677 return result; | |
678 } | |
679 | |
680 /* NOTE: exactly one frame must be given (120000 bytes for NTSC, | |
681 144000 bytes for PAL) | |
682 | |
683 There's a couple of assumptions being made here: | |
684 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) | |
685 audio samples. We can pass them upwards when ffmpeg will be ready | |
686 to deal with them. | |
687 2. We don't do software emphasis. | |
688 3. Audio is always returned as 16bit linear samples: 12bit | |
689 nonlinear samples are converted into 16bit linear ones. | |
690 */ | |
691 static int dvaudio_decode_frame(AVCodecContext *avctx, | |
692 void *data, int *data_size, | |
693 uint8_t *buf, int buf_size) | |
694 { | |
695 DVVideoDecodeContext *s = avctx->priv_data; | |
696 const uint16_t (*unshuffle)[9]; | |
697 int smpls, freq, quant, sys, stride, difseg, ad, dp, nb_dif_segs, i; | |
698 uint16_t lc, rc; | |
699 uint8_t *buf_ptr; | |
700 | |
701 /* parse id */ | |
702 init_get_bits(&s->gb, &buf[AAUX_AS_OFFSET], 5*8); | |
703 i = get_bits(&s->gb, 8); | |
704 if (i != 0x50) { /* No audio ? */ | |
705 *data_size = 0; | |
706 return buf_size; | |
707 } | |
708 | |
709 get_bits(&s->gb, 1); /* 0 - locked audio, 1 - unlocked audio */ | |
710 skip_bits(&s->gb, 1); | |
711 smpls = get_bits(&s->gb, 6); /* samples in this frame - min. samples */ | |
712 | |
713 skip_bits(&s->gb, 8); | |
714 | |
715 skip_bits(&s->gb, 2); | |
716 sys = get_bits(&s->gb, 1); /* 0 - 60 fields, 1 = 50 fields */ | |
717 skip_bits(&s->gb, 5); | |
718 | |
719 get_bits(&s->gb, 1); /* 0 - emphasis on, 1 - emphasis off */ | |
720 get_bits(&s->gb, 1); /* 0 - reserved, 1 - emphasis time constant 50/15us */ | |
721 freq = get_bits(&s->gb, 3); /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */ | |
722 quant = get_bits(&s->gb, 3); /* 0 - 16bit linear, 1 - 12bit nonlinear */ | |
723 | |
724 if (quant > 1) | |
725 return -1; /* Unsupported quantization */ | |
726 | |
727 avctx->sample_rate = dv_audio_frequency[freq]; | |
728 avctx->channels = 2; | |
729 avctx->bit_rate = avctx->channels * avctx->sample_rate * 16; | |
730 // What about: | |
731 // avctx->frame_size = | |
732 | |
733 *data_size = (dv_audio_min_samples[sys][freq] + smpls) * | |
734 avctx->channels * 2; | |
735 | |
736 if (sys) { | |
737 nb_dif_segs = 12; | |
738 stride = 108; | |
739 unshuffle = dv_place_audio50; | |
740 } else { | |
741 nb_dif_segs = 10; | |
742 stride = 90; | |
743 unshuffle = dv_place_audio60; | |
744 } | |
745 | |
746 /* for each DIF segment */ | |
747 buf_ptr = buf; | |
748 for (difseg = 0; difseg < nb_dif_segs; difseg++) { | |
749 buf_ptr += 6 * 80; /* skip DIF segment header */ | |
750 for (ad = 0; ad < 9; ad++) { | |
751 | |
752 for (dp = 8; dp < 80; dp+=2) { | |
753 if (quant == 0) { /* 16bit quantization */ | |
754 i = unshuffle[difseg][ad] + (dp - 8)/2 * stride; | |
755 ((short *)data)[i] = (buf_ptr[dp] << 8) | buf_ptr[dp+1]; | |
756 if (((unsigned short *)data)[i] == 0x8000) | |
757 ((short *)data)[i] = 0; | |
758 } else { /* 12bit quantization */ | |
759 if (difseg >= nb_dif_segs/2) | |
760 goto out; /* We're not doing 4ch at this time */ | |
761 | |
762 lc = ((uint16_t)buf_ptr[dp] << 4) | | |
763 ((uint16_t)buf_ptr[dp+2] >> 4); | |
764 rc = ((uint16_t)buf_ptr[dp+1] << 4) | | |
765 ((uint16_t)buf_ptr[dp+2] & 0x0f); | |
766 lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc)); | |
767 rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc)); | |
768 | |
769 i = unshuffle[difseg][ad] + (dp - 8)/3 * stride; | |
770 ((short *)data)[i] = lc; | |
771 i = unshuffle[difseg+nb_dif_segs/2][ad] + (dp - 8)/3 * stride; | |
772 ((short *)data)[i] = rc; | |
773 ++dp; | |
774 } | |
775 } | |
776 | |
777 buf_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ | |
778 } | |
779 } | |
780 | |
781 out: | |
782 return buf_size; | |
783 } | |
784 | |
785 static int dvaudio_decode_end(AVCodecContext *avctx) | |
786 { | |
787 // DVAudioDecodeContext *s = avctx->priv_data; | |
788 return 0; | |
789 } | |
790 | |
791 AVCodec dvaudio_decoder = { | |
792 "dvaudio", | |
793 CODEC_TYPE_AUDIO, | |
794 CODEC_ID_DVAUDIO, | |
795 sizeof(DVAudioDecodeContext), | |
796 dvaudio_decode_init, | |
797 NULL, | |
798 dvaudio_decode_end, | |
799 dvaudio_decode_frame, | |
800 0, | |
801 NULL | |
802 }; |