comparison: libavcodec/mpegvideo.c @ 924:3814e9115672

cleanup / messup?
fixes 20% speedloss bug
removes redundant variables from MpegEncContext
release buffers in avcodec_flush_buffers() (untested)
author michaelni
date Mon, 09 Dec 2002 00:29:17 +0000
parents 75ee49a4a516
children 7fccaa0d699d
923:3b5d9ecedc73 924:3814e9115672
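
Before the hunks, a brief orientation: the patch routes all Picture setup through alloc_picture(s, pic, shared) and tags each Picture with a buffer type (FF_BUFFER_TYPE_INTERNAL, FF_BUFFER_TYPE_USER, FF_BUFFER_TYPE_SHARED), so free_picture() can tell who owns the pixel planes. The following is only a compilable sketch of that ownership dispatch using simplified stand-in types and names (buf_type, toy_picture, toy_release are inventions of this note, not libavcodec definitions); the authoritative logic is in the alloc_picture()/free_picture() hunks below.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for illustration only; not the real libavcodec definitions. */
enum buf_type { BUF_NONE = 0, BUF_INTERNAL, BUF_USER, BUF_SHARED };

struct toy_picture {
    unsigned char *data[4];   /* pixel planes                          */
    unsigned char *base[4];   /* allocation base (internal case only)  */
    enum buf_type  type;      /* who owns data[]                       */
};

/* Stand-in for avctx->release_buffer(): hand planes back to their owner. */
static void toy_release(struct toy_picture *pic)
{
    printf("release_buffer() called, type=%d\n", pic->type);
}

/* Mirrors the dispatch the patch adds to free_picture():
 *  - INTERNAL/USER planes are returned through release_buffer();
 *  - INTERNAL planes were allocated by us, so their base[] is freed too;
 *  - SHARED planes are borrowed from the caller, so only the pointers
 *    are cleared, never freed. */
static void toy_free_picture(struct toy_picture *pic)
{
    int i;

    if (pic->data[0] && pic->type != BUF_SHARED)
        toy_release(pic);

    if (pic->type == BUF_INTERNAL) {
        for (i = 0; i < 4; i++) {
            free(pic->base[i]);
            pic->base[i] = pic->data[i] = NULL;
        }
        pic->type = BUF_NONE;
    } else if (pic->type == BUF_SHARED) {
        for (i = 0; i < 4; i++)
            pic->base[i] = pic->data[i] = NULL;
        pic->type = BUF_NONE;
    }
}

int main(void)
{
    struct toy_picture internal = {0}, shared = {0};
    static unsigned char caller_plane[16];

    /* "alloc_picture(s, pic, 0)" analogue: we own the allocation. */
    internal.base[0] = internal.data[0] = malloc(16);
    internal.type = BUF_INTERNAL;

    /* "alloc_picture(s, pic, 1)" analogue: wrap caller-owned planes. */
    shared.data[0] = caller_plane;
    shared.type = BUF_SHARED;

    toy_free_picture(&internal);  /* frees the plane                 */
    toy_free_picture(&shared);    /* only forgets the borrowed plane */
    return 0;
}
```
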
266 266
267 return 0; 267 return 0;
268 } 268 }
269 269
270 /** 270 /**
271 * allocates various arrays for a Picture structure, except the pixels themself. 271 * allocates a Picture
272 * The pixels are allocated/set in te get_buffer() 272 * The pixels are allocated/set by calling get_buffer() if shared=0
273 */ 273 */
274 static int alloc_picture(MpegEncContext *s, Picture *pic){ 274 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
275 if (s->encoding) { 275
276 CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(INT16)) 276 if(shared){
277 CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16)) 277 assert(pic->data[0]);
278 CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(INT8)) 278 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
279 } 279 pic->type= FF_BUFFER_TYPE_SHARED;
280 280 }else{
281 CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check 281 int r;
282 CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8)) 282
283 pic->qstride= s->mb_width; 283 assert(!pic->data[0]);
284
285 r= s->avctx->get_buffer(s->avctx, (AVVideoFrame*)pic);
286
287 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
288 fprintf(stderr, "get_buffer() failed (%d %d %d %X)\n", r, pic->age, pic->type, (int)pic->data[0]);
289 return -1;
290 }
291
292 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
293 fprintf(stderr, "get_buffer() failed (stride changed)\n");
294 return -1;
295 }
296
297 if(pic->linesize[1] != pic->linesize[2]){
298 fprintf(stderr, "get_buffer() failed (uv stride missmatch)\n");
299 return -1;
300 }
301
302 s->linesize = pic->linesize[0];
303 s->uvlinesize= pic->linesize[1];
304 }
305
306 if(pic->qscale_table==NULL){
307 if (s->encoding) {
308 CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(INT16))
309 CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16))
310 CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(INT8))
311 }
312
313 CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check
314 CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8))
315 pic->qstride= s->mb_width;
316 }
284 317
285 return 0; 318 return 0;
286 fail: //for the CHECKED_ALLOCZ macro 319 fail: //for the CHECKED_ALLOCZ macro
287 return -1; 320 return -1;
288 } 321 }
289 322
323 /**
324 * deallocates a picture
325 */
290 static void free_picture(MpegEncContext *s, Picture *pic){ 326 static void free_picture(MpegEncContext *s, Picture *pic){
291 int i; 327 int i;
292 328
329 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
330 s->avctx->release_buffer(s->avctx, (AVVideoFrame*)pic);
331 }
332
293 av_freep(&pic->mb_var); 333 av_freep(&pic->mb_var);
294 av_freep(&pic->mc_mb_var); 334 av_freep(&pic->mc_mb_var);
295 av_freep(&pic->mb_mean); 335 av_freep(&pic->mb_mean);
296 av_freep(&pic->mbskip_table); 336 av_freep(&pic->mbskip_table);
297 av_freep(&pic->qscale_table); 337 av_freep(&pic->qscale_table);
298 338
299 if(s->avctx->get_buffer == avcodec_default_get_buffer){ 339 if(pic->type == FF_BUFFER_TYPE_INTERNAL){
300 for(i=0; i<4; i++){ 340 for(i=0; i<4; i++){
301 av_freep(&pic->base[i]); 341 av_freep(&pic->base[i]);
302 pic->data[i]= NULL; 342 pic->data[i]= NULL;
303 } 343 }
304 av_freep(&pic->opaque); 344 av_freep(&pic->opaque);
345 pic->type= 0;
346 }else if(pic->type == FF_BUFFER_TYPE_SHARED){
347 for(i=0; i<4; i++){
348 pic->base[i]=
349 pic->data[i]= NULL;
350 }
351 pic->type= 0;
305 } 352 }
306 } 353 }
307 354
308 /* init common structure for both encoder and decoder */ 355 /* init common structure for both encoder and decoder */
309 int MPV_common_init(MpegEncContext *s) 356 int MPV_common_init(MpegEncContext *s)
443 /* init common structure for both encoder and decoder */ 490 /* init common structure for both encoder and decoder */
444 void MPV_common_end(MpegEncContext *s) 491 void MPV_common_end(MpegEncContext *s)
445 { 492 {
446 int i; 493 int i;
447 494
448 for(i=0; i<MAX_PICTURE_COUNT; i++){
449 if(s->picture[i].data[0]){
450 s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]);
451 }
452 }
453
454 av_freep(&s->mb_type); 495 av_freep(&s->mb_type);
455 av_freep(&s->p_mv_table); 496 av_freep(&s->p_mv_table);
456 av_freep(&s->b_forw_mv_table); 497 av_freep(&s->b_forw_mv_table);
457 av_freep(&s->b_back_mv_table); 498 av_freep(&s->b_back_mv_table);
458 av_freep(&s->b_bidir_forw_mv_table); 499 av_freep(&s->b_bidir_forw_mv_table);
570 s->mjpeg_hsample[1] = 1; 611 s->mjpeg_hsample[1] = 1;
571 s->mjpeg_hsample[2] = 1; 612 s->mjpeg_hsample[2] = 1;
572 if (mjpeg_init(s) < 0) 613 if (mjpeg_init(s) < 0)
573 return -1; 614 return -1;
574 avctx->delay=0; 615 avctx->delay=0;
616 s->low_delay=1;
575 break; 617 break;
576 case CODEC_ID_H263: 618 case CODEC_ID_H263:
577 if (h263_get_picture_format(s->width, s->height) == 7) { 619 if (h263_get_picture_format(s->width, s->height) == 7) {
578 printf("Input picture size isn't suitable for h263 codec! try h263+\n"); 620 printf("Input picture size isn't suitable for h263 codec! try h263+\n");
579 return -1; 621 return -1;
580 } 622 }
581 s->out_format = FMT_H263; 623 s->out_format = FMT_H263;
582 avctx->delay=0; 624 avctx->delay=0;
625 s->low_delay=1;
583 break; 626 break;
584 case CODEC_ID_H263P: 627 case CODEC_ID_H263P:
585 s->out_format = FMT_H263; 628 s->out_format = FMT_H263;
586 s->h263_plus = 1; 629 s->h263_plus = 1;
587 s->unrestricted_mv = 1; 630 s->unrestricted_mv = 1;
589 632
590 /* These are just to be sure */ 633 /* These are just to be sure */
591 s->umvplus = 0; 634 s->umvplus = 0;
592 s->umvplus_dec = 0; 635 s->umvplus_dec = 0;
593 avctx->delay=0; 636 avctx->delay=0;
637 s->low_delay=1;
594 break; 638 break;
595 case CODEC_ID_RV10: 639 case CODEC_ID_RV10:
596 s->out_format = FMT_H263; 640 s->out_format = FMT_H263;
597 s->h263_rv10 = 1; 641 s->h263_rv10 = 1;
598 avctx->delay=0; 642 avctx->delay=0;
643 s->low_delay=1;
599 break; 644 break;
600 case CODEC_ID_MPEG4: 645 case CODEC_ID_MPEG4:
601 s->out_format = FMT_H263; 646 s->out_format = FMT_H263;
602 s->h263_pred = 1; 647 s->h263_pred = 1;
603 s->unrestricted_mv = 1; 648 s->unrestricted_mv = 1;
604 s->has_b_frames= s->max_b_frames ? 1 : 0; 649 s->low_delay= s->max_b_frames ? 0 : 1;
605 s->low_delay= !s->has_b_frames;
606 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); 650 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
607 break; 651 break;
608 case CODEC_ID_MSMPEG4V1: 652 case CODEC_ID_MSMPEG4V1:
609 s->out_format = FMT_H263; 653 s->out_format = FMT_H263;
610 s->h263_msmpeg4 = 1; 654 s->h263_msmpeg4 = 1;
611 s->h263_pred = 1; 655 s->h263_pred = 1;
612 s->unrestricted_mv = 1; 656 s->unrestricted_mv = 1;
613 s->msmpeg4_version= 1; 657 s->msmpeg4_version= 1;
614 avctx->delay=0; 658 avctx->delay=0;
659 s->low_delay=1;
615 break; 660 break;
616 case CODEC_ID_MSMPEG4V2: 661 case CODEC_ID_MSMPEG4V2:
617 s->out_format = FMT_H263; 662 s->out_format = FMT_H263;
618 s->h263_msmpeg4 = 1; 663 s->h263_msmpeg4 = 1;
619 s->h263_pred = 1; 664 s->h263_pred = 1;
620 s->unrestricted_mv = 1; 665 s->unrestricted_mv = 1;
621 s->msmpeg4_version= 2; 666 s->msmpeg4_version= 2;
622 avctx->delay=0; 667 avctx->delay=0;
668 s->low_delay=1;
623 break; 669 break;
624 case CODEC_ID_MSMPEG4V3: 670 case CODEC_ID_MSMPEG4V3:
625 s->out_format = FMT_H263; 671 s->out_format = FMT_H263;
626 s->h263_msmpeg4 = 1; 672 s->h263_msmpeg4 = 1;
627 s->h263_pred = 1; 673 s->h263_pred = 1;
628 s->unrestricted_mv = 1; 674 s->unrestricted_mv = 1;
629 s->msmpeg4_version= 3; 675 s->msmpeg4_version= 3;
630 avctx->delay=0; 676 avctx->delay=0;
677 s->low_delay=1;
631 break; 678 break;
632 case CODEC_ID_WMV1: 679 case CODEC_ID_WMV1:
633 s->out_format = FMT_H263; 680 s->out_format = FMT_H263;
634 s->h263_msmpeg4 = 1; 681 s->h263_msmpeg4 = 1;
635 s->h263_pred = 1; 682 s->h263_pred = 1;
636 s->unrestricted_mv = 1; 683 s->unrestricted_mv = 1;
637 s->msmpeg4_version= 4; 684 s->msmpeg4_version= 4;
638 avctx->delay=0; 685 avctx->delay=0;
686 s->low_delay=1;
639 break; 687 break;
640 case CODEC_ID_WMV2: 688 case CODEC_ID_WMV2:
641 s->out_format = FMT_H263; 689 s->out_format = FMT_H263;
642 s->h263_msmpeg4 = 1; 690 s->h263_msmpeg4 = 1;
643 s->h263_pred = 1; 691 s->h263_pred = 1;
644 s->unrestricted_mv = 1; 692 s->unrestricted_mv = 1;
645 s->msmpeg4_version= 5; 693 s->msmpeg4_version= 5;
646 avctx->delay=0; 694 avctx->delay=0;
695 s->low_delay=1;
647 break; 696 break;
648 default: 697 default:
649 return -1; 698 return -1;
650 } 699 }
651 700
768 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* top left */ 817 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* top left */
769 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* top right */ 818 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* top right */
770 } 819 }
771 } 820 }
772 821
822 static int find_unused_picture(MpegEncContext *s, int shared){
823 int i;
824
825 if(shared){
826 for(i=0; i<MAX_PICTURE_COUNT; i++){
827 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
828 }
829 }else{
830 for(i=0; i<MAX_PICTURE_COUNT; i++){
831 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break;
832 }
833 for(i=0; i<MAX_PICTURE_COUNT; i++){
834 if(s->picture[i].data[0]==NULL) break;
835 }
836 }
837
838 assert(i<MAX_PICTURE_COUNT);
839 return i;
840 }
841
773 /* generic function for encode/decode called before a frame is coded/decoded */ 842 /* generic function for encode/decode called before a frame is coded/decoded */
774 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) 843 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
775 { 844 {
776 int i, r; 845 int i;
777 AVVideoFrame *pic; 846 AVVideoFrame *pic;
778 847
779 s->mb_skiped = 0; 848 s->mb_skiped = 0;
780 849
781 /* mark&release old frames */ 850 /* mark&release old frames */
782 if (s->pict_type != B_TYPE && s->last_picture.data[0]) { 851 if (s->pict_type != B_TYPE && s->last_picture.data[0]) {
783 for(i=0; i<MAX_PICTURE_COUNT; i++){ 852 for(i=0; i<MAX_PICTURE_COUNT; i++){
853 //printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
784 if(s->picture[i].data[0] == s->last_picture.data[0]){ 854 if(s->picture[i].data[0] == s->last_picture.data[0]){
785 // s->picture[i].reference=0; 855 // s->picture[i].reference=0;
786 avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]); 856 avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
787 break; 857 break;
788 } 858 }
800 } 870 }
801 } 871 }
802 } 872 }
803 alloc: 873 alloc:
804 if(!s->encoding){ 874 if(!s->encoding){
805 /* find unused Picture */ 875 i= find_unused_picture(s, 0);
806 for(i=0; i<MAX_PICTURE_COUNT; i++){
807 if(s->picture[i].data[0]==NULL) break;
808 }
809 assert(i<MAX_PICTURE_COUNT);
810 876
811 pic= (AVVideoFrame*)&s->picture[i]; 877 pic= (AVVideoFrame*)&s->picture[i];
812 pic->reference= s->pict_type != B_TYPE; 878 pic->reference= s->pict_type != B_TYPE;
813 pic->coded_picture_number= s->current_picture.coded_picture_number+1; 879 pic->coded_picture_number= s->current_picture.coded_picture_number+1;
814 880
815 r= avctx->get_buffer(avctx, pic); 881 alloc_picture(s, (Picture*)pic, 0);
816
817 if(r<0 || (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1]))){
818 fprintf(stderr, "get_buffer() failed (stride changed), bye bye\n");
819 return -1;
820 }
821
822 s->linesize = pic->linesize[0];
823 s->uvlinesize= pic->linesize[1];
824
825 if(pic->qscale_table==NULL)
826 alloc_picture(s, (Picture*)pic);
827 882
828 s->current_picture= s->picture[i]; 883 s->current_picture= s->picture[i];
829 } 884 }
830 885
831 if (s->pict_type != B_TYPE) { 886 if (s->pict_type != B_TYPE) {
871 emms_c(); 926 emms_c();
872 927
873 s->last_pict_type = s->pict_type; 928 s->last_pict_type = s->pict_type;
874 if(s->pict_type!=B_TYPE){ 929 if(s->pict_type!=B_TYPE){
875 s->last_non_b_pict_type= s->pict_type; 930 s->last_non_b_pict_type= s->pict_type;
876 s->num_available_buffers++;
877 if(s->num_available_buffers>2) s->num_available_buffers= 2;
878 } 931 }
879 932
880 s->current_picture.quality= s->qscale; //FIXME get average of qscale_table 933 s->current_picture.quality= s->qscale; //FIXME get average of qscale_table
881 s->current_picture.pict_type= s->pict_type; 934 s->current_picture.pict_type= s->pict_type;
882 s->current_picture.key_frame= s->pict_type == I_TYPE; 935 s->current_picture.key_frame= s->pict_type == I_TYPE;
890 } 943 }
891 assert(i<MAX_PICTURE_COUNT); 944 assert(i<MAX_PICTURE_COUNT);
892 945
893 /* release non refernce frames */ 946 /* release non refernce frames */
894 for(i=0; i<MAX_PICTURE_COUNT; i++){ 947 for(i=0; i<MAX_PICTURE_COUNT; i++){
895 if(s->picture[i].data[0] && !s->picture[i].reference) 948 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
896 s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]); 949 s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]);
897 } 950 }
898 } 951 }
899 952
900 static int get_sae(uint8_t *src, int ref, int stride){ 953 static int get_sae(uint8_t *src, int ref, int stride){
928 } 981 }
929 } 982 }
930 return acc; 983 return acc;
931 } 984 }
932 985
986
933 static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){ 987 static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){
934 AVVideoFrame *pic; 988 AVVideoFrame *pic;
935 int i,r; 989 int i;
936 const int encoding_delay= s->max_b_frames; 990 const int encoding_delay= s->max_b_frames;
937 991 int direct=1;
938 /* find unused Picture */ 992
939 for(i=0; i<MAX_PICTURE_COUNT; i++){ 993 if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
940 if(s->picture[i].data[0]==NULL) break; 994 if(pic_arg->linesize[0] != s->linesize) direct=0;
941 } 995 if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
942 assert(i<MAX_PICTURE_COUNT); 996 if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
997
998 // printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
999
1000 if(direct){
1001 i= find_unused_picture(s, 1);
1002
1003 pic= (AVVideoFrame*)&s->picture[i];
1004 pic->reference= 1;
1005
1006 for(i=0; i<4; i++){
1007 pic->data[i]= pic_arg->data[i];
1008 pic->linesize[i]= pic_arg->linesize[i];
1009 }
1010 alloc_picture(s, (Picture*)pic, 1);
1011 }else{
1012 i= find_unused_picture(s, 0);
1013
1014 pic= (AVVideoFrame*)&s->picture[i];
1015 pic->reference= 1;
1016
1017 alloc_picture(s, (Picture*)pic, 0);
1018
1019 if( pic->data[0] == pic_arg->data[0]
1020 && pic->data[1] == pic_arg->data[1]
1021 && pic->data[2] == pic_arg->data[2]){
1022 // empty
1023 }else{
1024 int h_chroma_shift, v_chroma_shift;
943 1025
944 pic= (AVVideoFrame*)&s->picture[i]; 1026 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
945 pic->reference= 1; 1027
946 1028 for(i=0; i<3; i++){
947 // assert(avctx->get_buffer == default_get_buffer || avctx->get_buffer==NULL); 1029 int src_stride= pic_arg->linesize[i];
948 r= s->avctx->get_buffer(s->avctx, pic); 1030 int dst_stride= i ? s->uvlinesize : s->linesize;
949 1031 int h_shift= i ? h_chroma_shift : 0;
950 if(r<0 || (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1]))){ 1032 int v_shift= i ? v_chroma_shift : 0;
951 fprintf(stderr, "get_buffer() failed (stride changed), bye bye\n"); 1033 int w= s->width >>h_shift;
952 return -1; 1034 int h= s->height>>v_shift;
953 } 1035 uint8_t *src= pic_arg->data[i];
954 1036 uint8_t *dst= pic->data[i];
955 assert(s->linesize==0 || s->linesize ==pic->linesize[0]); 1037
956 assert(s->uvlinesize==0 || s->uvlinesize==pic->linesize[1]); 1038 if(src_stride==dst_stride)
957 assert(pic->linesize[1] == pic->linesize[2]); 1039 memcpy(dst, src, src_stride*h);
958 s->linesize = pic->linesize[0]; 1040 else{
959 s->uvlinesize= pic->linesize[1]; 1041 while(h--){
960 1042 memcpy(dst, src, w);
961 if(pic->qscale_table==NULL) 1043 dst += dst_stride;
962 alloc_picture(s, (Picture*)pic); 1044 src += src_stride;
963 1045 }
964 // assert(s->input_picture[0]==NULL || s->input_picture[0]->data[0]==NULL); 1046 }
1047 }
1048 }
1049 }
1050 pic->quality= pic_arg->quality;
1051 pic->pict_type= pic_arg->pict_type;
965 1052
966 if(s->input_picture[encoding_delay]) 1053 if(s->input_picture[encoding_delay])
967 pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1; 1054 pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
968 //printf("dpn2:%d\n", pic->display_picture_number);
969 1055
970 /* shift buffer entries */ 1056 /* shift buffer entries */
971 for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++) 1057 for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
972 s->input_picture[i-1]= s->input_picture[i]; 1058 s->input_picture[i-1]= s->input_picture[i];
973 1059
974 s->input_picture[encoding_delay]= (Picture*)pic; 1060 s->input_picture[encoding_delay]= (Picture*)pic;
975 pic->pict_type= pic_arg->pict_type;
976 pic->quality= pic_arg->quality;
977
978 if( pic->data[0] == pic_arg->data[0]
979 && pic->data[1] == pic_arg->data[1]
980 && pic->data[2] == pic_arg->data[2]){
981 // empty
982 }else{
983 int h_chroma_shift, v_chroma_shift;
984
985 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
986
987 for(i=0; i<3; i++){
988 int src_stride= pic_arg->linesize[i];
989 int dst_stride= i ? s->uvlinesize : s->linesize;
990 int h_shift= i ? h_chroma_shift : 0;
991 int v_shift= i ? v_chroma_shift : 0;
992 int w= s->width >>h_shift;
993 int h= s->height>>v_shift;
994 uint8_t *src= pic_arg->data[i];
995 uint8_t *dst= pic->data[i] + 16;
996
997 if(src_stride==dst_stride)
998 memcpy(dst, src, src_stride*h);
999 else{
1000 while(h--){
1001 memcpy(dst, src, w);
1002 dst += dst_stride;
1003 src += src_stride;
1004 }
1005 }
1006 }
1007 }
1008 1061
1009 return 0; 1062 return 0;
1010 } 1063 }
1011 1064
1012 static void select_input_picture(MpegEncContext *s){ 1065 static void select_input_picture(MpegEncContext *s){
1014 const int encoding_delay= s->max_b_frames; 1067 const int encoding_delay= s->max_b_frames;
1015 int coded_pic_num=0; 1068 int coded_pic_num=0;
1016 1069
1017 if(s->reordered_input_picture[0]) 1070 if(s->reordered_input_picture[0])
1018 coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1; 1071 coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1019 //printf("cpn:%d\n", coded_pic_num); 1072
1020 for(i=1; i<MAX_PICTURE_COUNT; i++) 1073 for(i=1; i<MAX_PICTURE_COUNT; i++)
1021 s->reordered_input_picture[i-1]= s->reordered_input_picture[i]; 1074 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1022 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL; 1075 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1023 1076
1024 /* set next picture types & ordering */ 1077 /* set next picture types & ordering */
1037 s->input_picture[i]->pict_type= pict_type; 1090 s->input_picture[i]->pict_type= pict_type;
1038 1091
1039 if(i + 1 >= s->rc_context.num_entries) break; 1092 if(i + 1 >= s->rc_context.num_entries) break;
1040 } 1093 }
1041 } 1094 }
1042 1095
1043 if(s->input_picture[0]->pict_type){ 1096 if(s->input_picture[0]->pict_type){
1044 /* user selected pict_type */ 1097 /* user selected pict_type */
1045 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){ 1098 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1046 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break; 1099 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1047 } 1100 }
1054 b_frames= s->max_b_frames; 1107 b_frames= s->max_b_frames;
1055 }else if(s->b_frame_strategy==1){ 1108 }else if(s->b_frame_strategy==1){
1056 for(i=1; i<s->max_b_frames+1; i++){ 1109 for(i=1; i<s->max_b_frames+1; i++){
1057 if(s->input_picture[i]->b_frame_score==0){ 1110 if(s->input_picture[i]->b_frame_score==0){
1058 s->input_picture[i]->b_frame_score= 1111 s->input_picture[i]->b_frame_score=
1059 get_intra_count(s, s->input_picture[i ]->data[0] + 16, 1112 get_intra_count(s, s->input_picture[i ]->data[0],
1060 s->input_picture[i-1]->data[0] + 16, s->linesize) + 1; 1113 s->input_picture[i-1]->data[0], s->linesize) + 1;
1061 } 1114 }
1062 } 1115 }
1063 for(i=0; i<s->max_b_frames; i++){ 1116 for(i=0; i<s->max_b_frames; i++){
1064 if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break; 1117 if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1065 } 1118 }
1095 } 1148 }
1096 } 1149 }
1097 } 1150 }
1098 1151
1099 if(s->reordered_input_picture[0]){ 1152 if(s->reordered_input_picture[0]){
1100 if(s->reordered_input_picture[0]->pict_type==B_TYPE){ 1153 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
1101 s->reordered_input_picture[0]->reference=0; 1154
1102 } 1155 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1103 s->current_picture= *s->reordered_input_picture[0]; 1156 int i= find_unused_picture(s, 0);
1104 s->new_picture= s->current_picture; 1157 Picture *pic= &s->picture[i];
1105 s->new_picture.data[0]+=16; 1158
1106 s->new_picture.data[1]+=16; 1159 s->new_picture= *s->reordered_input_picture[0];
1107 s->new_picture.data[2]+=16; 1160
1161 /* mark us unused / free shared pic */
1162 for(i=0; i<4; i++)
1163 s->reordered_input_picture[0]->data[i]= NULL;
1164 s->reordered_input_picture[0]->type= 0;
1165
1166 pic->pict_type = s->reordered_input_picture[0]->pict_type;
1167 pic->quality = s->reordered_input_picture[0]->quality;
1168 pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1169 pic->reference = s->reordered_input_picture[0]->reference;
1170
1171 alloc_picture(s, pic, 0);
1172
1173 s->current_picture= *pic;
1174 }else{
1175 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1176 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1177
1178 s->new_picture= *s->reordered_input_picture[0];
1179
1180 for(i=0; i<4; i++){
1181 s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty
1182 }
1183 s->current_picture= *s->reordered_input_picture[0];
1184 }
1108 1185
1109 s->picture_number= s->new_picture.display_picture_number; 1186 s->picture_number= s->new_picture.display_picture_number;
1110 //printf("dpn:%d\n", s->picture_number); 1187 //printf("dpn:%d\n", s->picture_number);
1111 }else{ 1188 }else{
1112 memset(&s->new_picture, 0, sizeof(Picture)); 1189 memset(&s->new_picture, 0, sizeof(Picture));
2229 2306
2230 #endif 2307 #endif
2231 2308
2232 void ff_draw_horiz_band(MpegEncContext *s){ 2309 void ff_draw_horiz_band(MpegEncContext *s){
2233 if ( s->avctx->draw_horiz_band 2310 if ( s->avctx->draw_horiz_band
2234 && (s->num_available_buffers>=1 || (!s->has_b_frames)) ) { 2311 && (s->last_picture.data[0] || s->low_delay) ) {
2235 UINT8 *src_ptr[3]; 2312 UINT8 *src_ptr[3];
2236 int y, h, offset; 2313 int y, h, offset;
2237 y = s->mb_y * 16; 2314 y = s->mb_y * 16;
2238 h = s->height - y; 2315 h = s->height - y;
2239 if (h > 16) 2316 if (h > 16)
2242 if(s->pict_type==B_TYPE) 2319 if(s->pict_type==B_TYPE)
2243 offset = 0; 2320 offset = 0;
2244 else 2321 else
2245 offset = y * s->linesize; 2322 offset = y * s->linesize;
2246 2323
2247 if(s->pict_type==B_TYPE || (!s->has_b_frames)){ 2324 if(s->pict_type==B_TYPE || s->low_delay){
2248 src_ptr[0] = s->current_picture.data[0] + offset; 2325 src_ptr[0] = s->current_picture.data[0] + offset;
2249 src_ptr[1] = s->current_picture.data[1] + (offset >> 2); 2326 src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2250 src_ptr[2] = s->current_picture.data[2] + (offset >> 2); 2327 src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2251 } else { 2328 } else {
2252 src_ptr[0] = s->last_picture.data[0] + offset; 2329 src_ptr[0] = s->last_picture.data[0] + offset;