Mercurial > libavcodec.hg
comparison h264.c @ 1187:f3c659bfdb8e libavcodec
minor cleanup / some warning fixes
author | michaelni |
---|---|
date | Mon, 14 Apr 2003 13:44:38 +0000 |
parents | 05a2ac8978ad |
children | 327c5a36dfe7 |
comparison
equal
deleted
inserted
replaced
1186:4f0072371bb9 | 1187:f3c659bfdb8e |
---|---|
295 * fill a rectangle. | 295 * fill a rectangle. |
296 * @param h height of the rectangle, should be a constant | 296 * @param h height of the rectangle, should be a constant |
297 * @param w width of the rectangle, should be a constant | 297 * @param w width of the rectangle, should be a constant |
298 * @param size the size of val (1 or 4), should be a constant | 298 * @param size the size of val (1 or 4), should be a constant |
299 */ | 299 */ |
300 static inline void fill_rectangle(void *p, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined | 300 static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){ //FIXME ensure this IS inlined |
301 uint8_t *p= (uint8_t*)vp; | |
301 assert(size==1 || size==4); | 302 assert(size==1 || size==4); |
302 | 303 |
303 w *= size; | 304 w *= size; |
304 stride *= size; | 305 stride *= size; |
305 | 306 |
747 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4) | 748 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4) |
748 * @param mx the x component of the predicted motion vector | 749 * @param mx the x component of the predicted motion vector |
749 * @param my the y component of the predicted motion vector | 750 * @param my the y component of the predicted motion vector |
750 */ | 751 */ |
751 static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){ | 752 static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){ |
752 MpegEncContext * const s = &h->s; | |
753 const int index8= scan8[n]; | 753 const int index8= scan8[n]; |
754 const int top_ref= h->ref_cache[list][ index8 - 8 ]; | 754 const int top_ref= h->ref_cache[list][ index8 - 8 ]; |
755 const int left_ref= h->ref_cache[list][ index8 - 1 ]; | 755 const int left_ref= h->ref_cache[list][ index8 - 1 ]; |
756 const int16_t * const A= h->mv_cache[list][ index8 - 1 ]; | 756 const int16_t * const A= h->mv_cache[list][ index8 - 1 ]; |
757 const int16_t * const B= h->mv_cache[list][ index8 - 8 ]; | 757 const int16_t * const B= h->mv_cache[list][ index8 - 8 ]; |
793 *mx= mid_pred(A[0], B[0], C[0]); | 793 *mx= mid_pred(A[0], B[0], C[0]); |
794 *my= mid_pred(A[1], B[1], C[1]); | 794 *my= mid_pred(A[1], B[1], C[1]); |
795 } | 795 } |
796 } | 796 } |
797 | 797 |
798 tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, s->mb_x, s->mb_y, n, list); | 798 tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list); |
799 } | 799 } |
800 | 800 |
801 /** | 801 /** |
802 * gets the directionally predicted 16x8 MV. | 802 * gets the directionally predicted 16x8 MV. |
803 * @param n the block index | 803 * @param n the block index |
804 * @param mx the x component of the predicted motion vector | 804 * @param mx the x component of the predicted motion vector |
805 * @param my the y component of the predicted motion vector | 805 * @param my the y component of the predicted motion vector |
806 */ | 806 */ |
807 static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ | 807 static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ |
808 MpegEncContext * const s = &h->s; | |
809 if(n==0){ | 808 if(n==0){ |
810 const int top_ref= h->ref_cache[list][ scan8[0] - 8 ]; | 809 const int top_ref= h->ref_cache[list][ scan8[0] - 8 ]; |
811 const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ]; | 810 const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ]; |
812 | 811 |
813 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref, B[0], B[1], s->mb_x, s->mb_y, n, list); | 812 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list); |
814 | 813 |
815 if(top_ref == ref){ | 814 if(top_ref == ref){ |
816 *mx= B[0]; | 815 *mx= B[0]; |
817 *my= B[1]; | 816 *my= B[1]; |
818 return; | 817 return; |
819 } | 818 } |
820 }else{ | 819 }else{ |
821 const int left_ref= h->ref_cache[list][ scan8[8] - 1 ]; | 820 const int left_ref= h->ref_cache[list][ scan8[8] - 1 ]; |
822 const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ]; | 821 const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ]; |
823 | 822 |
824 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], s->mb_x, s->mb_y, n, list); | 823 tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); |
825 | 824 |
826 if(left_ref == ref){ | 825 if(left_ref == ref){ |
827 *mx= A[0]; | 826 *mx= A[0]; |
828 *my= A[1]; | 827 *my= A[1]; |
829 return; | 828 return; |
839 * @param n the block index | 838 * @param n the block index |
840 * @param mx the x component of the predicted motion vector | 839 * @param mx the x component of the predicted motion vector |
841 * @param my the y component of the predicted motion vector | 840 * @param my the y component of the predicted motion vector |
842 */ | 841 */ |
843 static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ | 842 static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){ |
844 MpegEncContext * const s = &h->s; | |
845 if(n==0){ | 843 if(n==0){ |
846 const int left_ref= h->ref_cache[list][ scan8[0] - 1 ]; | 844 const int left_ref= h->ref_cache[list][ scan8[0] - 1 ]; |
847 const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ]; | 845 const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ]; |
848 | 846 |
849 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], s->mb_x, s->mb_y, n, list); | 847 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); |
850 | 848 |
851 if(left_ref == ref){ | 849 if(left_ref == ref){ |
852 *mx= A[0]; | 850 *mx= A[0]; |
853 *my= A[1]; | 851 *my= A[1]; |
854 return; | 852 return; |
857 const int16_t * C; | 855 const int16_t * C; |
858 int diagonal_ref; | 856 int diagonal_ref; |
859 | 857 |
860 diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2); | 858 diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2); |
861 | 859 |
862 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref, C[0], C[1], s->mb_x, s->mb_y, n, list); | 860 tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list); |
863 | 861 |
864 if(diagonal_ref == ref){ | 862 if(diagonal_ref == ref){ |
865 *mx= C[0]; | 863 *mx= C[0]; |
866 *my= C[1]; | 864 *my= C[1]; |
867 return; | 865 return; |
871 //RARE | 869 //RARE |
872 pred_motion(h, n, 2, list, ref, mx, my); | 870 pred_motion(h, n, 2, list, ref, mx, my); |
873 } | 871 } |
874 | 872 |
875 static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){ | 873 static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){ |
876 MpegEncContext * const s = &h->s; | |
877 const int top_ref = h->ref_cache[0][ scan8[0] - 8 ]; | 874 const int top_ref = h->ref_cache[0][ scan8[0] - 8 ]; |
878 const int left_ref= h->ref_cache[0][ scan8[0] - 1 ]; | 875 const int left_ref= h->ref_cache[0][ scan8[0] - 1 ]; |
879 | 876 |
880 tprintf("pred_pskip: (%d) (%d) at %2d %2d", top_ref, left_ref, s->mb_x, s->mb_y); | 877 tprintf("pred_pskip: (%d) (%d) at %2d %2d", top_ref, left_ref, h->s.mb_x, h->s.mb_y); |
881 | 878 |
882 if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE | 879 if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE |
883 || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0) | 880 || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0) |
884 || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){ | 881 || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){ |
885 | 882 |
892 return; | 889 return; |
893 } | 890 } |
894 | 891 |
895 static inline void write_back_motion(H264Context *h, int mb_type){ | 892 static inline void write_back_motion(H264Context *h, int mb_type){ |
896 MpegEncContext * const s = &h->s; | 893 MpegEncContext * const s = &h->s; |
897 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; | |
898 const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; | 894 const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; |
899 const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride; | 895 const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride; |
900 int list; | 896 int list; |
901 | 897 |
902 for(list=0; list<2; list++){ | 898 for(list=0; list<2; list++){ |
2088 goto fail;\ | 2084 goto fail;\ |
2089 }\ | 2085 }\ |
2090 } | 2086 } |
2091 | 2087 |
2092 static void free_tables(H264Context *h){ | 2088 static void free_tables(H264Context *h){ |
2093 MpegEncContext * const s = &h->s; | |
2094 | |
2095 av_freep(&h->intra4x4_pred_mode); | 2089 av_freep(&h->intra4x4_pred_mode); |
2096 av_freep(&h->non_zero_count); | 2090 av_freep(&h->non_zero_count); |
2097 av_freep(&h->slice_table_base); | 2091 av_freep(&h->slice_table_base); |
2098 h->slice_table= NULL; | 2092 h->slice_table= NULL; |
2099 | 2093 |
2136 return -1; | 2130 return -1; |
2137 } | 2131 } |
2138 | 2132 |
2139 static void common_init(H264Context *h){ | 2133 static void common_init(H264Context *h){ |
2140 MpegEncContext * const s = &h->s; | 2134 MpegEncContext * const s = &h->s; |
2141 int i; | |
2142 | 2135 |
2143 s->width = s->avctx->width; | 2136 s->width = s->avctx->width; |
2144 s->height = s->avctx->height; | 2137 s->height = s->avctx->height; |
2145 s->codec_id= s->avctx->codec->id; | 2138 s->codec_id= s->avctx->codec->id; |
2146 | 2139 |
3165 if(h->slice_type != I_TYPE && h->slice_type != SI_TYPE){ | 3158 if(h->slice_type != I_TYPE && h->slice_type != SI_TYPE){ |
3166 if(s->mb_skip_run==-1) | 3159 if(s->mb_skip_run==-1) |
3167 s->mb_skip_run= get_ue_golomb(&s->gb); | 3160 s->mb_skip_run= get_ue_golomb(&s->gb); |
3168 | 3161 |
3169 if (s->mb_skip_run--) { | 3162 if (s->mb_skip_run--) { |
3170 int i, mx, my; | 3163 int mx, my; |
3171 /* skip mb */ | 3164 /* skip mb */ |
3172 #if 0 //FIXME | |
3173 for(i=0;i<6;i++) | |
3174 s->block_last_index[i] = -1; | |
3175 s->mv_type = MV_TYPE_16X16; | |
3176 /* if P type, zero motion vector is implied */ | |
3177 s->mv_dir = MV_DIR_FORWARD; | |
3178 s->mb_skiped = 1; | |
3179 #endif | |
3180 //FIXME b frame | 3165 //FIXME b frame |
3181 mb_type= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0; | 3166 mb_type= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0; |
3182 | 3167 |
3183 memset(h->non_zero_count[mb_xy], 0, 16); | 3168 memset(h->non_zero_count[mb_xy], 0, 16); |
3184 memset(h->non_zero_count_cache + 8, 0, 8*5); //FIXME ugly, remove pfui | 3169 memset(h->non_zero_count_cache + 8, 0, 8*5); //FIXME ugly, remove pfui |
3247 s->current_picture.mb_type[mb_xy]= mb_type; | 3232 s->current_picture.mb_type[mb_xy]= mb_type; |
3248 h->slice_table[ mb_xy ]= h->slice_num; | 3233 h->slice_table[ mb_xy ]= h->slice_num; |
3249 | 3234 |
3250 if(IS_INTRA_PCM(mb_type)){ | 3235 if(IS_INTRA_PCM(mb_type)){ |
3251 const uint8_t *ptr; | 3236 const uint8_t *ptr; |
3252 int x, y, i; | 3237 int x, y; |
3253 | 3238 |
3254 // we assume these blocks are very rare so we don't optimize it | 3239 // we assume these blocks are very rare so we don't optimize it |
3255 align_get_bits(&s->gb); | 3240 align_get_bits(&s->gb); |
3256 | 3241 |
3257 ptr= s->gb.buffer + get_bits_count(&s->gb); | 3242 ptr= s->gb.buffer + get_bits_count(&s->gb); |
3936 * finds the end of the current frame in the bitstream. | 3921 * finds the end of the current frame in the bitstream. |
3937 * @return the position of the first byte of the next frame, or -1 | 3922 * @return the position of the first byte of the next frame, or -1 |
3938 */ | 3923 */ |
3939 static int find_frame_end(MpegEncContext *s, uint8_t *buf, int buf_size){ | 3924 static int find_frame_end(MpegEncContext *s, uint8_t *buf, int buf_size){ |
3940 ParseContext *pc= &s->parse_context; | 3925 ParseContext *pc= &s->parse_context; |
3941 int last_addr, i; | 3926 int i; |
3942 uint32_t state; | 3927 uint32_t state; |
3943 //printf("first %02X%02X%02X%02X\n", buf[0], buf[1],buf[2],buf[3]); | 3928 //printf("first %02X%02X%02X%02X\n", buf[0], buf[1],buf[2],buf[3]); |
3944 // mb_addr= pc->mb_addr - 1; | 3929 // mb_addr= pc->mb_addr - 1; |
3945 state= pc->state; | 3930 state= pc->state; |
3946 //FIXME this will fail with slices | 3931 //FIXME this will fail with slices |
4101 uint8_t *buf, int buf_size) | 4086 uint8_t *buf, int buf_size) |
4102 { | 4087 { |
4103 H264Context *h = avctx->priv_data; | 4088 H264Context *h = avctx->priv_data; |
4104 MpegEncContext *s = &h->s; | 4089 MpegEncContext *s = &h->s; |
4105 AVFrame *pict = data; | 4090 AVFrame *pict = data; |
4106 float new_aspect; | |
4107 int buf_index; | 4091 int buf_index; |
4108 | 4092 |
4109 s->flags= avctx->flags; | 4093 s->flags= avctx->flags; |
4110 | 4094 |
4111 *data_size = 0; | 4095 *data_size = 0; |