Mercurial > libavcodec.hg
annotate cabac.h @ 5246:9975783f1cb2 libavcodec
whitespace cosmetics
author | mru |
---|---|
date | Sat, 07 Jul 2007 20:50:29 +0000 |
parents | 2b72f9bc4f06 |
children | 20bea6a9950c |
rev | line source |
---|---|
1287 | 1 /* |
2 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder | |
3 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> | |
4 * | |
3947
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3946
diff
changeset
|
5 * This file is part of FFmpeg. |
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3946
diff
changeset
|
6 * |
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3946
diff
changeset
|
7 * FFmpeg is free software; you can redistribute it and/or |
1287 | 8 * modify it under the terms of the GNU Lesser General Public |
9 * License as published by the Free Software Foundation; either | |
3947
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3946
diff
changeset
|
10 * version 2.1 of the License, or (at your option) any later version. |
1287 | 11 * |
3947
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3946
diff
changeset
|
12 * FFmpeg is distributed in the hope that it will be useful, |
1287 | 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 * Lesser General Public License for more details. | |
16 * | |
17 * You should have received a copy of the GNU Lesser General Public | |
3947
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3946
diff
changeset
|
18 * License along with FFmpeg; if not, write to the Free Software |
3036
0b546eab515d
Update licensing information: The FSF changed postal address.
diego
parents:
2967
diff
changeset
|
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
1287 | 20 */ |
2967 | 21 |
1287 | 22 /** |
23 * @file cabac.h | |
24 * Context Adaptive Binary Arithmetic Coder. | |
25 */ | |
26 | |
4975 | 27 #ifndef CABAC_H |
28 #define CABAC_H | |
29 | |
30 #include "bitstream.h" | |
1287 | 31 |
3284
a224d9752912
don't force asserts in release builds. 2% faster h264.
lorenm
parents:
3036
diff
changeset
|
32 //#undef NDEBUG |
1287 | 33 #include <assert.h> |
4064 | 34 #ifdef ARCH_X86 |
35 #include "x86_cpu.h" | |
36 #endif | |
1287 | 37 |
3948
3edbf131ee44
refill cabac variables in 16bit steps, 3% faster get_cabac()
michael
parents:
3947
diff
changeset
|
38 #define CABAC_BITS 16 |
2323 | 39 #define CABAC_MASK ((1<<CABAC_BITS)-1) |
3984 | 40 #define BRANCHLESS_CABAC_DECODER 1 |
4039 | 41 //#define ARCH_X86_DISABLED 1 |
2323 | 42 |
/**
 * CABAC coder state, shared between the encoder and decoder paths in this file.
 */
typedef struct CABACContext{
    int low;                         ///< low register of the arithmetic coder
    int range;                       ///< width of the current coding interval
    int outstanding_count;           ///< encoder: bits pending until the next carry is resolved (see put_cabac_bit())
#ifdef STRICT_LIMITS
    int symCount;                    ///< number of coded symbols (debug builds only)
#endif
    const uint8_t *bytestream_start; ///< decoder: start of the input buffer
    const uint8_t *bytestream;       ///< decoder: current read position, advanced CABAC_BITS/8 bytes per refill
    const uint8_t *bytestream_end;   ///< decoder: end of the input buffer
    PutBitContext pb;                ///< encoder: output bit writer
}CABACContext;
55 | |
4014
b2582438effe
dehack *ps_state indexing in the branchless decoder
michael
parents:
4012
diff
changeset
|
56 extern uint8_t ff_h264_mlps_state[4*64]; |
4039 | 57 extern uint8_t ff_h264_lps_range[4*2*64]; ///< rangeTabLPS |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
58 extern uint8_t ff_h264_mps_state[2*64]; ///< transIdxMPS |
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
59 extern uint8_t ff_h264_lps_state[2*64]; ///< transIdxLPS |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
60 extern const uint8_t ff_h264_norm_shift[512]; |
2323 | 61 |
1287 | 62 |
63 void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size); | |
2024
f65d87bfdd5a
some of the warning fixes by (Michael Roitzsch <mroi at users dot sourceforge dot net>)
michael
parents:
1787
diff
changeset
|
64 void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size); |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
65 void ff_init_cabac_states(CABACContext *c); |
1287 | 66 |
67 | |
68 static inline void put_cabac_bit(CABACContext *c, int b){ | |
2967 | 69 put_bits(&c->pb, 1, b); |
70 for(;c->outstanding_count; c->outstanding_count--){ | |
1287 | 71 put_bits(&c->pb, 1, 1-b); |
72 } | |
73 } | |
74 | |
75 static inline void renorm_cabac_encoder(CABACContext *c){ | |
76 while(c->range < 0x100){ | |
77 //FIXME optimize | |
78 if(c->low<0x100){ | |
79 put_cabac_bit(c, 0); | |
80 }else if(c->low<0x200){ | |
81 c->outstanding_count++; | |
82 c->low -= 0x100; | |
83 }else{ | |
84 put_cabac_bit(c, 1); | |
85 c->low -= 0x200; | |
86 } | |
2967 | 87 |
1287 | 88 c->range+= c->range; |
89 c->low += c->low; | |
90 } | |
91 } | |
92 | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
93 static void put_cabac(CABACContext *c, uint8_t * const state, int bit){ |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
94 int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state]; |
2967 | 95 |
1287 | 96 if(bit == ((*state)&1)){ |
97 c->range -= RangeLPS; | |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
98 *state= ff_h264_mps_state[*state]; |
1287 | 99 }else{ |
100 c->low += c->range - RangeLPS; | |
101 c->range = RangeLPS; | |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
102 *state= ff_h264_lps_state[*state]; |
1287 | 103 } |
2967 | 104 |
1287 | 105 renorm_cabac_encoder(c); |
106 | |
107 #ifdef STRICT_LIMITS | |
108 c->symCount++; | |
109 #endif | |
110 } | |
111 | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
112 static void put_cabac_static(CABACContext *c, int RangeLPS, int bit){ |
1287 | 113 assert(c->range > RangeLPS); |
114 | |
115 if(!bit){ | |
116 c->range -= RangeLPS; | |
117 }else{ | |
118 c->low += c->range - RangeLPS; | |
119 c->range = RangeLPS; | |
120 } | |
121 | |
122 renorm_cabac_encoder(c); | |
123 | |
124 #ifdef STRICT_LIMITS | |
125 c->symCount++; | |
126 #endif | |
127 } | |
128 | |
1290 | 129 /** |
130 * @param bit 0 -> write zero bit, !=0 write one bit | |
131 */ | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
132 static void put_cabac_bypass(CABACContext *c, int bit){ |
1287 | 133 c->low += c->low; |
134 | |
135 if(bit){ | |
136 c->low += c->range; | |
137 } | |
138 //FIXME optimize | |
139 if(c->low<0x200){ | |
140 put_cabac_bit(c, 0); | |
141 }else if(c->low<0x400){ | |
142 c->outstanding_count++; | |
143 c->low -= 0x200; | |
144 }else{ | |
145 put_cabac_bit(c, 1); | |
146 c->low -= 0x400; | |
147 } | |
2967 | 148 |
1287 | 149 #ifdef STRICT_LIMITS |
150 c->symCount++; | |
151 #endif | |
152 } | |
153 | |
1300
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
154 /** |
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
155 * |
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
156 * @return the number of bytes written |
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
157 */ |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
158 static int put_cabac_terminate(CABACContext *c, int bit){ |
1287 | 159 c->range -= 2; |
160 | |
161 if(!bit){ | |
162 renorm_cabac_encoder(c); | |
163 }else{ | |
164 c->low += c->range; | |
165 c->range= 2; | |
2967 | 166 |
1287 | 167 renorm_cabac_encoder(c); |
168 | |
169 assert(c->low <= 0x1FF); | |
170 put_cabac_bit(c, c->low>>9); | |
171 put_bits(&c->pb, 2, ((c->low>>7)&3)|1); | |
2967 | 172 |
1287 | 173 flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong |
174 } | |
2967 | 175 |
1287 | 176 #ifdef STRICT_LIMITS |
177 c->symCount++; | |
178 #endif | |
1300
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
179 |
1787 | 180 return (put_bits_count(&c->pb)+7)>>3; |
1287 | 181 } |
182 | |
1290 | 183 /** |
184 * put (truncated) unary binarization. | |
185 */ | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
186 static void put_cabac_u(CABACContext *c, uint8_t * state, int v, int max, int max_index, int truncated){ |
1290 | 187 int i; |
2967 | 188 |
1290 | 189 assert(v <= max); |
2967 | 190 |
1290 | 191 #if 1 |
192 for(i=0; i<v; i++){ | |
193 put_cabac(c, state, 1); | |
194 if(i < max_index) state++; | |
195 } | |
196 if(truncated==0 || v<max) | |
197 put_cabac(c, state, 0); | |
198 #else | |
199 if(v <= max_index){ | |
200 for(i=0; i<v; i++){ | |
201 put_cabac(c, state+i, 1); | |
202 } | |
203 if(truncated==0 || v<max) | |
204 put_cabac(c, state+i, 0); | |
205 }else{ | |
206 for(i=0; i<=max_index; i++){ | |
207 put_cabac(c, state+i, 1); | |
208 } | |
209 for(; i<v; i++){ | |
210 put_cabac(c, state+max_index, 1); | |
211 } | |
212 if(truncated==0 || v<max) | |
213 put_cabac(c, state+max_index, 0); | |
214 } | |
215 #endif | |
216 } | |
217 | |
218 /** | |
219 * put unary exp golomb k-th order binarization. | |
220 */ | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
221 static void put_cabac_ueg(CABACContext *c, uint8_t * state, int v, int max, int is_signed, int k, int max_index){ |
1290 | 222 int i; |
2967 | 223 |
1290 | 224 if(v==0) |
225 put_cabac(c, state, 0); | |
226 else{ | |
1298 | 227 const int sign= v < 0; |
2967 | 228 |
4001 | 229 if(is_signed) v= FFABS(v); |
2967 | 230 |
1290 | 231 if(v<max){ |
232 for(i=0; i<v; i++){ | |
233 put_cabac(c, state, 1); | |
234 if(i < max_index) state++; | |
235 } | |
236 | |
237 put_cabac(c, state, 0); | |
238 }else{ | |
239 int m= 1<<k; | |
240 | |
241 for(i=0; i<max; i++){ | |
242 put_cabac(c, state, 1); | |
243 if(i < max_index) state++; | |
244 } | |
245 | |
246 v -= max; | |
247 while(v >= m){ //FIXME optimize | |
248 put_cabac_bypass(c, 1); | |
249 v-= m; | |
250 m+= m; | |
251 } | |
252 put_cabac_bypass(c, 0); | |
253 while(m>>=1){ | |
254 put_cabac_bypass(c, v&m); | |
255 } | |
256 } | |
257 | |
258 if(is_signed) | |
259 put_cabac_bypass(c, sign); | |
260 } | |
261 } | |
262 | |
2323 | 263 static void refill(CABACContext *c){ |
264 #if CABAC_BITS == 16 | |
3946 | 265 c->low+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1); |
2323 | 266 #else |
267 c->low+= c->bytestream[0]<<1; | |
268 #endif | |
269 c->low -= CABAC_MASK; | |
270 c->bytestream+= CABAC_BITS/8; | |
271 } | |
272 | |
/**
 * Refill variant that aligns the freshly read bits with the position of the
 * lowest set bit of c->low before adding them.
 */
static void refill2(CABACContext *c){
    int i, x;

    // isolate the lowest set bit of c->low: x has 1s up to and including it
    x= c->low ^ (c->low-1);
    // derive from it how far the new bits must be shifted up
    i= 7 - ff_h264_norm_shift[x>>(CABAC_BITS-1)];

    x= -CABAC_MASK;

#if CABAC_BITS == 16
        x+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
#else
        x+= c->bytestream[0]<<1;
#endif

    // insert the new bits at the computed offset
    c->low += x<<i;
    c->bytestream+= CABAC_BITS/8;
}
290 | |
1287 | 291 static inline void renorm_cabac_decoder(CABACContext *c){ |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
292 while(c->range < 0x100){ |
1287 | 293 c->range+= c->range; |
294 c->low+= c->low; | |
2323 | 295 if(!(c->low & CABAC_MASK)) |
296 refill(c); | |
1287 | 297 } |
298 } | |
299 | |
/**
 * Single renormalization step: double range and low iff range < 0x100, then
 * refill low when its CABAC_BITS low bits are exhausted.
 *
 * The x86 asm variants below are disabled (ARCH_X86_DISABLED); the timing
 * comments (P3/athlon cycle counts) record why the #elif 1 variant was kept.
 */
static inline void renorm_cabac_decoder_once(CABACContext *c){
#ifdef ARCH_X86_DISABLED
    int temp;
#if 0
    //P3:683 athlon:475
    // shift-by-flag variant: cl = (range < 0x100), shift both by cl
    asm(
        "lea -0x100(%0), %2         \n\t"
        "shr $31, %2                \n\t"  //FIXME 31->63 for x86-64
        "shl %%cl, %0               \n\t"
        "shl %%cl, %1               \n\t"
        : "+r"(c->range), "+r"(c->low), "+c"(temp)
    );
#elif 0
    //P3:680 athlon:474
    // same idea via cmp/setb instead of lea/shr
    asm(
        "cmp $0x100, %0             \n\t"
        "setb %%cl                  \n\t"  //FIXME 31->63 for x86-64
        "shl %%cl, %0               \n\t"
        "shl %%cl, %1               \n\t"
        : "+r"(c->range), "+r"(c->low), "+c"(temp)
    );
#elif 1
    int temp2;
    //P3:665 athlon:517
    // branchless mask variant: edx = (range < 0x100) ? -1 : 0,
    // then conditionally add range/low to themselves via the mask
    asm(
        "lea -0x100(%0), %%eax      \n\t"
        "cdq                        \n\t"
        "mov %0, %%eax              \n\t"
        "and %%edx, %0              \n\t"
        "and %1, %%edx              \n\t"
        "add %%eax, %0              \n\t"
        "add %%edx, %1              \n\t"
        : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
    );
#elif 0
    int temp2;
    //P3:673 athlon:509
    // mask variant using cmp/sbb to build the -1/0 mask
    asm(
        "cmp $0x100, %0             \n\t"
        "sbb %%edx, %%edx           \n\t"
        "mov %0, %%eax              \n\t"
        "and %%edx, %0              \n\t"
        "and %1, %%edx              \n\t"
        "add %%eax, %0              \n\t"
        "add %%edx, %1              \n\t"
        : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
    );
#else
    int temp2;
    //P3:677 athlon:511
    // cmov variant: precompute doubled values, conditionally select them
    asm(
        "cmp $0x100, %0             \n\t"
        "lea (%0, %0), %%eax        \n\t"
        "lea (%1, %1), %%edx        \n\t"
        "cmovb %%eax, %0            \n\t"
        "cmovb %%edx, %1            \n\t"
        : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
    );
#endif
#else
    //P3:675 athlon:476
    // portable branchless form: shift = 1 iff range < 0x100
    int shift= (uint32_t)(c->range - 0x100)>>31;
    c->range<<= shift;
    c->low  <<= shift;
#endif
    if(!(c->low & CABAC_MASK))
        refill(c);
}
368 | |
4908
777f250df232
Fix multiple "¡Æinline/static¡Ç is not at beginning of declaration" warnings.
diego
parents:
4882
diff
changeset
|
369 static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const state){ |
3642 | 370 //FIXME gcc generates duplicate load/stores for c->low and c->range |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
371 #define LOW "0" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
372 #define RANGE "4" |
4064 | 373 #ifdef ARCH_X86_64 |
374 #define BYTESTART "16" | |
375 #define BYTE "24" | |
376 #define BYTEEND "32" | |
377 #else | |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
378 #define BYTESTART "12" |
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
379 #define BYTE "16" |
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
380 #define BYTEEND "20" |
4064 | 381 #endif |
4882
8131ccb4ea72
Mark code parts that cannot work on AMD64 due to broken relocations as such.
diego
parents:
4881
diff
changeset
|
382 #if defined(ARCH_X86) && defined(CONFIG_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) |
4044
5ccdefd60f61
Fix PIC compilation, some defines were under #ifdef !PIC but used
diego
parents:
4043
diff
changeset
|
383 int bit; |
5ccdefd60f61
Fix PIC compilation, some defines were under #ifdef !PIC but used
diego
parents:
4043
diff
changeset
|
384 |
3984 | 385 #ifndef BRANCHLESS_CABAC_DECODER |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
386 asm volatile( |
4035 | 387 "movzbl (%1), %0 \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
388 "movl "RANGE "(%2), %%ebx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
389 "movl "RANGE "(%2), %%edx \n\t" |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
390 "andl $0xC0, %%ebx \n\t" |
4035 | 391 "movzbl "MANGLE(ff_h264_lps_range)"(%0, %%ebx, 2), %%esi\n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
392 "movl "LOW "(%2), %%ebx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
393 //eax:state ebx:low, edx:range, esi:RangeLPS |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
394 "subl %%esi, %%edx \n\t" |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
395 "movl %%edx, %%ecx \n\t" |
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
396 "shll $17, %%ecx \n\t" |
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
397 "cmpl %%ecx, %%ebx \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
398 " ja 1f \n\t" |
3999
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
399 |
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
400 #if 1 |
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
401 //athlon:4067 P3:4110 |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
402 "lea -0x100(%%edx), %%ecx \n\t" |
3999
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
403 "shr $31, %%ecx \n\t" |
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
404 "shl %%cl, %%edx \n\t" |
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
405 "shl %%cl, %%ebx \n\t" |
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
406 #else |
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
407 //athlon:4057 P3:4130 |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
408 "cmp $0x100, %%edx \n\t" //FIXME avoidable |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
409 "setb %%cl \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
410 "shl %%cl, %%edx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
411 "shl %%cl, %%ebx \n\t" |
3999
6cbad3675632
slightly faster on P3 slightly slower on athlon and probably faster on P4
michael
parents:
3996
diff
changeset
|
412 #endif |
4035 | 413 "movzbl "MANGLE(ff_h264_mps_state)"(%0), %%ecx \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
414 "movb %%cl, (%1) \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
415 //eax:state ebx:low, edx:range, esi:RangeLPS |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
416 "test %%bx, %%bx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
417 " jnz 2f \n\t" |
4064 | 418 "mov "BYTE "(%2), %%"REG_S" \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
419 "subl $0xFFFF, %%ebx \n\t" |
4064 | 420 "movzwl (%%"REG_S"), %%ecx \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
421 "bswap %%ecx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
422 "shrl $15, %%ecx \n\t" |
4064 | 423 "add $2, %%"REG_S" \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
424 "addl %%ecx, %%ebx \n\t" |
4064 | 425 "mov %%"REG_S", "BYTE "(%2) \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
426 "jmp 2f \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
427 "1: \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
428 //eax:state ebx:low, edx:range, esi:RangeLPS |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
429 "subl %%ecx, %%ebx \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
430 "movl %%esi, %%edx \n\t" |
3979
ce16f66a48ad
reading 8bit mem into a 8bit register needs 2 uops on P4, 8bit->32bit with zero extension needs just 1
michael
parents:
3978
diff
changeset
|
431 "movzbl " MANGLE(ff_h264_norm_shift) "(%%esi), %%ecx \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
432 "shll %%cl, %%ebx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
433 "shll %%cl, %%edx \n\t" |
4035 | 434 "movzbl "MANGLE(ff_h264_lps_state)"(%0), %%ecx \n\t" |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
435 "movb %%cl, (%1) \n\t" |
4064 | 436 "add $1, %0 \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
437 "test %%bx, %%bx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
438 " jnz 2f \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
439 |
4064 | 440 "mov "BYTE "(%2), %%"REG_c" \n\t" |
441 "movzwl (%%"REG_c"), %%esi \n\t" | |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
442 "bswap %%esi \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
443 "shrl $15, %%esi \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
444 "subl $0xFFFF, %%esi \n\t" |
4064 | 445 "add $2, %%"REG_c" \n\t" |
446 "mov %%"REG_c", "BYTE "(%2) \n\t" | |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
447 |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
448 "leal -1(%%ebx), %%ecx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
449 "xorl %%ebx, %%ecx \n\t" |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
450 "shrl $15, %%ecx \n\t" |
3979
ce16f66a48ad
reading 8bit mem into a 8bit register needs 2 uops on P4, 8bit->32bit with zero extension needs just 1
michael
parents:
3978
diff
changeset
|
451 "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t" |
3994
2734b228fc87
use ecx instead of cl (no speed change on P3 but might avoid partial register stalls on some cpus)
michael
parents:
3993
diff
changeset
|
452 "neg %%ecx \n\t" |
2734b228fc87
use ecx instead of cl (no speed change on P3 but might avoid partial register stalls on some cpus)
michael
parents:
3993
diff
changeset
|
453 "add $7, %%ecx \n\t" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
454 |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
455 "shll %%cl , %%esi \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
456 "addl %%esi, %%ebx \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
457 "2: \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
458 "movl %%edx, "RANGE "(%2) \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
459 "movl %%ebx, "LOW "(%2) \n\t" |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
460 :"=&a"(bit) //FIXME this is fragile gcc either runs out of registers or misscompiles it (for example if "+a"(bit) or "+m"(*state) is used |
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
461 :"r"(state), "r"(c) |
4064 | 462 : "%"REG_c, "%ebx", "%edx", "%"REG_S, "memory" |
3969
fc6e0942353b
first try of a handwritten get_cabac() for x86, this is 10-20% faster on P3 depening on if you try to subtract the START/STOP_TIMER overhead
michael
parents:
3967
diff
changeset
|
463 ); |
3982
af16271634c2
moving another bit&1 out, this is as fast as with it in there, but it makes more sense with it outside of the loop
michael
parents:
3981
diff
changeset
|
464 bit&=1; |
4002
ec426fa57dfe
adds some useful comments after some of the #else, #elseif,
gpoirier
parents:
4001
diff
changeset
|
465 #else /* BRANCHLESS_CABAC_DECODER */ |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
466 |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
467 |
4418
4cceb7c877af
rename CMOV_IS_FAST to HAVE_FAST_CMOV and simplify configure
mru
parents:
4345
diff
changeset
|
468 #if defined HAVE_FAST_CMOV |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
469 #define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
470 "mov "tmp" , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
471 "shl $17 , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
472 "cmp "low" , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
473 "cmova %%ecx , "range" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
474 "sbb %%ecx , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
475 "and %%ecx , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
476 "sub "tmp" , "low" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
477 "xor %%ecx , "ret" \n\t" |
4418
4cceb7c877af
rename CMOV_IS_FAST to HAVE_FAST_CMOV and simplify configure
mru
parents:
4345
diff
changeset
|
478 #else /* HAVE_FAST_CMOV */ |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
479 #define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
480 "mov "tmp" , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
481 "shl $17 , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
482 "sub "low" , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
483 "sar $31 , "tmp" \n\t" /*lps_mask*/\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
484 "sub %%ecx , "range" \n\t" /*RangeLPS - range*/\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
485 "and "tmp" , "range" \n\t" /*(RangeLPS - range)&lps_mask*/\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
486 "add %%ecx , "range" \n\t" /*new range*/\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
487 "shl $17 , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
488 "and "tmp" , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
489 "sub %%ecx , "low" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
490 "xor "tmp" , "ret" \n\t" |
4418
4cceb7c877af
rename CMOV_IS_FAST to HAVE_FAST_CMOV and simplify configure
mru
parents:
4345
diff
changeset
|
491 #endif /* HAVE_FAST_CMOV */ |
3975 | 492 |
493 | |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
494 #define BRANCHLESS_GET_CABAC(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
495 "movzbl "statep" , "ret" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
496 "mov "range" , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
497 "and $0xC0 , "range" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
498 "movzbl "MANGLE(ff_h264_lps_range)"("ret", "range", 2), "range" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
499 "sub "range" , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
500 BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
501 "movzbl " MANGLE(ff_h264_norm_shift) "("range"), %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
502 "shl %%cl , "range" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
503 "movzbl "MANGLE(ff_h264_mlps_state)"+128("ret"), "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
504 "mov "tmpbyte" , "statep" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
505 "shl %%cl , "low" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
506 "test "lowword" , "lowword" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
507 " jnz 1f \n\t"\ |
4064 | 508 "mov "BYTE"("cabac"), %%"REG_c" \n\t"\ |
509 "movzwl (%%"REG_c") , "tmp" \n\t"\ | |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
510 "bswap "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
511 "shr $15 , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
512 "sub $0xFFFF , "tmp" \n\t"\ |
4064 | 513 "add $2 , %%"REG_c" \n\t"\ |
514 "mov %%"REG_c" , "BYTE "("cabac") \n\t"\ | |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
515 "lea -1("low") , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
516 "xor "low" , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
517 "shr $15 , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
518 "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
519 "neg %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
520 "add $7 , %%ecx \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
521 "shl %%cl , "tmp" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
522 "add "tmp" , "low" \n\t"\ |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
523 "1: \n\t" |
3975 | 524 |
4046
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
525 asm volatile( |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
526 "movl "RANGE "(%2), %%esi \n\t" |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
527 "movl "LOW "(%2), %%ebx \n\t" |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
528 BRANCHLESS_GET_CABAC("%0", "%2", "(%1)", "%%ebx", "%%bx", "%%esi", "%%edx", "%%dl") |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
529 "movl %%esi, "RANGE "(%2) \n\t" |
8bbc695c9603
factorize get_cabac asm (0.5% slower but its much cleaner)
michael
parents:
4044
diff
changeset
|
530 "movl %%ebx, "LOW "(%2) \n\t" |
3975 | 531 |
532 :"=&a"(bit) | |
533 :"r"(state), "r"(c) | |
4064 | 534 : "%"REG_c, "%ebx", "%edx", "%esi", "memory" |
3975 | 535 ); |
3981
9854f686ba79
move the &1 out of the asm so gcc can optimize it away in inlined cases (yes this is slightly faster)
michael
parents:
3980
diff
changeset
|
536 bit&=1; |
4002
ec426fa57dfe
adds some useful comments after some of the #else, #elseif,
gpoirier
parents:
4001
diff
changeset
|
537 #endif /* BRANCHLESS_CABAC_DECODER */ |
4882
8131ccb4ea72
Mark code parts that cannot work on AMD64 due to broken relocations as such.
diego
parents:
4881
diff
changeset
|
538 #else /* defined(ARCH_X86) && defined(CONFIG_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) */ |
3642 | 539 int s = *state; |
4039 | 540 int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + s]; |
5083
ce36118abbbb
rename attribute_unused to av_unused and moves its declaration to common.h
benoit
parents:
4975
diff
changeset
|
541 int bit, lps_mask av_unused; |
2967 | 542 |
1287 | 543 c->range -= RangeLPS; |
3984 | 544 #ifndef BRANCHLESS_CABAC_DECODER |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
545 if(c->low < (c->range<<(CABAC_BITS+1))){ |
3642 | 546 bit= s&1; |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
547 *state= ff_h264_mps_state[s]; |
2323 | 548 renorm_cabac_decoder_once(c); |
1287 | 549 }else{ |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
550 bit= ff_h264_norm_shift[RangeLPS]; |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
551 c->low -= (c->range<<(CABAC_BITS+1)); |
3993
8b7c59b7af01
make state transition tables global as they are constant and the code is slightly faster that way
michael
parents:
3992
diff
changeset
|
552 *state= ff_h264_lps_state[s]; |
3956
0910f2844f9a
branchless renormalization (1% faster get_cabac) old branchless renormalization wasnt faster because gcc was scared of the shift variable (missusing bit variable now)
michael
parents:
3955
diff
changeset
|
553 c->range = RangeLPS<<bit; |
0910f2844f9a
branchless renormalization (1% faster get_cabac) old branchless renormalization wasnt faster because gcc was scared of the shift variable (missusing bit variable now)
michael
parents:
3955
diff
changeset
|
554 c->low <<= bit; |
0910f2844f9a
branchless renormalization (1% faster get_cabac) old branchless renormalization wasnt faster because gcc was scared of the shift variable (missusing bit variable now)
michael
parents:
3955
diff
changeset
|
555 bit= (s&1)^1; |
0910f2844f9a
branchless renormalization (1% faster get_cabac) old branchless renormalization wasnt faster because gcc was scared of the shift variable (missusing bit variable now)
michael
parents:
3955
diff
changeset
|
556 |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
557 if(!(c->low & CABAC_MASK)){ |
2323 | 558 refill2(c); |
3956
0910f2844f9a
branchless renormalization (1% faster get_cabac) old branchless renormalization wasnt faster because gcc was scared of the shift variable (missusing bit variable now)
michael
parents:
3955
diff
changeset
|
559 } |
1287 | 560 } |
4002
ec426fa57dfe
adds some useful comments after some of the #else, #elseif,
gpoirier
parents:
4001
diff
changeset
|
561 #else /* BRANCHLESS_CABAC_DECODER */ |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
562 lps_mask= ((c->range<<(CABAC_BITS+1)) - c->low)>>31; |
2967 | 563 |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
564 c->low -= (c->range<<(CABAC_BITS+1)) & lps_mask; |
2323 | 565 c->range += (RangeLPS - c->range) & lps_mask; |
2967 | 566 |
3974 | 567 s^=lps_mask; |
4014
b2582438effe
dehack *ps_state indexing in the branchless decoder
michael
parents:
4012
diff
changeset
|
568 *state= (ff_h264_mlps_state+128)[s]; |
3974 | 569 bit= s&1; |
2967 | 570 |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
571 lps_mask= ff_h264_norm_shift[c->range]; |
2323 | 572 c->range<<= lps_mask; |
573 c->low <<= lps_mask; | |
574 if(!(c->low & CABAC_MASK)) | |
575 refill2(c); | |
4002
ec426fa57dfe
adds some useful comments after some of the #else, #elseif,
gpoirier
parents:
4001
diff
changeset
|
576 #endif /* BRANCHLESS_CABAC_DECODER */ |
4882
8131ccb4ea72
Mark code parts that cannot work on AMD64 due to broken relocations as such.
diego
parents:
4881
diff
changeset
|
577 #endif /* defined(ARCH_X86) && defined(CONFIG_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) */ |
2967 | 578 return bit; |
1287 | 579 } |
580 | |
4579 | 581 static int av_noinline get_cabac_noinline(CABACContext *c, uint8_t * const state){ |
4008
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
582 return get_cabac_inline(c,state); |
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
583 } |
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
584 |
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
585 static int get_cabac(CABACContext *c, uint8_t * const state){ |
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
586 return get_cabac_inline(c,state); |
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
587 } |
b636f3d59283
prevent "mb level" get_cabac() calls from being inlined (3% faster decode_mb_cabac() on P3)
michael
parents:
4002
diff
changeset
|
588 |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
589 static int get_cabac_bypass(CABACContext *c){ |
4040 | 590 #if 0 //not faster |
591 int bit; | |
592 asm volatile( | |
593 "movl "RANGE "(%1), %%ebx \n\t" | |
594 "movl "LOW "(%1), %%eax \n\t" | |
595 "shl $17, %%ebx \n\t" | |
596 "add %%eax, %%eax \n\t" | |
597 "sub %%ebx, %%eax \n\t" | |
598 "cdq \n\t" | |
599 "and %%edx, %%ebx \n\t" | |
600 "add %%ebx, %%eax \n\t" | |
601 "test %%ax, %%ax \n\t" | |
602 " jnz 1f \n\t" | |
4064 | 603 "movl "BYTE "(%1), %%"REG_b" \n\t" |
4040 | 604 "subl $0xFFFF, %%eax \n\t" |
4064 | 605 "movzwl (%%"REG_b"), %%ecx \n\t" |
4040 | 606 "bswap %%ecx \n\t" |
607 "shrl $15, %%ecx \n\t" | |
4064 | 608 "addl $2, %%"REG_b" \n\t" |
4040 | 609 "addl %%ecx, %%eax \n\t" |
4064 | 610 "movl %%"REG_b", "BYTE "(%1) \n\t" |
4040 | 611 "1: \n\t" |
612 "movl %%eax, "LOW "(%1) \n\t" | |
613 | |
614 :"=&d"(bit) | |
615 :"r"(c) | |
4064 | 616 : "%eax", "%"REG_b, "%ecx", "memory" |
4040 | 617 ); |
618 return bit+1; | |
619 #else | |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
620 int range; |
1287 | 621 c->low += c->low; |
622 | |
2323 | 623 if(!(c->low & CABAC_MASK)) |
624 refill(c); | |
2967 | 625 |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
626 range= c->range<<(CABAC_BITS+1); |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
627 if(c->low < range){ |
1287 | 628 return 0; |
629 }else{ | |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
630 c->low -= range; |
1287 | 631 return 1; |
632 } | |
4040 | 633 #endif |
1287 | 634 } |
4040 | 635 |
636 | |
4283
d6f83e2f8804
rename always_inline to av_always_inline and move to common.h
mru
parents:
4241
diff
changeset
|
637 static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){ |
4241 | 638 #if defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__)) |
4040 | 639 asm volatile( |
640 "movl "RANGE "(%1), %%ebx \n\t" | |
641 "movl "LOW "(%1), %%eax \n\t" | |
642 "shl $17, %%ebx \n\t" | |
643 "add %%eax, %%eax \n\t" | |
644 "sub %%ebx, %%eax \n\t" | |
645 "cdq \n\t" | |
646 "and %%edx, %%ebx \n\t" | |
647 "add %%ebx, %%eax \n\t" | |
648 "xor %%edx, %%ecx \n\t" | |
649 "sub %%edx, %%ecx \n\t" | |
650 "test %%ax, %%ax \n\t" | |
651 " jnz 1f \n\t" | |
4064 | 652 "mov "BYTE "(%1), %%"REG_b" \n\t" |
4040 | 653 "subl $0xFFFF, %%eax \n\t" |
4064 | 654 "movzwl (%%"REG_b"), %%edx \n\t" |
4040 | 655 "bswap %%edx \n\t" |
656 "shrl $15, %%edx \n\t" | |
4064 | 657 "add $2, %%"REG_b" \n\t" |
4040 | 658 "addl %%edx, %%eax \n\t" |
4064 | 659 "mov %%"REG_b", "BYTE "(%1) \n\t" |
4040 | 660 "1: \n\t" |
661 "movl %%eax, "LOW "(%1) \n\t" | |
662 | |
663 :"+c"(val) | |
664 :"r"(c) | |
4064 | 665 : "%eax", "%"REG_b, "%edx", "memory" |
4040 | 666 ); |
667 return val; | |
668 #else | |
669 int range, mask; | |
670 c->low += c->low; | |
671 | |
672 if(!(c->low & CABAC_MASK)) | |
673 refill(c); | |
674 | |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
675 range= c->range<<(CABAC_BITS+1); |
4040 | 676 c->low -= range; |
677 mask= c->low >> 31; | |
678 range &= mask; | |
679 c->low += range; | |
680 return (val^mask)-mask; | |
681 #endif | |
682 } | |
683 | |
4037
53be304c7f54
x86 asm version of the decode significance loop (not 8x8) of decode_residual() 5% faster decode_residual() on P3
michael
parents:
4035
diff
changeset
|
684 //FIXME the x86 code from this file should be moved into i386/h264 or cabac something.c/h (note ill kill you if you move my code away from under my fingers before iam finished with it!) |
5127 | 685 //FIXME use some macros to avoid duplicating get_cabac (cannot be done yet as that would make optimization work hard) |
4882
8131ccb4ea72
Mark code parts that cannot work on AMD64 due to broken relocations as such.
diego
parents:
4881
diff
changeset
|
686 #if defined(ARCH_X86) && defined(CONFIG_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) |
4037
53be304c7f54
x86 asm version of the decode significance loop (not 8x8) of decode_residual() 5% faster decode_residual() on P3
michael
parents:
4035
diff
changeset
|
/* Hand-written x86 inline-asm fast path for the significance-map decoding
 * loop of decode_residual() (the non-8x8 case; see the 8x8 variant below).
 *
 * c                           CABAC decoder state; RANGE and LOW are byte
 *                             offsets of c->range / c->low used from asm
 * max_coeff                   number of significant_coeff_flag contexts to scan
 * significant_coeff_ctx_base  base of the significant_coeff_flag context
 *                             states; the matching last_coeff_flag state is
 *                             read 61 bytes further on ("61(%1)") — assumes
 *                             that context-table layout, TODO confirm
 * index                       output array; positions of decoded significant
 *                             coefficients are stored here
 *
 * @return the number of significant coefficients decoded
 */
static int decode_significance_x86(CABACContext *c, int max_coeff, uint8_t *significant_coeff_ctx_base, int *index){
    void *end= significant_coeff_ctx_base + max_coeff - 1;
    /* negated pointers: adding the current context pointer to minusstart
       yields the scan position; adding the output pointer to minusindex
       (plus 4) yields the byte size of the output written so far */
    int minusstart= -(int)significant_coeff_ctx_base;
    int minusindex= 4-(int)index;
    int coeff_count;
    asm volatile(
        /* cache c->range / c->low in registers for the whole loop */
        "movl "RANGE "(%3), %%esi                \n\t"
        "movl "LOW "(%3), %%ebx                  \n\t"

        "2:                                      \n\t"

        /* decode significant_coeff_flag for the current scan position */
        BRANCHLESS_GET_CABAC("%%edx", "%3", "(%1)", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")

        "test $1, %%edx                          \n\t"
        " jz 3f                                  \n\t" /* not significant -> next position */

        /* significant: decode last_coeff_flag (context 61 bytes after the
           significance context) */
        BRANCHLESS_GET_CABAC("%%edx", "%3", "61(%1)", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")

        /* store the coefficient position (ctx pointer + minusstart) at the
           current output slot */
        "mov %2, %%"REG_a"                       \n\t"
        "movl %4, %%ecx                          \n\t"
        "add %1, %%"REG_c"                       \n\t"
        "movl %%ecx, (%%"REG_a")                 \n\t"

        "test $1, %%edx                          \n\t"
        " jnz 4f                                 \n\t" /* last_coeff_flag set -> done */

        /* not the last coefficient: advance the output pointer */
        "add $4, %%"REG_a"                       \n\t"
        "mov %%"REG_a", %2                       \n\t"

        "3:                                      \n\t"
        "add $1, %1                              \n\t"
        "cmp %5, %1                              \n\t"
        " jb 2b                                  \n\t"
        /* reached the final context: store the final position unconditionally */
        "mov %2, %%"REG_a"                       \n\t"
        "movl %4, %%ecx                          \n\t"
        "add %1, %%"REG_c"                       \n\t"
        "movl %%ecx, (%%"REG_a")                 \n\t"
        "4:                                      \n\t"
        /* turn the output pointer in eax into the count of stored entries:
           (ptr + 4 - index)/4 */
        "add %6, %%eax                           \n\t"
        "shr $2, %%eax                           \n\t"

        /* write the updated range/low back into the CABACContext */
        "movl %%esi, "RANGE "(%3)                \n\t"
        "movl %%ebx, "LOW "(%3)                  \n\t"
        :"=&a"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index)\
        :"r"(c), "m"(minusstart), "m"(end), "m"(minusindex)\
        : "%"REG_c, "%ebx", "%edx", "%esi", "memory"\
    );
    return coeff_count;
}
4051 | 736 |
/* Hand-written x86 inline-asm fast path for the significance-map decoding
 * loop of decode_residual() for 8x8 blocks. Unlike the 4x4 variant above,
 * context selection goes through per-position lookup tables: sig_off for
 * significant_coeff_flag and last_coeff_flag_offset_8x8 for last_coeff_flag.
 *
 * c                           CABAC decoder state (RANGE/LOW as above)
 * significant_coeff_ctx_base  base of the context states; the last_coeff
 *                             states are read 15 bytes past it ("15(...)")
 * index                       output array for the decoded positions
 * sig_off                     per-scan-position significance context offsets
 *
 * @return the number of significant coefficients decoded
 */
static int decode_significance_8x8_x86(CABACContext *c, uint8_t *significant_coeff_ctx_base, int *index, uint8_t *sig_off){
    int minusindex= 4-(int)index;
    int coeff_count;
    /* current scan position (0..63); kept in memory ("+m") across iterations */
    long last=0;
    asm volatile(
        /* cache c->range / c->low in registers for the whole loop */
        "movl "RANGE "(%3), %%esi                \n\t"
        "movl "LOW "(%3), %%ebx                  \n\t"

        "mov %1, %%"REG_D"                       \n\t"
        "2:                                      \n\t"

        /* look up the significance context offset for this scan position */
        "mov %6, %%"REG_a"                       \n\t"
        "movzbl (%%"REG_a", %%"REG_D"), %%edi    \n\t"
        "add %5, %%"REG_D"                       \n\t"

        /* decode significant_coeff_flag */
        BRANCHLESS_GET_CABAC("%%edx", "%3", "(%%"REG_D")", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")

        "mov %1, %%edi                           \n\t"
        "test $1, %%edx                          \n\t"
        " jz 3f                                  \n\t" /* not significant -> next position */

        /* significant: select and decode last_coeff_flag via its offset table */
        "movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%%edi), %%edi\n\t"
        "add %5, %%"REG_D"                       \n\t"

        BRANCHLESS_GET_CABAC("%%edx", "%3", "15(%%"REG_D")", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")

        /* store the scan position at the current output slot */
        "mov %2, %%"REG_a"                       \n\t"
        "mov %1, %%edi                           \n\t"
        "movl %%edi, (%%"REG_a")                 \n\t"

        "test $1, %%edx                          \n\t"
        " jnz 4f                                 \n\t" /* last_coeff_flag set -> done */

        /* not the last coefficient: advance the output pointer */
        "add $4, %%"REG_a"                       \n\t"
        "mov %%"REG_a", %2                       \n\t"

        "3:                                      \n\t"
        "addl $1, %%edi                          \n\t"
        "mov %%edi, %1                           \n\t"
        "cmpl $63, %%edi                         \n\t"
        " jb 2b                                  \n\t"
        /* position 63 reached: store the final position unconditionally */
        "mov %2, %%"REG_a"                       \n\t"
        "movl %%edi, (%%"REG_a")                 \n\t"
        "4:                                      \n\t"
        /* turn the output pointer in eax into the count of stored entries */
        "addl %4, %%eax                          \n\t"
        "shr $2, %%eax                           \n\t"

        /* write the updated range/low back into the CABACContext */
        "movl %%esi, "RANGE "(%3)                \n\t"
        "movl %%ebx, "LOW "(%3)                  \n\t"
        :"=&a"(coeff_count),"+m"(last), "+m"(index)\
        :"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off)\
        : "%"REG_c, "%ebx", "%edx", "%esi", "%"REG_D, "memory"\
    );
    return coeff_count;
}
4882
8131ccb4ea72
Mark code parts that cannot work on AMD64 due to broken relocations as such.
diego
parents:
4881
diff
changeset
|
792 #endif /* defined(ARCH_X86) && defined(CONFIG_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) */ |
1287 | 793 |
1300
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
794 /** |
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
795 * |
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
796 * @return the number of bytes read or 0 if no end |
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
797 */ |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
798 static int get_cabac_terminate(CABACContext *c){ |
4024
d550343b5dac
shift CABACContext.range right, this reduces the number of shifts needed in get_cabac() and is slightly faster on P3 (and should be much faster on P4 as the P4 except the more recent variants lacks an integer shifter and so shifts have ~10 times longer latency then simple operations like adds)
michael
parents:
4014
diff
changeset
|
799 c->range -= 2; |
4345
88967250d718
replace a few hardcoded numbers with their correct named ones
michael
parents:
4283
diff
changeset
|
800 if(c->low < c->range<<(CABAC_BITS+1)){ |
2323 | 801 renorm_cabac_decoder_once(c); |
1287 | 802 return 0; |
803 }else{ | |
1300
e18667d1e94d
FFV1 codec (our very simple lossless intra only codec, compresses much better then huffyuv)
michaelni
parents:
1298
diff
changeset
|
804 return c->bytestream - c->bytestream_start; |
2967 | 805 } |
1287 | 806 } |
807 | |
1290 | 808 /** |
809 * get (truncated) unnary binarization. | |
810 */ | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
811 static int get_cabac_u(CABACContext *c, uint8_t * state, int max, int max_index, int truncated){ |
1290 | 812 int i; |
2967 | 813 |
814 for(i=0; i<max; i++){ | |
1290 | 815 if(get_cabac(c, state)==0) |
816 return i; | |
2967 | 817 |
1290 | 818 if(i< max_index) state++; |
819 } | |
820 | |
821 return truncated ? max : -1; | |
822 } | |
823 | |
824 /** | |
825 * get unary exp golomb k-th order binarization. | |
826 */ | |
3928
987fffdf6ae7
don't try to inline cabac functions. gcc ignored the hint anyway, and forcing it would make h264 slower.
lorenm
parents:
3642
diff
changeset
|
827 static int get_cabac_ueg(CABACContext *c, uint8_t * state, int max, int is_signed, int k, int max_index){ |
1290 | 828 int i, v; |
829 int m= 1<<k; | |
2967 | 830 |
831 if(get_cabac(c, state)==0) | |
1290 | 832 return 0; |
2967 | 833 |
1290 | 834 if(0 < max_index) state++; |
2967 | 835 |
836 for(i=1; i<max; i++){ | |
1290 | 837 if(get_cabac(c, state)==0){ |
838 if(is_signed && get_cabac_bypass(c)){ | |
839 return -i; | |
840 }else | |
841 return i; | |
842 } | |
843 | |
844 if(i < max_index) state++; | |
845 } | |
2967 | 846 |
1290 | 847 while(get_cabac_bypass(c)){ |
848 i+= m; | |
849 m+= m; | |
850 } | |
2967 | 851 |
1290 | 852 v=0; |
853 while(m>>=1){ | |
854 v+= v + get_cabac_bypass(c); | |
855 } | |
856 i += v; | |
857 | |
858 if(is_signed && get_cabac_bypass(c)){ | |
859 return -i; | |
860 }else | |
861 return i; | |
862 } | |
4975 | 863 |
864 #endif /* CABAC_H */ |