Mercurial > libavcodec.hg
comparison bitstream.h @ 2979:bfabfdf9ce55 libavcodec
COSMETICS: tabs --> spaces, some prettyprinting
author | diego |
---|---|
date | Thu, 22 Dec 2005 01:10:11 +0000 |
parents | ef2149182f1c |
children | 67d3bb5a4ce1 |
comparison
equal
deleted
inserted
replaced
2978:403183bbb505 | 2979:bfabfdf9ce55 |
---|---|
144 # define unaligned32(a) (*(const uint32_t*)(a)) | 144 # define unaligned32(a) (*(const uint32_t*)(a)) |
145 #else | 145 #else |
146 # ifdef __GNUC__ | 146 # ifdef __GNUC__ |
/**
 * Reads a 32-bit value from a possibly misaligned address.
 *
 * Replaces the packed-struct pointer-cast idiom with memcpy-style type
 * punning: copying through __builtin_memcpy is well-defined C (no reliance
 * on GNU packed-attribute pointer semantics, no strict-aliasing hazard),
 * and GCC compiles the fixed-size copy to a single unaligned load.
 * The builtin needs no header; this branch is only compiled under
 * #ifdef __GNUC__, where the builtin is always available.
 *
 * @param v pointer to 4 readable bytes, any alignment
 * @return the 32 bits at v, in native byte order
 */
static inline uint32_t unaligned32(const void *v) {
    uint32_t i;
    __builtin_memcpy(&i, v, sizeof(i));
    return i;
}
154 # elif defined(__DECC) | 154 # elif defined(__DECC) |
181 /* XXX: optimize */ | 181 /* XXX: optimize */ |
182 if (n < bit_left) { | 182 if (n < bit_left) { |
183 bit_buf = (bit_buf<<n) | value; | 183 bit_buf = (bit_buf<<n) | value; |
184 bit_left-=n; | 184 bit_left-=n; |
185 } else { | 185 } else { |
186 bit_buf<<=bit_left; | 186 bit_buf<<=bit_left; |
187 bit_buf |= value >> (n - bit_left); | 187 bit_buf |= value >> (n - bit_left); |
188 #ifdef UNALIGNED_STORES_ARE_BAD | 188 #ifdef UNALIGNED_STORES_ARE_BAD |
189 if (3 & (intptr_t) s->buf_ptr) { | 189 if (3 & (intptr_t) s->buf_ptr) { |
190 s->buf_ptr[0] = bit_buf >> 24; | 190 s->buf_ptr[0] = bit_buf >> 24; |
191 s->buf_ptr[1] = bit_buf >> 16; | 191 s->buf_ptr[1] = bit_buf >> 16; |
194 } else | 194 } else |
195 #endif | 195 #endif |
196 *(uint32_t *)s->buf_ptr = be2me_32(bit_buf); | 196 *(uint32_t *)s->buf_ptr = be2me_32(bit_buf); |
197 //printf("bitbuf = %08x\n", bit_buf); | 197 //printf("bitbuf = %08x\n", bit_buf); |
198 s->buf_ptr+=4; | 198 s->buf_ptr+=4; |
199 bit_left+=32 - n; | 199 bit_left+=32 - n; |
200 bit_buf = value; | 200 bit_buf = value; |
201 } | 201 } |
202 | 202 |
203 s->bit_buf = bit_buf; | 203 s->bit_buf = bit_buf; |
204 s->bit_left = bit_left; | 204 s->bit_left = bit_left; |
210 static inline void put_bits(PutBitContext *s, int n, unsigned int value) | 210 static inline void put_bits(PutBitContext *s, int n, unsigned int value) |
211 { | 211 { |
212 # ifdef ALIGNED_BITSTREAM_WRITER | 212 # ifdef ALIGNED_BITSTREAM_WRITER |
213 # if defined(ARCH_X86) || defined(ARCH_X86_64) | 213 # if defined(ARCH_X86) || defined(ARCH_X86_64) |
214 asm volatile( | 214 asm volatile( |
215 "movl %0, %%ecx \n\t" | 215 "movl %0, %%ecx \n\t" |
216 "xorl %%eax, %%eax \n\t" | 216 "xorl %%eax, %%eax \n\t" |
217 "shrdl %%cl, %1, %%eax \n\t" | 217 "shrdl %%cl, %1, %%eax \n\t" |
218 "shrl %%cl, %1 \n\t" | 218 "shrl %%cl, %1 \n\t" |
219 "movl %0, %%ecx \n\t" | 219 "movl %0, %%ecx \n\t" |
220 "shrl $3, %%ecx \n\t" | 220 "shrl $3, %%ecx \n\t" |
221 "andl $0xFFFFFFFC, %%ecx \n\t" | 221 "andl $0xFFFFFFFC, %%ecx \n\t" |
222 "bswapl %1 \n\t" | 222 "bswapl %1 \n\t" |
223 "orl %1, (%2, %%ecx) \n\t" | 223 "orl %1, (%2, %%ecx) \n\t" |
224 "bswapl %%eax \n\t" | 224 "bswapl %%eax \n\t" |
225 "addl %3, %0 \n\t" | 225 "addl %3, %0 \n\t" |
226 "movl %%eax, 4(%2, %%ecx) \n\t" | 226 "movl %%eax, 4(%2, %%ecx) \n\t" |
227 : "=&r" (s->index), "=&r" (value) | 227 : "=&r" (s->index), "=&r" (value) |
228 : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n)) | 228 : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n)) |
229 : "%eax", "%ecx" | 229 : "%eax", "%ecx" |
230 ); | 230 ); |
231 # else | 231 # else |
232 int index= s->index; | 232 int index= s->index; |
233 uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5); | 233 uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5); |
234 | 234 |
241 s->index= index; | 241 s->index= index; |
242 # endif | 242 # endif |
243 # else //ALIGNED_BITSTREAM_WRITER | 243 # else //ALIGNED_BITSTREAM_WRITER |
244 # if defined(ARCH_X86) || defined(ARCH_X86_64) | 244 # if defined(ARCH_X86) || defined(ARCH_X86_64) |
245 asm volatile( | 245 asm volatile( |
246 "movl $7, %%ecx \n\t" | 246 "movl $7, %%ecx \n\t" |
247 "andl %0, %%ecx \n\t" | 247 "andl %0, %%ecx \n\t" |
248 "addl %3, %%ecx \n\t" | 248 "addl %3, %%ecx \n\t" |
249 "negl %%ecx \n\t" | 249 "negl %%ecx \n\t" |
250 "shll %%cl, %1 \n\t" | 250 "shll %%cl, %1 \n\t" |
251 "bswapl %1 \n\t" | 251 "bswapl %1 \n\t" |
252 "movl %0, %%ecx \n\t" | 252 "movl %0, %%ecx \n\t" |
253 "shrl $3, %%ecx \n\t" | 253 "shrl $3, %%ecx \n\t" |
254 "orl %1, (%%ecx, %2) \n\t" | 254 "orl %1, (%%ecx, %2) \n\t" |
255 "addl %3, %0 \n\t" | 255 "addl %3, %0 \n\t" |
256 "movl $0, 4(%%ecx, %2) \n\t" | 256 "movl $0, 4(%%ecx, %2) \n\t" |
257 : "=&r" (s->index), "=&r" (value) | 257 : "=&r" (s->index), "=&r" (value) |
258 : "r" (s->buf), "r" (n), "0" (s->index), "1" (value) | 258 : "r" (s->buf), "r" (n), "0" (s->index), "1" (value) |
259 : "%ecx" | 259 : "%ecx" |
260 ); | 260 ); |
261 # else | 261 # else |
262 int index= s->index; | 262 int index= s->index; |
263 uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3)); | 263 uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3)); |
264 | 264 |
274 | 274 |
275 | 275 |
276 static inline uint8_t* pbBufPtr(PutBitContext *s) | 276 static inline uint8_t* pbBufPtr(PutBitContext *s) |
277 { | 277 { |
278 #ifdef ALT_BITSTREAM_WRITER | 278 #ifdef ALT_BITSTREAM_WRITER |
279 return s->buf + (s->index>>3); | 279 return s->buf + (s->index>>3); |
280 #else | 280 #else |
281 return s->buf_ptr; | 281 return s->buf_ptr; |
282 #endif | 282 #endif |
283 } | 283 } |
284 | 284 |
285 /** | 285 /** |
286 * | 286 * |
288 */ | 288 */ |
289 static inline void skip_put_bytes(PutBitContext *s, int n){ | 289 static inline void skip_put_bytes(PutBitContext *s, int n){ |
290 assert((put_bits_count(s)&7)==0); | 290 assert((put_bits_count(s)&7)==0); |
291 #ifdef ALT_BITSTREAM_WRITER | 291 #ifdef ALT_BITSTREAM_WRITER |
292 FIXME may need some cleaning of the buffer | 292 FIXME may need some cleaning of the buffer |
293 s->index += n<<3; | 293 s->index += n<<3; |
294 #else | 294 #else |
295 assert(s->bit_left==32); | 295 assert(s->bit_left==32); |
296 s->buf_ptr += n; | 296 s->buf_ptr += n; |
297 #endif | 297 #endif |
298 } | 298 } |
299 | 299 |
300 /** | 300 /** |
301 * skips the given number of bits. | 301 * skips the given number of bits. |
364 */ | 364 */ |
365 | 365 |
/**
 * Reads 32 bits big-endian from a possibly misaligned address.
 * On strict-alignment targets the value is assembled byte by byte;
 * elsewhere a single unaligned load is byte-swapped as needed.
 */
static inline int unaligned32_be(const void *v)
{
#ifdef CONFIG_ALIGN
    const uint8_t *b = v;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#else
    return be2me_32(unaligned32(v)); //original
#endif
}
375 | 375 |
376 static inline int unaligned32_le(const void *v) | 376 static inline int unaligned32_le(const void *v) |
377 { | 377 { |
526 }\ | 526 }\ |
527 | 527 |
528 #if defined(ARCH_X86) || defined(ARCH_X86_64) | 528 #if defined(ARCH_X86) || defined(ARCH_X86_64) |
529 # define SKIP_CACHE(name, gb, num)\ | 529 # define SKIP_CACHE(name, gb, num)\ |
530 asm(\ | 530 asm(\ |
531 "shldl %2, %1, %0 \n\t"\ | 531 "shldl %2, %1, %0 \n\t"\ |
532 "shll %2, %1 \n\t"\ | 532 "shll %2, %1 \n\t"\ |
533 : "+r" (name##_cache0), "+r" (name##_cache1)\ | 533 : "+r" (name##_cache0), "+r" (name##_cache1)\ |
534 : "Ic" ((uint8_t)num)\ | 534 : "Ic" ((uint8_t)num)\ |
535 ); | 535 ); |
536 #else | 536 #else |
537 # define SKIP_CACHE(name, gb, num)\ | 537 # define SKIP_CACHE(name, gb, num)\ |