Mercurial > libavcodec.hg
comparison armv4l/mathops.h @ 8031:eebc7209c47f libavcodec
Convert asm keyword into __asm__.
Neither the asm() nor the __asm__() keyword is part of the C99
standard, but while GCC accepts the former in C89 syntax, it is not
accepted in C99 unless GNU extensions are turned on (with -fasm). The
latter form is accepted in any syntax as an extension (without
requiring further command-line options).
Sun Studio C99 compiler also does not accept asm() while accepting
__asm__(), albeit reporting warnings that it's not valid C99 syntax.
author:   flameeyes
date:     Thu, 16 Oct 2008 13:34:09 +0000
parents:  c4a4495715dd
children: 8547a4ae101b
Side-by-side comparison (equal / deleted / inserted / replaced) of
revision 8030:a512ac8fa540 (left column) with revision 8031:eebc7209c47f (right column).
23 #define AVCODEC_ARMV4L_MATHOPS_H | 23 #define AVCODEC_ARMV4L_MATHOPS_H |
24 | 24 |
25 #ifdef FRAC_BITS | 25 #ifdef FRAC_BITS |
/* Fixed-point multiply: returns the signed 64-bit product (a * b)
 * arithmetically shifted right by FRAC_BITS, as a 32-bit int.
 * smull produces the full 64-bit product in lo:hi; the shift is then
 * reassembled from both halves:  result = (lo >> FRAC_BITS) | (hi << (32-FRAC_BITS)).
 * FRAC_BITS must be a compile-time constant ("i" constraints).
 * NOTE(review): assumes 0 < FRAC_BITS < 32 — confirm against the including file. */
#   define MULL(a, b) \
        ({  int lo, hi;\
            __asm__("smull %0, %1, %2, %3     \n\t"\
                    "mov   %0, %0,     lsr %4\n\t"\
                    "add   %1, %0, %1, lsl %5\n\t"\
                    : "=&r"(lo), "=&r"(hi)\
                    : "r"(b), "r"(a), "i"(FRAC_BITS), "i"(32-FRAC_BITS));\
            hi; })
35 | 35 |
#ifdef HAVE_ARMV6
/* MULH: signed 32x32 -> high 32 bits of the 64-bit product.
 * ARMv6 has a dedicated instruction (smmul) for exactly this. */
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#define MULH MULH
#else
/* Pre-ARMv6 fallback: compute the full 64-bit product with smull and
 * keep only the high half.  Early-clobber ("=&r") outputs keep lo/hi
 * out of the input registers. */
#define MULH(a, b) \
    ({ int lo, hi;\
       __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
       hi; })
#endif
50 | 50 |
51 static inline av_const int64_t MUL64(int a, int b) | 51 static inline av_const int64_t MUL64(int a, int b) |
52 { | 52 { |
53 union { uint64_t x; unsigned hl[2]; } x; | 53 union { uint64_t x; unsigned hl[2]; } x; |
54 asm ("smull %0, %1, %2, %3" | 54 __asm__ ("smull %0, %1, %2, %3" |
55 : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b)); | 55 : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b)); |
56 return x.x; | 56 return x.x; |
57 } | 57 } |
58 #define MUL64 MUL64 | 58 #define MUL64 MUL64 |
59 | 59 |
60 static inline av_const int64_t MAC64(int64_t d, int a, int b) | 60 static inline av_const int64_t MAC64(int64_t d, int a, int b) |
61 { | 61 { |
62 union { uint64_t x; unsigned hl[2]; } x = { d }; | 62 union { uint64_t x; unsigned hl[2]; } x = { d }; |
63 asm ("smlal %0, %1, %2, %3" | 63 __asm__ ("smlal %0, %1, %2, %3" |
64 : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b)); | 64 : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b)); |
65 return x.x; | 65 return x.x; |
66 } | 66 } |
67 #define MAC64(d, a, b) ((d) = MAC64(d, a, b)) | 67 #define MAC64(d, a, b) ((d) = MAC64(d, a, b)) |
68 #define MLS64(d, a, b) MAC64(d, -(a), b) | 68 #define MLS64(d, a, b) MAC64(d, -(a), b) |
69 | 69 |
#if defined(HAVE_ARMV5TE)

/* Signed 16x16 -> 32 multiply-accumulate: rt += (int16_t)ra * (int16_t)rb.
 * smlabb multiplies the bottom halfwords of its operands; the "0" input
 * constraint ties the accumulator input to the output register. */
#   define MAC16(rt, ra, rb) \
        __asm__ ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
/* Signed 16x16 -> 32 multiply of the bottom halfwords of ra and rb. */
#   define MUL16(ra, rb) \
        ({ int __rt; \
           __asm__ ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
           __rt; })

#endif
82 | 82 |
83 #endif /* AVCODEC_ARMV4L_MATHOPS_H */ | 83 #endif /* AVCODEC_ARMV4L_MATHOPS_H */ |