# HG changeset patch # User mru # Date 1240012828 0 # Node ID 1fa3820b1a84e2a58a65fde32a93e947c17b23f1 # Parent 98b64f65be0d2316f776e921fd37893b881ffa0c ARM asm for AV_RN*() ARMv6 and later support unaligned loads and stores for single word/halfword but not double/multiple. GCC is ignorant of this and will always use bytewise accesses for unaligned data. Casting to an int32_t pointer is dangerous since a load/store double or multiple instruction might be used (this happens with some code in FFmpeg). Implementing the AV_[RW]* macros with inline asm using only supported instructions gives fast and safe unaligned accesses. ARM RVCT does the right thing with generic code. This gives an overall speedup of up to 10%. diff -r 98b64f65be0d -r 1fa3820b1a84 arm/intreadwrite.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/arm/intreadwrite.h Sat Apr 18 00:00:28 2009 +0000 @@ -0,0 +1,78 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ARM_INTREADWRITE_H +#define AVUTIL_ARM_INTREADWRITE_H + +#include <stdint.h> +#include "config.h" + +#if HAVE_FAST_UNALIGNED && HAVE_INLINE_ASM + +#define AV_RN16 AV_RN16 +static inline uint16_t AV_RN16(const void *p) +{ + uint16_t v; + __asm__ ("ldrh %0, %1" : "=r"(v) : "m"(*(const uint16_t *)p)); + return v; +} + +#define AV_WN16 AV_WN16 +static inline void AV_WN16(void *p, uint16_t v) +{ + __asm__ ("strh %1, %0" : "=m"(*(uint16_t *)p) : "r"(v)); +} + +#define AV_RN32 AV_RN32 +static inline uint32_t AV_RN32(const void *p) +{ + uint32_t v; + __asm__ ("ldr %0, %1" : "=r"(v) : "m"(*(const uint32_t *)p)); + return v; +} + +#define AV_WN32 AV_WN32 +static inline void AV_WN32(void *p, uint32_t v) +{ + __asm__ ("str %1, %0" : "=m"(*(uint32_t *)p) : "r"(v)); +} + +#define AV_RN64 AV_RN64 +static inline uint64_t AV_RN64(const void *p) +{ + union { uint64_t v; uint32_t hl[2]; } v; + __asm__ ("ldr %0, %2 \n\t" + "ldr %1, %3 \n\t" + : "=r"(v.hl[0]), "=r"(v.hl[1]) + : "m"(*(const uint32_t*)p), "m"(*((const uint32_t*)p+1))); + return v.v; +} + +#define AV_WN64 AV_WN64 +static inline void AV_WN64(void *p, uint64_t v) +{ + union { uint64_t v; uint32_t hl[2]; } vv = { v }; + __asm__ ("str %2, %0 \n\t" + "str %3, %1 \n\t" + : "=m"(*(uint32_t*)p), "=m"(*((uint32_t*)p+1)) + : "r"(vv.hl[0]), "r"(vv.hl[1])); +} + +#endif /* HAVE_INLINE_ASM */ + +#endif /* AVUTIL_ARM_INTREADWRITE_H */ diff -r 98b64f65be0d -r 1fa3820b1a84 intreadwrite.h --- a/intreadwrite.h Sat Apr 18 00:00:22 2009 +0000 +++ b/intreadwrite.h Sat Apr 18 00:00:28 2009 +0000 @@ -29,6 +29,9 @@ * defined, even if these are implemented as inline functions. */ +#if ARCH_ARM +# include "arm/intreadwrite.h" +#endif /* * Define AV_[RW]N helper macros to simplify definitions not provided