comparison ppc/gmc_altivec.c @ 995:edc10966b081 (libavcodec)

altivec jumbo patch by (Romain Dolbeau <dolbeaur at club-internet dot fr>)

| author | michaelni |
|---|---|
| date | Sat, 11 Jan 2003 20:51:03 +0000 |
| parents | |
| children | 95cbffdc98a9 |
| comparing | 994:7701ff462e3a with 995:edc10966b081 |
/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "../dsputil.h"

#include "dsputil_altivec.h"
/*
  AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
  to preserve proper dst alignment.
*/
void gmc1_altivec(UINT8 *dst /* align 8 */, UINT8 *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
#if 0
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);
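    /* The four weights are the bilinear-interpolation coefficients for the
       (x16, y16) sub-pel position; they always sum to 16*16 = 256, which is
       why the final >>8 renormalizes each output pixel. */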

    int i;

    for(i=0; i<h; i++)
    {
        dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
        dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
        dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
        dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
        dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
        dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
        dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
        dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
        dst+= stride;
        src+= stride;
    }
#else
    const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
        {rounder, rounder, rounder, rounder,
         rounder, rounder, rounder, rounder};
    const unsigned short __attribute__ ((aligned(16))) ABCD[8] =
    {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
    };
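    /* 16-bit unsigned lanes are wide enough here: the weights sum to 256, so
       each per-pixel accumulation is at most 256*255 + rounder; as long as
       rounder < 256, nothing wraps and the modulo-65536 arithmetic of the
       vec_mladd chain below is exact. */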

    register const vector unsigned char vczero = (const vector unsigned char)(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
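    /* low four address bits only: since dst is 8-byte aligned by contract,
       dst_odd is either 0x0 (output in the low half of an aligned 16-byte
       block) or 0x8 (output in the high half). */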
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

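    /* load the four 16-bit weights once, then broadcast each across all
       eight lanes of its own vector with vec_splat. */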
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes
    // we load the first batch here, as inside the loop
    // we can reuse 'src+stride' from one iteration
    // as the 'src' of the next.
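    // vec_ld ignores the low 4 bits of the address, so the unaligned 9 bytes
    // are fetched with two aligned 16-byte loads and realigned by vec_perm,
    // using the permute mask that vec_lvsl builds from the address's low bits.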
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
    { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    }
    else
    {
        srcvB = src_1;
    }
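    // vec_mergeh with vczero interleaves a zero byte before each of the 8
    // leftmost source bytes, zero-extending the pixels to unsigned 16-bit
    // lanes ready for vec_mladd.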
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++)
    {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes
        // then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
        { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        }
        else
        {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // those four instructions replace 32 int muls & 32 int adds.
        // isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
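        // i.e. tempD = A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder,
        // accumulated lane by lane through the chained multiply-adds above.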

        srcvA = srcvC;
        srcvB = srcvD;
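        // the weights sum to 16*16 = 256, so shifting right by 8 renormalizes
        // each 16-bit lane back to the 0..255 pixel range.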

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

        if (dst_odd)
        {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        }
        else
        {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }
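        // read-modify-write: only 8 result bytes were produced, but vec_st
        // always writes a full aligned 16-byte block, so the unchanged half
        // of the previously loaded dstv is merged back in before the store.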

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
#endif
}