comparison arm/simple_idct_armv6.S @ 8576:636dc45f4779 libavcodec

ARM: clean up pc-relative references in simple_idct_armv6.S
author:   mru
date:     Mon, 12 Jan 2009 20:37:33 +0000
parents:  0b9dff3a1ce2
children: 34facb1ab4da
comparison: 8575:0b9dff3a1ce2 → 8576:636dc45f4779
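
The cleanup itself is mechanical: every hand-computed pc-relative literal load is replaced by a plain label operand, and the assembler works out the offset instead. Below is a minimal sketch of the two equivalent forms; the W4/W6 values are placeholders for this sketch and are not taken from the file.

    W4 = 16383                           @ placeholder weights for the sketch;
    W6 =  8867                           @ the real file defines W1-W7 itself

            ldr     lr, [pc, #(w46-.-8)] @ old form: offset computed by hand
                                         @ (in ARM state pc reads as . + 8)
            ldr     lr, w46              @ new form: the assembler emits the
                                         @ same pc-relative load automatically

    w46:    .long   W4 | (W6 << 16)      @ literal word; must stay within the
                                         @ ldr instruction's ±4KB reach

Both encodings produce the same pc-relative ldr; the label form is simply easier to read and avoids mistakes in the "-.-8" arithmetic.
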
   58        ip = w42 <= 2 cycles
   59
   60        Output in registers r4--r11
   61  */
   62  .macro idct_row shift
-  63          ldr    lr, [pc, #(w46-.-8)]  /* lr = W4 | (W6 << 16) */
+  63          ldr    lr, w46               /* lr = W4 | (W6 << 16) */
   64          mov    r1, #(1<<(\shift-1))
   65          smlad  r4, r2, ip, r1
   66          smlsd  r7, r2, ip, r1
-  67          ldr    ip, [pc, #(w13-.-8)]  /* ip = W1 | (W3 << 16) */
+  67          ldr    ip, w13               /* ip = W1 | (W3 << 16) */
-  68          ldr    r10,[pc, #(w57-.-8)]  /* r10 = W5 | (W7 << 16) */
+  68          ldr    r10,w57               /* r10 = W5 | (W7 << 16) */
   69          smlad  r5, r2, lr, r1
   70          smlsd  r6, r2, lr, r1
   71
   72          smuad  r8, r3, ip            /* r8 = B0 = W1*row[1] + W3*row[3] */
   73          smusdx r11,r3, r10           /* r11 = B3 = W7*row[1] - W5*row[3] */
   76          pkhbt  r1, ip, r10,lsl #16   /* r1 = W1 | (W5 << 16) */
   77          smusdx r9, r2, r3            /* r9 = -B1 = W7*row[3] - W3*row[1] */
   78          smlad  r8, lr, r10,r8        /* B0 += W5*row[5] + W7*row[7] */
   79          smusdx r10,r3, r1            /* r10 = B2 = W5*row[1] - W1*row[3] */
   80
-  81          ldr    r3, [pc, #(w42n-.-8)] /* r3 = -W4 | (-W2 << 16) */
+  81          ldr    r3, w42n              /* r3 = -W4 | (-W2 << 16) */
   82          smlad  r10,lr, r2, r10       /* B2 += W7*row[5] + W3*row[7] */
   83          ldr    r2, [r0, #4]          /* r2 = row[6,4] */
   84          smlsdx r11,lr, ip, r11       /* B3 += W3*row[5] - W1*row[7] */
-  85          ldr    ip, [pc, #(w46-.-8)]  /* ip = W4 | (W6 << 16) */
+  85          ldr    ip, w46               /* ip = W4 | (W6 << 16) */
   86          smlad  r9, lr, r1, r9        /* B1 -= W1*row[5] + W5*row[7] */
   87
   88          smlad  r5, r2, r3, r5        /* A1 += -W4*row[4] - W2*row[6] */
   89          smlsd  r6, r2, r3, r6        /* A2 += -W4*row[4] + W2*row[6] */
   90          smlad  r4, r2, ip, r4        /* A0 += W4*row[4] + W6*row[6] */

   99        ip = w42
  100
  101        Output in registers r4--r11
  102  */
  103  .macro idct_row4 shift
- 104          ldr    lr, [pc, #(w46-.-8)]  /* lr = W4 | (W6 << 16) */
+ 104          ldr    lr, w46               /* lr = W4 | (W6 << 16) */
- 105          ldr    r10,[pc, #(w57-.-8)]  /* r10 = W5 | (W7 << 16) */
+ 105          ldr    r10,w57               /* r10 = W5 | (W7 << 16) */
  106          mov    r1, #(1<<(\shift-1))
  107          smlad  r4, r2, ip, r1
  108          smlsd  r7, r2, ip, r1
- 109          ldr    ip, [pc, #(w13-.-8)]  /* ip = W1 | (W3 << 16) */
+ 109          ldr    ip, w13               /* ip = W1 | (W3 << 16) */
  110          smlad  r5, r2, lr, r1
  111          smlsd  r6, r2, lr, r1
  112          smusdx r11,r3, r10           /* r11 = B3 = W7*row[1] - W5*row[3] */
  113          smuad  r8, r3, ip            /* r8 = B0 = W1*row[1] + W3*row[3] */
  114          pkhtb  r2, ip, r10,asr #16   /* r3 = W7 | (W3 << 16) */

  202          orrs   lr, lr, ip
  203          cmpeq  lr, r3
  204          cmpeq  lr, r2, lsr #16
  205          beq    1f
  206          str    r1, [sp, #-4]!
- 207          ldr    ip, [pc, #(w42-.-8)]  /* ip = W4 | (W2 << 16) */
+ 207          ldr    ip, w42               /* ip = W4 | (W2 << 16) */
  208          cmp    lr, #0
  209          beq    2f
  210
  211          idct_row ROW_SHIFT
  212          b      3f

  246  */
  247  function idct_col_armv6
  248          stmfd  sp!, {r1, lr}
  249
  250          ldr    r2, [r0]              /* r2 = row[2,0] */
- 251          ldr    ip, [pc, #(w42-.-8)]  /* ip = W4 | (W2 << 16) */
+ 251          ldr    ip, w42               /* ip = W4 | (W2 << 16) */
  252          ldr    r3, [r0, #8]          /* r3 = row[3,1] */
  253          idct_row COL_SHIFT
  254          ldr    r1, [sp], #4
  255          idct_finish_shift COL_SHIFT
  256

  274  */
  275  function idct_col_put_armv6
  276          stmfd  sp!, {r1, r2, lr}
  277
  278          ldr    r2, [r0]              /* r2 = row[2,0] */
- 279          ldr    ip, [pc, #(w42-.-8)]  /* ip = W4 | (W2 << 16) */
+ 279          ldr    ip, w42               /* ip = W4 | (W2 << 16) */
  280          ldr    r3, [r0, #8]          /* r3 = row[3,1] */
  281          idct_row COL_SHIFT
  282          ldmfd  sp!, {r1, r2}
  283          idct_finish_shift_sat COL_SHIFT
  284

  304  */
  305  function idct_col_add_armv6
  306          stmfd  sp!, {r1, r2, lr}
  307
  308          ldr    r2, [r0]              /* r2 = row[2,0] */
- 309          ldr    ip, [pc, #(w42-.-8)]  /* ip = W4 | (W2 << 16) */
+ 309          ldr    ip, w42               /* ip = W4 | (W2 << 16) */
  310          ldr    r3, [r0, #8]          /* r3 = row[3,1] */
  311          idct_row COL_SHIFT
  312          ldmfd  sp!, {r1, r2}
  313          idct_finish
  314