comparison: x86/vp8dsp.asm @ 12457:2982071047a2 (libavcodec)
Use "d" suffix for general-purpose registers used with movd.
This increases compatibilty with nasm and is also more consistent,
e.g. with h264_intrapred.asm and h264_chromamc.asm that already
do it that way.
author    reimar
date      Sun, 05 Sep 2010 10:10:16 +0000
parents   e6e4059ea421
children  (none)
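
The change is mechanical: every general-purpose register passed to movd gets an explicit "d" suffix, which in x264asm selects the 32-bit alias of a full-width register parameter (e.g. r2 -> r2d, i.e. rdx -> edx on x86-64). movd only transfers 32 bits between a GPR and an MMX/XMM register; yasm silently accepts a 64-bit operand and widens the encoding, but nasm insists that the operand size match the mnemonic (movd for 32-bit GPRs, movq for 64-bit). A minimal standalone sketch of the behaviour in question, not taken from the patch (the file and label names are hypothetical):

    ; splat_demo.asm -- assemble with "nasm -f elf64 splat_demo.asm"
    SECTION .text
    splat_demo:
        movd      mm0, eax      ; 32-bit GPR operand: accepted by nasm and yasm
        punpcklbw mm0, mm0      ; same byte-splat pattern as SPLATB_REG_MMX
        punpcklwd mm0, mm0
        punpckldq mm0, mm0
        ret
    ; "movd mm0, rax" is the pre-patch spelling on x86-64: yasm quietly
    ; assembles it (as the 64-bit movq form), nasm rejects the operand size.

Writing %2d inside the macros pins every expansion to the 32-bit form, so the same source assembles identically on x86-32 and x86-64 under both assemblers.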
--- 12456:a5ddb39627fd
+++ 12457:2982071047a2
@@ -1340,11 +1340,11 @@
     ; write dwords 2
     psrldq    m%1, 4
     psrldq    m%2, 4
 %if %10 == 8
     movd      [%5+%8*2], m%1
-    movd      %5, m%3
+    movd      %5d, m%3
 %endif
     psrldq    m%3, 4
     psrldq    m%4, 4
 %if %10 == 16
     movd      [%5+%8*2], m%1
@@ -1377,59 +1377,59 @@
 ; for SSE4:
 ; 3 is a pointer to the destination's 5th line
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd      %3, %1
+    movd      %3d, %1
     punpckhdq %1, %1
     mov       [%4+%5*4], %3w
     shr       %3, 16
     add       %4, %6
     mov       [%4+%5*4], %3w

-    movd      %3, %1
+    movd      %3d, %1
     add       %4, %5
     mov       [%4+%5*2], %3w
     shr       %3, 16
     mov       [%4+%5 ], %3w

-    movd      %3, %2
+    movd      %3d, %2
     punpckhdq %2, %2
     mov       [%4 ], %3w
     shr       %3, 16
     mov       [%4+%6 ], %3w

-    movd      %3, %2
+    movd      %3d, %2
     add       %4, %6
     mov       [%4+%6 ], %3w
     shr       %3, 16
     mov       [%4+%6*2], %3w
     add       %4, %5
 %endmacro

 %macro WRITE_8W_SSE2 5
-    movd      %2, %1
+    movd      %2d, %1
     psrldq    %1, 4
     mov       [%3+%4*4], %2w
     shr       %2, 16
     add       %3, %5
     mov       [%3+%4*4], %2w

-    movd      %2, %1
+    movd      %2d, %1
     psrldq    %1, 4
     add       %3, %4
     mov       [%3+%4*2], %2w
     shr       %2, 16
     mov       [%3+%4 ], %2w

-    movd      %2, %1
+    movd      %2d, %1
     psrldq    %1, 4
     mov       [%3 ], %2w
     shr       %2, 16
     mov       [%3+%5 ], %2w

-    movd      %2, %1
+    movd      %2d, %1
     add       %3, %5
     mov       [%3+%5 ], %2w
     shr       %2, 16
     mov       [%3+%5*2], %2w
 %endmacro
@@ -1444,31 +1444,31 @@
     pextrw    [%2+%5 ], %1, 6
     pextrw    [%2+%5*2], %1, 7
 %endmacro

 %macro SPLATB_REG_MMX 2-3
-    movd      %1, %2
+    movd      %1, %2d
     punpcklbw %1, %1
     punpcklwd %1, %1
     punpckldq %1, %1
 %endmacro

 %macro SPLATB_REG_MMXEXT 2-3
-    movd      %1, %2
+    movd      %1, %2d
     punpcklbw %1, %1
     pshufw    %1, %1, 0x0
 %endmacro

 %macro SPLATB_REG_SSE2 2-3
-    movd      %1, %2
+    movd      %1, %2d
     punpcklbw %1, %1
     pshuflw   %1, %1, 0x0
     punpcklqdq %1, %1
 %endmacro

 %macro SPLATB_REG_SSSE3 3
-    movd      %1, %2
+    movd      %1, %2d
     pshufb    %1, %3
 %endmacro

 %macro SIMPLE_LOOPFILTER 4
 cglobal vp8_%2_loop_filter_simple_%1, 3, %3, %4