//void __stdcall MemCpy_WMMX(unsigned char* pSrc, unsigned char* pDst, int size)


#include <asm.h>

.global C_SYMBOL(MemCpy_WMMX)

.global C_SYMBOL(testasm)


//------------------------------------------------------------------
// testasm — test stub: signed 32-bit division via the compiler
// runtime helper.  EABI: on entry R0 = numerator, R1 = denominator;
// __divsi3 returns R0 = R0 / R1.
// R4 is saved alongside LR presumably to keep the stack 8-byte
// aligned across the call (AAPCS) — R4 itself is never used here.
// The commented-out lines below are leftover experiments (an
// undefined-instruction breakpoint and some shift/multiply tests).
//------------------------------------------------------------------
C_SYMBOL(testasm):
STMFD   SP!, {R4,LR}                    // save R4 + return address
//.long 0xe7fddefe
//ADD     R0, R0, R0,LSL#5
//MOV     R0, R0,LSL#16
BL      C_SYMBOL(__divsi3)              // R0 = R0 / R1 (signed)
LDMFD   SP!, {R4,PC}                    // restore R4 and return


//------------------------------------------------------------------
// void MemCpy_WMMX(unsigned char* pSrc, unsigned char* pDst, int size)
//   R0 = pSrc, R1 = pDst, R2 = size in bytes.
// Copies `size` bytes using Intel Wireless MMX (iWMMXt) 64-bit
// registers.  Strategy:
//   1. Byte-copy 0..7 bytes until pDst is 8-byte aligned.
//   2. While >= 32 bytes remain, copy 32 bytes per iteration with
//      WLDRD/WSTRD (software-pipelined).  If pSrc is not 8-byte
//      aligned, read aligned doublewords and splice them with
//      WALIGNR0 (offset programmed into wCGR0 via TMCR).
//   3. Byte-copy the < 32-byte tail.
// Returns nothing; copies forward only.
// NOTE(review): assumes the buffers do not overlap — confirm callers.
// Clobbers: R0, R1, R6, R7, wR0-wR4, wCGR0, flags.  R4/R5 are saved
// but unused in the visible body — presumably ABI caution.
//------------------------------------------------------------------
C_SYMBOL(MemCpy_WMMX):

// Disassembler (IDA) stack-frame residue; these symbols are unused.
arg_0=	0
arg_4=	4

    STMFD	SP!, {R4-R7,LR}             // save callee-saved regs (only R6/R7 are used)

_START_:
    CMP R2, #0
    BLE _DONE_                          // nothing to do for size <= 0
    MOV	R6, R2                              //R6 = remaining byte count
    ADD	R7, R1,	#7                          //R1 = destination pointer
    BIC	R7, R7,	#7                          //round pDst up to the next 8-byte boundary
    SUB	R7, R7,	R1                          //R7 = N = bytes until pDst is 8-byte aligned (0..7)
    CMP	R7, #0                              //is pDst already 8-byte aligned?
    BEQ	_lc030_000669_                      //yes: go straight to the bulk copy

// Byte-copy loop.  Entered two ways:
//   - alignment prologue: R7 = N (1..7), R6 = total size;
//   - tail: R7 = R6 = bytes left (< 32).
// Copies min(R7, R6) bytes, leaving R6 = bytes still outstanding.
_Less_Than_32_:
    WLDRB   wR0 ,[R0], #1                   //R0 = source pointer; prime the pipeline: load 1st byte

    CMP	R7, R6                              //
    MOVGT	R7, R6                          //R7 = min(N, size) — don't overrun a tiny buffer
    SUB	R6, R6,	R7                          //R6 = bytes remaining after this byte loop

_Process_Step_N_header_:
    SUBS	R7, R7,	#1                      //R7-- and set flags
    WSTRB   wR0 ,[R1], #1                   //store the byte loaded above
    WLDRBGT   wR0 ,[R0], #1                 //if R7 > 0, load the next byte (skipped on last pass)

    BGT	_Process_Step_N_header_             //loop until R7 bytes copied
    CMP	R6, #0
    BEQ	_DONE_                              //nothing left after the byte loop: done

// Bulk copy: 32 bytes per iteration.  pDst (R1) is 8-byte aligned here.
_lc030_000669_:
    SUBS	R6, R6,	#0x20                   //R6 -= 32; MI => fewer than 32 bytes remain
    ADDMI	R7, R6,	#0x20                   //R7 = actual bytes left (for the byte loop)
    ADDMI	R6, R6,	#0x20                   //undo the subtraction
    BMI	_Less_Than_32_                      //jump to _Less_Than_32_ if fewer than 32 bytes
    TST	R0, #7
    BNE	_UnAligned8_                        //if pSrc is not 8-byte aligned, use the WALIGNR path

    WLDRD   wR0, [R0], #8                   //software-pipeline: pre-load first 16 bytes
    WLDRD   wR1, [R0], #8

// Both pointers 8-byte aligned: store 32 B / load 32 B per iteration.
// SUBS drives the GE-conditional reloads and the loop branch.
_Loop_Aligned8_:
    PLD	[R0,#0x20]                          //prefetch source 32 bytes ahead
    PLD	[R2,#0x20]                          //interesting that removing this non-sense preload would slow down speed
                                            //(R2 still holds `size`, so the address is bogus — kept purely for measured timing)

    WSTRD   wR0, [R1], #8
    WLDRD   wR2, [R0], #8
    WSTRD   wR1, [R1], #8
    WLDRD   wR3, [R0], #8

    SUBS	R6, R6,	#0x20                   //32 bytes consumed; GE => another full block follows

    WSTRD   wR2, [R1], #8
    WLDRDGE wR0, [R0], #8                   //reload pipeline regs only if another iteration runs
    WSTRD   wR3, [R1], #8
    WLDRDGE wR1, [R0], #8

    BGE	_Loop_Aligned8_

    ADDS	R6, R6,	#0x20                   //R6 = leftover bytes (0..31)
    BEQ	_DONE_
    MOVGT	R7, R6                          //tail: byte-copy R7 = R6 remaining bytes
    BGT	_Less_Than_32_

// pSrc not 8-byte aligned: read aligned doublewords and splice the
// unaligned payload out of each adjacent pair with WALIGNR0.
_UnAligned8_:
    AND	R7, R0,	#7                          //R7 = pSrc misalignment (1..7)
    BIC	R0, R0,	#7                          //round pSrc down to an 8-byte boundary
    TMCR    wCGR0, R7                       //program the WALIGNR0 shift amount
    WLDRD   wR0, [R0], #8                   //pre-load 4 aligned doublewords (32 bytes)
    WLDRD   wR1, [R0], #8
    WLDRD   wR2, [R0], #8
    WLDRD   wR3, [R0], #8

_Loop_UnAligned8_:
    SUBS	R6, R6,	#0x20                   //32 bytes consumed; GE => another full block follows
    PLD	[R0,#0x20]                          //prefetch source 32 bytes ahead
    PLD	[R2,#0x20]                          //same measured-but-bogus preload as the aligned loop
    WLDRD   wR4, [R0]                       //peek the next aligned dword (no writeback)

    WALIGNR0 wR0 ,wR0, wR1                  //wR0 = unaligned dword spanning {wR0, wR1}
    WSTRD   wR0, [R1], #8
    WLDRDGE wR0, [R0], #8                   //refill pipeline only if another iteration runs

    WALIGNR0 wR1 ,wR1, wR2
    WSTRD   wR1, [R1], #8
    WLDRDGE wR1, [R0], #8

    WALIGNR0 wR2 ,wR2, wR3
    WSTRD   wR2, [R1], #8
    WLDRDGE wR2, [R0], #8

    WALIGNR0 wR3, wR3, wR4
    WSTRD   wR3, [R1], #8
    WLDRDGE wR3, [R0], #8

    BGE	_Loop_UnAligned8_
    ADD	R0, R0,	R7                          //restore byte-accurate pSrc for the tail copy
    ADDS	R6, R6,	#0x20                   //R6 = leftover bytes (0..31)
    BEQ	_DONE_
    MOVGT	R7, R6                          //tail: byte-copy R7 = R6 remaining bytes
    BGT	_Less_Than_32_

_DONE_:

    LDMFD	SP!, {R4-R7,PC}                 // restore callee-saved regs and return



