#include "stddef.h"
#include "string.h"
#include "cpuid.h"

/* ===============================================================
 * ==== ALIGNED SSE2 MEMORY COPY OPERATIONS (MOVDQA == SSE2) =====
 * =============================================================== */

const void * memcpy_sse2(const void * const dst, const void * const src, const size_t m_count)
{
	size_t i = 0, ret = -1;

	/* is "src" aligned on a SSE_XMM_SIZE boundary */
	if(!((size_t)src & (SSE_XMM_SIZE-1)))
	{ }
	else
	{ 
		/* lets make sure we don't copy 'too' many bytes (i < m_count) */
		while((((size_t)src + i) & (SSE_XMM_SIZE-1)) && i < m_count)
		{
			asm("movsb;"::"S"((size_t)src + i), "D"((size_t)dst + i));
			i++;
		}
	}

	/* check to see if "dst" is aligned on a SSE_XMM_SIZE boundary */
	if(!(((size_t)dst + i) & (SSE_XMM_SIZE-1)))
	{
		/* each iteration consumes a 128-byte chunk of memory */
		for(; i + 128 < m_count; i += 128)
		{
			/* fill all the XMM 128-bit SSE registers! */
			asm (" movdqa 0(%0) , %%xmm0;  "
				 " movdqa 16(%0), %%xmm1;  " 
				 " movdqa 32(%0), %%xmm2;  " 
				 " movdqa 48(%0), %%xmm3;  " 
				 " movdqa 64(%0), %%xmm4;  " 
				 " movdqa 80(%0), %%xmm5;  " 
				 " movdqa 96(%0), %%xmm6;  " 
				 " movdqa 112(%0), %%xmm7; " 
				 " movntdq %%xmm0, 0(%1);    " 
				 " movntdq %%xmm1, 16(%1);	 " 
				 " movntdq %%xmm2, 32(%1);	 " 
				 " movntdq %%xmm3, 48(%1);	 " 
				 " movntdq %%xmm4, 64(%1);   " 
				 " movntdq %%xmm5, 80(%1);	 " 
				 " movntdq %%xmm6, 96(%1);	 " 
				 " movntdq %%xmm7, 112(%1);	 " 
				 " clflush 0(%0);           "
				 ::"r"((size_t)src + i), "r"((size_t)dst + i)); 
		}
	}
	else
	{
		for(; i + 128 < m_count; i += 128)
		{ 
			asm (" movdqa 0(%0) , %%xmm0;  "
				 " movdqa 16(%0), %%xmm1;  " 
				 " movdqa 32(%0), %%xmm2;  " 
				 " movdqa 48(%0), %%xmm3;  " 
				 " movdqa 64(%0), %%xmm4;  " 
				 " movdqa 80(%0), %%xmm5;  " 
				 " movdqa 96(%0), %%xmm6;  " 
				 " movdqa 112(%0), %%xmm7; " 
				 " movdqu %%xmm0, 0(%1);     " 
				 " movdqu %%xmm1, 16(%1);	 " 
				 " movdqu %%xmm2, 32(%1);	 " 
				 " movdqu %%xmm3, 48(%1);	 " 
				 " movdqu %%xmm4, 64(%1);    " 
				 " movdqu %%xmm5, 80(%1);	 " 
				 " movdqu %%xmm6, 96(%1);	 " 
				 " movdqu %%xmm7, 112(%1);	 " 
				 " clflush 0(%0);           "
				 " clflush 0(%1);           "
				 ::"r"((size_t)src + i), "r"((size_t)dst + i)); 
		}
	}

    asm(" rep movsl; " :: "S"((size_t)src + i), "D"((size_t)dst + i), "c"((m_count - i) / sizeof(uint32_t)));
    i += ((m_count - i) / sizeof(uint32_t)) * sizeof(uint32_t);
    
	asm(" rep movsb; " :"=D"(ret) : "S"((size_t)src + i), "D"((size_t)dst + i), "c"(m_count - i));
    
	return (void *)(ret);
}

const void * memcpy_std(const void * const dst, const void * const src, const size_t m_count)
{
    size_t i = 0, ret = -1;
    
    asm(" rep movsl; " :: "S"((size_t)src), "D"((size_t)dst), "c"(m_count / sizeof(uint32_t)));
    i += (m_count / sizeof(uint32_t)) * sizeof(uint32_t);
    
	asm(" rep movsb; " :"=D"(ret) : "S"((size_t)src + i), "D"((size_t)dst + i), "c"(m_count - i));
	
	return (void *)(ret);
}












