
// Copyright (C) 2011 Luca Piccioni
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#include <cstring>
#include <exception>
#include <memory>

#include "Derm.Simd.h"

using namespace std;

#pragma region Memory_Copy_SSE2

/**
  * Copy loop for buffers whose SOURCE is not 16-byte aligned.
  *
  * @note Additional arguments passed by registers
  *		- ESI: source memory address (may be misaligned)
  *		- EDI: destination memory address
  *		- EBX: number of bytes to copy
  *
  * Copies (EBX / 64) * 64 bytes in 64-byte chunks: unaligned loads (movdqu)
  * from [ESI], non-temporal stores (movntdq) to [EDI]. Advances ESI/EDI past
  * the copied region and counts EBX down to zero; clobbers ESI, EDI, EBX and
  * XMM0-XMM3. The caller copies any remaining tail bytes.
  *
  * NOTE(review): movntdq requires a 16-byte-aligned DESTINATION operand; this
  * "NoAlign" variant only tolerates a misaligned source. A misaligned EDI
  * faults (#GP) — confirm the dispatcher aligns dst before calling this.
  * NOTE(review): assumes EBX >= 64 on entry; with EBX < 64 the shr yields 0
  * and the first dec wraps EBX to 0xFFFFFFFF, looping ~2^32 times. The
  * dispatcher's 256-byte minimum is what enforces this.
  * NOTE(review): non-temporal stores are weakly ordered; an sfence before
  * returning would guarantee global visibility — verify callers do not rely
  * on store ordering without one.
  */
__forceinline void DERM_SIMD_API Memory_CopyLoop_NoAlign_SSE2()
{
	__asm {

		// Loop count: one iteration per 64-byte chunk
		shr ebx, 6;

Memory_Copy_SSE_Loop:

		// Prefetch next segment (at least 32 bytes, maybe more)
		// Avoid cache pollution (non-temporal hint)
		prefetchnta 64[ESI];
	
		// From memory, to registers (unaligned loads)
		movdqu xmm0,  0[ESI];
		movdqu xmm1, 16[ESI]; 
		movdqu xmm2, 32[ESI]; 
		movdqu xmm3, 48[ESI]; 

		// From registers, to memory
		// Non-temporal hint: bypass the cache on the store
		movntdq  0[EDI], xmm0;
		movntdq 16[EDI], xmm1; 
		movntdq 32[EDI], xmm2; 
		movntdq 48[EDI], xmm3; 

		// Increment pointers, loop until the chunk count is exhausted
		add esi, 64; 
		add edi, 64; 
		dec ebx; 

		jnz Memory_Copy_SSE_Loop;

	}
}

/**
  * Copy loop for buffers where BOTH pointers are 16-byte aligned.
  *
  * @note Additional arguments passed by registers
  *		- ESI: source memory address (must be 16-byte aligned)
  *		- EDI: destination memory address (must be 16-byte aligned)
  *		- EBX: number of bytes to copy
  *
  * Copies (EBX / 64) * 64 bytes in 64-byte chunks: aligned loads (movdqa)
  * from [ESI], non-temporal stores (movntdq) to [EDI]. Advances ESI/EDI past
  * the copied region and counts EBX down to zero; clobbers ESI, EDI, EBX and
  * XMM0-XMM3. The caller copies any remaining tail bytes.
  *
  * NOTE(review): movdqa and movntdq both fault (#GP) on a misaligned operand,
  * so dispatching here with either pointer misaligned crashes — confirm the
  * dispatcher's alignment logic before use.
  * NOTE(review): assumes EBX >= 64 on entry; with EBX < 64 the shr yields 0
  * and the first dec wraps EBX to 0xFFFFFFFF, looping ~2^32 times. The
  * dispatcher's 256-byte minimum is what enforces this.
  * NOTE(review): non-temporal stores are weakly ordered; an sfence before
  * returning would guarantee global visibility — verify callers do not rely
  * on store ordering without one.
  */
__forceinline void DERM_SIMD_API Memory_CopyLoop_Align_SSE2()
{
	__asm {

		// Loop count: one iteration per 64-byte chunk
		shr ebx, 6;

Memory_Copy_SSE_Loop:

		// Prefetch next segment (at least 32 bytes, maybe more)
		// Avoid cache pollution (non-temporal hint)
		prefetchnta 64[ESI];
	
		// From memory, to registers (aligned loads)
		movdqa xmm0,  0[ESI];
		movdqa xmm1, 16[ESI]; 
		movdqa xmm2, 32[ESI]; 
		movdqa xmm3, 48[ESI]; 

		// Non-temporal hint: bypass the cache on the store
		movntdq  0[EDI], xmm0;
		movntdq 16[EDI], xmm1; 
		movntdq 32[EDI], xmm2; 
		movntdq 48[EDI], xmm3; 

		// Increment pointers, loop until the chunk count is exhausted
		add esi, 64; 
		add edi, 64; 
		dec ebx; 

		jnz Memory_Copy_SSE_Loop;

	}
}

/**
  * SSE2-accelerated memory copy.
  *
  * @param dst   Destination buffer (must not overlap src).
  * @param src   Source buffer.
  * @param bytes Number of bytes to copy.
  *
  * Dispatches to one of the 64-byte-chunk asm loops above; buffers smaller
  * than 256 bytes (and all head/tail fragments) go through plain memcpy.
  *
  * Fixes over the previous revision:
  *  - The dispatch condition was inverted: misaligned, non-alignable buffers
  *    were flagged as "aligned" and sent to the movdqa loop, which faults on
  *    a misaligned operand.
  *  - movntdq requires a 16-byte-aligned DESTINATION in both loops, so the
  *    destination is now always pre-aligned; only the residual source
  *    alignment selects between the movdqa and movdqu variants.
  *  - An sfence now follows the non-temporal stores (they are weakly
  *    ordered) so the copy is globally visible on return.
  */
void DERM_SIMD_API Memory_Copy_SSE2(void *dst, void *src, unsigned long long bytes)
{
	// Small copies: the SSE path's setup cost is not worth it
	if (bytes < 256) {
		memcpy(dst, src, bytes); 
		return;
	}

	// Always align the destination: movntdq faults on a misaligned store
	// operand, so both asm loops require a 16-byte-aligned EDI.
	unsigned int dstMisalign = (unsigned int)(((unsigned long long)dst) % 16);
	if (dstMisalign != 0) {
		unsigned int prealignBytes = 16 - dstMisalign;

		// Copy the misaligned head, then advance both pointers
		memcpy(dst, src, prealignBytes); 
		dst = ((unsigned char*)dst) + prealignBytes;
		src = ((unsigned char*)src) + prealignBytes;
		bytes -= prealignBytes;
	}

	// After the destination fixup, the residual source alignment decides
	// which loop variant is safe (movdqa needs an aligned ESI).
	bool srcAligned = (((unsigned long long)src) % 16) == 0;

	// Bytes handled by the 64-byte loops; explicit truncation is safe here
	// because this 32-bit code path cannot address more than 4 GB anyway.
	// Guaranteed >= 192 (bytes >= 256 minus at most 15 prealign bytes), so
	// the loops' EBX >= 64 precondition holds.
	unsigned long byteBy64 = (unsigned long)(bytes - (bytes % 64));

	// Pass arguments in the registers the copy loops expect
	__asm { 
		mov esi, src;
		mov edi, dst;
		mov ebx, byteBy64;
	}

	if (srcAligned) {
		Memory_CopyLoop_Align_SSE2();
	} else {
		Memory_CopyLoop_NoAlign_SSE2();
	}

	// Non-temporal stores are weakly ordered: fence before returning so the
	// copied data is globally visible to subsequent reads/writes.
	__asm {
		sfence;
	}

	// Copy the unaligned tail, if any
	if (byteBy64 != bytes)
		memcpy(((unsigned char*)dst) + byteBy64, ((unsigned char*)src) + byteBy64, bytes - byteBy64);
}

#pragma endregion