#ifndef __INCLUDED_CBL_SIMD_H__
#error "CBL_SIMD_SSE.hpp must not be used directly. Use CBL_SIMD.h instead."
#else
#ifndef __INCLUDED_CBL_SIMD_SSE_HPP__
#define __INCLUDED_CBL_SIMD_SSE_HPP__

// ---------------------------------------------------------------------------------------------
//  Vec4f declarations
// ---------------------------------------------------------------------------------------------

// Broadcasts a single float into all four lanes of the output.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec4f& _rOut, float _Value)
{
	const __m128 Broadcast = _mm_set1_ps(_Value);
	_rOut.Packed = Broadcast;
}

// ---------------------------------------------------------------------------------------------

// Broadcasts the lane picked by _Selector (0..3) into all four output lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec4f& _rOut, Vec4f& _rValues, uint32 _Selector)
{
	const float Picked = _rValues.Data[_Selector];
	_rOut.Packed = _mm_set1_ps(Picked);
}

// ---------------------------------------------------------------------------------------------

// Rearranges lanes: output lane i receives _rValues lane _rDest.Data[i].
CB_FORCEINLINE void CB_INLINE_ATTR VecSwizzle(Vec4f& _rOut, const Vec4f& _rValues, const Vec4i& _rDest)
{
	const float Lane0 = _rValues.Data[_rDest.Data[0]];
	const float Lane1 = _rValues.Data[_rDest.Data[1]];
	const float Lane2 = _rValues.Data[_rDest.Data[2]];
	const float Lane3 = _rValues.Data[_rDest.Data[3]];
	_rOut.Packed = _mm_setr_ps(Lane0, Lane1, Lane2, Lane3);
}

// ---------------------------------------------------------------------------------------------

// Interleaves the upper two lanes of both operands (unpack-high).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeBack(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Interleaved = _mm_unpackhi_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Interleaves the lower two lanes of both operands (unpack-low).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeFront(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Interleaved = _mm_unpacklo_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Component-wise addition: _rOut = _rLhs + _rRhs.
CB_FORCEINLINE void CB_INLINE_ATTR VecAdd(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Sum = _mm_add_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Sum;
}

// ---------------------------------------------------------------------------------------------

// Component-wise subtraction: _rOut = _rLhs - _rRhs.
CB_FORCEINLINE void CB_INLINE_ATTR VecSub(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Diff = _mm_sub_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Diff;
}

// ---------------------------------------------------------------------------------------------

// Component-wise multiplication: _rOut = _rLhs * _rRhs.
CB_FORCEINLINE void CB_INLINE_ATTR VecMul(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Product = _mm_mul_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Product;
}

// ---------------------------------------------------------------------------------------------

// Component-wise division: _rOut = _rLhs / _rRhs.
CB_FORCEINLINE void CB_INLINE_ATTR VecDiv(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Quotient = _mm_div_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Quotient;
}

// ---------------------------------------------------------------------------------------------

// Fused-style multiply-add: _rOut = _rMLhs * _rMRhs + _rAdd
// (two separate SSE ops; rounding happens after the multiply).
CB_FORCEINLINE void CB_INLINE_ATTR VecMAdd(Vec4f& _rOut, const Vec4f& _rMLhs, const Vec4f& _rMRhs, const Vec4f& _rAdd)
{
	const __m128 Product = _mm_mul_ps(_rMLhs.Packed, _rMRhs.Packed);
	_rOut.Packed = _mm_add_ps(Product, _rAdd.Packed);
}

// ---------------------------------------------------------------------------------------------

// Negated multiply-subtract: _rOut = -(_rMLhs * _rMRhs - _rSub).
// The negation is spelled as (0 - x) rather than (_rSub - product); the two
// differ only in the sign of a zero result, so the operation order is kept
// deliberately.
CB_FORCEINLINE void CB_INLINE_ATTR VecNMSub(Vec4f& _rOut, const Vec4f& _rMLhs, const Vec4f& _rMRhs, const Vec4f& _rSub)
{
	_rOut.Packed = _mm_sub_ps(_mm_setzero_ps(), _mm_sub_ps(_mm_mul_ps(_rMLhs.Packed, _rMRhs.Packed), _rSub.Packed));
}

// ---------------------------------------------------------------------------------------------

// Multiplies every lane of _rLhs by the scalar _Scale.
CB_FORCEINLINE void CB_INLINE_ATTR VecScale(Vec4f& _rOut, const Vec4f& _rLhs, float _Scale)
{
	const __m128 Factor = _mm_set1_ps(_Scale);
	_rOut.Packed = _mm_mul_ps(_rLhs.Packed, Factor);
}

// ---------------------------------------------------------------------------------------------

// Component-wise minimum of the two operands.
CB_FORCEINLINE void CB_INLINE_ATTR VecMin(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Smaller = _mm_min_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Smaller;
}

// ---------------------------------------------------------------------------------------------

// Component-wise maximum of the two operands.
CB_FORCEINLINE void CB_INLINE_ATTR VecMax(Vec4f& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	const __m128 Larger = _mm_max_ps(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Larger;
}

// ---------------------------------------------------------------------------------------------

// Component-wise square root.
CB_FORCEINLINE void CB_INLINE_ATTR VecSqrt(Vec4f& _rOut, const Vec4f& _rValues)
{
	const __m128 Root = _mm_sqrt_ps(_rValues.Packed);
	_rOut.Packed = Root;
}

// ---------------------------------------------------------------------------------------------

// Component-wise reciprocal square root estimate (hardware approximation,
// not a full-precision 1/sqrt).
CB_FORCEINLINE void CB_INLINE_ATTR VecRESqrt(Vec4f& _rOut, const Vec4f& _rValues)
{
	const __m128 Estimate = _mm_rsqrt_ps(_rValues.Packed);
	_rOut.Packed = Estimate;
}

// ---------------------------------------------------------------------------------------------

// Component-wise reciprocal estimate (hardware approximation of 1/x).
CB_FORCEINLINE void CB_INLINE_ATTR VecRE(Vec4f& _rOut, const Vec4f& _rValues)
{
	const __m128 Estimate = _mm_rcp_ps(_rValues.Packed);
	_rOut.Packed = Estimate;
}

// ---------------------------------------------------------------------------------------------

// Component-wise absolute value, computed as max(x, 0 - x).
// NOTE(review): _mm_max_ps has asymmetric NaN handling (returns the second
// operand when a comparison is unordered) — confirm NaN inputs are not a
// concern for callers.
CB_FORCEINLINE void CB_INLINE_ATTR VecAbs(Vec4f& _rOut, const Vec4f& _rValues)
{
	_rOut.Packed = _mm_max_ps(_rValues.Packed, _mm_sub_ps(_mm_setzero_ps(), _rValues.Packed));	
}

// ---------------------------------------------------------------------------------------------

// Component-wise floor. SSE4.1 provides a native packed floor; older targets
// fall back to a scalar loop.
CB_FORCEINLINE void CB_INLINE_ATTR VecFloor(Vec4f& _rOut, const Vec4f& _rValues)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_floor_ps(_rValues.Packed);
#else	
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = floor(_rValues.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Component-wise ceiling. SSE4.1 provides a native packed ceil; older targets
// fall back to a scalar loop.
CB_FORCEINLINE void CB_INLINE_ATTR VecCeil(Vec4f& _rOut, const Vec4f& _rValues)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_ceil_ps(_rValues.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = ceil(_rValues.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Component-wise round-to-nearest.
// Fix: _MM_ROUND_NEAREST is an MXCSR control constant intended for
// _MM_SET_ROUNDING_MODE, not a _mm_round_ps rounding immediate; it only
// behaved correctly because both happen to be zero. Use the documented
// _MM_FROUND_* immediate and suppress precision exceptions.
CB_FORCEINLINE void CB_INLINE_ATTR VecRound(Vec4f& _rOut, const Vec4f& _rValues)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_round_ps(_rValues.Packed, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = Round(_rValues.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Returns true if every lane equals _Value exactly (bitwise float compare
// semantics of operator==).
CB_FORCEINLINE bool CB_INLINE_ATTR VecAll(const Vec4f& _rValues, float _Value)
{
	bool AllEqual = true;
	for (int32 Lane(0); Lane < 4; ++Lane)
		AllEqual = AllEqual && (_rValues.Data[Lane] == _Value);
	return AllEqual;
}

// ---------------------------------------------------------------------------------------------

// Per-lane equality test producing integer 0/1 results.
// The all-ones compare mask is ANDed with 1.0f, then converted to int32.
// Pre-SSE2 targets lack packed float->int conversion and cast per lane.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpEQ(Vec4i& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	CB_DECL_ALIGN(16) Vec4f Temp;
	Temp.Packed = _mm_and_ps(_mm_cmpeq_ps(_rLhs.Packed, _rRhs.Packed), _mm_set1_ps(1));
#ifdef CB_USE_SSE2
	_rOut.Packed = _mm_cvtps_epi32(Temp.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = (int32)Temp.Data[i];
#endif
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-than test producing integer 0/1 results
// (same mask-and-convert scheme as VecCmpEQ above).
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGT(Vec4i& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	CB_DECL_ALIGN(16) Vec4f Temp;
	Temp.Packed = _mm_and_ps(_mm_cmpgt_ps(_rLhs.Packed, _rRhs.Packed), _mm_set1_ps(1));
#ifdef CB_USE_SSE2
	_rOut.Packed = _mm_cvtps_epi32(Temp.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = (int32)Temp.Data[i];
#endif
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-than test producing integer 0/1 results.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLT(Vec4i& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	CB_DECL_ALIGN(16) Vec4f Temp;
	Temp.Packed = _mm_and_ps(_mm_cmplt_ps(_rLhs.Packed, _rRhs.Packed), _mm_set1_ps(1));
#ifdef CB_USE_SSE2
	_rOut.Packed = _mm_cvtps_epi32(Temp.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = (int32)Temp.Data[i];
#endif
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-or-equal test producing integer 0/1 results.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGE(Vec4i& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	CB_DECL_ALIGN(16) Vec4f Temp;
	Temp.Packed = _mm_and_ps(_mm_cmpge_ps(_rLhs.Packed, _rRhs.Packed), _mm_set1_ps(1));
#ifdef CB_USE_SSE2
	_rOut.Packed = _mm_cvtps_epi32(Temp.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = (int32)Temp.Data[i];
#endif
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-or-equal test producing integer 0/1 results.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLE(Vec4i& _rOut, const Vec4f& _rLhs, const Vec4f& _rRhs)
{
	CB_DECL_ALIGN(16) Vec4f Temp;
	Temp.Packed = _mm_and_ps(_mm_cmple_ps(_rLhs.Packed, _rRhs.Packed), _mm_set1_ps(1));
#ifdef CB_USE_SSE2
	_rOut.Packed = _mm_cvtps_epi32(Temp.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = (int32)Temp.Data[i];
#endif
}

#ifndef CB_USE_SSE2
// If we are not allowed to use SSE2 we don't have any Integer functions.
// So we include the standard fallback methods right away, excluding the float methods with
// CBL_SSE_FALLBACK.
#define CBL_SSE_FALLBACK
#include "CBL_SIMD_Fallback.h"
#else

// ---------------------------------------------------------------------------------------------
//  Vec4i declarations
// ---------------------------------------------------------------------------------------------

// Broadcasts a single int32 into all four lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec4i& _rOut, int32 _Value)
{
	const __m128i Broadcast = _mm_set1_epi32(_Value);
	_rOut.Packed = Broadcast;
}

// ---------------------------------------------------------------------------------------------

// Broadcasts the lane picked by _Selector (0..3) into all four output lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec4i& _rOut, Vec4i& _rValues, uint32 _Selector)
{
	const int32 Picked = _rValues.Data[_Selector];
	_rOut.Packed = _mm_set1_epi32(Picked);
}

// ---------------------------------------------------------------------------------------------

// Rearranges lanes: output lane i receives _rValues lane _rDest.Data[i].
CB_FORCEINLINE void CB_INLINE_ATTR VecSwizzle(Vec4i& _rOut, const Vec4i& _rValues, const Vec4i& _rDest)
{
	const int32 Lane0 = _rValues.Data[_rDest.Data[0]];
	const int32 Lane1 = _rValues.Data[_rDest.Data[1]];
	const int32 Lane2 = _rValues.Data[_rDest.Data[2]];
	const int32 Lane3 = _rValues.Data[_rDest.Data[3]];
	_rOut.Packed = _mm_setr_epi32(Lane0, Lane1, Lane2, Lane3);
}

// ---------------------------------------------------------------------------------------------

// Interleaves the upper two int32 lanes of both operands (unpack-high).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeBack(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Interleaved = _mm_unpackhi_epi32(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Interleaves the lower two int32 lanes of both operands (unpack-low).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeFront(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Interleaved = _mm_unpacklo_epi32(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 32-bit integer addition (wraps on overflow).
CB_FORCEINLINE void CB_INLINE_ATTR VecAdd(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Sum = _mm_add_epi32(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Sum;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 32-bit integer subtraction (wraps on overflow).
CB_FORCEINLINE void CB_INLINE_ATTR VecSub(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Diff = _mm_sub_epi32(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Diff;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 32-bit multiply (keeps the low 32 bits of each product).
// Improvement: SSE4.1 provides a native packed 32-bit low multiply with the
// same truncating semantics as the scalar loop; use it when available,
// following the CB_USE_SSE4 pattern used by VecMin/VecMax below.
CB_FORCEINLINE void CB_INLINE_ATTR VecMul(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_mullo_epi32(_rLhs.Packed, _rRhs.Packed);
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = _rLhs.Data[i] * _rRhs.Data[i];
#endif
}

// ---------------------------------------------------------------------------------------------

// Component-wise 32-bit integer division. SSE has no integer divide, so this
// is scalar. Caller must ensure no lane of _rRhs is zero.
CB_FORCEINLINE void CB_INLINE_ATTR VecDiv(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	for (int32 Lane(0); Lane < 4; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] / _rRhs.Data[Lane];
}

// ---------------------------------------------------------------------------------------------

// Multiply-add: _rOut = _rMLhs * _rMRhs + _rAdd, per int32 lane.
CB_FORCEINLINE void CB_INLINE_ATTR VecMAdd(Vec4i& _rOut, const Vec4i& _rMLhs, const Vec4i& _rMRhs, const Vec4i& _rAdd)
{
	CB_DECL_ALIGN(16) Vec4i Product;
	VecMul(Product, _rMLhs, _rMRhs);
	_rOut.Packed = _mm_add_epi32(Product.Packed, _rAdd.Packed);
}

// ---------------------------------------------------------------------------------------------

// Multiplies every int32 lane by the scalar _Scale (scalar loop).
CB_FORCEINLINE void CB_INLINE_ATTR VecScale(Vec4i& _rOut, const Vec4i& _rLhs, int32 _Scale)
{
	for (int32 Lane(0); Lane < 4; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] * _Scale;
}

// ---------------------------------------------------------------------------------------------

// Per-lane signed 32-bit minimum. _mm_min_epi32 needs SSE4.1; older targets
// fall back to a scalar loop.
CB_FORCEINLINE void CB_INLINE_ATTR VecMin(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_min_epi32(_rLhs.Packed, _rRhs.Packed);	
#else	
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = Min(_rLhs.Data[i], _rRhs.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Per-lane signed 32-bit maximum. _mm_max_epi32 needs SSE4.1; older targets
// fall back to a scalar loop.
CB_FORCEINLINE void CB_INLINE_ATTR VecMax(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_max_epi32(_rLhs.Packed, _rRhs.Packed);	
#else	
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = Max(_rLhs.Data[i], _rRhs.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Bitwise AND of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecAnd(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Result = _mm_and_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise (~_rLhs) & _rRhs — note the intrinsic negates its FIRST operand.
CB_FORCEINLINE void CB_INLINE_ATTR VecAndNot(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Result = _mm_andnot_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise OR of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecOr(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Result = _mm_or_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise XOR of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecXOr(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Result = _mm_xor_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Left-shifts all int32 lanes.
// NOTE(review): _mm_sll_epi32 shifts EVERY lane by the single count held in
// the low 64 bits of _rRhs — it is not a per-lane shift. Confirm this matches
// the scalar fallback's semantics (fallback not visible here).
CB_FORCEINLINE void CB_INLINE_ATTR VecLShift(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	_rOut.Packed = _mm_sll_epi32(_rLhs.Packed, _rRhs.Packed);
}

// ---------------------------------------------------------------------------------------------

// Right-shifts all int32 lanes.
// NOTE(review): _mm_srl_epi32 is a LOGICAL (zero-fill) shift although the
// lanes are signed, and the single shift count comes from the low 64 bits of
// _rRhs (not per lane). Confirm both properties against the fallback.
CB_FORCEINLINE void CB_INLINE_ATTR VecRShift(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	_rOut.Packed = _mm_srl_epi32(_rLhs.Packed, _rRhs.Packed);
}

// ---------------------------------------------------------------------------------------------

// Per-lane absolute value, computed as max(x, 0 - x).
// NOTE(review): INT32_MIN has no positive counterpart, so that lane stays
// INT32_MIN — same caveat applies to the scalar Abs fallback.
CB_FORCEINLINE void CB_INLINE_ATTR VecAbs(Vec4i& _rOut, const Vec4i& _rValues)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_max_epi32(_rValues.Packed, _mm_sub_epi32(_mm_setzero_si128(), _rValues.Packed));	
#else
	for (int32 i(0); i<4; ++i)
		_rOut.Data[i] = Abs(_rValues.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Returns true if every int32 lane equals _Value.
CB_FORCEINLINE bool CB_INLINE_ATTR VecAll(const Vec4i& _rValues, int32 _Value)
{
	bool AllEqual = true;
	for (int32 Lane(0); Lane < 4; ++Lane)
		AllEqual = AllEqual && (_rValues.Data[Lane] == _Value);
	return AllEqual;
}

// ---------------------------------------------------------------------------------------------

// Per-lane equality: writes 1 where equal, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpEQ(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Mask = _mm_cmpeq_epi32(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi32(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-than: writes 1 where _rLhs > _rRhs, 0 otherwise.
// Fix: the _mm_and_si128 call was missing its second operand and closing
// parenthesis and did not compile; mask the all-ones compare result down to
// 0/1 exactly like the sibling VecCmp* overloads.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGT(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	_rOut.Packed = _mm_and_si128(_mm_cmpgt_epi32(_rLhs.Packed, _rRhs.Packed), _mm_set1_epi32(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-than: writes 1 where _rLhs < _rRhs, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLT(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	const __m128i Mask = _mm_cmplt_epi32(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi32(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-or-equal: SSE2 has no native epi32 >=, so it is built as
// (EQ | GT) masked down to 0/1.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGE(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	_rOut.Packed =	_mm_and_si128(_mm_or_si128(_mm_cmpeq_epi32(_rLhs.Packed, _rRhs.Packed),
											   _mm_cmpgt_epi32(_rLhs.Packed, _rRhs.Packed)),
								  _mm_set1_epi32(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-or-equal: built as (EQ | LT) masked down to 0/1.
// Fix: a stray extra '(' after _mm_and_si128( left the parentheses
// unbalanced, so the function did not compile.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLE(Vec4i& _rOut, const Vec4i& _rLhs, const Vec4i& _rRhs)
{
	_rOut.Packed =	_mm_and_si128(_mm_or_si128(_mm_cmpeq_epi32(_rLhs.Packed, _rRhs.Packed),
											   _mm_cmplt_epi32(_rLhs.Packed, _rRhs.Packed)),
								  _mm_set1_epi32(1));
}

// ---------------------------------------------------------------------------------------------
//  Vec8s declarations
// ---------------------------------------------------------------------------------------------

// Broadcasts a single int16 into all eight lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec8s& _rOut, int16 _Value)
{
	const __m128i Broadcast = _mm_set1_epi16(_Value);
	_rOut.Packed = Broadcast;
}

// ---------------------------------------------------------------------------------------------

// Broadcasts the lane picked by _Selector (0..7) into all eight output lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec8s& _rOut, Vec8s& _rValues, uint32 _Selector)
{
	const int16 Picked = _rValues.Data[_Selector];
	_rOut.Packed = _mm_set1_epi16(Picked);
}

// ---------------------------------------------------------------------------------------------

// Rearranges lanes: output lane i receives _rValues lane _rDest.Data[i].
CB_FORCEINLINE void CB_INLINE_ATTR VecSwizzle(Vec8s& _rOut, const Vec8s& _rValues, const Vec8s& _rDest)
{
	_rOut.Packed = _mm_setr_epi16(_rValues.Data[_rDest.Data[0]],
								  _rValues.Data[_rDest.Data[1]],
								  _rValues.Data[_rDest.Data[2]],
								  _rValues.Data[_rDest.Data[3]],
								  _rValues.Data[_rDest.Data[4]],
								  _rValues.Data[_rDest.Data[5]],
								  _rValues.Data[_rDest.Data[6]],
								  _rValues.Data[_rDest.Data[7]]);
}

// ---------------------------------------------------------------------------------------------

// Interleaves the upper four int16 lanes of both operands (unpack-high).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeBack(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Interleaved = _mm_unpackhi_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Interleaves the lower four int16 lanes of both operands (unpack-low).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeFront(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Interleaved = _mm_unpacklo_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 16-bit integer addition (wraps on overflow).
CB_FORCEINLINE void CB_INLINE_ATTR VecAdd(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Sum = _mm_add_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Sum;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 16-bit integer subtraction (wraps on overflow).
CB_FORCEINLINE void CB_INLINE_ATTR VecSub(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Diff = _mm_sub_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Diff;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 16-bit multiply, keeping the low 16 bits of each product.
// Improvement: this section is only compiled under CB_USE_SSE2, and SSE2
// already has _mm_mullo_epi16 with the same truncating semantics as the
// scalar loop — no need to drop out of SIMD here.
CB_FORCEINLINE void CB_INLINE_ATTR VecMul(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	_rOut.Packed = _mm_mullo_epi16(_rLhs.Packed, _rRhs.Packed);
}

// ---------------------------------------------------------------------------------------------

// Component-wise 16-bit integer division. SSE has no integer divide, so this
// is scalar. Caller must ensure no lane of _rRhs is zero.
CB_FORCEINLINE void CB_INLINE_ATTR VecDiv(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	for (int32 Lane(0); Lane < 8; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] / _rRhs.Data[Lane];
}

// ---------------------------------------------------------------------------------------------

// Multiply-add: _rOut = _rMLhs * _rMRhs + _rAdd, per int16 lane.
// Fix: the final step used _mm_madd_epi16, which multiplies ADJACENT lane
// pairs and horizontally adds them into 32-bit lanes — not an element-wise
// add of the already-computed product. Use _mm_add_epi16, matching the
// Vec4i/Vec16c VecMAdd implementations.
CB_FORCEINLINE void CB_INLINE_ATTR VecMAdd(Vec8s& _rOut, const Vec8s& _rMLhs, const Vec8s& _rMRhs, const Vec8s& _rAdd)
{
	CB_DECL_ALIGN(16) Vec8s Temp;
	VecMul(Temp, _rMLhs, _rMRhs);
	_rOut.Packed = _mm_add_epi16(Temp.Packed, _rAdd.Packed);
}

// ---------------------------------------------------------------------------------------------

// Multiplies every int16 lane by the scalar _Scale (scalar loop).
CB_FORCEINLINE void CB_INLINE_ATTR VecScale(Vec8s& _rOut, const Vec8s& _rLhs, int16 _Scale)
{
	for (int32 Lane(0); Lane < 8; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] * _Scale;
}

// ---------------------------------------------------------------------------------------------

// Per-lane signed 16-bit minimum (native in SSE2).
CB_FORCEINLINE void CB_INLINE_ATTR VecMin(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Smaller = _mm_min_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Smaller;
}

// ---------------------------------------------------------------------------------------------

// Per-lane signed 16-bit maximum (native in SSE2).
CB_FORCEINLINE void CB_INLINE_ATTR VecMax(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Larger = _mm_max_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Larger;
}

// ---------------------------------------------------------------------------------------------

// Bitwise AND of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecAnd(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Result = _mm_and_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise (~_rLhs) & _rRhs — the intrinsic negates its FIRST operand.
CB_FORCEINLINE void CB_INLINE_ATTR VecAndNot(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Result = _mm_andnot_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise OR of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecOr(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Result = _mm_or_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise XOR of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecXOr(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Result = _mm_xor_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Left-shifts all int16 lanes.
// NOTE(review): _mm_sll_epi16 shifts EVERY lane by the single count in the
// low 64 bits of _rRhs — not per lane. Confirm against the scalar fallback.
CB_FORCEINLINE void CB_INLINE_ATTR VecLShift(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	_rOut.Packed = _mm_sll_epi16(_rLhs.Packed, _rRhs.Packed);
}

// ---------------------------------------------------------------------------------------------

// Right-shifts all int16 lanes.
// NOTE(review): _mm_srl_epi16 is a LOGICAL (zero-fill) shift although the
// lanes are signed, and the single count comes from the low 64 bits of _rRhs.
CB_FORCEINLINE void CB_INLINE_ATTR VecRShift(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	_rOut.Packed = _mm_srl_epi16(_rLhs.Packed, _rRhs.Packed);
}

// ---------------------------------------------------------------------------------------------

// Per-lane absolute value, computed as max(x, 0 - x).
// NOTE(review): the SIMD branch only uses SSE2 intrinsics, so gating it on
// CB_USE_SSE4 looks stricter than necessary — confirm intent.
CB_FORCEINLINE void CB_INLINE_ATTR VecAbs(Vec8s& _rOut, const Vec8s& _rValues)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_max_epi16(_rValues.Packed, _mm_sub_epi16(_mm_setzero_si128(), _rValues.Packed));	
#else
	for (int32 i(0); i<8; ++i)
		_rOut.Data[i] = Abs(_rValues.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Returns true if every int16 lane equals _Value.
CB_FORCEINLINE bool CB_INLINE_ATTR VecAll(const Vec8s& _rValues, int16 _Value)
{
	bool AllEqual = true;
	for (int32 Lane(0); Lane < 8; ++Lane)
		AllEqual = AllEqual && (_rValues.Data[Lane] == _Value);
	return AllEqual;
}

// ---------------------------------------------------------------------------------------------

// Per-lane equality: writes 1 where equal, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpEQ(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Mask = _mm_cmpeq_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi16(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-than: writes 1 where _rLhs > _rRhs, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGT(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Mask = _mm_cmpgt_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi16(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-than: writes 1 where _rLhs < _rRhs, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLT(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	const __m128i Mask = _mm_cmplt_epi16(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi16(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-or-equal: built as (EQ | GT) masked down to 0/1, since
// SSE2 has no native epi16 >= compare.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGE(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	_rOut.Packed =	_mm_and_si128(_mm_or_si128(_mm_cmpeq_epi16(_rLhs.Packed, _rRhs.Packed),
											   _mm_cmpgt_epi16(_rLhs.Packed, _rRhs.Packed)),
								  _mm_set1_epi16(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-or-equal: built as (EQ | LT) masked down to 0/1.
// Fix: a stray leading comma before _mm_set1_epi16(1) made this a syntax
// error; the call now mirrors VecCmpGE above.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLE(Vec8s& _rOut, const Vec8s& _rLhs, const Vec8s& _rRhs)
{
	_rOut.Packed =	_mm_and_si128(_mm_or_si128(_mm_cmpeq_epi16(_rLhs.Packed, _rRhs.Packed),
											   _mm_cmplt_epi16(_rLhs.Packed, _rRhs.Packed)),
								  _mm_set1_epi16(1));
}

// ---------------------------------------------------------------------------------------------
//  Vec16c declarations
// ---------------------------------------------------------------------------------------------

// Broadcasts a single int8 into all sixteen lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec16c& _rOut, int8 _Value)
{
	const __m128i Broadcast = _mm_set1_epi8(_Value);
	_rOut.Packed = Broadcast;
}

// ---------------------------------------------------------------------------------------------

// Broadcasts the lane picked by _Selector (0..15) into all output lanes.
CB_FORCEINLINE void CB_INLINE_ATTR VecSplat(Vec16c& _rOut, Vec16c& _rValues, uint32 _Selector)
{
	const int8 Picked = _rValues.Data[_Selector];
	_rOut.Packed = _mm_set1_epi8(Picked);
}

// ---------------------------------------------------------------------------------------------

// Rearranges lanes: output lane i receives _rValues lane _rDest.Data[i].
// Fix: the comma at the end of the third argument row was missing, so the
// _mm_setr_epi8 call did not compile.
CB_FORCEINLINE void CB_INLINE_ATTR VecSwizzle(Vec16c& _rOut, const Vec16c& _rValues, const Vec16c& _rDest)
{
	_rOut.Packed = _mm_setr_epi8(_rValues.Data[_rDest.Data[0]],  _rValues.Data[_rDest.Data[1]],  _rValues.Data[_rDest.Data[2]],  _rValues.Data[_rDest.Data[3]],
								 _rValues.Data[_rDest.Data[4]],  _rValues.Data[_rDest.Data[5]],  _rValues.Data[_rDest.Data[6]],  _rValues.Data[_rDest.Data[7]],
								 _rValues.Data[_rDest.Data[8]],  _rValues.Data[_rDest.Data[9]],  _rValues.Data[_rDest.Data[10]], _rValues.Data[_rDest.Data[11]],
								 _rValues.Data[_rDest.Data[12]], _rValues.Data[_rDest.Data[13]], _rValues.Data[_rDest.Data[14]], _rValues.Data[_rDest.Data[15]]);
}

// ---------------------------------------------------------------------------------------------

// Interleaves the upper eight int8 lanes of both operands (unpack-high).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeBack(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Interleaved = _mm_unpackhi_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Interleaves the lower eight int8 lanes of both operands (unpack-low).
CB_FORCEINLINE void CB_INLINE_ATTR VecMergeFront(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Interleaved = _mm_unpacklo_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Interleaved;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 8-bit integer addition (wraps on overflow).
CB_FORCEINLINE void CB_INLINE_ATTR VecAdd(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Sum = _mm_add_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Sum;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 8-bit integer subtraction (wraps on overflow).
CB_FORCEINLINE void CB_INLINE_ATTR VecSub(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Diff = _mm_sub_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Diff;
}

// ---------------------------------------------------------------------------------------------

// Component-wise 8-bit multiply. No packed 8-bit multiply exists in SSE,
// so this stays scalar.
CB_FORCEINLINE void CB_INLINE_ATTR VecMul(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	for (int32 Lane(0); Lane < 16; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] * _rRhs.Data[Lane];
}

// ---------------------------------------------------------------------------------------------

// Component-wise 8-bit integer division (scalar). Caller must ensure no lane
// of _rRhs is zero.
CB_FORCEINLINE void CB_INLINE_ATTR VecDiv(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	for (int32 Lane(0); Lane < 16; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] / _rRhs.Data[Lane];
}

// ---------------------------------------------------------------------------------------------

// Multiply-add: _rOut = _rMLhs * _rMRhs + _rAdd, per int8 lane.
CB_FORCEINLINE void CB_INLINE_ATTR VecMAdd(Vec16c& _rOut, const Vec16c& _rMLhs, const Vec16c& _rMRhs, const Vec16c& _rAdd)
{
	CB_DECL_ALIGN(16) Vec16c Product;
	VecMul(Product, _rMLhs, _rMRhs);
	_rOut.Packed = _mm_add_epi8(Product.Packed, _rAdd.Packed);
}

// ---------------------------------------------------------------------------------------------

// Multiplies every int8 lane by the scalar _Scale (scalar loop).
CB_FORCEINLINE void CB_INLINE_ATTR VecScale(Vec16c& _rOut, const Vec16c& _rLhs, int8 _Scale)
{
	for (int32 Lane(0); Lane < 16; ++Lane)
		_rOut.Data[Lane] = _rLhs.Data[Lane] * _Scale;
}

// ---------------------------------------------------------------------------------------------

// Per-lane signed 8-bit minimum. _mm_min_epi8 needs SSE4.1; older targets
// fall back to a scalar loop.
CB_FORCEINLINE void CB_INLINE_ATTR VecMin(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_min_epi8(_rLhs.Packed, _rRhs.Packed);
#else
	for (int32 i(0); i<16; ++i)
		_rOut.Data[i] = Min(_rLhs.Data[i], _rRhs.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Per-lane signed 8-bit maximum. _mm_max_epi8 needs SSE4.1; older targets
// fall back to a scalar loop.
CB_FORCEINLINE void CB_INLINE_ATTR VecMax(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_max_epi8(_rLhs.Packed, _rRhs.Packed);
#else
	for (int32 i(0); i<16; ++i)
		_rOut.Data[i] = Max(_rLhs.Data[i], _rRhs.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Bitwise AND of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecAnd(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Result = _mm_and_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise (~_rLhs) & _rRhs — the intrinsic negates its FIRST operand.
CB_FORCEINLINE void CB_INLINE_ATTR VecAndNot(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Result = _mm_andnot_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise OR of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecOr(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Result = _mm_or_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Bitwise XOR of the full 128-bit registers.
CB_FORCEINLINE void CB_INLINE_ATTR VecXOr(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Result = _mm_xor_si128(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = Result;
}

// ---------------------------------------------------------------------------------------------

// Per-lane left shift (scalar loop — SSE has no packed 8-bit shift).
// Unlike the Vec4i/Vec8s overloads this one IS per-lane; shifted results are
// truncated back to int8.
CB_FORCEINLINE void CB_INLINE_ATTR VecLShift(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	for (int32 i(0); i<16; ++i)
		_rOut.Data[i] = _rLhs.Data[i] << _rRhs.Data[i];
}

// ---------------------------------------------------------------------------------------------

// Per-lane right shift (scalar loop — SSE has no packed 8-bit shift).
// NOTE(review): operator>> on signed values is an arithmetic shift on common
// compilers, unlike the logical _mm_srl_* used by the Vec4i/Vec8s overloads —
// confirm which semantics callers expect.
CB_FORCEINLINE void CB_INLINE_ATTR VecRShift(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	for (int32 i(0); i<16; ++i)
		_rOut.Data[i] = _rLhs.Data[i] >> _rRhs.Data[i];
}

// ---------------------------------------------------------------------------------------------

// Per-lane absolute value, computed as max(x, 0 - x) when SSE4.1's
// _mm_max_epi8 is available; scalar Abs otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecAbs(Vec16c& _rOut, const Vec16c& _rValues)
{
#ifdef CB_USE_SSE4
	_rOut.Packed = _mm_max_epi8(_rValues.Packed, _mm_sub_epi8(_mm_setzero_si128(), _rValues.Packed));	
#else
	for (int32 i(0); i<16; ++i)
		_rOut.Data[i] = Abs(_rValues.Data[i]);
#endif
}

// ---------------------------------------------------------------------------------------------

// Returns true if every int8 lane equals _Value.
CB_FORCEINLINE bool CB_INLINE_ATTR VecAll(const Vec16c& _rValues, int8 _Value)
{
	bool AllEqual = true;
	for (int32 Lane(0); Lane < 16; ++Lane)
		AllEqual = AllEqual && (_rValues.Data[Lane] == _Value);
	return AllEqual;
}

// ---------------------------------------------------------------------------------------------

// Per-lane equality: writes 1 where equal, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpEQ(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Mask = _mm_cmpeq_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi8(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-than: writes 1 where _rLhs > _rRhs, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGT(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Mask = _mm_cmpgt_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi8(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-than: writes 1 where _rLhs < _rRhs, 0 otherwise.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLT(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	const __m128i Mask = _mm_cmplt_epi8(_rLhs.Packed, _rRhs.Packed);
	_rOut.Packed = _mm_and_si128(Mask, _mm_set1_epi8(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane greater-or-equal: built as (EQ | GT) masked down to 0/1, since
// SSE2 has no native epi8 >= compare.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpGE(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	_rOut.Packed =	_mm_and_si128(_mm_or_si128(_mm_cmpeq_epi8(_rLhs.Packed, _rRhs.Packed),
											   _mm_cmpgt_epi8(_rLhs.Packed, _rRhs.Packed)), 
								  _mm_set1_epi8(1));
}

// ---------------------------------------------------------------------------------------------

// Per-lane less-or-equal: built as (EQ | LT) masked down to 0/1, since
// SSE2 has no native epi8 <= compare.
CB_FORCEINLINE void CB_INLINE_ATTR VecCmpLE(Vec16c& _rOut, const Vec16c& _rLhs, const Vec16c& _rRhs)
{
	_rOut.Packed =	_mm_and_si128(_mm_or_si128(_mm_cmpeq_epi8(_rLhs.Packed, _rRhs.Packed),
											   _mm_cmplt_epi8(_rLhs.Packed, _rRhs.Packed)), 
								  _mm_set1_epi8(1));
}

#endif
#endif