/////////////////////////////////////////////////////////////////////////
//
// Amuse Engine SDK - core/math/vector/Sse
// Copyright (c) 2014.  All Rights Reserved
//
// File:		AEVector3f_Sse.h
// Author:		Gianluca Belardelli
// Date:		27/01/2014
//
/////////////////////////////////////////////////////////////////////////
#ifndef _AEVECTOR3FSSE_INL_
#define _AEVECTOR3FSSE_INL_

// Builds (x, y, z, 0) from an xyz register by forcing the w lane to zero.
// FIX: both branches passed the integer literal 0 where an __m128 operand is
// required (_mm_blend_ps / _mm_unpackhi_ps) — this does not compile once the
// macro is expanded. Use _mm_setzero_ps() instead.
// SSE4.1: blend mask 0x8 takes lane 3 (w) from the zero register.
// SSE2: unpackhi(xyz, 0) = (z, 0, w, 0); shuffle picks (x, y, z, 0).
#if AE_SSE_VERSION >= 0x41
#define AE_VECTOR3F_COMBINE_XYZ(xyz) _mm_blend_ps( xyz, _mm_setzero_ps(), 0x8)
#else
#define AE_VECTOR3F_COMBINE_XYZ(xyz) _mm_shuffle_ps( xyz, _mm_unpackhi_ps(xyz, _mm_setzero_ps()), _MM_SHUFFLE(3,0,1,0))
#endif

#define AEVECTOR4F_CONSTRUCTORS
// Wraps an existing SIMD register as-is (the w lane is whatever the caller put there).
AE_FORCEINLINE AEVector3f::AEVector3f( const AEQuadFloat32 &qfValue )
{
	m_qfVector = qfValue;
}

// Builds (fX, fY, fZ, 0); _mm_setr_ps stores its arguments in ascending lane order.
AE_FORCEINLINE AEVector3f::AEVector3f( AEFLOAT32 fX, AEFLOAT32 fY, AEFLOAT32 fZ )
{
	m_qfVector = _mm_setr_ps( fX, fY, fZ, 0 );
}

// Copy constructor: a single register copy, all four lanes.
AE_FORCEINLINE AEVector3f::AEVector3f( const AEVector3f &vcCopy )
{
	m_qfVector = vcCopy.m_qfVector;
}

#define AEVECTOR4F_BASESETS
// Clears all four lanes (including the unused w lane) to 0.0f.
AE_FORCEINLINE void AEVector3f::SetZero( void )
{
	m_qfVector = _mm_setzero_ps();
}

// Assigns (fX, fY, fZ, 0) in ascending lane order.
AE_FORCEINLINE void AEVector3f::Set( AEFLOAT32 fX, AEFLOAT32 fY, AEFLOAT32 fZ )
{
	m_qfVector = _mm_setr_ps( fX, fY, fZ, 0 );
}

// Assembles (x, y, z, 0) from three SIMD scalars (value expected in lane 0 of
// each — TODO confirm AESimdFloat32 lane convention against its declaration).
// unpacklo(x, y) = (x, y, x, y); unpacklo(z, 0) = (z, 0, z, 0);
// movelh merges the two low halves into (x, y, z, 0).
// FIX: _mm_unpacklo_ps requires an __m128 second operand; the original code
// passed the integer literal 0, which does not compile. Use _mm_setzero_ps().
AE_FORCEINLINE void AEVector3f::Set( AESimdFloat32ConstRef fX, AESimdFloat32ConstRef fY, AESimdFloat32ConstRef fZ )
{
	const AEQuadFloat32 ab = _mm_unpacklo_ps( fX.m_fReal, fY.m_fReal );
	const AEQuadFloat32 cd = _mm_unpacklo_ps( fZ.m_fReal, _mm_setzero_ps() );
	m_qfVector = _mm_movelh_ps( ab, cd );
}

// Broadcasts a scalar float into all four lanes.
AE_FORCEINLINE void AEVector3f::SetAll( const AEFLOAT32 &fValue )
{
	m_qfVector = _mm_set1_ps( fValue );
}

// Broadcast overload for a SIMD scalar; assumes fValue.m_fReal already has the
// value replicated in every lane — TODO confirm against AESimdFloat32's contract.
AE_FORCEINLINE void AEVector3f::SetAll( AESimdFloat32ConstRef fValue )
{
	m_qfVector = fValue.m_fReal;
}

#define AEVECTOR4F_ADVSETS
// this = v1 + v2, component-wise over all four lanes.
AE_FORCEINLINE void AEVector3f::SetAdd( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	const AEQuadFloat32 qfLhs = vcVector1.m_qfVector;
	const AEQuadFloat32 qfRhs = vcVector2.m_qfVector;
	m_qfVector = _mm_add_ps( qfLhs, qfRhs );
}

// this = v1 - v2, component-wise over all four lanes.
AE_FORCEINLINE void AEVector3f::SetSub( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	const AEQuadFloat32 qfLhs = vcVector1.m_qfVector;
	const AEQuadFloat32 qfRhs = vcVector2.m_qfVector;
	m_qfVector = _mm_sub_ps( qfLhs, qfRhs );
}

// this = v1 * v2, component-wise over all four lanes.
AE_FORCEINLINE void AEVector3f::SetMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	const AEQuadFloat32 qfLhs = vcVector1.m_qfVector;
	const AEQuadFloat32 qfRhs = vcVector2.m_qfVector;
	m_qfVector = _mm_mul_ps( qfLhs, qfRhs );
}

// this = v * s, scaling every lane by the SIMD scalar.
AE_FORCEINLINE void AEVector3f::SetMul( AEVector4fRefParam vcVector, AESimdFloat32ConstRef fValue )
{
	const AEQuadFloat32 qfScale = fValue.m_fReal;
	m_qfVector = _mm_mul_ps( qfScale, vcVector.m_qfVector );
}

// this = v1 - s * v2 (scaled subtract).
AE_FORCEINLINE void AEVector3f::SetSubMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AESimdFloat32ConstRef fValue )
{
	const AEQuadFloat32 qfScaled = _mm_mul_ps( fValue.m_fReal, vcVector2.m_qfVector );
	m_qfVector = _mm_sub_ps( vcVector1.m_qfVector, qfScaled );
}

// this = v1 + s * v2 (scaled add / madd form).
AE_FORCEINLINE void AEVector3f::SetAddMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AESimdFloat32ConstRef fValue )
{
	const AEQuadFloat32 qfScaled = _mm_mul_ps( fValue.m_fReal, vcVector2.m_qfVector );
	m_qfVector = _mm_add_ps( vcVector1.m_qfVector, qfScaled );
}

// this = v1 + v2 * v3 (component-wise multiply-add).
AE_FORCEINLINE void AEVector3f::SetAddMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AEVector4fRefParam vcVector3 )
{
	const AEQuadFloat32 qfProduct = _mm_mul_ps( vcVector2.m_qfVector, vcVector3.m_qfVector );
	m_qfVector = _mm_add_ps( vcVector1.m_qfVector, qfProduct );
}

// this = v1 - v2 * v3 (component-wise multiply-subtract).
AE_FORCEINLINE void AEVector3f::SetSubMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AEVector4fRefParam vcVector3 )
{
	const AEQuadFloat32 qfProduct = _mm_mul_ps( vcVector2.m_qfVector, vcVector3.m_qfVector );
	m_qfVector = _mm_sub_ps( vcVector1.m_qfVector, qfProduct );
}

// this = v1 x v2 (3D cross product; the w lane carries v1.w*v2.w - v2.w*v1.w = 0
// when both inputs keep w consistent).
// Three-shuffle formulation: compute v1*v2.yzxw - v2*v1.yzxw first, then rotate
// the difference by yzxw once, instead of shuffling all four inputs.
AE_FORCEINLINE void AEVector3f::SetCross( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	const AEQuadFloat32 cross0 = _mm_mul_ps( vcVector1.m_qfVector, _mm_shuffle_ps( vcVector2.m_qfVector, vcVector2.m_qfVector, _MM_SHUFFLE( 3, 0, 2, 1 ) ) );
	const AEQuadFloat32 cross1 = _mm_mul_ps( vcVector2.m_qfVector, _mm_shuffle_ps( vcVector1.m_qfVector, vcVector1.m_qfVector, _MM_SHUFFLE( 3, 0, 2, 1) ) );
	const AEQuadFloat32 diff   = _mm_sub_ps( cross0, cross1 );

	m_qfVector = _mm_shuffle_ps( diff, diff, _MM_SHUFFLE( 3, 0, 2, 1 ) );
}

// SetNeg<N>: this = vcVector with the first N components negated.
// The sign mask is built by inserting 0x8000 into 16-bit lane 1, which lands in
// the upper half of 32-bit lane 0 — i.e. the sign bit (0x80000000) of x.
template <>
AE_FORCEINLINE void AEVector3f::SetNeg<1>( AEVector4fRefParam vcVector )
{
	__m128i mask = _mm_insert_epi16( _mm_setzero_si128(), 0x8000, 0x1 );
	m_qfVector = _mm_xor_ps( vcVector.m_qfVector, _mm_castsi128_ps( mask ) );
}

// Negate x and y: replicate the lane-0 sign mask into lanes 0 and 1.
template <>
AE_FORCEINLINE void AEVector3f::SetNeg<2>( AEVector4fRefParam vcVector )
{
	__m128i mask = _mm_insert_epi16( _mm_setzero_si128(), 0x8000, 0x1 );
	mask = _mm_shuffle_epi32( mask, _MM_SHUFFLE( 1, 1, 0, 0 ) );
	m_qfVector = _mm_xor_ps( vcVector.m_qfVector, _mm_castsi128_ps( mask ) );
}

#define AE_VECTOR4F_NEGFUNCS
// Negate x, y and z: replicate the lane-0 sign mask into lanes 0-2; w untouched.
template <>
AE_FORCEINLINE void AEVector3f::SetNeg<3>( AEVector4fRefParam vcVector )
{
	__m128i mask = _mm_insert_epi16( _mm_setzero_si128(), 0x8000, 0x1 );
	mask = _mm_shuffle_epi32( mask, _MM_SHUFFLE( 1, 0, 0, 0 ) );
	m_qfVector = _mm_xor_ps( vcVector.m_qfVector, _mm_castsi128_ps( mask ) );
}

// Fallback for unsupported N: compile/run-time error via project macro.
template <int N>
AE_FORCEINLINE void AEVector3f::SetNeg( AEVector4fRefParam vcVector )
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
}

#define AE_VECTOR4F_ABSFUNCS
// this = |vcVector| component-wise (delegates to the shared SSE fabs helper).
AE_FORCEINLINE void AEVector3f::SetAbs( AEVector4fRefParam vcVector )
{
	m_qfVector = AEMathSSE::QuadFabs( vcVector.m_qfVector );
}

#define AE_VECTOR4F_MINMAXFUNCS
// this = component-wise minimum. Note: minps is not symmetric for NaN inputs
// (it returns the second operand on an unordered compare), so operand order matters.
AE_FORCEINLINE void AEVector3f::SetMin( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = _mm_min_ps( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

// this = component-wise maximum (same NaN caveat as SetMin).
AE_FORCEINLINE void AEVector3f::SetMax( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = _mm_max_ps( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

#if AE_SSE_VERSION >= 0x41
// Zeroes component I via a single blend against the zero register.
template <int I>
AE_FORCEINLINE void AEVector3f::ZeroComponent( void )
{
	AE_VECTOR4F_SUBINDEX_CHECK;
	m_qfVector = _mm_blend_ps( m_qfVector, _mm_setzero_ps(), 1 << I );
}
#else

// SSE2 fallbacks: zero a single lane with shift/unpack/shuffle tricks.
// <0>: byte-shift right then left by 4, clearing lane 0: (x,y,z,w) -> (0,y,z,w).
template <>
AE_FORCEINLINE void AEVector3f::ZeroComponent<0>( void )
{
	m_qfVector = _mm_castsi128_ps( _mm_slli_si128( _mm_srli_si128( _mm_castps_si128( m_qfVector ), 4 ), 4 ) );
}

// <1>: unpacklo(v, 0) = (x,0,y,0); shuffle keeps (x,0) and original (z,w).
template <>
AE_FORCEINLINE void AEVector3f::ZeroComponent<1>( void )
{
	m_qfVector = _mm_shuffle_ps( _mm_unpacklo_ps( m_qfVector, _mm_setzero_ps() ), m_qfVector, _MM_SHUFFLE( 3, 2, 1, 0 ) );
}

// <2>: unpackhi(v, 0) = (z,0,w,0); shuffle keeps (x,y) and picks (0,w).
template <>
AE_FORCEINLINE void AEVector3f::ZeroComponent<2>( void )
{
	m_qfVector = _mm_shuffle_ps( m_qfVector, _mm_unpackhi_ps( m_qfVector, _mm_setzero_ps() ), _MM_SHUFFLE( 2, 3, 1, 0 ) );
}

// Fallback for out-of-range I: triggers the project index check.
template <int I>
AE_FORCEINLINE void AEVector3f::ZeroComponent( void )
{
	AE_VECTOR4F_SUBINDEX_CHECK;
}
#endif

// Runtime-index variant: dispatches to the compile-time specializations.
AE_FORCEINLINE void AEVector3f::ZeroComponent( const AEINT32 nIndex )
{
	AEASSERT_MSG( ( nIndex >= 0 ) && ( nIndex < 3 ), "Component index out of range");
	switch( nIndex )
	{
		case 2:  ZeroComponent<2>(); break;
		case 1:  ZeroComponent<1>(); break;
		default: ZeroComponent<0>(); break;
	}
}

#define AE_VECTOR4F_DOTFUNCS
// 2-component dot product (x*x' + y*y'), result broadcast to every lane.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetDot<2>( AEVector4fRefParam vcVector ) const
{
#if AE_SSE_VERSION >= 0x41
	// dpps mask 0x3F: multiply lanes 0-1, broadcast the sum to all four lanes.
	return AESimdFloat32::Convert( _mm_dp_ps( m_qfVector, vcVector.m_qfVector, 0x3F ) );
#elif AE_SSE_VERSION >= 0x30
	const AEQuadFloat32 x2 = _mm_mul_ps( m_qfVector, vcVector.m_qfVector );
	const AEQuadFloat32 hsum = _mm_hadd_ps( x2, x2 ); // xy zw xy zw
	return AESimdFloat32::Convert( _mm_shuffle_ps(hsum,hsum,_MM_SHUFFLE(0,0,0,0)));
#else
	const AEQuadFloat32 x2 = _mm_mul_ps(m_qfVector,vcVector.m_qfVector);
	const AEQuadFloat32 result = _mm_add_ps( _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(1,1,1,1)), _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(0,0,0,0))); // xy xy xy xy
	return AESimdFloat32::Convert(result);
#endif
}

// 3-component dot product (x*x' + y*y' + z*z'), result broadcast to every lane.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetDot<3>( AEVector4fRefParam vcVector ) const
{
#if AE_SSE_VERSION >= 0x41
	// dpps mask 0x7F: multiply lanes 0-2, broadcast the sum to all four lanes.
	return AESimdFloat32::Convert( _mm_dp_ps(m_qfVector, vcVector.m_qfVector, 0x7F));
#elif AE_SSE_VERSION >= 0x30
	const AEQuadFloat32 x2 = _mm_mul_ps(m_qfVector,vcVector.m_qfVector);
	const AEQuadFloat32 hsum = _mm_hadd_ps(x2,x2); // xy zw xy zw
	const AEQuadFloat32 z = _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(2,2,2,2)); // zzzz
	const AEQuadFloat32 hsumz = _mm_add_ps(hsum, z); // xyz zzw xyz zzw
	return AESimdFloat32::Convert(  _mm_shuffle_ps(hsumz,hsumz,_MM_SHUFFLE(0,0,0,0)));
#else
	const AEQuadFloat32 x2 = _mm_mul_ps(m_qfVector,vcVector.m_qfVector);
	const AEQuadFloat32 xySum = _mm_add_ps( _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(1,1,1,1)), _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(0,0,0,0))); // xy xy xy xy
	const AEQuadFloat32 z = _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(2,2,2,2)); // zzzz
	const AEQuadFloat32 result = _mm_add_ps( z, xySum); // xyz xyz xyz xyz
	return AESimdFloat32::Convert(result);
#endif
}

// Fallback for unsupported N: project error macro, returns zero constant.
template <int N>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetDot( AEVector4fRefParam vcVector ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// 4D dot of (x, y, z, this.w) with (x', y', z', 1): i.e. dot3 + this.w.
// Useful for plane/point tests where the plane stores d in w.
AE_FORCEINLINE const AESimdFloat32 AEVector3f::Dot4xyz1( AEVector4fRefParam vcVector ) const
{
#if AE_SSE_VERSION >= 0x41
	const AEQuadFloat32 xyz = _mm_dp_ps( m_qfVector, vcVector.m_qfVector, 0x7F );
	return AESimdFloat32::Convert( _mm_add_ps( xyz, _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 3, 3, 3, 3 ) ) ) );
#elif AE_SSE_VERSION >= 0x30
	const AEQuadFloat32 xx2 = _mm_mul_ps( m_qfVector, vcVector.m_qfVector );
	const AEQuadFloat32 x2 = AE_VECTOR4F_COMBINE_XYZ_W( xx2, m_qfVector );	// replace w by this.w
	const AEQuadFloat32 hsum = _mm_hadd_ps( x2, x2 ); // xy zw xy zw
	return AESimdFloat32::Convert( _mm_hadd_ps( hsum, hsum ) ); // xyzw all 4
#else
	const AEQuadFloat32 xx2 = _mm_mul_ps( m_qfVector, vcVector.m_qfVector );
	const AEQuadFloat32 x2 = AE_VECTOR4F_COMBINE_XYZ_W( xx2, m_qfVector );	// replace w by this.w
	const AEQuadFloat32 sum0 = _mm_add_ps( _mm_shuffle_ps( x2, x2, _MM_SHUFFLE( 1, 0, 3, 2 ) ), x2 ); // yxwz+xyzw = xy xy zw zw
	const AEQuadFloat32 sum1 = _mm_shuffle_ps( sum0, sum0, _MM_SHUFFLE( 2, 3, 0, 1 ) ); // = zw zw xy xy
	const AEQuadFloat32 result = _mm_add_ps( sum0, sum1 ); // = xyzw xyzw xyzw xyzw
	return AESimdFloat32::Convert( result );
#endif
}

#define AE_VECTOR4F_HORIZFUNCS
// Horizontal add of the first 2 components (x + y), broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalAdd<2>( void ) const
{
#if AE_SSE_VERSION >= 0x30
	const AEQuadFloat32 x2 = _mm_hadd_ps( m_qfVector, m_qfVector );
	return AESimdFloat32::Convert( _mm_shuffle_ps( x2, x2, _MM_SHUFFLE( 0, 0, 0, 0 ) ) );
#else
	return AESimdFloat32::Convert( _mm_add_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) ) );
#endif
}

// Horizontal add of the first 3 components (x + y + z), broadcast to all lanes.
// The SSE3 path uses lane 0 of hadd (x+y) so the w lane never contaminates the sum.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalAdd<3>( void ) const
{
#if AE_SSE_VERSION >= 0x30
	const AEQuadFloat32 x2 = _mm_hadd_ps( m_qfVector, m_qfVector );
	return AESimdFloat32::Convert( _mm_add_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 2, 2, 2, 2 ) ), _mm_shuffle_ps( x2, x2, _MM_SHUFFLE( 0, 0, 0, 0 ) ) ) );
#else
	const AEQuadFloat32 xySum = _mm_add_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) );
	return AESimdFloat32::Convert( _mm_add_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 2, 2, 2, 2 ) ), xySum ) );
#endif
}

// Fallback for unsupported N.
template <int N>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalAdd( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// Horizontal product of the first 2 components (x * y), broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMul<2>( void ) const
{
	return AESimdFloat32::Convert( _mm_mul_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) ) );
}

// Horizontal product of the first 3 components (x * y * z), broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMul<3>( void ) const
{
	const AEQuadFloat32 xyProd = _mm_mul_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) );
	return AESimdFloat32::Convert( _mm_mul_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 2, 2, 2, 2 ) ), xyProd ) );
}

// Fallback for unsupported N.
template <int N>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMul( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// Max over the first 1 component: trivially x itself.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMax<1>( void ) const
{
	return GetComponent<0>();
}

// Max of x and y, broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMax<2>( void ) const
{
	return AESimdFloat32::Convert( _mm_max_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) ) );
}

// Max of x, y and z, broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMax<3>( void ) const
{
	const AEQuadFloat32 xy = _mm_max_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) );
	return AESimdFloat32::Convert( _mm_max_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 2, 2, 2, 2 ) ), xy ) );
}

// Fallback for unsupported N.
template <int N>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMax( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// Min over the first 1 component: trivially x itself.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMin<1>( void ) const
{
	return GetComponent<0>();
}

// Min of x and y, broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMin<2>( void ) const
{
	return AESimdFloat32::Convert( _mm_min_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) ) );
}

// Min of x, y and z, broadcast to all lanes.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMin<3>( void ) const
{
	const AEQuadFloat32 xy = _mm_min_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 1, 1, 1, 1 ) ), _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 0, 0, 0, 0 ) ) );
	return AESimdFloat32::Convert( _mm_min_ps( _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( 2, 2, 2, 2 ) ), xy ) );
}

// Fallback for unsupported N.
template <int N>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetHorizontalMin( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

#define AE_VECTOR4F_DIVFUNCS

namespace AEVector4_AdvancedInterface
{
	// Primary template: only instantiated for unsupported (accuracy, div-by-zero)
	// combinations — triggers the project "not implemented" diagnostic.
	template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
	struct unrollf_setDiv
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// Plain division qfSelf = v1 / v2 at the requested accuracy; A is a
	// compile-time constant so the switch folds to a single instruction path.
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
		{
			switch( A )
			{
				case AE_ACC_23_BIT: qfSelf = _mm_mul_ps( vcVector1.m_qfVector, AEMath::QuadReciprocal( vcVector2.m_qfVector ) ); break;
				case AE_ACC_12_BIT: qfSelf = _mm_mul_ps( vcVector1.m_qfVector, _mm_rcp_ps( vcVector2.m_qfVector ) ); break;
				default:         	qfSelf = _mm_div_ps( vcVector1.m_qfVector, vcVector2.m_qfVector ); break;
			}
		}
	};

	// qfSelf = v1 / v2, with any lane where v2 == 0 forced to 0.0f instead of
	// the inf/NaN the raw division would produce.
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
		{
			const AEQuadFloat32 equalsZero = _mm_cmpeq_ps( vcVector2.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e;
			unrollf_setDiv<A, AE_DIV_IGNORE>::Apply( e, vcVector1, vcVector2 );
			// FIX: intrinsic name was misspelled "_mm_andnot_ ps" (stray space),
			// which does not compile. andnot clears the lanes where v2 was zero.
			qfSelf = _mm_andnot_ps( equalsZero, e );
		}
	};

	// qfSelf = v1 / v2, with v2 == 0 lanes replaced by +/-AE_FLOAT_HIGH,
	// the sign taken from the corresponding lane of the numerator v1.
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_HIGH>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
		{
			const AEQuadFloat32 equalsZero = _mm_cmpeq_ps( vcVector2.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e; unrollf_setDiv<A, AE_DIV_IGNORE>::Apply( e, vcVector1, vcVector2 );
			AEQuadFloat32 huge = _mm_set1_ps( AE_FLOAT_HIGH );
			// Shift-isolate the sign bit of each v1 lane and transfer it to "huge".
			const __m128i mask = _mm_slli_epi32( _mm_srli_epi32( _mm_castps_si128( vcVector1.m_qfVector ), 31 ), 31 );
			huge = _mm_xor_ps( huge, _mm_castsi128_ps( mask ) );
		#if AE_SSE_VERSION >= 0x41
			qfSelf = _mm_blendv_ps( e, huge, equalsZero );
		#else
			qfSelf = _mm_or_ps( _mm_and_ps( equalsZero, huge ), _mm_andnot_ps( equalsZero, e ) );
		#endif
		}
	};

	// Same as AE_DIV_SET_HIGH but substitutes +/-AE_FLOAT_MAX on zero divisors.
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_MAX>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
		{
			const AEQuadFloat32 equalsZero = _mm_cmpeq_ps( vcVector2.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e; unrollf_setDiv<A, AE_DIV_IGNORE>::Apply( e, vcVector1, vcVector2 );
			AEQuadFloat32 huge = _mm_set1_ps( AE_FLOAT_MAX );
			const __m128i mask = _mm_slli_epi32( _mm_srli_epi32( _mm_castps_si128( vcVector1.m_qfVector ), 31 ), 31 );
			huge = _mm_xor_ps( huge, _mm_castsi128_ps( mask ) );
		#if AE_SSE_VERSION >= 0x41
			qfSelf = _mm_blendv_ps( e, huge, equalsZero );
		#else
			qfSelf = _mm_or_ps( _mm_and_ps( equalsZero, huge ), _mm_andnot_ps( equalsZero, e ) );
		#endif
		}
	};

	// Divide with zero substitution, then additionally snap results within
	// epsilon of 1.0 to exactly 1.0 (cleans up v / v style divisions).
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_ZERO_AND_ONE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
		{
			unrollf_setDiv<A, AE_DIV_SET_ZERO>::Apply( qfSelf, vcVector1, vcVector2);
			const AEQuadFloat32 one = g_vectorfConstants[AE_QUADREAL_1];
			const AEQuadFloat32 absVal = AEMath::QuadFabs( _mm_sub_ps( qfSelf, one ) );
			const AEQuadFloat32 lessEqualEps = _mm_cmple_ps( absVal, g_vectorfConstants[AE_QUADREAL_EPS] );
		#if AE_SSE_VERSION >= 0x41
			qfSelf = _mm_blendv_ps( qfSelf, one, lessEqualEps );
		#else
			qfSelf = _mm_or_ps( _mm_and_ps( lessEqualEps, one ), _mm_andnot_ps( lessEqualEps, qfSelf ) );
		#endif
		}
	};
} // namespace

// this = v1 / v2 with caller-selected accuracy and divide-by-zero policy.
template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
AE_FORCEINLINE void AEVector3f::SetDiv( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	AEVector4_AdvancedInterface::unrollf_setDiv<A,D>::Apply( m_qfVector, vcVector1, vcVector2 );
}

// Default policy: full-accuracy division, divide-by-zero left unguarded.
AE_FORCEINLINE void AEVector3f::SetDiv( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	AEVector4_AdvancedInterface::unrollf_setDiv<AE_ACC_23_BIT,AE_DIV_IGNORE>::Apply( m_qfVector, vcVector1, vcVector2 );
}

// In-place division: this /= vcVector.
template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
AE_FORCEINLINE void AEVector3f::Div( AEVector4fRefParam vcVector )
{
	SetDiv<A,D>( *this, vcVector );
}

// In-place division with default policy.
AE_FORCEINLINE void AEVector3f::Div( AEVector4fRefParam vcVector )
{
	SetDiv( *this, vcVector );
}

#define AE_VECTOR4F_RECIPROCALFUNCS
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported (accuracy, div-by-zero) combinations.
	template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
	struct unrollf_setReciprocal
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// fSelf = 1 / v at the requested accuracy; no zero-divisor handling.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			switch (A)
			{
				case AE_ACC_23_BIT: fSelf = AEMath::QuadReciprocal( vcVector.m_qfVector ); break;
				case AE_ACC_12_BIT: fSelf = _mm_rcp_ps( vcVector.m_qfVector ); break;
				default:         	fSelf = _mm_div_ps( g_vectorfConstants[AE_QUADREAL_1], vcVector.m_qfVector ); break;
			}
		}
	};

	// fSelf = 1 / v, with lanes where v == 0 forced to 0.0f.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			const AEQuadFloat32 equalsZero = _mm_cmpeq_ps( vcVector.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e;
			unrollf_setReciprocal<A, AE_DIV_IGNORE>::Apply( e, vcVector );
			fSelf = _mm_andnot_ps( equalsZero, e );
		}
	};

	// fSelf = 1 / v, with v == 0 lanes replaced by +/-AE_FLOAT_HIGH; the sign
	// is copied from the (signed-zero) source lane via a sign-bit isolate + xor.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_HIGH>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			const AEQuadFloat32 equalsZero = _mm_cmpeq_ps( vcVector.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e;
			unrollf_setReciprocal<A, AE_DIV_IGNORE>::Apply( e, vcVector );
			AEQuadFloat32 huge = _mm_set1_ps( AE_FLOAT_HIGH );
			const __m128i mask = _mm_slli_epi32( _mm_srli_epi32( _mm_castps_si128( vcVector.m_qfVector ), 31 ), 31 );
			huge = _mm_xor_ps( huge, _mm_castsi128_ps( mask ) );
		#if AE_SSE_VERSION >= 0x41
			fSelf = _mm_blendv_ps( e, huge, equalsZero );
		#else
			fSelf = _mm_or_ps( _mm_and_ps( equalsZero, huge ), _mm_andnot_ps( equalsZero, e ) );
		#endif
		}
	};

	// Same as AE_DIV_SET_HIGH but substitutes +/-AE_FLOAT_MAX on zero lanes.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_MAX>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			const AEQuadFloat32 equalsZero = _mm_cmpeq_ps( vcVector.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e;
			unrollf_setReciprocal<A, AE_DIV_IGNORE>::Apply( e, vcVector );
			AEQuadFloat32 huge = _mm_set1_ps( AE_FLOAT_MAX );
			const __m128i mask = _mm_slli_epi32( _mm_srli_epi32( _mm_castps_si128( vcVector.m_qfVector ), 31 ), 31 );
			huge = _mm_xor_ps( huge, _mm_castsi128_ps( mask ) );
		#if AE_SSE_VERSION >= 0x41
			fSelf = _mm_blendv_ps( e, huge, equalsZero );
		#else
			fSelf = _mm_or_ps( _mm_and_ps( equalsZero, huge ), _mm_andnot_ps( equalsZero, e ) );
		#endif
		}
	};

	// Reciprocal with zero substitution, then snap results within epsilon of
	// 1.0 to exactly 1.0.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_ZERO_AND_ONE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam vcVector )
		{
			unrollf_setReciprocal<A, AE_DIV_SET_ZERO>::Apply( fSelf, vcVector );
			const AEQuadFloat32 one = g_vectorfConstants[AE_QUADREAL_1];
			const AEQuadFloat32 absVal = AEMath::QuadFabs( _mm_sub_ps( fSelf, one ) );
			const AEQuadFloat32 lessEqualEps = _mm_cmple_ps( absVal, g_vectorfConstants[AE_QUADREAL_EPS] );
		#if AE_SSE_VERSION >= 0x41
			fSelf = _mm_blendv_ps( fSelf, one, lessEqualEps );
		#else
			fSelf = _mm_or_ps( _mm_and_ps( lessEqualEps, one ), _mm_andnot_ps( lessEqualEps, fSelf ) );
		#endif
		}
	};
} // End namespace

// this = 1 / v with caller-selected accuracy and divide-by-zero policy.
template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
AE_FORCEINLINE void AEVector3f::SetReciprocal( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setReciprocal<A,D>::Apply( m_qfVector, vcVector );
}

// Default policy: 23-bit accuracy, divide-by-zero left unguarded.
AE_FORCEINLINE void AEVector3f::SetReciprocal( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setReciprocal<AE_ACC_23_BIT,AE_DIV_IGNORE>::Apply( m_qfVector, vcVector );
}

#define AE_VECTOR4F_SQRTFUNCS
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported (accuracy, negative-sqrt) combinations.
	template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
	struct unrollf_setSqrt
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// fSelf = sqrt(v). The reduced-accuracy paths use x * rsqrt(x), which is
	// cheaper than sqrtps but produces NaN for x == 0 (0 * inf).
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrt<A, AE_SQRT_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			switch (A)
			{
				case AE_ACC_23_BIT: fSelf = _mm_mul_ps( vcVector.m_qfVector, AEMath::QuadReciprocalSquareRoot( vcVector.m_qfVector ) ); break;
				case AE_ACC_12_BIT: fSelf = _mm_mul_ps( vcVector.m_qfVector,_mm_rsqrt_ps( vcVector.m_qfVector ) ); break;
				default:         	fSelf = _mm_sqrt_ps( vcVector.m_qfVector ); break;
			}
		}
	};

	// fSelf = sqrt(v), with lanes where v <= 0 forced to 0.0f (cmple also
	// shields the x * rsqrt(0) NaN of the fast paths).
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrt<A, AE_SQRT_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			const AEQuadFloat32 equalsZero = _mm_cmple_ps( vcVector.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e;
			unrollf_setSqrt<A, AE_SQRT_IGNORE>::Apply( e, vcVector );
			fSelf = _mm_andnot_ps( equalsZero, e );
		}
	};
} // End namespace

// this = sqrt(v) with caller-selected accuracy and negative-input policy.
template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
AE_FORCEINLINE void AEVector3f::SetSqrt( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrt<A,S>::Apply( m_qfVector, vcVector );
}

// Default policy: 23-bit accuracy, non-positive lanes forced to zero.
AE_FORCEINLINE void AEVector3f::SetSqrt( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrt<AE_ACC_23_BIT, AE_SQRT_SET_ZERO>::Apply( m_qfVector, vcVector );
}

namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported (accuracy, negative-sqrt) combinations.
	template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
	struct unrollf_setSqrtInverse
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// fSelf = 1 / sqrt(v) at the requested accuracy; no input guarding.
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrtInverse<A, AE_SQRT_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			switch (A)
			{
				case AE_ACC_23_BIT: fSelf = AEMath::QuadReciprocalSquareRoot( vcVector.m_qfVector ); break;
				case AE_ACC_12_BIT: fSelf = _mm_rsqrt_ps( vcVector.m_qfVector ); break;
				default:			fSelf = _mm_div_ps( g_vectorfConstants[AE_QUADREAL_1], _mm_sqrt_ps( vcVector.m_qfVector ) ); break;
			}
		}
	};

	// fSelf = 1 / sqrt(v), with lanes where v <= 0 forced to 0.0f.
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrtInverse<A, AE_SQRT_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &fSelf, AEVector4fRefParam vcVector )
		{
			const AEQuadFloat32 equalsZero = _mm_cmple_ps( vcVector.m_qfVector, _mm_setzero_ps() );
			AEQuadFloat32 e;
			unrollf_setSqrtInverse<A, AE_SQRT_IGNORE>::Apply( e, vcVector );
			fSelf = _mm_andnot_ps( equalsZero, e );
		}
	};
} // End namespace

// this = 1 / sqrt(v) with caller-selected accuracy and negative-input policy.
template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
AE_FORCEINLINE void AEVector3f::SetSqrtInverse( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrtInverse<A,S>::Apply( m_qfVector, vcVector );
}

// Default policy: 23-bit accuracy, non-positive lanes forced to zero.
AE_FORCEINLINE void AEVector3f::SetSqrtInverse( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrtInverse<AE_ACC_23_BIT,AE_SQRT_SET_ZERO>::Apply( m_qfVector, vcVector );
}

#define AE_VECTOR4F_SETXYZWFUNCS
// Replaces this vector's x, y, z with those of vcXYZVector; w is preserved.
AE_FORCEINLINE void AEVector3f::SetXYZ( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = AE_VECTOR4F_COMBINE_XYZ_W( vcXYZVector.m_qfVector, m_qfVector );
}

// Sets x = y = z = fValue; w is preserved.
AE_FORCEINLINE void AEVector3f::SetXYZ( AEFLOAT32 fValue )
{
	const AEQuadFloat32 q = _mm_set1_ps( fValue );
	m_qfVector = AE_VECTOR4F_COMBINE_XYZ_W( q, m_qfVector );
}

// Sets x = y = z from a SIMD scalar; w is preserved.
AE_FORCEINLINE void AEVector3f::SetXYZ( AESimdFloat32ConstRef fValue )
{
	m_qfVector = AE_VECTOR4F_COMBINE_XYZ_W( fValue.m_fReal, m_qfVector );
}

// this += vcXYZVector over all four lanes; in debug builds w is then poisoned
// with 0xffffffff so accidental reads of the (meaningless) w sum are caught.
AE_FORCEINLINE void AEVector3f::AddXYZ( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = _mm_add_ps( m_qfVector, vcXYZVector.m_qfVector );
	#ifdef AEDEBUG
		m_qfVector.m128_u32[3] = 0xffffffff;
	#endif
}

// this -= vcXYZVector over all four lanes; w poisoned in debug builds.
AE_FORCEINLINE void AEVector3f::SubXYZ( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = _mm_sub_ps( m_qfVector, vcXYZVector.m_qfVector );
	#ifdef AEDEBUG
		m_qfVector.m128_u32[3] = 0xffffffff;
	#endif
}
#define AE_VECTOR4F_LOADFUNCS

namespace AEVector4_AdvancedInterface
{
	// Primary load template (float source): only instantiated for unsupported
	// (count, alignment-mode) combinations.
	template <int N, AEMathIoMode A>
	struct unrollf_load
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEFLOAT32 *lpAddr )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// Primary load template (double source, converted to float on load).
	template <int N, AEMathIoMode A>
	struct unrollf_load_D
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEDOUBLE64 *lpAddr )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// Loads N floats (N = 1..4) from an address with no alignment guarantee.
	// N is a compile-time constant, so the switch folds to one case. In debug
	// builds the unused high lanes are poisoned with 0xffffffff so accidental
	// reads of them show up immediately.
	template <int N>
	struct unrollf_load<N, AE_IO_BYTE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEFLOAT32 *lpAddr )
		{
			switch (N)
			{
				case 1:
					{
						qfSelf = _mm_load_ss( lpAddr );
						#ifdef AEDEBUG
							// FIX: this is a static helper — the debug poisoning must
							// write qfSelf; the original wrote the nonexistent member
							// m_qfVector, which does not compile under AEDEBUG.
							qfSelf.m128_u32[1] = 0xffffffff;
							qfSelf.m128_u32[2] = 0xffffffff;
							qfSelf.m128_u32[3] = 0xffffffff;
						#endif
						
					}
					break;
				case 2:
					{
						// One 64-bit scalar load covers both x and y.
						qfSelf = _mm_castpd_ps( _mm_load_sd( ( const double* )lpAddr ) );
						#ifdef AEDEBUG
							qfSelf.m128_u32[2] = 0xffffffff;
							qfSelf.m128_u32[3] = 0xffffffff;
						#endif
					}
					break;
				case 3:
					{
						// xy as one 64-bit load, z as a scalar load, merged via movelh.
						__m128 xy = _mm_castpd_ps( _mm_load_sd( ( const double* )lpAddr ) );
						__m128 z = _mm_load_ss( lpAddr+2 );
						qfSelf = _mm_movelh_ps( xy, z );
						#ifdef AEDEBUG
							qfSelf.m128_u32[3] = 0xffffffff;
						#endif
					}
					break;
				default:
					{
						#if AE_SSE_VERSION >= 0x30
							// lddqu handles cache-line-split unaligned loads faster on
							// the CPUs this SSE3 path targets.
							qfSelf = _mm_castsi128_ps( _mm_lddqu_si128( (const __m128i* )lpAddr ) );
						#else
							qfSelf = _mm_loadu_ps( lpAddr );
						#endif
					}
					break;
			}
		}
	};

	// Loads N doubles (N = 1..4) from an unaligned address and converts them to
	// floats (cvtpd_ps fills the upper lanes of its result with zero). Debug
	// builds poison the unused lanes with 0xffffffff.
	template <int N>
	struct unrollf_load_D<N, AE_IO_BYTE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEDOUBLE64 *lpAddr )
		{
			switch (N)
			{
			case 1:
				{
					__m128d a = _mm_load_sd( lpAddr );
					qfSelf = _mm_cvtpd_ps( a );
					#ifdef AEDEBUG
						// FIX: static helper — poison qfSelf, not the nonexistent
						// member m_qfVector (original did not compile under AEDEBUG).
						qfSelf.m128_u32[1] = 0xffffffff;
						qfSelf.m128_u32[2] = 0xffffffff;
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif
				}
				break;
			case 2:
				{
					#if AE_SSE_VERSION >= 0x30
						__m128d a = _mm_castsi128_pd( _mm_lddqu_si128( ( const __m128i* )lpAddr ) );
					#else
						__m128d a = _mm_loadu_pd( lpAddr );
					#endif
					
					qfSelf = _mm_cvtpd_ps(a);
					
					#ifdef AEDEBUG
						qfSelf.m128_u32[2] = 0xffffffff;
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif
				}
				break;
			case 3:
				{
					// xy as a 128-bit load, z as a scalar load, merged via movelh.
					#if AE_SSE_VERSION >= 0x30
						__m128d a = _mm_castsi128_pd( _mm_lddqu_si128( ( const __m128i* )lpAddr ) );
					#else
						__m128d a = _mm_loadu_pd( lpAddr );
					#endif
					
					__m128d b = _mm_load_sd( lpAddr+2 );

					__m128 xy = _mm_cvtpd_ps( a );
					__m128 z = _mm_cvtpd_ps( b );
					qfSelf = _mm_movelh_ps( xy, z );
					
					#ifdef AEDEBUG
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif
				}
				break;
			default:
				{
				#if AE_SSE_VERSION >= 0x30
					__m128d a = _mm_castsi128_pd( _mm_lddqu_si128( ( const __m128i* )lpAddr ) );
					__m128d b = _mm_castsi128_pd( _mm_lddqu_si128( ( const __m128i* )( lpAddr+2 ) ) );
				#else
					__m128d a = _mm_loadu_pd( lpAddr );
					__m128d b = _mm_loadu_pd( lpAddr+2 );
				#endif
					__m128 xy = _mm_cvtpd_ps( a );
					__m128 zw = _mm_cvtpd_ps( b );
					qfSelf = _mm_movelh_ps( xy, zw );
				}
				break;
			}
		}
	};

	// Element-aligned float load: asserts natural alignment, then defers to the
	// unaligned implementation (the alignment only buys the assert, not a
	// different instruction).
	template <int N>
	struct unrollf_load<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEFLOAT32 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & (sizeof(AEFLOAT32)-1) ) == 0, "pointer must be aligned to native size of AEFLOAT32.");
			unrollf_load<N, AE_IO_BYTE_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// Element-aligned double load: same pattern as the float variant.
	template <int N>
	struct unrollf_load_D<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEDOUBLE64 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & (sizeof(AEDOUBLE64)-1) ) == 0, "pointer must be aligned to native size of AEDOUBLE64.");
			unrollf_load_D<N, AE_IO_BYTE_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// SIMD-aligned float load: only N == 4 can use the aligned movaps; the
	// partial counts fall back to the element-aligned path. The alignment
	// check treats N == 3 as a full 16-byte requirement.
	template <int N>
	struct unrollf_load<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEFLOAT32 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEFLOAT32)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			if( N == 4 )
				qfSelf = _mm_load_ps( lpAddr );
			else
				unrollf_load<N, AE_IO_NATIVE_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// SIMD-aligned double load: N >= 2 can use aligned movapd loads, each pair
	// converted to floats and merged with movelh; N == 1 falls back to the
	// element-aligned path. Debug builds poison the unused float lanes.
	template <int N>
	struct unrollf_load_D<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEDOUBLE64 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEDOUBLE64)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			switch (N)
			{
			case 4:
				{
					__m128d a = _mm_load_pd( lpAddr );
					__m128d b = _mm_load_pd( lpAddr+2 );
					__m128 xy = _mm_cvtpd_ps( a );
					__m128 zw = _mm_cvtpd_ps( b );
					qfSelf = _mm_movelh_ps( xy, zw );
				}
				break;
			case 3:
				{
					__m128d a = _mm_load_pd( lpAddr );
					__m128d b = _mm_load_sd( lpAddr+2 );
					__m128 xy = _mm_cvtpd_ps( a );
					__m128 z = _mm_cvtpd_ps( b );
					qfSelf = _mm_movelh_ps( xy, z );

					#ifdef AEDEBUG
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif

				}
				break;
			case 2:
				{
					__m128d a = _mm_load_pd( lpAddr );
					qfSelf = _mm_cvtpd_ps( a );
					#ifdef AEDEBUG
						qfSelf.m128_u32[2] = 0xffffffff;
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif
				}
				break;
			default:
				{
					unrollf_load_D<N, AE_IO_NATIVE_ALIGNED>::Apply( qfSelf, lpAddr );
				}
				break;
			}
		}
	};

	// Non-temporal float load: uses movntdqa (SSE4.1) for the full-width case
	// to bypass the cache; partial counts and pre-SSE4.1 builds fall back to
	// the plain SIMD-aligned path.
	template <int N>
	struct unrollf_load<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEFLOAT32 *lpAddr )
		{
		#if AE_SSE_VERSION >= 0x41
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEFLOAT32)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			if( N == 4 )
				qfSelf = _mm_castsi128_ps( _mm_stream_load_si128( ( __m128i* )lpAddr ) );
			else
				unrollf_load<N, AE_IO_SIMD_ALIGNED>::Apply( qfSelf, lpAddr );
		#else
			unrollf_load<N, AE_IO_SIMD_ALIGNED>::Apply( qfSelf, lpAddr );
		#endif
		}
	};

	// Non-temporal double load: streams each 16-byte half where possible and
	// converts to float; mirrors the SIMD-aligned variant's lane poisoning.
	template <int N>
	struct unrollf_load_D<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32 &qfSelf, const AEDOUBLE64 *lpAddr )
		{
		#if AE_SSE_VERSION >= 0x41
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEDOUBLE64)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			switch( N )
			{
			case 4:
				{
					__m128d a = _mm_castsi128_pd( _mm_stream_load_si128( ( __m128i* )lpAddr ) );
					__m128d b = _mm_castsi128_pd( _mm_stream_load_si128( ( __m128i* )( lpAddr+2 ) ) );
					__m128 xy = _mm_cvtpd_ps( a );
					__m128 zw = _mm_cvtpd_ps( b );
					qfSelf = _mm_movelh_ps( xy, zw );
				}
				break;
			case 3:
				{
					// The trailing z double is loaded normally (no 16-byte stream
					// possible for a single element).
					__m128d a = _mm_castsi128_pd( _mm_stream_load_si128( ( __m128i* )lpAddr ) );
					__m128d b = _mm_load_sd( lpAddr+2 );
					__m128 xy = _mm_cvtpd_ps( a );
					__m128 z = _mm_cvtpd_ps( b );
					qfSelf = _mm_movelh_ps( xy, z );
		
					#ifdef AEDEBUG
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif

				}
				break;
			case 2:
				{
					__m128d a = _mm_castsi128_pd( _mm_stream_load_si128( (__m128i*)lpAddr ) );
					qfSelf = _mm_cvtpd_ps( a );
					#ifdef AEDEBUG
						qfSelf.m128_u32[2] = 0xffffffff;
						qfSelf.m128_u32[3] = 0xffffffff;
					#endif
				}
				break;
			default:
				{
					unrollf_load_D<N, AE_IO_SIMD_ALIGNED>::Apply( qfSelf, lpAddr );
				}
				break;
			}
		#else
			unrollf_load_D<N, AE_IO_SIMD_ALIGNED>::Apply( qfSelf, lpAddr );
		#endif
		}
	};
} // End namespace

// Loads N floats from lpAddr using I/O mode A (alignment / caching policy);
// lengths outside the supported range trip the compile-time check.
template <int N, AEMathIoMode A>
AE_FORCEINLINE void AEVector3f::Load( const AEFLOAT32 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_load<N,A>::Apply( m_qfVector, lpAddr );
}

// Loads N doubles from lpAddr (narrowed to float) using I/O mode A.
template <int N, AEMathIoMode A>
AE_FORCEINLINE void AEVector3f::Load( const AEDOUBLE64 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_load_D<N,A>::Apply( m_qfVector, lpAddr );
}

// Convenience overload: loads N floats assuming SIMD alignment.
template <int N>
AE_FORCEINLINE void AEVector3f::Load( const AEFLOAT32 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_load<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

// Convenience overload: loads N doubles (narrowed to float) assuming SIMD alignment.
template <int N>
AE_FORCEINLINE void AEVector3f::Load( const AEDOUBLE64 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_load_D<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

namespace AEVector4_AdvancedInterface
{
	// Primary template: float store for any (N, mode) combination without a
	// specialization below -- compiles to a "not implemented" diagnostic.
	template <int N, AEMathIoMode A>
	struct unrollf_store
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEFLOAT32 *lpAddr )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// Primary template: double store for any (N, mode) combination without a
	// specialization below -- compiles to a "not implemented" diagnostic.
	template <int N, AEMathIoMode A>
	struct unrollf_store_D
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEDOUBLE64 *lpAddr )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// Byte-aligned (unaligned-safe) store of the first N float lanes.
	template <int N>
	struct unrollf_store<N, AE_IO_BYTE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEFLOAT32 *lpAddr )
		{
			switch( N )
			{
				case 1:
					// Scalar store of lane x.
					_mm_store_ss( lpAddr, qfSelf );
					break;
				case 2:
					// x and y written as one 64-bit chunk (MOVSD store has no
					// alignment requirement).
					_mm_store_sd( (double*)lpAddr, _mm_castps_pd( qfSelf ) );
					break;
				case 3:
					// x/y as a 64-bit chunk, then z from the upper half via a scalar store.
					_mm_store_sd( (double*)lpAddr, _mm_castps_pd( qfSelf ) );
					_mm_store_ss( lpAddr+2, _mm_movehl_ps( qfSelf, qfSelf ) );
					break;
				default:
					// N == 4: full unaligned vector store.
					_mm_storeu_ps( lpAddr, qfSelf );
					break;
			}
		}
	};

	// Byte-aligned (unaligned-safe) store of the first N lanes, widened to doubles.
	// _mm_cvtps_pd converts the two low float lanes; the high pair is brought
	// low with movehl before converting.
	template <int N>
	struct unrollf_store_D<N, AE_IO_BYTE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEDOUBLE64 *lpAddr )
		{
			switch( N )
			{
				case 1:
					{
						// Widen x and store it as a scalar double.
						__m128d a = _mm_cvtps_pd( qfSelf );
						_mm_store_sd( lpAddr, a );
					}
					break;
				case 2:
					{
						// Widen x/y and store both, unaligned.
						__m128d a = _mm_cvtps_pd( qfSelf );
						_mm_storeu_pd( lpAddr, a );
					}
					break;
				case 3:
					{
						// x/y as a pair, then z alone from the widened upper half.
						__m128d a = _mm_cvtps_pd( qfSelf );
						__m128d b = _mm_cvtps_pd( _mm_movehl_ps( qfSelf, qfSelf ) );
						_mm_storeu_pd( lpAddr, a );
						_mm_store_sd( lpAddr+2, b );
					}
					break;
				default:
					{
						// N == 4: two unaligned double-pair stores.
						__m128d a = _mm_cvtps_pd( qfSelf );
						__m128d b = _mm_cvtps_pd( _mm_movehl_ps( qfSelf, qfSelf ) );
						_mm_storeu_pd( lpAddr, a );
						_mm_storeu_pd( lpAddr+2, b );
					}
					break;
			}
		}
	};

	// Native-aligned float store: asserts float alignment, then reuses the
	// byte-aligned implementation (which is alignment-agnostic).
	template <int N>
	struct unrollf_store<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEFLOAT32 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & (sizeof(AEFLOAT32)-1) ) == 0, "pointer must be aligned to native size of AEFLOAT32.");
			unrollf_store<N, AE_IO_BYTE_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// Native-aligned double store: asserts double alignment, then reuses the
	// byte-aligned implementation (which is alignment-agnostic).
	template <int N>
	struct unrollf_store_D<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEDOUBLE64 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & (sizeof(AEDOUBLE64)-1) ) == 0, "pointer must be aligned to native size of AEDOUBLE64.");
			unrollf_store_D<N, AE_IO_BYTE_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// SIMD-aligned float store: full-width aligned store for N == 4; other
	// lengths defer to the native-aligned path. The assert demands SIMD
	// alignment with the length rounded up to 4 lanes for N == 3.
	template <int N>
	struct unrollf_store<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEFLOAT32 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEFLOAT32)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			if( N == 4 )
				_mm_store_ps( lpAddr, qfSelf );
			else
				unrollf_store<N, AE_IO_NATIVE_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// SIMD-aligned double store, widening float lanes to doubles. The assert
	// demands alignment for N doubles (rounded up to 4 for N == 3), which makes
	// the aligned pair stores below legal.
	template <int N>
	struct unrollf_store_D<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEDOUBLE64 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEDOUBLE64)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			switch( N )
			{
				case 2:
					{
						// Widen x/y, one aligned pair store.
						__m128d a = _mm_cvtps_pd( qfSelf );
						_mm_store_pd( lpAddr, a );
					}
					break;
				case 3:
					{
						// Aligned x/y pair, then z alone as a scalar store.
						__m128d a = _mm_cvtps_pd( qfSelf );
						__m128d b = _mm_cvtps_pd( _mm_movehl_ps( qfSelf, qfSelf ) );
						_mm_store_pd( lpAddr, a );
						_mm_store_sd( lpAddr+2, b );
					}
					break;
				case 4:
					{
						// Two aligned pair stores cover all four lanes.
						__m128d a = _mm_cvtps_pd( qfSelf );
						__m128d b = _mm_cvtps_pd( _mm_movehl_ps( qfSelf, qfSelf ) );
						_mm_store_pd( lpAddr, a );
						_mm_store_pd( lpAddr+2, b );
					}
					break;
				default:
					{
						// N == 1: no vector-width gain, use the native-aligned path.
						unrollf_store_D<N, AE_IO_NATIVE_ALIGNED>::Apply( qfSelf, lpAddr );
					}
					break;
			}
		}
	};

	// Non-cached float store: non-temporal full-width store for N == 4 (bypasses
	// the cache); other lengths fall back to the regular SIMD-aligned store.
	template <int N>
	struct unrollf_store<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEFLOAT32 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEFLOAT32)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			if( N == 4 )
				_mm_stream_ps( lpAddr, qfSelf );
			else
				unrollf_store<N, AE_IO_SIMD_ALIGNED>::Apply( qfSelf, lpAddr );
		}
	};

	// Non-cached double store: widens float lanes to doubles and writes them
	// with non-temporal stores where a full 16-byte chunk is available.
	template <int N>
	struct unrollf_store_D<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply( const AEQuadFloat32 &qfSelf, AEDOUBLE64 *lpAddr )
		{
			AE_ASSERTMSG( ( ((AEUINT32)lpAddr) & ((sizeof(AEDOUBLE64)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			switch( N )
			{
				case 2:
					{
						// One non-temporal pair store.
						__m128d a = _mm_cvtps_pd( qfSelf );
						_mm_stream_pd( lpAddr, a );
					}
					break;
				case 3:
					{
						// Streaming x/y pair; z is only 8 bytes so it uses a plain
						// scalar store (no 8-byte streaming double store exists).
						__m128d a = _mm_cvtps_pd( qfSelf );
						__m128d b = _mm_cvtps_pd( _mm_movehl_ps( qfSelf, qfSelf ) );
						_mm_stream_pd( lpAddr, a );
						_mm_store_sd( lpAddr+2, b );
					}
					break;
				case 4:
					{
						// Two non-temporal pair stores cover all four lanes.
						__m128d a = _mm_cvtps_pd( qfSelf );
						__m128d b = _mm_cvtps_pd( _mm_movehl_ps( qfSelf, qfSelf ) );
						_mm_stream_pd( lpAddr, a );
						_mm_stream_pd( lpAddr+2, b );
					}
					break;
				default:
					{
						// N == 1: no streaming benefit, regular SIMD-aligned store.
						unrollf_store_D<N, AE_IO_SIMD_ALIGNED>::Apply( qfSelf, lpAddr );
					}
					break;
			}
		}
	};
} // End namespace

// Stores N float lanes to lpAddr using I/O mode A.
// NOTE(review): the rounding-mode parameter R is not forwarded to the store
// implementation -- confirm whether it is intentionally ignored for the
// float (no-conversion) path.
template <int N, AEMathIoMode A, AEMathRoundingMode R>
AE_FORCEINLINE void AEVector3f::Store( AEFLOAT32 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store<N,A>::Apply( m_qfVector, lpAddr );
}

// Stores N lanes to lpAddr as doubles using I/O mode A.
// NOTE(review): the rounding-mode parameter R is not forwarded to the store
// implementation even though a float-to-double conversion occurs -- confirm.
template <int N, AEMathIoMode A, AEMathRoundingMode R>
AE_FORCEINLINE void AEVector3f::Store( AEDOUBLE64 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store_D<N,A>::Apply( m_qfVector, lpAddr );
}

// Stores N float lanes to lpAddr using I/O mode A (default rounding).
template <int N, AEMathIoMode A>
AE_FORCEINLINE void AEVector3f::Store( AEFLOAT32 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store<N,A>::Apply( m_qfVector, lpAddr );
}

// Stores N lanes to lpAddr as doubles using I/O mode A (default rounding).
template <int N, AEMathIoMode A>
AE_FORCEINLINE void AEVector3f::Store( AEDOUBLE64 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store_D<N,A>::Apply( m_qfVector, lpAddr );
}

// Convenience overload: stores N float lanes assuming SIMD alignment.
template <int N>
AE_FORCEINLINE void AEVector3f::Store( AEFLOAT32* lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

// Convenience overload: stores N lanes as doubles assuming SIMD alignment.
template <int N>
AE_FORCEINLINE void AEVector3f::Store( AEDOUBLE64 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store_D<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

#define AE_VECTOR4F_SETCOMPFUNCS
// Runtime component write: replaces lane nIndex with fValue, leaving the
// other lanes untouched. A 16-byte mask row selects the target lane.
AE_FORCEINLINE void AEVector3f::SetComponent( const AEINT32 nIndex, AESimdFloat32ConstRef fValue )
{
	// Row i is all-ones in lane i only; row selection avoids a branch per index.
	static AE_ALIGN16 const AEUINT32 indexToMask[16] = 
	{
		0xffffffff, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0xffffffff, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0xffffffff, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xffffffff
	};

	AEASSERT_MSG( nIndex >= 0 && nIndex < 3, "index out of bounds for component access");

	const AEQuadFloat32 mask = *( const AEQuadFloat32* )&indexToMask[ nIndex * 4 ];

#if AE_SSE_VERSION >= 0x41
	// Single variable blend picks fValue where the mask is set.
	m_qfVector = _mm_blendv_ps( m_qfVector, fValue.m_fReal, mask );
#else
	// Classic select: (mask & new) | (~mask & old).
	m_qfVector = _mm_or_ps( _mm_and_ps(mask, fValue.m_fReal ), _mm_andnot_ps( mask, m_qfVector ) );
#endif
}

// Compile-time component write: replaces lane I with fValue.
// NOTE(review): the pre-SSE4.1 specializations read different lanes of
// fValue (<0>/<1> use lane 0, <2> uses lane 3), so fValue is presumably
// splatted across all lanes -- confirm against AESimdFloat32's contract.
#if AE_SSE_VERSION >= 0x41
template <int I>
AE_FORCEINLINE void AEVector3f::SetComponent( AESimdFloat32ConstRef fValue )
{
	AE_VECTOR4F_SUBINDEX_CHECK;
	// One blend with an immediate lane mask (SSE4.1).
	m_qfVector = _mm_blend_ps( m_qfVector, fValue.m_fReal, 0x1 << I );
}
#else
// Lane 0: move-scalar replaces x only.
template <>
AE_FORCEINLINE void AEVector3f::SetComponent<0>( AESimdFloat32ConstRef fValue )
{
	m_qfVector = _mm_move_ss( m_qfVector, fValue.m_fReal );
}

// Lane 1: unpacklo yields [x, v, y, v]; the shuffle keeps [x, v] and restores z/w.
template <>
AE_FORCEINLINE void AEVector3f::SetComponent<1>( AESimdFloat32ConstRef fValue )
{
	m_qfVector = _mm_shuffle_ps( _mm_unpacklo_ps( m_qfVector, fValue.m_fReal ), m_qfVector, _MM_SHUFFLE( 3, 2, 1, 0 ) );
}

// Lane 2: unpackhi yields [z, v, w, v]; the shuffle keeps x/y and picks v and w.
template <>
AE_FORCEINLINE void AEVector3f::SetComponent<2>( AESimdFloat32ConstRef fValue )
{
	m_qfVector = _mm_shuffle_ps( m_qfVector, _mm_unpackhi_ps( m_qfVector, fValue.m_fReal ), _MM_SHUFFLE( 2, 3, 1, 0 ) );
}

// Any other lane index is a compile-time error.
template <int N>
AE_FORCEINLINE void AEVector3f::SetComponent( AESimdFloat32ConstRef fValue )
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
}
#endif

#define AE_VECTOR4F_OPERATORS
// Mutable component access by runtime index.
// NOTE(review): the bound allows nIndex == 3 (the padding w lane) even though
// this is a 3-component vector; SetComponent asserts nIndex < 3 -- confirm
// whether direct w access is intentional here.
AE_FORCEINLINE AEFLOAT32 &AEVector3f::operator() ( AEINT32 nIndex )
{
	AEASSERT_MSG( nIndex >= 0 && nIndex < 4, "index out of bounds for component access");
	return m_qfVector.m128_f32[nIndex];
}

// Read-only component access by runtime index (w lane included, see the
// non-const overload's note).
AE_FORCEINLINE const AEFLOAT32 &AEVector3f::operator() ( AEINT32 nIndex ) const
{
	AEASSERT_MSG( nIndex >= 0 && nIndex < 4, "index out of bounds for component access");
	return m_qfVector.m128_f32[nIndex];
}

#define AE_VECTOR4F_GETCOMPFUNCS
// Compile-time component read: broadcasts lane I across the whole register
// and returns it wrapped as a SIMD scalar.
template <int I>
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetComponent( void ) const
{
	AE_VECTOR4F_SUBINDEX_CHECK;
	const AEQuadFloat32 qfSplat = _mm_shuffle_ps( m_qfVector, m_qfVector, _MM_SHUFFLE( I, I, I, I ) );
	return AESimdFloat32::Convert( qfSplat );
}

// Runtime component read: isolates lane nIndex with a mask, then broadcasts
// it to all four lanes via two shuffle/OR rounds (the other lanes are zero
// after masking, so OR-ing rotated copies splats the selected value).
AE_FORCEINLINE const AESimdFloat32 AEVector3f::GetComponent( const AEINT32 nIndex ) const
{
	// Row i is all-ones in lane i only.
	static AE_ALIGN16 const AEUINT32 indexToMask[16] = 
	{
		0xffffffff, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0xffffffff, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0xffffffff, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xffffffff
	};

	AEASSERT_MSG( nIndex >= 0 && nIndex < 4, "index out of bounds for component access");

	const AEQuadFloat32 mask = *(const AEQuadFloat32*)&indexToMask[ nIndex * 4 ];
	// Zero every lane except the selected one.
	AEQuadFloat32 selected = _mm_and_ps( mask, m_qfVector ); 

	// Fold halves together, then fold neighbours: after both ORs every lane
	// holds the selected component.
	const AEQuadFloat32 zwxy = _mm_shuffle_ps( selected, selected, _MM_SHUFFLE( 1, 0, 3, 2 ) );
	selected = _mm_or_ps( selected, zwxy );
	const AEQuadFloat32 yxwz = _mm_shuffle_ps( selected, selected, _MM_SHUFFLE( 2, 3, 0, 1 ) );
	selected = _mm_or_ps( selected, yxwz );

	return AESimdFloat32::Convert( selected );
}

// Per-component conditional negate: flips the sign of each lane of vcVector
// wherever the corresponding lane of vSign is negative (sign bit set).
AE_FORCEINLINE void AEVector3f::SetFlipSign( AEVector4fRefParam vcVector, AEVector4fRefParam vSign )
{
	// Isolate the sign bits of vSign: shift them down to bit 0, then back up.
	__m128i qiSignBits = _mm_castps_si128( vSign.m_qfVector );
	qiSignBits = _mm_srli_epi32( qiSignBits, 31 );
	qiSignBits = _mm_slli_epi32( qiSignBits, 31 );
	// XOR toggles exactly the sign bit of the lanes selected above.
	m_qfVector = _mm_xor_ps( vcVector.m_qfVector, _mm_castsi128_ps( qiSignBits ) );
}

// Uniform conditional negate: flips the sign of every lane of vcVector
// according to the sign bit(s) of the scalar wrapper sSign.
AE_FORCEINLINE void AEVector3f::SetFlipSign( AEVector4fRefParam vcVector, AESimdFloat32ConstRef sSign )
{
	// Isolate the sign bits of sSign: shift them down to bit 0, then back up.
	__m128i qiSignBits = _mm_castps_si128( sSign.m_fReal );
	qiSignBits = _mm_srli_epi32( qiSignBits, 31 );
	qiSignBits = _mm_slli_epi32( qiSignBits, 31 );
	// XOR toggles exactly the sign bit of the selected lanes.
	m_qfVector = _mm_xor_ps( vcVector.m_qfVector, _mm_castsi128_ps( qiSignBits ) );
}

#define AE_VECTOR4F_CHECKFUNCS
// Validity check for the first component only: true when x is not NaN.
// Rewritten to match the cmpunord + negation pattern used by IsOk<2> and
// IsOk<3>; the original used cmpord with a positive mask test, which gave the
// same truth value but an inconsistent style across the three specializations.
template <> 
AE_FORCEINLINE AEBOOL32 AEVector3f::IsOk<1>( void ) const
{
	// Unordered compare sets a lane to all-ones when that lane is NaN.
	const AEQuadFloat32 nanMask = _mm_cmpunord_ps( m_qfVector, _mm_setzero_ps() );
	// Ok when the x lane did not flag as NaN.
	return !( _mm_movemask_ps( nanMask ) & 0x1 );
}

// Validity check for the first two components: true when neither x nor y is
// NaN. cmpunord flags NaN lanes; movemask collects their sign bits.
template <> 
AE_FORCEINLINE AEBOOL32 AEVector3f::IsOk<2>( void ) const
{
	const AEQuadFloat32 nanMask = _mm_cmpunord_ps( m_qfVector, _mm_setzero_ps() );
	return !( _mm_movemask_ps( nanMask ) & 0x3 );
}

// Validity check for all three components: true when none of x/y/z is NaN
// (the padding w lane is deliberately excluded by the 0x7 mask).
template <> 
AE_FORCEINLINE AEBOOL32 AEVector3f::IsOk<3>( void ) const
{
	const AEQuadFloat32 nanMask = _mm_cmpunord_ps( m_qfVector, _mm_setzero_ps() );
	return !( _mm_movemask_ps( nanMask ) & 0x7 );
}

// Fallback for unsupported lengths: compile-time error via the macro; the
// return only exists to keep the signature well-formed.
template <int N> 
AE_FORCEINLINE AEBOOL32 AEVector3f::IsOk( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return false;
}

#define AE_VECTOR4F_CMPFUNCS

// Per-component "less than" mask: a lane is all-ones where this < vcVector.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::CmpL( AEVector4fRefParam vcVector ) const
{
	const AEQuadFloat32 qfMask = _mm_cmplt_ps( m_qfVector, vcVector.m_qfVector );
	return AEVector4fComparison::Convert( qfMask );
}

// Per-component "less than or equal" mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::CmpLE( AEVector4fRefParam vcVector ) const
{
	const AEQuadFloat32 qfMask = _mm_cmple_ps( m_qfVector, vcVector.m_qfVector );
	return AEVector4fComparison::Convert( qfMask );
}

// Per-component "greater than" mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::CmpG( AEVector4fRefParam vcVector ) const
{
	const AEQuadFloat32 qfMask = _mm_cmpgt_ps( m_qfVector, vcVector.m_qfVector );
	return AEVector4fComparison::Convert( qfMask );
}

// Per-component "greater than or equal" mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::CmpGE( AEVector4fRefParam vcVector ) const
{
	const AEQuadFloat32 qfMask = _mm_cmpge_ps( m_qfVector, vcVector.m_qfVector );
	return AEVector4fComparison::Convert( qfMask );
}

// Per-component equality mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::CmpE( AEVector4fRefParam vcVector ) const
{
	const AEQuadFloat32 qfMask = _mm_cmpeq_ps( m_qfVector, vcVector.m_qfVector );
	return AEVector4fComparison::Convert( qfMask );
}

// Per-component inequality mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::CmpNE( AEVector4fRefParam vcVector ) const
{
	const AEQuadFloat32 qfMask = _mm_cmpneq_ps( m_qfVector, vcVector.m_qfVector );
	return AEVector4fComparison::Convert( qfMask );
}

// Per-component "negative" mask: lanes strictly below zero.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::LessZero( void ) const
{
	const AEQuadFloat32 qfZero = _mm_setzero_ps();
	return AEVector4fComparison::Convert( _mm_cmplt_ps( m_qfVector, qfZero ) );
}

// Per-component "non-positive" mask: lanes at or below zero.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::LessEqualZero( void ) const
{
	const AEQuadFloat32 qfZero = _mm_setzero_ps();
	return AEVector4fComparison::Convert( _mm_cmple_ps( m_qfVector, qfZero ) );
}

// Per-component "positive" mask: lanes strictly above zero.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::GreaterZero( void ) const
{
	const AEQuadFloat32 qfZero = _mm_setzero_ps();
	return AEVector4fComparison::Convert( _mm_cmpgt_ps( m_qfVector, qfZero ) );
}

// Per-component "non-negative" mask: lanes at or above zero.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::GreaterEqualZero( void ) const
{
	const AEQuadFloat32 qfZero = _mm_setzero_ps();
	return AEVector4fComparison::Convert( _mm_cmpge_ps( m_qfVector, qfZero ) );
}

// Per-component "exactly zero" mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::EqualZero( void ) const
{
	const AEQuadFloat32 qfZero = _mm_setzero_ps();
	return AEVector4fComparison::Convert( _mm_cmpeq_ps( m_qfVector, qfZero ) );
}

// Per-component "non-zero" mask.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::NotEqualZero( void ) const
{
	const AEQuadFloat32 qfZero = _mm_setzero_ps();
	return AEVector4fComparison::Convert( _mm_cmpneq_ps( m_qfVector, qfZero ) );
}

// Per-component mask of lanes whose sign bit is set (includes -0.0f and
// negative NaNs). Arithmetic right shift by 31 smears each sign bit across
// its whole lane: all-ones when set, all-zeros otherwise.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::SignBitSet( void ) const
{
	__m128i qiSmeared = _mm_castps_si128( m_qfVector );
	qiSmeared = _mm_srai_epi32( qiSmeared, 31 );
	return AEVector4fComparison::Convert( _mm_castsi128_ps( qiSmeared ) );
}

// Per-component mask of lanes whose sign bit is clear: smear the sign bit
// across each lane, then compare the smear against zero.
AE_FORCEINLINE const AEVector4fComparison AEVector3f::SignBitClear( void ) const
{
	__m128i qiSmeared = _mm_castps_si128( m_qfVector );
	qiSmeared = _mm_srai_epi32( qiSmeared, 31 );
	const __m128i qiClear = _mm_cmpeq_epi32( qiSmeared, _mm_setzero_si128() );
	return AEVector4fComparison::Convert( _mm_castsi128_ps( qiClear ) );
}

#endif // _AEVECTOR3FSSE_INL_

