/////////////////////////////////////////////////////////////////////////
//
// Amuse Engine SDK - core/math
// Copyright(c) 2014.  All Rights Reserved
//
// File:		AEVector4f_Neon.inl
// Author:		Gianluca Belardelli
// Date:		02/06/2014
//
/////////////////////////////////////////////////////////////////////////
#ifndef _AEVECTOR4F_NEON_INL_
#define _AEVECTOR4F_NEON_INL_

#define AEVECTOR4F_CONSTRUCTORS
// Constructs from four scalars: pack (x,y) and (z,w) into two 64-bit halves,
// then combine into one 128-bit NEON register.
AE_FORCEINLINE AEVector4f::AEVector4f( AEFLOAT32 fX, AEFLOAT32 fY, AEFLOAT32 fZ, AEFLOAT32 fW )
{
	float32x2_t l = AE_NEON_CONSTANT2F( fX, fY );
	float32x2_t h = AE_NEON_CONSTANT2F( fZ, fW );
	m_qfVector = vcombine_f32( l, h );
}

// Wraps an existing quad register as-is.
AE_FORCEINLINE AEVector4f::AEVector4f( const AEQuadFloat32 &qfValue )
{
	m_qfVector = qfValue;
}

// Copy constructor: single register copy.
AE_FORCEINLINE AEVector4f::AEVector4f( const AEVector4f &vcCopy )
{
	m_qfVector = vcCopy.m_qfVector;
}

#define AEVECTOR4F_BASESETS
// Sets all four components from scalars via a stack array and two 64-bit loads.
AE_FORCEINLINE void AEVector4f::Set( AEFLOAT32 fX, AEFLOAT32 fY, AEFLOAT32 fZ, AEFLOAT32 fW )
{
	const float32_t temp[4] = { fX, fY, fZ, fW };
	float32x2_t l = vld1_f32( temp );
	float32x2_t h = vld1_f32( temp + 2 );
	m_qfVector = vcombine_f32(l,h);
}

// Sets all four components from SIMD scalars (lane 0 of each) without
// round-tripping through memory: lane-insert fY/fW into fX/fZ, then combine.
AE_FORCEINLINE void AEVector4f::Set( AESimdFloat32ConstRef fX, AESimdFloat32ConstRef fY, AESimdFloat32ConstRef fZ, AESimdFloat32ConstRef fW )
{
	float32x2_t l = vset_lane_f32( vget_lane_f32( fY.m_fReal, 0 ), fX.m_fReal, 1 );
	float32x2_t h = vset_lane_f32( vget_lane_f32( fW.m_fReal, 0 ), fZ.m_fReal, 1 );
	m_qfVector = vcombine_f32( l, h );
}

// Broadcasts one scalar into all four lanes.
AE_FORCEINLINE void AEVector4f::SetAll( const AEFLOAT32 &fValue )
{
	m_qfVector = vdupq_n_f32( fValue );
}

// Broadcasts a SIMD scalar (both lanes of its 64-bit register assumed equal)
// into all four lanes by duplicating the half-register.
AE_FORCEINLINE void AEVector4f::SetAll( AESimdFloat32ConstRef fValue )
{
	m_qfVector = vcombine_f32( fValue.m_fReal, fValue.m_fReal );
}

// Sets every component to 0.0f with a single duplicate instruction.
AE_FORCEINLINE void AEVector4f::SetZero( void )
{
	m_qfVector = vdupq_n_f32( 0.0f );
}

// Zeroes compile-time component I (0=x..3=w), leaving the other lanes intact.
template <AEINT32 I> 
AE_FORCEINLINE void AEVector4f::ZeroComponent( void )
{
	AE_VECTOR4F_SUBINDEX_CHECK;
    // Add Switch to fix Clang issue were last parameter of vsetq_lane_f32 must be in constrange(0,3)
    // See RSYS-1377 for more information
    switch(I)
    {
        case 1:  m_qfVector = vsetq_lane_f32( 0, m_qfVector, 1 ); break;
        case 2:  m_qfVector = vsetq_lane_f32( 0, m_qfVector, 2 ); break;
        case 3:  m_qfVector = vsetq_lane_f32( 0, m_qfVector, 3 ); break;
        default: m_qfVector = vsetq_lane_f32( 0, m_qfVector, 0 ); break;
    }
}

// Zeroes runtime component nIndex (0=x..3=w), leaving the other lanes intact.
// vsetq_lane_f32 requires a compile-time lane index, hence the explicit dispatch.
AE_FORCEINLINE void AEVector4f::ZeroComponent( const AEINT32 nIndex )
{
	AEASSERT_MSG( ( nIndex >= 0 ) && ( nIndex < 4 ), "Component index out of range" );
	if( nIndex == 1 )
		m_qfVector = vsetq_lane_f32( 0, m_qfVector, 1 );
	else if( nIndex == 2 )
		m_qfVector = vsetq_lane_f32( 0, m_qfVector, 2 );
	else if( nIndex == 3 )
		m_qfVector = vsetq_lane_f32( 0, m_qfVector, 3 );
	else
		m_qfVector = vsetq_lane_f32( 0, m_qfVector, 0 );
}

#define AEVECTOR4F_ADVSETS
// this = v1 + v2 (component-wise).
AE_FORCEINLINE void AEVector4f::SetAdd( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = vaddq_f32( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

// this = v1 - v2 (component-wise).
AE_FORCEINLINE void AEVector4f::SetSub( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = vsubq_f32( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

// this = v1 * v2 (component-wise).
AE_FORCEINLINE void AEVector4f::SetMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = vmulq_f32( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

// this = v * scalar (scalar broadcast from both halves of the SIMD float).
AE_FORCEINLINE void AEVector4f::SetMul( AEVector4fRefParam vcVector, AESimdFloat32ConstRef fValue )
{
	m_qfVector = vmulq_f32( vcombine_f32( fValue.m_fReal, fValue.m_fReal ), vcVector.m_qfVector );
}

// this = v1 - scalar * v2, using the fused multiply-subtract accumulate form.
AE_FORCEINLINE void AEVector4f::SetSubMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AESimdFloat32ConstRef fValue )
{
	m_qfVector = vmlsq_f32( vcVector1.m_qfVector, vcombine_f32( fValue.m_fReal, fValue.m_fReal), vcVector2.m_qfVector ); 
}

// this = v1 + scalar * v2, using the multiply-accumulate form.
AE_FORCEINLINE void AEVector4f::SetAddMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AESimdFloat32ConstRef fValue )
{
	m_qfVector = vmlaq_f32( vcVector1.m_qfVector, vcombine_f32( fValue.m_fReal, fValue.m_fReal), vcVector2.m_qfVector ); 
}

// this = v1 + v3 * v2 (component-wise multiply-accumulate).
AE_FORCEINLINE void AEVector4f::SetAddMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AEVector4fRefParam vcVector3 )
{
	m_qfVector = vmlaq_f32( vcVector1.m_qfVector, vcVector3.m_qfVector, vcVector2.m_qfVector ); 
}

// this = v1 - v3 * v2 (component-wise multiply-subtract).
AE_FORCEINLINE void AEVector4f::SetSubMul( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2, AEVector4fRefParam vcVector3 )
{
	m_qfVector = vmlsq_f32( vcVector1.m_qfVector, vcVector3.m_qfVector, vcVector2.m_qfVector ); 
}

// 3D cross product: this = v1 x v2.  Lanes 0..2 receive the cross product;
// lane 3 ends up with an unspecified value (a copy of one of the terms), as is
// usual for a 3-component cross on 4-lane SIMD.
AE_FORCEINLINE void AEVector4f::SetCross( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
#if 1 // vrev / vext version

	// Build the (y,z,x,w) permutation of both operands with vext + vrev64,
	// evaluate v1 * perm(v2) - perm(v1) * v2 (which yields the result in
	// (z,x,y,*) lane order), then rotate it back into (x,y,z,*).
	const float32x4_t v0_yzwx = vextq_f32( vcVector1.m_qfVector, vcVector1.m_qfVector, 1 );
	const float32x4_t v1_yzwx = vextq_f32( vcVector2.m_qfVector, vcVector2.m_qfVector, 1 );
	const float32x2_t v0_yz = vget_low_f32( v0_yzwx );
	const float32x2_t v1_yz = vget_low_f32( v1_yzwx );
	const float32x2_t v0_wx = vget_high_f32( v0_yzwx );
	const float32x2_t v1_wx = vget_high_f32( v1_yzwx );
	const float32x2_t v0_xw = vrev64_f32( v0_wx );
	const float32x2_t v1_xw = vrev64_f32( v1_wx );
	const float32x4_t v0_yzxw = vcombine_f32( v0_yz, v0_xw );
	const float32x4_t v1_yzxw = vcombine_f32( v1_yz, v1_xw );
	const float32x4_t cross = vmlsq_f32( vmulq_f32( vcVector1.m_qfVector, v1_yzxw ), v0_yzxw, vcVector2.m_qfVector );
	const float32x4_t cross_yzwx = vextq_f32( cross, cross, 1 );
	const float32x2_t c_xy = vget_low_f32( cross );
	const float32x2_t c_yz = vget_low_f32( cross_yzwx );
	m_qfVector = vcombine_f32( c_yz, c_xy );

#elif 0

	// NOTE(review): disabled inline-asm variant.  It does not compile as
	// written: it references undefined locals v0/v1 and 'return's a value
	// from a void method.  Kept for reference only.
	register float32x4_t v1 asm("q0") = v0; // not supported..
	register float32x4_t v2 asm("q2") = v1;
	register float32x4_t result asm("q8");
	asm volatile(
		"vmov    q1, q0              \n\t"
		"vmov    q3, q2              \n\t"
		"vzip.32 q0, q1              \n\t" // d0,d1,d3 = xx1, yy1, zz1
		"vzip.32 q2, q3              \n\t" // d4,d5,d6 = xx2, yy2, zz2
		"vmul.f32 d16, d1, d6        \n\t" // x' = yy1 * zz2
		"vmul.f32 d18, d3, d4        \n\t" // y' = zz1 * xx2
		"vmul.f32 d17, d0, d5        \n\t" // z' = xx1 * yy2
		"vmls.f32 d16, d3, d5        \n\t" // x' = x' - zz1 * yy2
		"vmls.f32 d18, d0, d6        \n\t" // y' = y' - xx1 * zz2
		"vmls.f32 d17, d1, d4        \n\t" // z' = z' - yy1 * xx2
		"vuzp.32 d16, d18"                 // result.xyzw = xyzz'
		: "=w" (v1), "=w" (v2), "=w" (result)
		: "0" (v1), "1" (v2)
		: "q1", "q3", "q9"
		);

	return result;

#else

	// NOTE(review): disabled variant; still uses Havok-era names
	// (HK_NEON_V, v0/v1) that do not exist in this codebase.
	// Zip/unzipo version (see http://www.gp32x.com/board/index.php?/topic/55455-fast-neon-3-term-cross-product/)
	// Slower than vrev/vext ver in our usage anyway
	float32x4x2_t xxyyzz0 = vzipq_f32(HK_NEON_V(v0.m_qfVector), HK_NEON_V(v0.m_qfVector));
	float32x4x2_t xxyyzz1 = vzipq_f32(HK_NEON_V(v1.m_qfVector), HK_NEON_V(v1.m_qfVector));    

	float32x2_t xx0 = vget_low_f32(xxyyzz0.val[0]);
	float32x2_t yy0 = vget_high_f32(xxyyzz0.val[0]);    
	float32x2_t zz0 = vget_low_f32(xxyyzz0.val[1]);

	float32x2_t xx1 = vget_low_f32(xxyyzz1.val[0]);
	float32x2_t yy1 = vget_high_f32(xxyyzz1.val[0]);    
	float32x2_t zz1 = vget_low_f32(xxyyzz1.val[1]);

	float32x2_t x = vmul_f32(yy0, zz1); // x' = v0.y * v1.z
	float32x2_t y = vmul_f32(zz0, xx1); // x' = v0.z * v1.x  
	float32x2_t z = vmul_f32(xx0, yy1); // x' = v0.x * v1.y
	x = vmls_f32(x, zz0, yy1);          // x' = x' - v0.z * v1.y
	y = vmls_f32(y, xx0, zz1);          // y' = y' - v0.x * v1.z
	z = vmls_f32(z, yy0, xx1);          // z' = z' - v0.y * v1.x

	float32x2x2_t result = vuzp_f32(x, y);
	HK_NEON_V(m_qfVector) = vcombine_f32(result.val[0], z);

#endif
}

#define AE_VECTOR4F_CMPFUNCS

// Component-wise equality mask (all-ones lane where equal).
AE_FORCEINLINE const AEVector4fComparison AEVector4f::CmpE( AEVector4fRefParam vcVector ) const
{
	return AEVector4fComparison::Convert( vceqq_f32( m_qfVector, vcVector.m_qfVector ) );
}

// Component-wise inequality mask: NEON has no "not equal", so invert vceq.
// Note this makes NaN lanes compare "not equal", matching IEEE semantics.
AE_FORCEINLINE const AEVector4fComparison AEVector4f::CmpNE( AEVector4fRefParam vcVector ) const
{
	uint32x4_t eq = vceqq_f32( m_qfVector, vcVector.m_qfVector );
 	return AEVector4fComparison::Convert( vmvnq_u32( eq ) );
}

// Component-wise less-than mask.
AE_FORCEINLINE const AEVector4fComparison AEVector4f::CmpL( AEVector4fRefParam vcVector ) const
{
	return AEVector4fComparison::Convert( vcltq_f32( m_qfVector, vcVector.m_qfVector ) );
}

// Component-wise less-or-equal mask.
AE_FORCEINLINE const AEVector4fComparison AEVector4f::CmpLE( AEVector4fRefParam vcVector ) const
{
	return AEVector4fComparison::Convert( vcleq_f32( m_qfVector, vcVector.m_qfVector ) );
}

// Component-wise greater-than mask.
AE_FORCEINLINE const AEVector4fComparison AEVector4f::CmpG( AEVector4fRefParam vcVector ) const
{
 	return AEVector4fComparison::Convert( vcgtq_f32( m_qfVector, vcVector.m_qfVector ) );
}

// Component-wise greater-or-equal mask.
AE_FORCEINLINE const AEVector4fComparison AEVector4f::CmpGE( AEVector4fRefParam vcVector ) const
{
 	return AEVector4fComparison::Convert( vcgeq_f32( m_qfVector, vcVector.m_qfVector ) );
}

// Component-wise (this < 0) mask. // todo how to use VCLT #0
AE_FORCEINLINE const AEVector4fComparison AEVector4f::LessZero( void ) const
{
	const float32x4_t qfZero = vdupq_n_f32( 0.0f );
	return AEVector4fComparison::Convert( vcltq_f32( m_qfVector, qfZero ) );
}

// Component-wise (this <= 0) mask. // todo how to use VCLE #0
AE_FORCEINLINE const AEVector4fComparison AEVector4f::LessEqualZero( void ) const
{
	const float32x4_t qfZero = vdupq_n_f32( 0.0f );
	return AEVector4fComparison::Convert( vcleq_f32( m_qfVector, qfZero ) );
}

// Component-wise (this > 0) mask. // todo how to use VCGT #0
AE_FORCEINLINE const AEVector4fComparison AEVector4f::GreaterZero( void ) const
{
	const float32x4_t qfZero = vdupq_n_f32( 0.0f );
	return AEVector4fComparison::Convert( vcgtq_f32( m_qfVector, qfZero ) );
}

// Component-wise (this >= 0) mask. // todo how to use VCGE #0
AE_FORCEINLINE const AEVector4fComparison AEVector4f::GreaterEqualZero( void ) const
{
	const float32x4_t qfZero = vdupq_n_f32( 0.0f );
	return AEVector4fComparison::Convert( vcgeq_f32( m_qfVector, qfZero ) );
}

// Component-wise (this == 0) mask. // todo how to use VCEQ #0
AE_FORCEINLINE const AEVector4fComparison AEVector4f::EqualZero( void ) const
{
	const float32x4_t qfZero = vdupq_n_f32( 0.0f );
	return AEVector4fComparison::Convert( vceqq_f32( m_qfVector, qfZero ) );
}

// Component-wise (this != 0) mask: equality against zero, then invert.
// todo how to use VCEQ #0
AE_FORCEINLINE const AEVector4fComparison AEVector4f::NotEqualZero( void ) const
{
	const float32x4_t qfZero = vdupq_n_f32( 0.0f );
	const uint32x4_t isZero = vceqq_f32( m_qfVector, qfZero );
	return AEVector4fComparison::Convert( vmvnq_u32( isZero ) );
}

// Per-lane select: this = mask ? trueValue : falseValue.
AE_FORCEINLINE void AEVector4f::SetSelect( AEVector4fConstRefCompParam vcCompareMask, AEVector4fRefParam vcTrueValue, AEVector4fRefParam vcFalseValue )
{
	m_qfVector = vbslq_f32( vcCompareMask.m_mMask, vcTrueValue.m_qfVector, vcFalseValue.m_qfVector );
}

// Per-lane select with a compile-time mask M.
template<AEVector4fComparison::Mask M> 
AE_FORCEINLINE void AEVector4f::SetSelect( AEVector4fRefParam vcTrueValue, AEVector4fRefParam vcFalseValue )
{
	AEVector4fComparison comp;
	comp.Set(M);
	SetSelect( comp, vcTrueValue, vcFalseValue );
}

// SetNeg<N>: this = vcVector with the first N components negated, the
// remaining components copied unchanged.

// N==1: copy, then overwrite lane 0 with -x.
template <>
AE_FORCEINLINE void AEVector4f::SetNeg<1>( AEVector4fRefParam vcVector )
{
	float32x2_t xy = vget_low_f32( vcVector.m_qfVector );
	m_qfVector = vcVector.m_qfVector;
	m_qfVector = vsetq_lane_f32( vget_lane_f32( vneg_f32( xy ), 0 ), m_qfVector, 0 );
}

// N==2: negate the low (x,y) half, keep the high (z,w) half.
template <>
AE_FORCEINLINE void AEVector4f::SetNeg<2>( AEVector4fRefParam vcVector )
{
	float32x2_t l = vget_low_f32( vcVector.m_qfVector );
	float32x2_t h = vget_high_f32( vcVector.m_qfVector );
	m_qfVector = vcombine_f32( vneg_f32( l ), h );
}

// N==3: negate everything, then restore the original w lane.
template <>
AE_FORCEINLINE void AEVector4f::SetNeg<3>( AEVector4fRefParam vcVector )
{
	float32x2_t w = vget_high_f32( vcVector.m_qfVector );
	m_qfVector = vnegq_f32( vcVector.m_qfVector );
	m_qfVector = vsetq_lane_f32( vget_lane_f32( w, 1 ), m_qfVector, 3 );
}

// N==4: negate all four components.
template <>
AE_FORCEINLINE void AEVector4f::SetNeg<4>( AEVector4fRefParam vcVector )
{
	m_qfVector = vnegq_f32( vcVector.m_qfVector );
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N>
AE_FORCEINLINE void AEVector4f::SetNeg( AEVector4fRefParam vcVector )
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
}

// this = |v| (component-wise absolute value).
AE_FORCEINLINE void AEVector4f::SetAbs( AEVector4fRefParam vcVector )
{
	m_qfVector = vabsq_f32( vcVector.m_qfVector );	
}

// this = min(v1, v2) (component-wise).
AE_FORCEINLINE void AEVector4f::SetMin( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = vminq_f32( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

// this = max(v1, v2) (component-wise).
AE_FORCEINLINE void AEVector4f::SetMax( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	m_qfVector = vmaxq_f32( vcVector1.m_qfVector, vcVector2.m_qfVector );
}

/* matrix3, rotation, quaternion, transform */

// this = M * v: broadcast each of v's first three components and accumulate
// against the matrix columns (column-major linear combination).
AE_FORCEINLINE void AEVector4f::SetRotatedDir( const AEMatrix3f &mtMatrix, AEVector4fRefParam vcVector )
{
	const AEQuadFloat32 c0 = mtMatrix.GetColumn<0>().m_qfVector;
	const AEQuadFloat32 c1 = mtMatrix.GetColumn<1>().m_qfVector;
	const AEQuadFloat32 c2 = mtMatrix.GetColumn<2>().m_qfVector;

	// b0/b1/b2 = splat of v.x / v.y / v.z across all four lanes.
	const AEQuadFloat32 b0 = vdupq_lane_f32( vget_low_f32( vcVector.m_qfVector ), 0 );
	const AEQuadFloat32 b1 = vdupq_lane_f32( vget_low_f32( vcVector.m_qfVector ), 1 );
	const AEQuadFloat32 b2 = vdupq_lane_f32( vget_high_f32( vcVector.m_qfVector ), 0 );

	const AEQuadFloat32 r0 = vmulq_f32( c0, b0 );
	const AEQuadFloat32 r1 = vmulq_f32( c1, b1 );
	const AEQuadFloat32 r2 = vmulq_f32( c2, b2 );

	m_qfVector = vaddq_f32( vaddq_f32( r0, r1 ), r2 );
}

// this = M^T * v: transpose the 3x3 columns in registers, then apply the same
// column-combination as SetRotatedDir (inverse of a rotation = its transpose).
AE_FORCEINLINE void AEVector4f::SetRotatedInverseDir( const AEMatrix3f &mtMatrix, AEVector4fRefParam vcVector )
{
	AEVector4f c0 = mtMatrix.GetColumn<0>();
	AEVector4f c1 = mtMatrix.GetColumn<1>();
	AEVector4f c2 = mtMatrix.GetColumn<2>();

	AE_VECTORTRANSPOSE3F( c0, c1, c2 );

	const AEQuadFloat32 b0 = vdupq_lane_f32( vget_low_f32( vcVector.m_qfVector ), 0 );
	const AEQuadFloat32 b1 = vdupq_lane_f32( vget_low_f32( vcVector.m_qfVector ), 1 );
	const AEQuadFloat32 b2 = vdupq_lane_f32( vget_high_f32( vcVector.m_qfVector ), 0 );

	const AEQuadFloat32 r0 = vmulq_f32( c0.m_qfVector, b0 );
	const AEQuadFloat32 r1 = vmulq_f32( c1.m_qfVector, b1 );
	const AEQuadFloat32 r2 = vmulq_f32( c2.m_qfVector, b2 );

	m_qfVector = vaddq_f32( vaddq_f32( r0, r1 ), r2 );
}

// GetDot<N>: dot product over the first N components, broadcast into a SIMD
// scalar.  All variants multiply lane-wise, then reduce with pairwise adds.

// 2-component dot: x*x' + y*y'.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetDot<2>( AEVector4fRefParam vcVector ) const
{
	float32x4_t x2 = vmulq_f32( m_qfVector, vcVector.m_qfVector );
	float32x2_t l = vget_low_f32(x2);
	float32x2_t xy = vpadd_f32(l, l); 
	return AESimdFloat32::Convert( xy );
}

// 3-component dot: the w product is zeroed before the reduction.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetDot<3>( AEVector4fRefParam vcVector ) const
{
	float32x4_t x2 = vmulq_f32( m_qfVector, vcVector.m_qfVector );
	float32x2_t l = vget_low_f32( x2 );
	float32x2_t h = vget_high_f32( x2 );
	h = vset_lane_f32( (AEFLOAT32)0, h, 1 );
	float32x2_t xy_zw = vpadd_f32( l, h ); 
	float32x2_t xyzw = vpadd_f32( xy_zw, xy_zw ); 
	return AESimdFloat32::Convert( xyzw );
}

// Full 4-component dot.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetDot<4>( AEVector4fRefParam vcVector ) const
{
	float32x4_t x2 = vmulq_f32( m_qfVector, vcVector.m_qfVector );
	float32x2_t l = vget_low_f32( x2 );
	float32x2_t h = vget_high_f32( x2 );
	float32x2_t xy_zw = vpadd_f32( l, h ); // (0+1, 2+3)
	float32x2_t xyzw = vpadd_f32( xy_zw, xy_zw ); // 0+1+2+3
	return AESimdFloat32::Convert( xyzw );
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetDot( AEVector4fRefParam vcVector ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// Dot of this (x,y,z,w) with (v.x, v.y, v.z, 1): the w*w' product lane is
// replaced by this->w before the reduction.
AE_FORCEINLINE const AESimdFloat32 AEVector4f::Dot4xyz1( AEVector4fRefParam vcVector ) const
{
	float32x4_t x2 = vmulq_f32( m_qfVector, vcVector.m_qfVector );
	float32x2_t l = vget_low_f32( x2 );
	float32x2_t h = vget_high_f32( x2 );
	h = vset_lane_f32( vgetq_lane_f32( m_qfVector, 3 ), h, 1 );
	float32x2_t xy_zw = vpadd_f32( l, h ); // (0+1, 2+(3))
	float32x2_t xyzw = vpadd_f32( xy_zw, xy_zw ); // 0+1+2+(3)
	return AESimdFloat32::Convert( xyzw );
}

// GetHorizontalAdd<N>: sum of the first N components, broadcast into a SIMD
// scalar via pairwise adds.

// x + y.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalAdd<2>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t xy = vpadd_f32( l, l ); // (0+1, 0+1)
	return AESimdFloat32::Convert( xy );	
}

// x + y + z (w lane zeroed before the reduction).
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalAdd<3>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t h = vget_high_f32( m_qfVector );
	h = vset_lane_f32( (AEFLOAT32)0, h, 1) ;
	float32x2_t xy_zw = vpadd_f32( l, h ); // (0+1, 2+(3))
	float32x2_t xyzw = vpadd_f32( xy_zw, xy_zw ); // 0+1+2+(3)
	return AESimdFloat32::Convert( xyzw );	
}

// x + y + z + w.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalAdd<4>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t h = vget_high_f32( m_qfVector );
	float32x2_t xy_zw = vpadd_f32( l, h ); // (0+1, 2+3)
	float32x2_t xyzw = vpadd_f32( xy_zw, xy_zw ); // 0+1+2+3
	return AESimdFloat32::Convert( xyzw );	
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalAdd( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// Product of the x and y components.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMul<2>( void ) const
{
	return GetComponent<0>() * GetComponent<1>();
}

// Product of the x, y and z components (w ignored).
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMul<3>( void ) const
{
	AESimdFloat32 product = GetComponent<0>() * GetComponent<1>();
	product = product * GetComponent<2>();
	return product;
}

// Product of all four components.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMul<4>( void ) const
{
	// todo: a pairwise-multiply reduction (vpmul-style) could cut this down
	AESimdFloat32 product = GetComponent<0>() * GetComponent<1>();
	product = product * GetComponent<2>();
	product = product * GetComponent<3>();
	return product;
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMul( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// GetHorizontalMax<N>: maximum of the first N components, broadcast into a
// SIMD scalar via pairwise max.

// N==1: just the x component.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMax<1>( void ) const
{
	return GetComponent<0>();
}

// max(x, y).
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMax<2>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	return AESimdFloat32::Convert( vpmax_f32( l, l ) );
}

// max(x, y, z): pairwise max of (x,y), then max against splatted z.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMax<3>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t xy = vpmax_f32( l, l );
	float32x2_t zz = vdup_lane_f32( vget_high_f32( m_qfVector ), 0 );
	return AESimdFloat32::Convert( vmax_f32( zz, xy ) );
}

// max(x, y, z, w): two pairwise max reductions.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMax<4>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t h = vget_high_f32( m_qfVector );
	float32x2_t xy_zw = vpmax_f32( l, h );
	return AESimdFloat32::Convert( vpmax_f32( xy_zw, xy_zw ) );
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMax( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

// GetHorizontalMin<N>: minimum of the first N components, broadcast into a
// SIMD scalar via pairwise min.  (Note: unlike the Max family, there is no
// <1> specialization here.)

// min(x, y).
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMin<2>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	return AESimdFloat32::Convert( vpmin_f32( l, l ) );
}

// min(x, y, z): pairwise min of (x,y), then min against splatted z.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMin<3>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t xy = vpmin_f32( l, l );
	float32x2_t zz = vdup_lane_f32( vget_high_f32( m_qfVector ), 0 );
	return AESimdFloat32::Convert( vmin_f32( zz, xy ) );
}

// min(x, y, z, w): two pairwise min reductions.
template <>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMin<4>( void ) const
{
	float32x2_t l = vget_low_f32( m_qfVector );
	float32x2_t h = vget_high_f32( m_qfVector );
	float32x2_t xy_zw = vpmin_f32( l, h );
	return AESimdFloat32::Convert( vpmin_f32( xy_zw, xy_zw ) );
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetHorizontalMin( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return AESimdFloat32::GetConstant<AE_QUADREAL_0>();
}

#if defined(AE_PLATFORM_IOS)

// Overlay view of the 128-bit NEON register as four scalar floats, used by
// the iOS operator() accessors below.
// NOTE(review): the "_hk" prefix looks like a Havok leftover; renaming it
// would touch the accessors below, so it is only flagged here.
struct _hkFloat32Struct
{
    AEFLOAT32 val[4];
};

// Mutable per-component access (iOS path): view the NEON register as four
// floats and return a reference to component a (0=x .. 3=w).
AE_FORCEINLINE AEFLOAT32& AEVector4f::operator() (AEINT32 a)
{
	// Fixed: the previous 3-argument call passed the literal 0x6d0c31d7 as the
	// asserted condition (a Havok-style assert id), so with this file's
	// 2-argument AEASSERT_MSG the bounds check never actually ran.  All other
	// AEASSERT_MSG uses in this file take (condition, message).
	AEASSERT_MSG(a>=0 && a<4, "index out of bounds for component access");
	return reinterpret_cast<_hkFloat32Struct&>(m_qfVector).val[a];
}

// Read-only per-component access (iOS path): view the NEON register as four
// floats and return a const reference to component a (0=x .. 3=w).
AE_FORCEINLINE const AEFLOAT32& AEVector4f::operator() (AEINT32 a) const
{
	// Fixed: dropped the stray leading 0x6d0c31d7 argument so the bounds
	// condition is actually asserted, matching every other AEASSERT_MSG use
	// in this file (condition, message).
	AEASSERT_MSG(a>=0 && a<4, "index out of bounds for component access");
	return reinterpret_cast<const _hkFloat32Struct&>(m_qfVector).val[a];
}

#else

// Mutable per-component access: reinterpret the register storage as a float
// array and index it (0=x .. 3=w).
AE_FORCEINLINE AEFLOAT32 &AEVector4f::operator() ( AEINT32 nIndex )
{
	AEASSERT_MSG( nIndex >= 0 && nIndex < 4, "index out of bounds for component access");
	return reinterpret_cast<AEFLOAT32*>( &m_qfVector )[nIndex];
}

// Read-only per-component access (0=x .. 3=w).
AE_FORCEINLINE const AEFLOAT32 &AEVector4f::operator() ( AEINT32 nIndex ) const
{
	AEASSERT_MSG( nIndex>=0 && nIndex<4, "index out of bounds for component access");
	return reinterpret_cast<const AEFLOAT32*>( &m_qfVector )[nIndex];
}

#endif

// this = (xyzVector.x, xyzVector.y, xyzVector.z, wVector.w).
AE_FORCEINLINE void AEVector4f::SetXYZ_W( AEVector4fRefParam vcXYZVector, AEVector4fRefParam vcWVector )
{
	m_qfVector = vsetq_lane_f32( vgetq_lane_f32( vcWVector.m_qfVector, 3 ), vcXYZVector.m_qfVector, 3 );
}

// this = (xyzVector.x, xyzVector.y, xyzVector.z, fW).
AE_FORCEINLINE void AEVector4f::SetXYZ_W( AEVector4fRefParam vcXYZVector, AESimdFloat32ConstRef fW )
{
	m_qfVector = vsetq_lane_f32( vget_lane_f32( fW.m_fReal, 0 ), vcXYZVector.m_qfVector, 3 );
}

// Replaces only this->w with wVector.w.
AE_FORCEINLINE void AEVector4f::SetW( AEVector4fRefParam vcWVector )
{
	m_qfVector = vsetq_lane_f32( vgetq_lane_f32( vcWVector.m_qfVector, 3 ), m_qfVector, 3 );
}

// Replaces x,y,z with xyzVector's, keeping this->w.
AE_FORCEINLINE void AEVector4f::SetXYZ( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = vsetq_lane_f32( vgetq_lane_f32( m_qfVector, 3 ), vcXYZVector.m_qfVector, 3 );
}

// this += v across ALL four lanes; in debug builds w is then stomped with
// 0xffffffff (a NaN bit pattern), presumably to flag accidental reads of w
// after an xyz-only operation.
AE_FORCEINLINE void AEVector4f::AddXYZ( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = vaddq_f32( m_qfVector, vcXYZVector.m_qfVector );
	
	#ifdef AEDEBUG
	reinterpret_cast<AEUINT32*>( &m_qfVector )[3] = 0xffffffff;
	#endif
}

// this -= v across ALL four lanes; debug builds poison w as in AddXYZ.
AE_FORCEINLINE void AEVector4f::SubXYZ( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = vsubq_f32( m_qfVector, vcXYZVector.m_qfVector );
	
	#ifdef AEDEBUG
	reinterpret_cast<AEUINT32*>( &m_qfVector )[3] = 0xffffffff;
	#endif
}

// Broadcasts fValue into x,y,z, keeping this->w.
AE_FORCEINLINE void AEVector4f::SetXYZ( AEFLOAT32 fValue )
{
	AEQuadFloat32 xyz = vdupq_n_f32( fValue );
	m_qfVector = vsetq_lane_f32( vgetq_lane_f32( m_qfVector, 3 ), xyz, 3 );
}

// Broadcasts a SIMD scalar into x,y,z, keeping this->w.
AE_FORCEINLINE void AEVector4f::SetXYZ( AESimdFloat32ConstRef fValue )
{
	AEQuadFloat32 xyz = vcombine_f32( fValue.m_fReal, fValue.m_fReal );
	m_qfVector = vsetq_lane_f32( vgetq_lane_f32( m_qfVector, 3 ), xyz, 3 );
}

// this = (v.x, v.y, v.z, 0).
AE_FORCEINLINE void AEVector4f::SetXYZ_0( AEVector4fRefParam vcXYZVector )
{
	m_qfVector = vsetq_lane_f32( (AEFLOAT32)0, vcXYZVector.m_qfVector, 3 );
}

// Returns component nIndex (0=x..3=w) splatted into a SIMD scalar.  The
// switch is required because vdup_lane_f32 needs a compile-time lane index.
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetComponent( const AEINT32 nIndex ) const
{
	AEASSERT_MSG( nIndex >= 0 && nIndex < 4, "index out of bounds for component access");

	AESingleFloat32 r;
	switch( nIndex )
	{
		case 1: r = vdup_lane_f32( vget_low_f32( m_qfVector ), 1 ); break;
		case 2: r = vdup_lane_f32( vget_high_f32( m_qfVector ), 0 ); break;
		case 3: r = vdup_lane_f32( vget_high_f32( m_qfVector ), 1 ); break;
		default: r = vdup_lane_f32( vget_low_f32( m_qfVector ), 0 ); break;
	}

	return AESimdFloat32::Convert(r);
}

// Compile-time variant of GetComponent; the switch collapses to one
// instruction after constant folding.
template <AEINT32 I>
AE_FORCEINLINE const AESimdFloat32 AEVector4f::GetComponent( void ) const
{
	AE_VECTOR4F_SUBINDEX_CHECK;

	AESingleFloat32 r;
	switch(I)
	{
		case 1: r = vdup_lane_f32( vget_low_f32( m_qfVector ), 1 ); break;
		case 2: r = vdup_lane_f32( vget_high_f32( m_qfVector ), 0 ); break;
		case 3: r = vdup_lane_f32( vget_high_f32( m_qfVector ), 1 ); break;
		default: r = vdup_lane_f32( vget_low_f32( m_qfVector ), 0 ); break;
	}

	return AESimdFloat32::Convert(r);
}

// Writes lane 0 of fValue into component nIndex (0=x..3=w); the switch is
// required because vsetq_lane_f32 needs a compile-time lane index.
AE_FORCEINLINE void AEVector4f::SetComponent( const AEINT32 nIndex, AESimdFloat32ConstRef fValue )
{
	AEASSERT_MSG( nIndex >= 0 && nIndex < 4, "index out of bounds for component access");
	
	switch( nIndex )
	{
		case 1: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 1 ); break;
		case 2: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 2 ); break;
		case 3: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 3 ); break;
		default: m_qfVector = vsetq_lane_f32 (vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 0 ); break;
	}
}

// Compile-time variant of SetComponent; the switch collapses to one
// instruction after constant folding.
template <AEINT32 I>
AE_FORCEINLINE void AEVector4f::SetComponent( AESimdFloat32ConstRef fValue )
{
	AE_VECTOR4F_SUBINDEX_CHECK;
    // Add Switch to fix Clang issue were last parameter of vgetq_lane_f32 must be in constrange(0,3)
    // See RSYS-1377 for more information
	switch(I)
	{
		case 1: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 1 ); break;
		case 2: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 2 ); break;
		case 3: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 3 ); break;
		default: m_qfVector = vsetq_lane_f32( vget_lane_f32( fValue.m_fReal, 0 ), m_qfVector, 0 ); break;
	}
}

// IsOk<N>: returns nonzero when none of the first N components carries the
// quiet-NaN bit pattern (exponent all-ones plus the quiet bit, 0x7FC00000).
// A lane is flagged only when ALL of those bits are set, so infinities
// (0x7F800000) pass the check; signaling NaNs without the quiet bit would
// also pass — presumably acceptable for this sanity check.

// Checks component x only.
template <> 
AE_FORCEINLINE AEBOOL32 AEVector4f::IsOk<1>( void ) const
{
	static const uint32x2_t quietnan = AE_NEON_CONSTANT2I(  0x7FC00000, 0x7FC00000);
	uint32x2_t mask = vand_u32(vreinterpret_u32_f32(vget_low_f32(m_qfVector)), quietnan);
	uint32x2_t test = vceq_u32(mask, quietnan);
	test = vmvn_u32(test);
	return vget_lane_u32(test,0);
}

// Checks components x and y.
template <> 
AE_FORCEINLINE AEBOOL32 AEVector4f::IsOk<2>( void ) const
{
	static const uint32x2_t quietnan = AE_NEON_CONSTANT2I( 0x7FC00000, 0x7FC00000 );
	uint32x2_t mask = vand_u32( vreinterpret_u32_f32( vget_low_f32( m_qfVector ) ), quietnan );
	uint32x2_t test = vceq_u32( mask, quietnan );
	test = vmvn_u32( test );
	return vget_lane_u32( test, 0 ) & vget_lane_u32( test, 1 );
}

// Checks components x, y and z (w ignored in the final AND).
template <> 
AE_FORCEINLINE AEBOOL32 AEVector4f::IsOk<3>( void ) const
{
	static const uint32x4_t quietnan = AE_NEON_CONSTANT4I( 0x7FC00000, 0x7FC00000, 0x7FC00000, 0x7FC00000 );
	uint32x4_t mask = vandq_u32( vreinterpretq_u32_f32( m_qfVector ), quietnan );
	uint32x4_t test = vceqq_u32( mask, quietnan );
	test = vmvnq_u32( test );
	return vgetq_lane_u32( test, 0 ) & vgetq_lane_u32( test, 1 ) & vgetq_lane_u32( test, 2 );
}

// Checks all four components.
template <> 
AE_FORCEINLINE AEBOOL32 AEVector4f::IsOk<4>( void ) const
{
	static const uint32x4_t quietnan = AE_NEON_CONSTANT4I( 0x7FC00000, 0x7FC00000, 0x7FC00000, 0x7FC00000 );
	uint32x4_t mask = vandq_u32( vreinterpretq_u32_f32( m_qfVector ), quietnan );
	uint32x4_t test = vceqq_u32( mask, quietnan );
	test = vmvnq_u32( test );
	return vgetq_lane_u32( test, 0 ) & vgetq_lane_u32( test, 1 ) & vgetq_lane_u32( test, 2 ) & vgetq_lane_u32( test, 3 );
}

// Fallback for unsupported N: compile-time error.
template <AEINT32 N> 
AE_FORCEINLINE AEBOOL32 AEVector4f::IsOk( void ) const
{
	AE_VECTOR4F_NOT_IMPLEMENTED;
	return false;
}

// Per-lane mask of components whose IEEE sign bit is set (note: -0.0f and
// negative NaNs count as set).
AE_FORCEINLINE const AEVector4fComparison AEVector4f::SignBitSet( void ) const
{
	static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
	return AEVector4fComparison::Convert( vtstq_u32( vreinterpretq_u32_f32( m_qfVector ), allsigns ) );
}

// Per-lane mask of components whose IEEE sign bit is clear.
AE_FORCEINLINE const AEVector4fComparison AEVector4f::SignBitClear( void ) const
{
	static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
	return AEVector4fComparison::Convert( vmvnq_u32(vtstq_u32( vreinterpretq_u32_f32( m_qfVector ), allsigns ) ) );
}

// this = vcVector with the sign bit flipped in every lane where the
// comparison mask is set (XOR with masked sign bits).
AE_FORCEINLINE void AEVector4f::SetFlipSign( AEVector4fRefParam vcVector, AEVector4fConstRefCompParam sSign )
{
	static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
	m_qfVector = vreinterpretq_f32_u32( veorq_u32(  vreinterpretq_u32_f32( vcVector.m_qfVector ), vandq_u32( sSign.m_mMask, allsigns ) ) );
}

// this = vcVector with each lane's sign flipped where vSign's lane is
// negative (XOR with vSign's sign bits).
AE_FORCEINLINE void AEVector4f::SetFlipSign( AEVector4fRefParam vcVector, AEVector4fRefParam vSign )
{
	static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
	m_qfVector = vreinterpretq_f32_u32( veorq_u32(  vreinterpretq_u32_f32( vcVector.m_qfVector ), vandq_u32( vreinterpretq_u32_f32( vSign.m_qfVector ), allsigns ) ) );
}
/*
AE_FORCEINLINE void AEVector4f::SetFlipSign( AEVector4fRefParam vcVector, AESimdFloat32ConstRef sSign )
{
	static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
	float32x4_t sharedSignV = vcombine_f32( sSign.m_fReal, sSign.m_fReal );
	m_qfVector = vreinterpretq_f32_u32( veorq_u32(  vreinterpretq_u32_f32( vcVector.m_qfVector ), vandq_u32( vreinterpretq_u32_f32( sharedSignV ), allsigns ) ) );
}
*/
// Compile-time-dispatched implementations of SetReciprocal, selected by the
// accuracy mode A and the divide-by-zero policy D.  NEON has no divide, so
// all paths refine a vrecpeq estimate with Newton-Raphson steps.
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported (A, D) combination is a compile error.
	template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
	struct unrollf_setReciprocal
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};

	// AE_DIV_IGNORE: raw reciprocal; zero lanes produce inf/NaN, caller's problem.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			switch (A)
			{
				case AE_ACC_23_BIT: 
					fSelf = AEMath::QuadReciprocal( a.m_qfVector ); 
					break;
				case AE_ACC_12_BIT: 
					{
						AEQuadFloat32 bb = vrecpeq_f32( a.m_qfVector ); // estimate is only 8 bit
						fSelf = vmulq_f32( bb, vrecpsq_f32( a.m_qfVector, bb ) ); // so do a pass1
					}
					break;
				default:
					fSelf = AEMath::QuadReciprocalTwoIter( a.m_qfVector ); 
					break; // full accuracy (AE_ACC_FULL)
			}
		}
	};
	
	// AE_DIV_SET_ZERO: lanes where a == 0 are forced to 0 (mask-clear via vbic).
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			AEQuadFloat32 zero = vcombine_f32( vcreate_f32(0), vcreate_f32(0) );
			uint32x4_t rEqualsZero = vceqq_f32( a.m_qfVector, zero ); // todo how to use the VCEQ #0 instruction
			AEQuadFloat32 e;
			unrollf_setReciprocal<A, AE_DIV_IGNORE>::Apply( e, a );
			fSelf = vreinterpretq_f32_u32( vbicq_u32( vreinterpretq_u32_f32( e ), rEqualsZero ) ); 
		}
	};
	
	// AE_DIV_SET_HIGH: lanes where a == 0 are forced to AE_FLOAT_HIGH.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_HIGH>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			AEQuadFloat32 huge = vdupq_n_f32( AE_FLOAT_HIGH );
			AEQuadFloat32 zero = vcombine_f32( vcreate_f32(0), vcreate_f32(0) );
			uint32x4_t rEqualsZero = vceqq_f32( a.m_qfVector, zero ); // todo how to use the VCEQ #0 instruction
			AEQuadFloat32 e;
			unrollf_setReciprocal<A, AE_DIV_IGNORE>::Apply( e, a );
			fSelf = vbslq_f32( rEqualsZero, huge, e );
		}
	};
	
	// AE_DIV_SET_MAX: lanes where a == 0 are forced to AE_FLOAT_MAX.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_MAX>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			AEQuadFloat32 huge = vdupq_n_f32( AE_FLOAT_MAX );
			AEQuadFloat32 zero = vcombine_f32( vcreate_f32(0), vcreate_f32(0) );
			uint32x4_t rEqualsZero = vceqq_f32( a.m_qfVector, zero ); // todo how to use the VCEQ #0 instruction
			AEQuadFloat32 e;
			unrollf_setReciprocal<A, AE_DIV_IGNORE>::Apply( e, a );
			fSelf = vbslq_f32( rEqualsZero, huge, e );
		}
	};
	
	// AE_DIV_SET_ZERO_AND_ONE: like SET_ZERO, but results within epsilon of
	// 1.0 are additionally snapped to exactly 1.0.
	template <AEMathAccuracyMode A>
	struct unrollf_setReciprocal<A, AE_DIV_SET_ZERO_AND_ONE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			unrollf_setReciprocal<A, AE_DIV_SET_ZERO>::Apply( fSelf, a );
			AEQuadFloat32 one = vdupq_n_f32( 1.0f );
			AEQuadFloat32 eps = vdupq_n_f32( AEMATH_EPSILON );
			AEQuadFloat32 absVal = vabsq_f32( vsubq_f32( fSelf, one ) );	
		    uint32x4_t lessEqualEps = vcleq_f32( absVal, eps );
			fSelf = vbslq_f32( lessEqualEps, one, fSelf );
		}
	};
} // End namespace 

// Sets this vector to the component-wise reciprocal 1/vcVector.
// A selects the Newton-Raphson refinement accuracy, D the divide-by-zero
// policy (see the unrollf_setReciprocal specializations above).
template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
AE_FORCEINLINE void AEVector4f::SetReciprocal( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setReciprocal<A,D>::Apply( m_qfVector, vcVector );
}

// Default-policy overload: 23-bit accuracy, zero components handled per
// AE_DIV_IGNORE (no special treatment).
AE_FORCEINLINE void AEVector4f::SetReciprocal( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setReciprocal<AE_ACC_23_BIT,AE_DIV_IGNORE>::Apply( m_qfVector, vcVector );
}

// Component-wise division helpers: fSelf = a / b, unrolled over the accuracy
// (A) and divide-by-zero (D) template policy parameters.
namespace AEVector4_AdvancedInterface
{
	// Primary template: any <A,D> combination without a specialization below
	// is an unsupported configuration.
	template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
	struct unrollf_setDiv
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a, AEVector4fRefParam b )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};
	
	// AE_DIV_IGNORE: compute a * (1/b); lanes where b is zero keep whatever
	// the reciprocal path produces (no special handling).
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a, AEVector4fRefParam b )
		{
			// Neon does not have a div instruction, so divide through the
			// refined reciprocal estimate of b.
			AEQuadFloat32 t;
			unrollf_setReciprocal<A,AE_DIV_IGNORE>::Apply( t, b );
			fSelf = vmulq_f32( a.m_qfVector, t );
		}
	};
	
	// AE_DIV_SET_ZERO: lanes where b == 0.0f are forced to +0.0f by clearing
	// all of their bits (BIC with the compare mask).
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a, AEVector4fRefParam b )
		{
			AEQuadFloat32 zero = vcombine_f32( vcreate_f32(0), vcreate_f32(0) );
			uint32x4_t rEqualsZero = vceqq_f32( b.m_qfVector, zero ); // TODO: use a VCEQ #0 encoding once the intrinsics expose it
			AEQuadFloat32 e;
			unrollf_setDiv<A, AE_DIV_IGNORE>::Apply( e, a, b );
			fSelf = vreinterpretq_f32_u32( vbicq_u32( vreinterpretq_u32_f32(e), rEqualsZero ) );
		}
	};
	
	// AE_DIV_SET_HIGH: lanes where b == 0.0f are replaced by AE_FLOAT_HIGH
	// carrying the sign of the numerator a (sign bit XORed in).
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_HIGH>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a, AEVector4fRefParam b )
		{
			static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
			AEQuadFloat32 huge = vdupq_n_f32( AE_FLOAT_HIGH );
			AEQuadFloat32 zero = vcombine_f32(vcreate_f32(0), vcreate_f32(0));
			uint32x4_t rEqualsZero = vceqq_f32( b.m_qfVector, zero); // TODO: use a VCEQ #0 encoding once the intrinsics expose it
			AEQuadFloat32 e;
			unrollf_setDiv<A, AE_DIV_IGNORE>::Apply( e, a, b );
			// Transfer the numerator's sign bit onto the substitute value.
			huge = vreinterpretq_f32_u32( veorq_u32(  vreinterpretq_u32_f32( huge ), vandq_u32( vreinterpretq_u32_f32( a.m_qfVector ), allsigns ) ) );
			fSelf = vbslq_f32( rEqualsZero, huge, e );
		}
	};
	
	// AE_DIV_SET_MAX: as AE_DIV_SET_HIGH, but substitutes signed AE_FLOAT_MAX.
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_MAX>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a, AEVector4fRefParam b )
		{
			static const uint32x4_t allsigns = AE_NEON_CONSTANT4I( 0x80000000, 0x80000000, 0x80000000, 0x80000000 );
			AEQuadFloat32 huge = vdupq_n_f32( AE_FLOAT_MAX );
			AEQuadFloat32 zero = vcombine_f32( vcreate_f32(0), vcreate_f32(0) );
			uint32x4_t rEqualsZero = vceqq_f32( b.m_qfVector, zero ); // TODO: use a VCEQ #0 encoding once the intrinsics expose it
			AEQuadFloat32 e;
			unrollf_setDiv<A, AE_DIV_IGNORE>::Apply( e, a, b );
			// Transfer the numerator's sign bit onto the substitute value.
			huge = vreinterpretq_f32_u32( veorq_u32(  vreinterpretq_u32_f32( huge ), vandq_u32( vreinterpretq_u32_f32( a.m_qfVector ), allsigns ) ) );
			fSelf = vbslq_f32( rEqualsZero, huge, e );
		}
	};
	
	// AE_DIV_SET_ZERO_AND_ONE: as AE_DIV_SET_ZERO, then any quotient within
	// AEMATH_EPSILON of 1.0f is snapped to exactly 1.0f.
	template <AEMathAccuracyMode A>
	struct unrollf_setDiv<A, AE_DIV_SET_ZERO_AND_ONE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a, AEVector4fRefParam b )
		{
			unrollf_setDiv<A, AE_DIV_SET_ZERO>::Apply( fSelf, a, b );
			AEQuadFloat32 one = vdupq_n_f32( 1.0f );
			AEQuadFloat32 eps = vdupq_n_f32( AEMATH_EPSILON );
			AEQuadFloat32 absVal = vabsq_f32( vsubq_f32( fSelf, one ) );	
			uint32x4_t lessEqualEps = vcleq_f32( absVal, eps );
			fSelf = vbslq_f32( lessEqualEps, one, fSelf );
		}
	};
} // End namespace 

// Sets this vector to the component-wise quotient vcVector1 / vcVector2.
// A selects the reciprocal refinement accuracy, D the divide-by-zero policy.
template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
AE_FORCEINLINE void AEVector4f::SetDiv( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	AEVector4_AdvancedInterface::unrollf_setDiv<A,D>::Apply( m_qfVector, vcVector1, vcVector2 );
}

// Default-policy overload: 23-bit accuracy, divide-by-zero ignored.
AE_FORCEINLINE void AEVector4f::SetDiv( AEVector4fRefParam vcVector1, AEVector4fRefParam vcVector2 )
{
	AEVector4_AdvancedInterface::unrollf_setDiv<AE_ACC_23_BIT,AE_DIV_IGNORE>::Apply( m_qfVector, vcVector1, vcVector2 );
}

// In-place component-wise division: *this = *this / vcVector, with explicit
// accuracy (A) and divide-by-zero (D) policies.
template <AEMathAccuracyMode A, AEMathDivByZeroMode D>
AE_FORCEINLINE void AEVector4f::Div( AEVector4fRefParam vcVector )
{
	SetDiv<A,D>( *this, vcVector );
}

// In-place component-wise division with the default SetDiv policies.
AE_FORCEINLINE void AEVector4f::Div( AEVector4fRefParam vcVector )
{
	SetDiv( *this, vcVector );
}

// Reciprocal square root helpers: fSelf = 1/sqrt(a), unrolled over the
// accuracy (A) and non-positive-input (S) policy template parameters.
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported <A,S> combination.
	template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
	struct unrollf_setSqrtInverse
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};
	
	// AE_SQRT_IGNORE: no special handling of non-positive lanes; the accuracy
	// mode selects how many Newton-Raphson steps refine the VRSQRTE estimate.
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrtInverse<A, AE_SQRT_IGNORE>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			switch (A)
			{
				case AE_ACC_23_BIT: 
					fSelf = AEMath::QuadReciprocalSquareRoot( a.m_qfVector ); 
					break;
				case AE_ACC_12_BIT: 
					{
						AEQuadFloat32 r0 = vrsqrteq_f32( a.m_qfVector ); // raw estimate is only ~8 bit
						AEQuadFloat32 e0 = vmulq_f32(r0, r0);
						fSelf = vmulq_f32( vrsqrtsq_f32(  a.m_qfVector, e0 ), r0 ); // so do one refinement pass
					}
					break;
				default:
					fSelf = AEMath::QuadReciprocalSquareRootTwoIter( a.m_qfVector );
					break; // AE_ACC_FULL: two refinement iterations
			}
		}
	};
	
	// AE_SQRT_SET_ZERO: lanes where a <= 0.0f are forced to +0.0f by clearing
	// their bits, so zero and negative inputs never produce inf/NaN.
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrtInverse<A, AE_SQRT_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, AEVector4fRefParam a )
		{
			AEQuadFloat32 zero = vcombine_f32(vcreate_f32(0), vcreate_f32(0));
			uint32x4_t rEqualsZero = vcleq_f32( a.m_qfVector, zero); // TODO: use a VCLE #0 encoding once the intrinsics expose it
			AEQuadFloat32 e;
			unrollf_setSqrtInverse<A, AE_SQRT_IGNORE>::Apply(e,a);
			fSelf = vreinterpretq_f32_u32( vbicq_u32( vreinterpretq_u32_f32(e), rEqualsZero ));
		}
	};
} // namespace 

// Sets this vector to the component-wise reciprocal square root of vcVector.
// A selects the refinement accuracy, S the non-positive-input policy.
template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
AE_FORCEINLINE void AEVector4f::SetSqrtInverse( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrtInverse<A,S>::Apply (m_qfVector, vcVector );
}

// Default-policy overload: 23-bit accuracy, non-positive lanes flushed to 0.
AE_FORCEINLINE void AEVector4f::SetSqrtInverse( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrtInverse<AE_ACC_23_BIT,AE_SQRT_SET_ZERO>::Apply( m_qfVector, vcVector );
}

// Square root helpers: fSelf = sqrt(a), computed as a * (1/sqrt(a)) since
// NEON has no vector sqrt instruction; unrolled over the accuracy (A) and
// non-positive-input (S) policy template parameters.
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported <A,S> combination.
	template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
	struct unrollf_setSqrt
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, AEVector4fRefParam a)
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};
	
	// AE_SQRT_IGNORE: even here the a == 0.0f lanes must be masked, because
	// 0 * (1/sqrt(0)) = 0 * inf = NaN.
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrt<A, AE_SQRT_IGNORE>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, AEVector4fRefParam a)
		{
			// Neon does not have a sqrt instruction
			// need to check the 0.0 case because we use invSqrt
			AEQuadFloat32 zero = vcombine_f32(vcreate_f32(0), vcreate_f32(0));
			uint32x4_t rEqualsZero = vceqq_f32( a.m_qfVector, zero); // TODO: use a VCEQ #0 encoding once the intrinsics expose it
			AEQuadFloat32 e;
			unrollf_setSqrtInverse<A, AE_SQRT_IGNORE>::Apply(e,a);
			// Neon mul does not normalize the sign -> premul to clear sign
			e = vmulq_f32(a.m_qfVector, e);
			fSelf = vreinterpretq_f32_u32( vbicq_u32( vreinterpretq_u32_f32(e), rEqualsZero ));
		}
	};
	
	// AE_SQRT_SET_ZERO: as above, but lanes with a <= 0.0f (negative inputs
	// included) are flushed to +0.0f via the VCLE mask.
	template <AEMathAccuracyMode A>
	struct unrollf_setSqrt<A, AE_SQRT_SET_ZERO>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, AEVector4fRefParam a)
		{
			// Neon does not have a sqrt instruction
			AEQuadFloat32 zero = vcombine_f32(vcreate_f32(0), vcreate_f32(0));
			uint32x4_t rEqualsZero = vcleq_f32( a.m_qfVector, zero); // TODO: use a VCLE #0 encoding once the intrinsics expose it
			AEQuadFloat32 e;
			unrollf_setSqrtInverse<A, AE_SQRT_IGNORE>::Apply(e,a);
			// Neon mul does not normalize the sign -> premul to clear sign
			e = vmulq_f32(a.m_qfVector, e);
			fSelf = vreinterpretq_f32_u32( vbicq_u32( vreinterpretq_u32_f32(e), rEqualsZero ));
		}
	};
} // namespace 

// Sets this vector to the component-wise square root of vcVector.
// A selects the refinement accuracy, S the non-positive-input policy.
template <AEMathAccuracyMode A, AEMathNegSqrtMode S>
AE_FORCEINLINE void AEVector4f::SetSqrt( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrt<A,S>::Apply( m_qfVector, vcVector );
}

// Default-policy overload: 23-bit accuracy, non-positive lanes flushed to 0.
AE_FORCEINLINE void AEVector4f::SetSqrt( AEVector4fRefParam vcVector )
{
	AEVector4_AdvancedInterface::unrollf_setSqrt<AE_ACC_23_BIT,AE_SQRT_SET_ZERO>::Apply( m_qfVector, vcVector );
}

// Load helpers: fill fSelf with N consecutive values read from a float or
// double pointer, unrolled over the element count (N) and io mode (A).
// In debug builds the unused upper lanes are poisoned with 0xffffffff (a NaN
// bit pattern) so accidental reads of them show up immediately.
//
// FIX: the debug poisoning previously wrote through
// reinterpret_cast<AEUINT32*>(&fSelf)[i], which type-puns a live NEON vector
// variable through an aliased pointer — undefined behavior under strict
// aliasing, and it forces a register spill/reload. The same bit pattern is
// now written with vsetq_lane_u32 on a vreinterpretq'd value.
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported <N, io-mode> combination for floats.
	template <AEINT32 N, AEMathIoMode A>
	struct unrollf_load
	{
		AE_FORCEINLINE static void Apply( AEQuadFloat32& fSelf, const AEFLOAT32*  p )
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};
	
	// Primary template: unsupported <N, io-mode> combination for doubles.
	template <AEINT32 N, AEMathIoMode A>
	struct unrolld_load
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEDOUBLE64*  p)
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};
	
	// Native-aligned float load: NEON vst1/vld1 impose no SIMD alignment
	// requirement, so this is the workhorse for every io mode.
	template <AEINT32 N>
	struct unrollf_load<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEFLOAT32*  p)
		{
			// neon does not align AEASSERT_MSG(0x64211c2f, ( ((hkUlong)p) & (sizeof(AEFLOAT32)-1) ) == 0, "pointer must be aligned to native size of AEFLOAT32.");
			switch (N)
			{
				case 4:
					fSelf = vld1q_f32( (float32_t const *)p );
					break;
				case 3:
					{
						// Low pair + duplicated third element in the high half.
						AESingleFloat32 q  = vld1_f32( (float32_t const *)p );
						AESingleFloat32 q2 = vld1_dup_f32( (float32_t const *) (p+2));
						fSelf = vcombine_f32(q,q2);
						#ifdef AEDEBUG
						// Poison lane w with an all-ones (NaN) bit pattern.
						fSelf = vreinterpretq_f32_u32( vsetq_lane_u32( 0xffffffffU, vreinterpretq_u32_f32( fSelf ), 3 ) );
						#endif
					}
					break;
				case 2:
					{
						AESingleFloat32 q = vld1_f32( (float32_t const *)p );
						fSelf = vcombine_f32(q,q);
						#ifdef AEDEBUG
						// Poison lanes z and w.
						uint32x4_t dbg = vreinterpretq_u32_f32( fSelf );
						dbg = vsetq_lane_u32( 0xffffffffU, dbg, 2 );
						fSelf = vreinterpretq_f32_u32( vsetq_lane_u32( 0xffffffffU, dbg, 3 ) );
						#endif
					}
					break;
				default:
					{
						// N == 1: broadcast the single element, then poison.
						fSelf = vld1q_dup_f32( (float32_t const *)p );
						#ifdef AEDEBUG
						// Poison lanes y, z and w.
						uint32x4_t dbg = vreinterpretq_u32_f32( fSelf );
						dbg = vsetq_lane_u32( 0xffffffffU, dbg, 1 );
						dbg = vsetq_lane_u32( 0xffffffffU, dbg, 2 );
						fSelf = vreinterpretq_f32_u32( vsetq_lane_u32( 0xffffffffU, dbg, 3 ) );
						#endif
					}
					break;
			}
		}
	};
	
	// Native-aligned double load: narrow to float through a stack scratch
	// buffer, then load as in the float path.
	template <AEINT32 N>
	struct unrolld_load<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEDOUBLE64*  p)
		{
			// neon does not align AEASSERT_MSG(0x64211c2f, ( ((hkUlong)p) & (sizeof(AEDOUBLE64)-1) ) == 0, "pointer must be aligned to native size of AEDOUBLE64.");
			float a[4]; 
			switch (N)
			{
			case 4:
				a[0] = float(p[0]); a[1] = float(p[1]); a[2] = float(p[2]); a[3] = float(p[3]); 
				fSelf = vld1q_f32( (float32_t const *)a );
				break;
			case 3:
				{
					a[0] = float(p[0]); a[1] = float(p[1]); a[2] = float(p[2]);
					AESingleFloat32 q  = vld1_f32( (float32_t const *)a );
					AESingleFloat32 q2 = vld1_dup_f32( (float32_t const *) &(a[2]));
					fSelf = vcombine_f32(q,q2);
					#ifdef AEDEBUG
					// Poison lane w with an all-ones (NaN) bit pattern.
					fSelf = vreinterpretq_f32_u32( vsetq_lane_u32( 0xffffffffU, vreinterpretq_u32_f32( fSelf ), 3 ) );
					#endif
				}
				break;
			case 2:
				{
					a[0] = float(p[0]); a[1] = float(p[1]);
					AESingleFloat32 q = vld1_f32( (float32_t const *)a );
					fSelf = vcombine_f32(q,q);
					#ifdef AEDEBUG
					// Poison lanes z and w.
					uint32x4_t dbg = vreinterpretq_u32_f32( fSelf );
					dbg = vsetq_lane_u32( 0xffffffffU, dbg, 2 );
					fSelf = vreinterpretq_f32_u32( vsetq_lane_u32( 0xffffffffU, dbg, 3 ) );
					#endif
				}
				break;
			default:
				{
					// N == 1: broadcast the single element, then poison.
					a[0] = float(p[0]);
					fSelf = vld1q_dup_f32( (float32_t const *)a );
					#ifdef AEDEBUG
					// Poison lanes y, z and w.
					uint32x4_t dbg = vreinterpretq_u32_f32( fSelf );
					dbg = vsetq_lane_u32( 0xffffffffU, dbg, 1 );
					dbg = vsetq_lane_u32( 0xffffffffU, dbg, 2 );
					fSelf = vreinterpretq_f32_u32( vsetq_lane_u32( 0xffffffffU, dbg, 3 ) );
					#endif
				}
				break;
			}
		}
	};
	
	// SIMD-aligned float load: the alignment cannot be exploited on NEON,
	// delegate to the native-aligned path.
	template <AEINT32 N>
	struct unrollf_load<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEFLOAT32*  p)
		{
			// neon does not align AEASSERT_MSG(0x64211c2f, ( ((hkUlong)p) & ((sizeof(AEFLOAT32)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			unrollf_load<N, AE_IO_NATIVE_ALIGNED>::Apply(fSelf,p);
		}
	};
	
	// SIMD-aligned double load: delegate to the native-aligned path.
	template <AEINT32 N>
	struct unrolld_load<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEDOUBLE64*  p)
		{
			// neon does not align AEASSERT_MSG(0x64211c2f, ( ((hkUlong)p) & ((sizeof(AEDOUBLE64)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			unrolld_load<N, AE_IO_NATIVE_ALIGNED>::Apply(fSelf,p);
		}
	};
	
	// Uncached float load: no non-temporal load hint on NEON, delegate.
	template <AEINT32 N>
	struct unrollf_load<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEFLOAT32*  p)
		{
			unrollf_load<N, AE_IO_SIMD_ALIGNED>::Apply(fSelf,p);
		}
	};
	
	// Uncached double load: no non-temporal load hint on NEON, delegate.
	template <AEINT32 N>
	struct unrolld_load<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply(AEQuadFloat32& fSelf, const AEDOUBLE64*  p)
		{
			unrolld_load<N, AE_IO_SIMD_ALIGNED>::Apply(fSelf,p);
		}
	};
} // End namespace 

// Loads N consecutive floats from lpAddr into this vector with io mode A.
// AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK rejects unsupported values of N.
template <AEINT32 N, AEMathIoMode A>
AE_FORCEINLINE void AEVector4f::Load(const AEFLOAT32 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_load<N,A>::Apply( m_qfVector, lpAddr );
}

// Loads N consecutive doubles (narrowed to float) from lpAddr with io mode A.
template <AEINT32 N, AEMathIoMode A>
AE_FORCEINLINE void AEVector4f::Load(const AEDOUBLE64 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrolld_load<N,A>::Apply( m_qfVector, lpAddr );
}

// Float load with the default io mode (AE_IO_SIMD_ALIGNED).
template <AEINT32 N>
AE_FORCEINLINE void AEVector4f::Load(const AEFLOAT32 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_load<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

// Double load with the default io mode (AE_IO_SIMD_ALIGNED).
template <AEINT32 N>
AE_FORCEINLINE void AEVector4f::Load(const AEDOUBLE64 *lpAddr )
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrolld_load<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

// Store helpers: write the first N lanes of fSelf to a float or double
// pointer, unrolled over the element count (N) and io mode (A).
namespace AEVector4_AdvancedInterface
{
	// Primary template: unsupported <N, io-mode> combination.
	template <AEINT32 N, AEMathIoMode A>
	struct unrollf_store
	{
		AE_FORCEINLINE static void Apply(const AEQuadFloat32& fSelf, AEFLOAT32*  p)
		{
			AE_VECTOR4F_TEMPLATE_CONFIG_NOT_IMPLEMENTED;
		}
	};
	
	// Native-aligned float store: NEON vst1 imposes no SIMD alignment
	// requirement, so this is the workhorse for every io mode.
	template <AEINT32 N>
	struct unrollf_store<N, AE_IO_NATIVE_ALIGNED>
	{
		AE_FORCEINLINE static void Apply(const AEQuadFloat32& fSelf, AEFLOAT32*  p)
		{
			// neon does not align AEASSERT_MSG(0x64211c2f, ( ((hkUlong)p) & (sizeof(AEFLOAT32)-1) ) == 0, "pointer must be aligned to native size of AEFLOAT32.");
			switch (N)
			{
				case 4:
					vst1q_f32( (float32_t*)p, fSelf );
					break;
				case 3:
					// low pair, then lane z on its own
					vst1_f32( (float32_t*)p, vget_low_f32( fSelf ) );
					vst1q_lane_f32( (float32_t*)(p+2), fSelf, 2 );
					break;
				case 2:
					vst1_f32( (float32_t*)p, vget_low_f32( fSelf ) );
					break;
				default:
					// N == 1: lane x only
					vst1q_lane_f32( (float32_t*)p, fSelf, 0 );
					break;
			}
		}
	};
	
	// SIMD-aligned float store: alignment cannot be exploited on NEON,
	// delegate to the native-aligned path.
	template <AEINT32 N>
	struct unrollf_store<N, AE_IO_SIMD_ALIGNED>
	{
		AE_FORCEINLINE static void Apply(const AEQuadFloat32& fSelf, AEFLOAT32*  p)
		{
			// neon does not align AEASSERT_MSG(0x64211c2f, ( ((hkUlong)p) & ((sizeof(AEFLOAT32)*(N!=3?N:4) )-1) ) == 0, "pointer must be aligned for SIMD.");
			unrollf_store<N, AE_IO_NATIVE_ALIGNED>::Apply(fSelf,p);
		}
	};
	
	// Uncached float store: no non-temporal store hint used here, delegate.
	template <AEINT32 N>
	struct unrollf_store<N, AE_IO_NOT_CACHED>
	{
		AE_FORCEINLINE static void Apply(const AEQuadFloat32& fSelf, AEFLOAT32*  p)
		{
			unrollf_store<N, AE_IO_SIMD_ALIGNED>::Apply(fSelf,p);
		}
	};
	
	// Double store: widen through an aligned float scratch buffer. The io
	// mode A is deliberately not consulted — the scratch buffer is always
	// SIMD-aligned, and the scalar widening writes below are mode-agnostic.
	template <AEINT32 N, AEMathIoMode A>
	struct unrolld_store
	{
		AE_FORCEINLINE static void Apply(const AEQuadFloat32& fSelf, AEDOUBLE64*  p)
		{
			AE_ALIGN(AEFLOAT32 a[4], 16);
			unrollf_store<N, AE_IO_SIMD_ALIGNED>::Apply(fSelf, a);
						  p[0] = AEDOUBLE64(a[0]);
			if ( N >= 2){ p[1] = AEDOUBLE64(a[1]); }
			if ( N >= 3){ p[2] = AEDOUBLE64(a[2]); }
			if ( N >= 4){ p[3] = AEDOUBLE64(a[3]); }
		}
	};
} // End namespace 

// Stores the first N lanes of this vector to lpAddr with io mode A.
// NOTE(review): the rounding-mode parameter R is accepted for cross-platform
// interface compatibility but is not consulted by this NEON backend — confirm
// against the other platform implementations.
template <AEINT32 N, AEMathIoMode A, AEMathRoundingMode R> 
AE_FORCEINLINE void AEVector4f::Store( AEFLOAT32 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store<N,A>::Apply( m_qfVector, lpAddr );
}

// Double store with explicit (unused on NEON) rounding mode R.
template <AEINT32 N, AEMathIoMode A, AEMathRoundingMode R> 
AE_FORCEINLINE void AEVector4f::Store(AEDOUBLE64 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrolld_store<N,A>::Apply( m_qfVector, lpAddr );
}

// Float store with io mode A and default rounding behavior.
template <AEINT32 N, AEMathIoMode A> 
AE_FORCEINLINE void AEVector4f::Store(AEFLOAT32 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store<N,A>::Apply( m_qfVector, lpAddr );
}

// Double store with io mode A and default rounding behavior.
template <AEINT32 N, AEMathIoMode A> 
AE_FORCEINLINE void AEVector4f::Store(AEDOUBLE64 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrolld_store<N,A>::Apply( m_qfVector, lpAddr );
}

// Float store with the default io mode (AE_IO_SIMD_ALIGNED).
template <AEINT32 N> 
AE_FORCEINLINE void AEVector4f::Store(AEFLOAT32 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrollf_store<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

// Double store with the default io mode (AE_IO_SIMD_ALIGNED).
template <AEINT32 N> 
AE_FORCEINLINE void AEVector4f::Store(AEDOUBLE64 *lpAddr ) const
{
	AE_VECTOR4F_UNSUPPORTED_LENGTH_CHECK;
	AEVector4_AdvancedInterface::unrolld_store<N,AE_IO_SIMD_ALIGNED>::Apply( m_qfVector, lpAddr );
}

#endif // _AEVECTOR4F_NEON_INL_
