/////////////////////////////////////////////////////////////////////////
//
// Amuse Engine SDK - core/math/types/Sse
// Copyright (c) 2014.  All Rights Reserved
//
// File:		AEMath_Sse.inl
// Author:		Gianluca Belardelli
// Date:		07/01/2014
//
/////////////////////////////////////////////////////////////////////////
#ifndef _AEMATH_SSE_INL_
#define _AEMATH_SSE_INL_

/// Adapted from the book "Methods and Programs for Mathematical Functions",
/// Stephen Lloyd Baluk Moshier, Prentice-Hall, 1989
///
/// Range reduction is into intervals of pi/4.  The reduction error is nearly eliminated 
/// by extended precision modular arithmetic.
/// 
/// Two polynomial approximating functions are employed.
/// Between 0 and pi/4 the sine is approximated by
///      x  +  x**3 P(x**2).
/// Between pi/4 and pi/2 the cosine is represented as
///      1  -  x**2 Q(x**2).
/// 
/// ACCURACY: matches cmath on MSVC
/// 
///                      Relative error:
/// arithmetic   domain        peak       rms
///    IEEE    -4096,+4096    1.2e-7     3.0e-8
///    IEEE    -8192,+8192    3.0e-7     3.0e-8
/// Partial loss of accuracy begins to occur at x = 2^13
/// = 8192. Results may be meaningless for x >= 2^24
///
/// PERFORMANCE: About 4.3x faster than ::sinf for 4 simultaneous values
AE_FORCEINLINE static __m128 AE_CALL quadSin(const __m128 &inX)
{
	// Polynomial coefficients, stored as IEEE-754 bit patterns so they splat straight
	// into SSE registers; the decimal value is noted after each line.
	AE_ALIGN16 static const AEINT32 sinCoeff0[4] = { 0xB94CA1F9, 0xB94CA1F9, 0xB94CA1F9, 0xB94CA1F9 }; // -1.9515295891e-4
	AE_ALIGN16 static const AEINT32 sinCoeff1[4] = { 0x3C08839E, 0x3C08839E, 0x3C08839E, 0x3C08839E }; //  8.3321608736e-3
	AE_ALIGN16 static const AEINT32 sinCoeff2[4] = { 0xBE2AAAA3, 0xBE2AAAA3, 0xBE2AAAA3, 0xBE2AAAA3 }; // -1.6666654611e-1

	AE_ALIGN16 static const AEINT32 cosCoeff0[4] = { 0x37CCF5CE, 0x37CCF5CE, 0x37CCF5CE, 0x37CCF5CE }; //  2.443315711809948e-005
	AE_ALIGN16 static const AEINT32 cosCoeff1[4] = { 0xBAB6061A, 0xBAB6061A, 0xBAB6061A, 0xBAB6061A }; // -1.388731625493765e-003
	AE_ALIGN16 static const AEINT32 cosCoeff2[4] = { 0x3D2AAAA5, 0x3D2AAAA5, 0x3D2AAAA5, 0x3D2AAAA5 }; //  4.166664568298827e-002

	AE_ALIGN16 static const AEINT32 signMask[4]  = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	AE_ALIGN16 static const AEINT32 fourDivPi[4] = { 0x3FA2F983, 0x3FA2F983, 0x3FA2F983, 0x3FA2F983 }; // 4 / Pi
	AE_ALIGN16 static const AEINT32 DP1[4]       = { 0xBF490000, 0xBF490000, 0xBF490000, 0xBF490000 }; // -0.78515625
	AE_ALIGN16 static const AEINT32 DP2[4]       = { 0xB97DA000, 0xB97DA000, 0xB97DA000, 0xB97DA000 }; // -2.4187564849853515625e-4
	AE_ALIGN16 static const AEINT32 DP3[4]       = { 0xB3222169, 0xB3222169, 0xB3222169, 0xB3222169 }; // -3.77489497744594108e-8
	AE_ALIGN16 static const AEINT32 floatOne[4]  = { 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 }; // 1.0
	AE_ALIGN16 static const AEINT32 floatHalf[4] = { 0x3F000000, 0x3F000000, 0x3F000000, 0x3F000000 }; // 0.5
	AE_ALIGN16 static const AEINT32 intOne[4]    = { 0x00000001, 0x00000001, 0x00000001, 0x00000001 };

	__m128 x = inX;

	// Split the input into |x| and its sign bit (sin(-x) = -sin(x)).
	__m128 sign_bit = x;
	x = _mm_andnot_ps( *(__m128*)signMask, x);
	sign_bit = _mm_and_ps(sign_bit, *(__m128*)signMask);

	// scale by 4/Pi so the truncated integer part is the pi/4-octant index
	__m128 y = _mm_mul_ps(x, *(__m128*)fourDivPi);

	__m128i one = *(__m128i*)intOne;
	__m128i two = _mm_add_epi32(one,one);
	__m128i four = _mm_add_epi32(two,two);
	// store the integer part of y in emm2
	__m128i emm2 = _mm_cvttps_epi32(y);
	// round the octant index up to an even value: j = (j+1) & (~1)
	emm2 = _mm_add_epi32(emm2, one);
	emm2 = _mm_andnot_si128(one, emm2);
	y = _mm_cvtepi32_ps(emm2);
	// swap-sign flag: bit 2 of j, shifted left 29 places into the float sign bit (bit 31)
	__m128i emm0 = _mm_and_si128(emm2, four);
	emm0 = _mm_slli_epi32(emm0, 29);
	// polynom selection mask: all-ones in lanes where (j & 2) == 0, i.e. the sine polynom applies
	emm2 = _mm_and_si128(emm2, two);
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

	__m128 swap_sign_bit = _mm_castsi128_ps(emm0);
	__m128 poly_mask = _mm_castsi128_ps(emm2);
	sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);

	// The magic pass: Extended precision modular arithmetic
	//   x = ((x - y * DP1) - y * DP2) - y * DP3
	// DP1+DP2+DP3 is -pi/4 split into three parts, so the adds below subtract y*pi/4
	// with much less cancellation error than a single multiply-subtract.
	__m128 xmm1 = *(__m128*)DP1;
	__m128 xmm2 = *(__m128*)DP2;
	__m128 xmm3 = *(__m128*)DP3;
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);

	// Evaluate the cosine polynom 1 - z/2 + z*z*Q(z), z = x*x (used where (j & 2) != 0)
	y = *(__m128*)cosCoeff0;
	__m128 z = _mm_mul_ps(x,x);

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(__m128*)cosCoeff1);
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, *(__m128*)cosCoeff2);
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	__m128 tmp = _mm_mul_ps(z, *(__m128*)floatHalf);
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, *(__m128*)floatOne);

	// Evaluate the sine polynom x + x^3 * P(x^2) (used where (j & 2) == 0)
	__m128 y2 = *(__m128*)sinCoeff0;
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(__m128*)sinCoeff1);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, *(__m128*)sinCoeff2);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	// per-lane select: sine polynom (y2) where poly_mask is set, cosine polynom otherwise
#if AE_SSE_VERSION >= 0x41
	y = _mm_blendv_ps(y,y2,poly_mask);
#else
	y2 = _mm_and_ps(poly_mask, y2);
	y = _mm_andnot_ps(poly_mask, y);
	y = _mm_or_ps(y,y2);
#endif
	// update the sign (input sign combined with the octant swap-sign flag)
	y = _mm_xor_ps(y, sign_bit);

	return y;
}

/// Double precision version of above
///
///                      Relative error:
/// arithmetic   domain        peak       rms
///    IEEE -1.07e9,+1.07e9  2.1e-16     5.4e-17
/// Partial loss of accuracy begins to occur at x = 2**30
/// = 1.074e9.  The loss is not gradual, but jumps suddenly to
/// about 1 part in 10e7.  Results may be meaningless for
/// x > 2**49 = 5.6e14.
///
/// PERFORMANCE: About 1.3x faster than ::sin for 2 simultaneous values

AE_FORCEINLINE static __m128d AE_CALL twoSin(const __m128d& inX)
{
	// Polynomial coefficients, stored as IEEE-754 bit patterns so they splat straight
	// into SSE registers; the decimal value is noted after each line.
	AE_ALIGN16 static const AEINT64 sinCoeff0[2] = { 0x3de5d8fd1fd19ccdull, 0x3de5d8fd1fd19ccdull }; //  1.58962301576546568060E-10
	AE_ALIGN16 static const AEINT64 sinCoeff1[2] = { 0xbe5ae5e5a9291f5dull, 0xbe5ae5e5a9291f5dull }; // -2.50507477628578072866E-8
	AE_ALIGN16 static const AEINT64 sinCoeff2[2] = { 0x3ec71de3567d48a1ull, 0x3ec71de3567d48a1ull }; //  2.75573136213857245213E-6
	AE_ALIGN16 static const AEINT64 sinCoeff3[2] = { 0xbf2a01a019bfdf03ull, 0xbf2a01a019bfdf03ull }; // -1.98412698295895385996E-4
	AE_ALIGN16 static const AEINT64 sinCoeff4[2] = { 0x3f8111111110f7d0ull, 0x3f8111111110f7d0ull }; //  8.33333333332211858878E-3
	AE_ALIGN16 static const AEINT64 sinCoeff5[2] = { 0xbfc5555555555548ull, 0xbfc5555555555548ull }; // -1.66666666666666307295E-1

	AE_ALIGN16 static const AEINT64 cosCoeff0[2] = { 0xbda8fa49a0861a9bull, 0xbda8fa49a0861a9bull }; // -1.13585365213876817300E-11
	AE_ALIGN16 static const AEINT64 cosCoeff1[2] = { 0x3e21ee9d7b4e3f05ull, 0x3e21ee9d7b4e3f05ull }; //  2.08757008419747316778E-9
	AE_ALIGN16 static const AEINT64 cosCoeff2[2] = { 0xbe927e4f7eac4bc6ull, 0xbe927e4f7eac4bc6ull }; // -2.75573141792967388112E-7
	AE_ALIGN16 static const AEINT64 cosCoeff3[2] = { 0x3efa01a019c844f5ull, 0x3efa01a019c844f5ull }; //  2.48015872888517045348E-5
	AE_ALIGN16 static const AEINT64 cosCoeff4[2] = { 0xbf56c16c16c14f91ull, 0xbf56c16c16c14f91ull }; // -1.38888888888730564116E-3
	AE_ALIGN16 static const AEINT64 cosCoeff5[2] = { 0x3fa555555555554bull, 0x3fa555555555554bull }; //  4.16666666666665929218E-2

	AE_ALIGN16 static const AEINT64 signMask[2]  = { 0x8000000000000000ull, 0x8000000000000000ull };
	AE_ALIGN16 static const AEINT64 fourDivPi[2] = { 0x3FF45F306DC9C883ull, 0x3FF45F306DC9C883ull }; // 4 / Pi
	AE_ALIGN16 static const AEINT64 DP1[2]       = { 0xbfe921fb40000000ull, 0xbfe921fb40000000ull }; // -7.85398125648498535156E-1
	AE_ALIGN16 static const AEINT64 DP2[2]       = { 0xbe64442d00000000ull, 0xbe64442d00000000ull }; // -3.77489470793079817668E-8
	AE_ALIGN16 static const AEINT64 DP3[2]       = { 0xbce8469898cc5170ull, 0xbce8469898cc5170ull }; // -2.69515142907905952645E-15
	AE_ALIGN16 static const AEINT64 floatOne[2]  = { 0x3FF0000000000000ull, 0x3FF0000000000000ull }; // 1.0
	AE_ALIGN16 static const AEINT64 floatHalf[2] = { 0x3FE0000000000000ull, 0x3FE0000000000000ull }; // 0.5
	AE_ALIGN16 static const AEINT32 intOne[4]    = { 0x00000001, 0x00000001, 0x00000001, 0x00000001 };

	__m128d x = inX;

	// Split the input into |x| and its sign bit (sin(-x) = -sin(x)).
	__m128d sign_bit = x;
	x = _mm_andnot_pd( *(__m128d*)signMask, x);
	sign_bit = _mm_and_pd(sign_bit, *(__m128d*)signMask);

	// scale by 4/Pi so the truncated integer part is the pi/4-octant index
	__m128d y = _mm_mul_pd(x, *(__m128d*)fourDivPi);

	__m128i one = *(__m128i*)intOne;
	__m128i two = _mm_add_epi32(one,one);
	__m128i four = _mm_add_epi32(two,two);
	// store the integer part of y in emm2 (cvttpd leaves the two indices in the low 32-bit lanes)
	__m128i emm2 = _mm_cvttpd_epi32(y);
	// round the octant index up to an even value: j = (j+1) & (~1)
	emm2 = _mm_add_epi32(emm2, one);
	emm2 = _mm_andnot_si128(one, emm2);
	y = _mm_cvtepi32_pd(emm2);
	// swap-sign flag: bit 2 of j shifted into bit 31; the shuffle below replicates each
	// 32-bit lane into a 64-bit lane so the flag lands on the double's sign bit (bit 63)
	__m128i emm0 = _mm_and_si128(emm2, four);
	emm0 = _mm_slli_epi32(emm0, 29);
	emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,1,0,0));
	// polynom selection mask: all-ones where (j & 2) == 0 (sine polynom applies),
	// widened from 32-bit to 64-bit lanes by the same replicate shuffle
	emm2 = _mm_and_si128(emm2, two);
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
	emm2 = _mm_shuffle_epi32(emm2, _MM_SHUFFLE(1,1,0,0));

	__m128d swap_sign_bit = _mm_castsi128_pd(emm0);
	__m128d poly_mask = _mm_castsi128_pd(emm2);
	sign_bit = _mm_xor_pd(sign_bit, swap_sign_bit);

	// The magic pass: Extended precision modular arithmetic
	//   x = ((x - y * DP1) - y * DP2) - y * DP3
	// DP1+DP2+DP3 is -pi/4 split into three parts, so the adds below subtract y*pi/4
	// with much less cancellation error than a single multiply-subtract.
	__m128d xmm1 = *(__m128d*)DP1;
	__m128d xmm2 = *(__m128d*)DP2;
	__m128d xmm3 = *(__m128d*)DP3;
	xmm1 = _mm_mul_pd(y, xmm1);
	xmm2 = _mm_mul_pd(y, xmm2);
	xmm3 = _mm_mul_pd(y, xmm3);
	x = _mm_add_pd(x, xmm1);
	x = _mm_add_pd(x, xmm2);
	x = _mm_add_pd(x, xmm3);

	__m128d z = _mm_mul_pd(x,x);

	// Evaluate the cosine polynom 1 - z/2 + z*z*Q(z), z = x*x (used where (j & 2) != 0)
	y = *(__m128d*)cosCoeff0;
	y = _mm_mul_pd(y, z);
	y = _mm_add_pd(y, *(__m128d*)cosCoeff1);
	y = _mm_mul_pd(y, z);
	y = _mm_add_pd(y, *(__m128d*)cosCoeff2);
	y = _mm_mul_pd(y, z);
	y = _mm_add_pd(y, *(__m128d*)cosCoeff3);
	y = _mm_mul_pd(y, z);
	y = _mm_add_pd(y, *(__m128d*)cosCoeff4);
	y = _mm_mul_pd(y, z);
	y = _mm_add_pd(y, *(__m128d*)cosCoeff5);
	y = _mm_mul_pd(y, z);
	y = _mm_mul_pd(y, z);
	__m128d tmp = _mm_mul_pd(z, *(__m128d*)floatHalf);
	y = _mm_sub_pd(y, tmp);
	y = _mm_add_pd(y, *(__m128d*)floatOne);

	// Evaluate the sine polynom x + x^3 * P(x^2) (used where (j & 2) == 0)
	__m128d y2 = *(__m128d*)sinCoeff0;
	y2 = _mm_mul_pd(y2, z);
	y2 = _mm_add_pd(y2, *(__m128d*)sinCoeff1);
	y2 = _mm_mul_pd(y2, z);
	y2 = _mm_add_pd(y2, *(__m128d*)sinCoeff2);
	y2 = _mm_mul_pd(y2, z);
	y2 = _mm_add_pd(y2, *(__m128d*)sinCoeff3);
	y2 = _mm_mul_pd(y2, z);
	y2 = _mm_add_pd(y2, *(__m128d*)sinCoeff4);
	y2 = _mm_mul_pd(y2, z);
	y2 = _mm_add_pd(y2, *(__m128d*)sinCoeff5);
	y2 = _mm_mul_pd(y2, z);
	y2 = _mm_mul_pd(y2, x);
	y2 = _mm_add_pd(y2, x);

	// per-lane select: sine polynom (y2) where poly_mask is set, cosine polynom otherwise
#if AE_SSE_VERSION >= 0x41
	y = _mm_blendv_pd(y,y2,poly_mask);
#else
	y2 = _mm_and_pd(poly_mask, y2);
	y = _mm_andnot_pd(poly_mask, y);
	y = _mm_or_pd(y,y2);
#endif
	// update the sign (input sign combined with the octant swap-sign flag)
	y = _mm_xor_pd(y, sign_bit);

	return y;
}

AE_FORCEINLINE static __m128 AE_CALL quadCos(const __m128& x)
{
	// Shift the argument by pi/2 and reuse the sine kernel: cos(x) = sin(x + pi/2).
	static AE_ALIGN16 const AEUINT32 halfPi[4] = { 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB }; // pi/2
	return quadSin(_mm_add_ps(x, *(__m128*)&halfPi));
}

/// Properties see twoSin
AE_FORCEINLINE static __m128d AE_CALL twoCos(const __m128d& x)
{
	// cos(x) = sin(x + pi/2)
	static AE_ALIGN16 const AEUINT64 pihalf[2] = { 0x3FF921FB54442D18ull, 0x3FF921FB54442D18ull };
	__m128d xx = _mm_add_pd(x, *(__m128d*)&pihalf);
	return twoSin(xx);
}

/// Adapted from the book "Methods and Programs for Mathematical Functions",
/// Stephen Lloyd Baluk Moshier, Prentice-Hall, 1989
///
/// A polynomial of the form x + x**3 P(x**2)
/// is used for |x| in the interval [0, 0.5].  If |x| > 0.5 it is
/// transformed by the identity
///
///    asin(x) = pi/2 - 2 asin( sqrt( (1-x)/2 ) ).
///
/// ACCURACY: matches cmath on MSVC
///
///                      Relative error:
/// arithmetic   domain      peak         rms
///    IEEE     -1, 1       2.5e-7       5.0e-8
///
/// PERFORMANCE: About 8x faster than ::asinf for 4 simultaneous values
AE_FORCEINLINE static __m128 AE_CALL quadAsin(const __m128& xx) 
{
	// Constants as IEEE-754 bit patterns; decimal values noted per line.
	AE_ALIGN16 static const AEINT32 signMask[4]     = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	AE_ALIGN16 static const AEINT32 linearLimit[4]  = { 0x38D1B717, 0x38D1B717, 0x38D1B717, 0x38D1B717 }; // 1e-4
	AE_ALIGN16 static const AEINT32 floatOne[4]     = { 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 }; // 1.0
	AE_ALIGN16 static const AEINT32 floatHalf[4]    = { 0x3F000000, 0x3F000000, 0x3F000000, 0x3F000000 }; // 0.5
	AE_ALIGN16 static const AEINT32 piover2[4]      = { 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB }; // pi/2
	AE_ALIGN16 static const AEINT32 coeff4[4]       = { 0x3D2CB352, 0x3D2CB352, 0x3D2CB352, 0x3D2CB352 }; // 4.2163199048E-2
	AE_ALIGN16 static const AEINT32 coeff3[4]       = { 0x3CC617E3, 0x3CC617E3, 0x3CC617E3, 0x3CC617E3 }; // 2.4181311049E-2
	AE_ALIGN16 static const AEINT32 coeff2[4]       = { 0x3D3A3EC7, 0x3D3A3EC7, 0x3D3A3EC7, 0x3D3A3EC7 }; // 4.5470025998E-2
	AE_ALIGN16 static const AEINT32 coeff1[4]       = { 0x3D9980F6, 0x3D9980F6, 0x3D9980F6, 0x3D9980F6 }; // 7.4953002686E-2
	AE_ALIGN16 static const AEINT32 coeff0[4]       = { 0x3E2AAAE4, 0x3E2AAAE4, 0x3E2AAAE4, 0x3E2AAAE4 }; // 1.6666752422E-1

	// Split input into |x| and sign (asin(-x) = -asin(x)).
	__m128 a = _mm_andnot_ps( *(__m128*)signMask, xx);
	__m128 sign = _mm_and_ps(*(__m128*)signMask, xx);

	// For |x| < 1e-4 use asin(x) ~= x (linear approximation)
	__m128 lessLimit = _mm_cmplt_ps(a,*(__m128*)linearLimit);

	// a > 0.5 branch operands: asin(a) = pi/2 - 2*asin(sqrt((1-a)/2))
	// NOTE(review): no input clamping — for |xx| > 1, (1-a)/2 goes negative and
	// the sqrt yields NaN, matching ::asinf's domain-error behavior loosely.
	__m128 zGT = _mm_mul_ps(*(__m128*)floatHalf, _mm_sub_ps(*(__m128*)floatOne, a));
	__m128 xGT = _mm_sqrt_ps(zGT);

	// a <= 0.5 branch operands: evaluate the polynom at a directly
	__m128 xLE = a;
	__m128 zLE = _mm_mul_ps(xLE,xLE);

	// pick per-lane which pair (x, z = x*x) the polynom is evaluated on
	__m128 rangeMask = _mm_cmpgt_ps(a, *(__m128*)floatHalf);
#if AE_SSE_VERSION >= 0x41
	__m128 x = _mm_blendv_ps(xLE,xGT,rangeMask);
	__m128 z = _mm_blendv_ps(zLE,zGT,rangeMask);
#else
	__m128 x = _mm_or_ps(_mm_andnot_ps(rangeMask, xLE),_mm_and_ps(rangeMask, xGT));
	__m128 z = _mm_or_ps(_mm_andnot_ps(rangeMask, zLE),_mm_and_ps(rangeMask, zGT));
#endif

	// zz = (((((c4*z)+c3)*z+c2)*z+c1)*z+c0)*z*x+x   i.e. x + x^3 * P(x^2)
	__m128 zz = _mm_mul_ps(*(__m128*)coeff4, z);
	zz = _mm_add_ps(*(__m128*)coeff3, zz);
	zz = _mm_mul_ps(zz,z);
	zz = _mm_add_ps(*(__m128*)coeff2, zz);
	zz = _mm_mul_ps(zz,z);
	zz = _mm_add_ps(*(__m128*)coeff1, zz);
	zz = _mm_mul_ps(zz,z);
	zz = _mm_add_ps(*(__m128*)coeff0, zz);
	zz = _mm_mul_ps(zz,z);
	zz = _mm_mul_ps(zz,x);
	zz = _mm_add_ps(zz,x);

	// undo the range reduction for the a > 0.5 lanes: pi/2 - 2*zz
	__m128 zzGT = _mm_add_ps(zz,zz);
	zzGT = _mm_sub_ps(*(__m128*)piover2,zzGT);
#if AE_SSE_VERSION >= 0x41
	zz = _mm_blendv_ps(zz,zzGT,rangeMask);
#else
	zz = _mm_or_ps(_mm_andnot_ps(rangeMask, zz),_mm_and_ps(rangeMask, zzGT));
#endif

	// overwrite tiny inputs with the linear approximation asin(x) ~= x
#if AE_SSE_VERSION >= 0x41
	zz = _mm_blendv_ps(zz,a,lessLimit);
#else
	zz = _mm_or_ps(_mm_andnot_ps(lessLimit, zz),_mm_and_ps(lessLimit, a));
#endif

	// restore the input sign
	zz = _mm_xor_ps(zz, sign);

	return zz;
}

/// Double precision version of above except that it uses
/// a rational function of the form x + x**3 P(x**2)/Q(x**2)
///
///                      Relative error:
/// arithmetic   domain        peak       rms
///    IEEE     -1, 1       1.9e-16     5.4e-17
///
/// PERFORMANCE: About 2x faster than ::asin for 2 simultaneous values

AE_FORCEINLINE static __m128d AE_CALL twoAsin(const __m128d& xx) 
{
	// Constants as IEEE-754 bit patterns; decimal values noted per line.
	// R/S are the numerator/denominator of the rational approximation used for
	// |x| > 0.625; P/Q the pair used for |x| <= 0.625.
	AE_ALIGN16 static const AEINT64 signMask[2]     = { 0x8000000000000000ull, 0x8000000000000000ull };
	AE_ALIGN16 static const AEINT64 linearLimit[2]  = { 0x3E45798EE2308C3Aull, 0x3E45798EE2308C3Aull }; // 1e-8
	AE_ALIGN16 static const AEINT64 floatOne[2]     = { 0x3FF0000000000000ull, 0x3FF0000000000000ull }; // 1.0
	AE_ALIGN16 static const AEINT64 limit[2]        = { 0x3FE4000000000000ull, 0x3FE4000000000000ull }; // 0.625
	AE_ALIGN16 static const AEINT64 piover4[2]      = { 0x3FE921FB54442D18ull, 0x3FE921FB54442D18ull }; // pi/4
	AE_ALIGN16 static const AEINT64 morebits[2]     = { 0x3C91A62633145C07ull, 0x3C91A62633145C07ull }; // 6.123233995736765886130E-17
	AE_ALIGN16 static const AEINT64 R0[2]           = { 0x3f684fc3988e9f08ull, 0x3f684fc3988e9f08ull }; //  2.967721961301243206100E-3
	AE_ALIGN16 static const AEINT64 R1[2]           = { 0xbfe2079259f9290full, 0xbfe2079259f9290full }; // -5.634242780008963776856E-1
	AE_ALIGN16 static const AEINT64 R2[2]           = { 0x401bdff5baf33e6aull, 0x401bdff5baf33e6aull }; //  6.968710824104713396794E0
	AE_ALIGN16 static const AEINT64 R3[2]           = { 0xc03991aaac01ab68ull, 0xc03991aaac01ab68ull }; // -2.556901049652824852289E1
	AE_ALIGN16 static const AEINT64 R4[2]           = { 0x403c896240f3081dull, 0x403c896240f3081dull }; //  2.853665548261061424989E1
	AE_ALIGN16 static const AEINT64 S1[2]           = { 0xc035f2a2b6bf5d8cull, 0xc035f2a2b6bf5d8cull }; // -2.194779531642920639778E1
	AE_ALIGN16 static const AEINT64 S2[2]           = { 0x40626219af6a7f42ull, 0x40626219af6a7f42ull }; //  1.470656354026814941758E2
	AE_ALIGN16 static const AEINT64 S3[2]           = { 0xc077fe08959063eeull, 0xc077fe08959063eeull }; // -3.838770957603691357202E2
	AE_ALIGN16 static const AEINT64 S4[2]           = { 0x40756709b0b644beull, 0x40756709b0b644beull }; //  3.424398657913078477438E2
	AE_ALIGN16 static const AEINT64 P0[2]           = { 0x3f716b9b0bd48ad3ull, 0x3f716b9b0bd48ad3ull }; //  4.253011369004428248960E-3
	AE_ALIGN16 static const AEINT64 P1[2]           = { 0xbfe34341333e5c16ull, 0xbfe34341333e5c16ull }; // -6.019598008014123785661E-1
	AE_ALIGN16 static const AEINT64 P2[2]           = { 0x4015c74b178a2dd9ull, 0x4015c74b178a2dd9ull }; //  5.444622390564711410273E0
	AE_ALIGN16 static const AEINT64 P3[2]           = { 0xc0304331de27907bull, 0xc0304331de27907bull }; // -1.626247967210700244449E1
	AE_ALIGN16 static const AEINT64 P4[2]           = { 0x40339007da779259ull, 0x40339007da779259ull }; //  1.956261983317594739197E1
	AE_ALIGN16 static const AEINT64 P5[2]           = { 0xc020656c06ceafd5ull, 0xc020656c06ceafd5ull }; // -8.198089802484824371615E0
	AE_ALIGN16 static const AEINT64 Q1[2]           = { 0xc02d7b590b5e0eabull, 0xc02d7b590b5e0eabull }; // -1.474091372988853791896E1
	AE_ALIGN16 static const AEINT64 Q2[2]           = { 0x40519fc025fe9054ull, 0x40519fc025fe9054ull }; //  7.049610280856842141659E1
	AE_ALIGN16 static const AEINT64 Q3[2]           = { 0xc06265bb6d3576d7ull, 0xc06265bb6d3576d7ull }; // -1.471791292232726029859E2
	AE_ALIGN16 static const AEINT64 Q4[2]           = { 0x4061705684ffbf9dull, 0x4061705684ffbf9dull }; //  1.395105614657485689735E2
	AE_ALIGN16 static const AEINT64 Q5[2]           = { 0xc04898220a3607acull, 0xc04898220a3607acull }; // -4.918853881490881290097E1


	// Split input into |x| and sign (asin(-x) = -asin(x)).
	__m128d a = _mm_andnot_pd( *(__m128d*)signMask, xx);
	__m128d sign = _mm_and_pd(*(__m128d*)signMask, xx);

	// For |x| < 1e-8 use asin(x) ~= x (linear approximation)
	__m128d lessLimit = _mm_cmplt_pd(a,*(__m128d*)linearLimit);

	__m128d selectGT = _mm_cmpgt_pd(a, *(__m128d*)limit);

	// a > 0.625 branch: asin(a) = pi/2 - sqrt(2*(1-a))*(1 + R(1-a)/S(1-a)), with pi/2
	// built as piover4 + piover4 and 'morebits' carrying the rounding tail of pi/2.
	// Both branches are always computed for both lanes and merged at the end.
	__m128d zGT;
	{
		__m128d zz = _mm_sub_pd(*(__m128d*)floatOne, a);

		// numerator R(zz), Horner form
		__m128d polR = _mm_mul_pd(*(__m128d*)R0, zz);
		polR = _mm_add_pd(polR, *(__m128d*)R1);
		polR = _mm_mul_pd(polR, zz);
		polR = _mm_add_pd(polR, *(__m128d*)R2);
		polR = _mm_mul_pd(polR, zz);
		polR = _mm_add_pd(polR, *(__m128d*)R3);
		polR = _mm_mul_pd(polR, zz);
		polR = _mm_add_pd(polR, *(__m128d*)R4);
		polR = _mm_mul_pd(polR, zz);

		// denominator S(zz), monic (leading coefficient 1)
		__m128d polS = zz; // coeff0 = 1
		polS = _mm_add_pd(polS, *(__m128d*)S1);
		polS = _mm_mul_pd(polS, zz);
		polS = _mm_add_pd(polS, *(__m128d*)S2);
		polS = _mm_mul_pd(polS, zz);
		polS = _mm_add_pd(polS, *(__m128d*)S3);
		polS = _mm_mul_pd(polS, zz);
		polS = _mm_add_pd(polS, *(__m128d*)S4);

		__m128d p = _mm_div_pd(polR,polS);
		zz = _mm_sqrt_pd(_mm_add_pd(zz,zz));	// sqrt(2*(1-a))
		zGT = _mm_sub_pd(*(__m128d*)piover4, zz);
		zz = _mm_sub_pd(_mm_mul_pd(zz,p),*(__m128d*)morebits);
		zGT = _mm_sub_pd(zGT,zz);
		zGT = _mm_add_pd(zGT, *(__m128d*)piover4);
	}

	// a <= 0.625 branch: asin(a) = a + a * zz * P(zz)/Q(zz), zz = a*a
	__m128d zLT;
	{
		__m128d zz = _mm_mul_pd(a,a);

		// numerator zz * P(zz), Horner form
		__m128d polP = _mm_mul_pd(*(__m128d*)P0, zz);
		polP = _mm_add_pd(polP, *(__m128d*)P1);
		polP = _mm_mul_pd(polP, zz);
		polP = _mm_add_pd(polP, *(__m128d*)P2);
		polP = _mm_mul_pd(polP, zz);
		polP = _mm_add_pd(polP, *(__m128d*)P3);
		polP = _mm_mul_pd(polP, zz);
		polP = _mm_add_pd(polP, *(__m128d*)P4);
		polP = _mm_mul_pd(polP, zz);
		polP = _mm_add_pd(polP, *(__m128d*)P5);
		polP = _mm_mul_pd(polP, zz);

		// denominator Q(zz), monic (leading coefficient 1)
		__m128d polQ = zz; // coeff0 = 1
		polQ = _mm_add_pd(polQ, *(__m128d*)Q1);
		polQ = _mm_mul_pd(polQ, zz);
		polQ = _mm_add_pd(polQ, *(__m128d*)Q2);
		polQ = _mm_mul_pd(polQ, zz);
		polQ = _mm_add_pd(polQ, *(__m128d*)Q3);
		polQ = _mm_mul_pd(polQ, zz);
		polQ = _mm_add_pd(polQ, *(__m128d*)Q4);
		polQ = _mm_mul_pd(polQ, zz);
		polQ = _mm_add_pd(polQ, *(__m128d*)Q5);

		zLT = _mm_div_pd(polP,polQ);
		zLT = _mm_mul_pd(a,zLT);
		zLT = _mm_add_pd(a,zLT);
	}

	__m128d z;

	// merge the two branches per lane, then overwrite tiny inputs with asin(x) ~= x
#if AE_SSE_VERSION >= 0x41
	z = _mm_blendv_pd(zLT,zGT,selectGT);
	z = _mm_blendv_pd(z,a,lessLimit);
#else
	z = _mm_or_pd(_mm_andnot_pd(selectGT, zLT),_mm_and_pd(selectGT, zGT));
	z = _mm_or_pd(_mm_andnot_pd(lessLimit, z),_mm_and_pd(lessLimit, a));
#endif

	// restore the input sign
	z = _mm_xor_pd(z, sign);

	return z;
}

AE_FORCEINLINE static __m128 AE_CALL quadAcos(const __m128& xx) 
{
	// Derive arccosine from the arcsine kernel: acos(x) = pi/2 - asin(x).
	AE_ALIGN16 static const AEINT32 halfPi[4] = { 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB }; // pi/2
	return _mm_sub_ps(*(__m128*)halfPi, quadAsin(xx));
}

AE_FORCEINLINE static __m128d AE_CALL twoAcos(const __m128d& xx) 
{
	// Derive arccosine from the arcsine kernel: acos(x) = pi/2 - asin(x).
	AE_ALIGN16 static const AEINT64 halfPi[2] = { 0x3FF921FB54442D18ull, 0x3FF921FB54442D18ull }; // pi/2
	return _mm_sub_pd(*(__m128d*)halfPi, twoAsin(xx));
}

/// Average absolute error 0.000069
/// Max absolute error 0.000763
/// About 6x faster than ::atan2f for 4 simultaneous values
AE_FORCEINLINE static __m128 AE_CALL quadAtan2(const __m128& y, const __m128& x)
{
	// Constants as IEEE-754 bit patterns; decimal values noted per line.
	// t0..t3 / s0..s3 are the coefficients of a continued-fraction style atan approximation.
	static AE_ALIGN16 const AEUINT32 signMask[4]= { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	static AE_ALIGN16 const AEUINT32 one[4]		= { 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 }; //  1.0
	static AE_ALIGN16 const AEUINT32 pi[4]		= { 0x40490FDB, 0x40490FDB, 0x40490FDB, 0x40490FDB }; //  PI
	static AE_ALIGN16 const AEUINT32 piOver2[4]	= { 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB, 0x3FC90FDB }; //  PI / 2
	static AE_ALIGN16 const AEUINT32 t0[4]		= { 0xBDBBB0F6, 0xBDBBB0F6, 0xBDBBB0F6, 0xBDBBB0F6 }; // -0.91646118527267623468e-1
	static AE_ALIGN16 const AEUINT32 t1[4]		= { 0xBFB2A61F, 0xBFB2A61F, 0xBFB2A61F, 0xBFB2A61F }; // -0.13956945682312098640e1
	static AE_ALIGN16 const AEUINT32 t2[4]		= { 0xC2BCC9B1, 0xC2BCC9B1, 0xC2BCC9B1, 0xC2BCC9B1 }; // -0.94393926122725531747e2
	static AE_ALIGN16 const AEUINT32 t3[4]		= { 0x414E36D1, 0x414E36D1, 0x414E36D1, 0x414E36D1 }; //  0.12888383034157279340e2
	static AE_ALIGN16 const AEUINT32 s0[4]		= { 0x3FA3CF0F, 0x3FA3CF0F, 0x3FA3CF0F, 0x3FA3CF0F }; //  0.12797564625607904396e1
	static AE_ALIGN16 const AEUINT32 s1[4]		= { 0x400C9F34, 0x400C9F34, 0x400C9F34, 0x400C9F34 }; //  0.21972168858277355914e1
	static AE_ALIGN16 const AEUINT32 s2[4]		= { 0x40DA37C2, 0x40DA37C2, 0x40DA37C2, 0x40DA37C2 }; //  0.68193064729268275701e1
	static AE_ALIGN16 const AEUINT32 s3[4]		= { 0x41E1A443, 0x41E1A443, 0x41E1A443, 0x41E1A443 }; //  0.28205206687035841409e2

	__m128 oneF = *(const __m128*)one;

	// Approximate 1/x via rcp (about 12 bits of precision) — this is the main
	// contributor to the quoted ~7e-4 max absolute error.
	__m128 rx = _mm_rcp_ps(x);
	__m128 signMaskF = *(const __m128*)signMask;
	__m128 y_div_x = _mm_mul_ps(y, rx);
	// Quadrant correction term: pi carrying the sign of y (added later when x < 0).
	__m128 esp4 = _mm_or_ps(_mm_and_ps(signMaskF, y), *(const __m128*)pi);
	// x >= 0 test done on the reciprocal (rcp(+-0) is +-Inf, so the sign of x decides).
	__m128 x_ge_0 = _mm_cmple_ps(_mm_setzero_ps(), rx);

	__m128 minus1F = _mm_or_ps(oneF, signMaskF);
	// x5: lanes where |y/x| > 1 (y/x > 1 or not(-1 <= y/x)); those use the identity
	// atan(z) = +-pi/2 - atan(1/z) and evaluate the polynom on the reciprocal.
	__m128 x5 = _mm_or_ps(_mm_cmplt_ps(oneF, y_div_x), _mm_cmpnle_ps(minus1F, y_div_x));

	__m128 x4 = _mm_rcp_ps(y_div_x);
#if AE_SSE_VERSION >= 0x41
	x4 = _mm_blendv_ps(y_div_x,x4,x5);
#else
	x4 = _mm_or_ps(_mm_andnot_ps(x5, y_div_x), _mm_and_ps(x5, x4));
#endif
	__m128 x0 = x4;		// reduced argument z, |z| <= ~1

	x4 = _mm_mul_ps(x4, x4);	// z^2

	// Continued-fraction evaluation: t_i / (s_i + z^2 + previous term), bottom-up.
	__m128 x1 = _mm_add_ps(*(const __m128*)s0, x4);
	x1 = _mm_mul_ps(_mm_rcp_ps(x1), *(const __m128*)t0);
	__m128 x3 = _mm_add_ps(*(const __m128*)s1, x4);
	x1 = _mm_add_ps(x1, x3);

	x1 = _mm_mul_ps(_mm_rcp_ps(x1), *(const __m128*)t1);
	__m128 x7 = _mm_add_ps(*(const __m128*)s2, x4);
	x1 = _mm_add_ps(x1, x7);

	x1 = _mm_mul_ps(_mm_rcp_ps(x1), *(const __m128*)t2);
	x3 = _mm_add_ps(*(const __m128*)s3, x4);
	__m128 x6 = _mm_mul_ps(*(const __m128*)t3, x0);
	x1 = _mm_add_ps(x1, x3);

	x0 = _mm_and_ps(x0, signMaskF);		// keep only the sign of the reduced argument
	x1 = _mm_mul_ps(_mm_rcp_ps(x1), x6);	// x1 = atan(z) approximation

	// +-pi/2 - atan(1/z) for the |y/x| > 1 lanes
	x0 = _mm_sub_ps(_mm_or_ps(x0, *(const __m128*)piOver2), x1);

#if AE_SSE_VERSION >= 0x41
	x0 = _mm_blendv_ps(x1,x0,x5);
#else
	x0 = _mm_or_ps(_mm_andnot_ps(x5, x1), _mm_and_ps(x5, x0));
#endif

	// add the +-pi quadrant correction in lanes where x < 0
	x1 = _mm_add_ps(x0, esp4);
#if AE_SSE_VERSION >= 0x41
	x0 = _mm_blendv_ps(x1,x0,x_ge_0);
#else
	x0 = _mm_or_ps(_mm_andnot_ps(x_ge_0, x1),_mm_and_ps(x_ge_0, x0));
#endif

	return x0;
}

/// Average absolute error 0.000069
/// Max absolute error 0.000712
/// About 1.5x faster than ::atan2 for 2 simultaneous values
AE_FORCEINLINE static __m128d AE_CALL twoAtan2(const __m128d& y, const __m128d& x)
{
	// Constants as IEEE-754 bit patterns; decimal values noted per line.
	// Same continued-fraction coefficients as quadAtan2, widened to double.
	static AE_ALIGN16 const AEUINT64 signMask[2]= { 0x8000000000000000ull, 0x8000000000000000ull };
	static AE_ALIGN16 const AEUINT64 one[2]		= { 0x3FF0000000000000ull, 0x3FF0000000000000ull }; //  1.0
	static AE_ALIGN16 const AEUINT64 pi[2]      = { 0x400921FB54442D18ull, 0x400921FB54442D18ull }; // pi
	static AE_ALIGN16 const AEUINT64 piOver2[2] = { 0x3FF921FB54442D18ull, 0x3FF921FB54442D18ull }; // pi / 2
	static AE_ALIGN16 const AEUINT64 t0[2]		= { 0xBFB7761EB9E144ACull, 0xBFB7761EB9E144ACull }; // -0.91646118527267623468e-1
	static AE_ALIGN16 const AEUINT64 t1[2]		= { 0xBFF654C3D3DC204Eull, 0xBFF654C3D3DC204Eull }; // -0.13956945682312098640e1
	static AE_ALIGN16 const AEUINT64 t2[2]		= { 0xC057993615E9895Cull, 0xC057993615E9895Cull }; // -0.94393926122725531747e2
	static AE_ALIGN16 const AEUINT64 t3[2]		= { 0x4029C6DA241C0DB4ull, 0x4029C6DA241C0DB4ull }; //  0.12888383034157279340e2
	static AE_ALIGN16 const AEUINT64 s0[2]		= { 0x3FF479E1E998B120ull, 0x3FF479E1E998B120ull }; //  0.12797564625607904396e1
	static AE_ALIGN16 const AEUINT64 s1[2]		= { 0x400193E67256CAF0ull, 0x400193E67256CAF0ull }; //  0.21972168858277355914e1
	static AE_ALIGN16 const AEUINT64 s2[2]		= { 0x401B46F846AA7CC2ull, 0x401B46F846AA7CC2ull }; //  0.68193064729268275701e1
	static AE_ALIGN16 const AEUINT64 s3[2]		= { 0x403C34886CE9BD4Cull, 0x403C34886CE9BD4Cull }; //  0.28205206687035841409e2

	__m128d oneF = *(const __m128d*)one;

	// Approximate 1/x by narrowing to float, rcp_ps (~12 bits), widening back —
	// which is why the error figures match the single-precision version rather
	// than full double precision.
	__m128d rx = _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(x)));
	__m128d signMaskF = *(const __m128d*)signMask;
	__m128d y_div_x = _mm_mul_pd(y, rx);
	// Quadrant correction term: pi carrying the sign of y (added later when x < 0).
	__m128d esp4 = _mm_or_pd(_mm_and_pd(signMaskF, y), *(const __m128d*)pi);
	// x >= 0 test done on the reciprocal (rcp(+-0) is +-Inf, so the sign of x decides).
	__m128d x_ge_0 = _mm_cmple_pd(_mm_setzero_pd(), rx);

	__m128d minus1F = _mm_or_pd(oneF, signMaskF);
	// x5: lanes where |y/x| > 1; those use atan(z) = +-pi/2 - atan(1/z).
	__m128d x5 = _mm_or_pd(_mm_cmplt_pd(oneF, y_div_x), _mm_cmpnle_pd(minus1F, y_div_x));

	__m128d x4 = _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(y_div_x)));
#if AE_SSE_VERSION >= 0x41
	x4 = _mm_blendv_pd(y_div_x,x4,x5);
#else
	x4 = _mm_or_pd(_mm_andnot_pd(x5, y_div_x), _mm_and_pd(x5, x4));
#endif
	__m128d x0 = x4;		// reduced argument z, |z| <= ~1

	x4 = _mm_mul_pd(x4, x4);	// z^2

	// Continued-fraction evaluation: t_i / (s_i + z^2 + previous term), bottom-up.
	__m128d x1 = _mm_add_pd(*(const __m128d*)s0, x4);
	x1 = _mm_mul_pd(_mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(x1))), *(const __m128d*)t0);
	__m128d x3 = _mm_add_pd(*(const __m128d*)s1, x4);
	x1 = _mm_add_pd(x1, x3);

	x1 = _mm_mul_pd(_mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(x1))), *(const __m128d*)t1);
	__m128d x7 = _mm_add_pd(*(const __m128d*)s2, x4);
	x1 = _mm_add_pd(x1, x7);

	x1 = _mm_mul_pd(_mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(x1))), *(const __m128d*)t2);
	x3 = _mm_add_pd(*(const __m128d*)s3, x4);
	__m128d x6 = _mm_mul_pd(*(const __m128d*)t3, x0);
	x1 = _mm_add_pd(x1, x3);

	x0 = _mm_and_pd(x0, signMaskF);		// keep only the sign of the reduced argument
	x1 = _mm_mul_pd(_mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(x1))), x6);	// x1 = atan(z)

	// +-pi/2 - atan(1/z) for the |y/x| > 1 lanes
	x0 = _mm_sub_pd(_mm_or_pd(x0, *(const __m128d*)piOver2), x1);

#if AE_SSE_VERSION >= 0x41
	x0 = _mm_blendv_pd(x1,x0,x5);
#else
	x0 = _mm_or_pd(_mm_andnot_pd(x5, x1), _mm_and_pd(x5, x0));
#endif

	// add the +-pi quadrant correction in lanes where x < 0
	x1 = _mm_add_pd(x0, esp4);
#if AE_SSE_VERSION >= 0x41
	x0 = _mm_blendv_pd(x1,x0,x_ge_0);
#else
	x0 = _mm_or_pd(_mm_andnot_pd(x_ge_0, x1),_mm_and_pd(x_ge_0, x0));
#endif

	return x0;
}

namespace AEMathSSE
{
	/// Per-lane floor() for four packed floats.
	AE_FORCEINLINE static __m128 AE_CALL QuadFloor(const __m128 &v)
	{
		#if AE_SSE_VERSION >= 0x41
			const __m128 result = _mm_floor_ps(v);
		#else
			static AE_ALIGN16 const AEUINT32 two23[4] = { 0x4B000000, 0x4B000000, 0x4B000000, 0x4B000000 }; // 2^23 as float

			const __m128 b = _mm_castsi128_ps( _mm_srli_epi32( _mm_slli_epi32( _mm_castps_si128(v), 1 ), 1 ) ); // fabs(v): shift the sign bit out and back in
			const __m128 d = _mm_sub_ps( _mm_add_ps( _mm_add_ps( _mm_sub_ps( v, *(__m128*)&two23 ), *(__m128*)&two23 ), *(__m128*)&two23 ), *(__m128*)&two23 ); // round to integer by pushing the value through 2^23
			const __m128 largeMaskE = _mm_cmpgt_ps( b, *(__m128*)&two23 ); // 0xffffffff where |v| > 2^23 (already integral, rounding trick invalid)
			const __m128 g = _mm_cmplt_ps( v, d ); // the round may have gone up instead of down; detect the off-by-one
			const __m128 h = _mm_cvtepi32_ps( _mm_castps_si128(g) ); // each mask lane is integer -1 or 0, so this yields -1.0f or 0.0f
			const __m128 t = _mm_add_ps( d, h ); // subtract 1 where the round went up

			// keep the original input in lanes too large to carry a fractional part
			const __m128 result = _mm_or_ps( _mm_and_ps(largeMaskE, v), _mm_andnot_ps(largeMaskE, t) );
		#endif

		return result;
	}

	/// Per-lane floor() for two packed doubles.
	AE_FORCEINLINE static __m128d AE_CALL TwoFloor(const __m128d &v)
	{
		#if AE_SSE_VERSION >= 0x41
			const __m128d result = _mm_floor_pd(v);
		#else
			static AE_ALIGN16 const AEUINT64 two52[2]    = { 0x4330000000000000ull, 0x4330000000000000ull }; // 2^52 as double
			static AE_ALIGN16 const AEUINT64 minusOne[2] = { 0xBFF0000000000000ull, 0xBFF0000000000000ull }; // -1.0

			const __m128d b = _mm_castsi128_pd( _mm_srli_epi64( _mm_slli_epi64( _mm_castpd_si128(v), 1 ), 1 ) ); // fabs(v): shift the sign bit out and back in
			const __m128d d = _mm_sub_pd( _mm_add_pd( _mm_add_pd( _mm_sub_pd( v, *(__m128d*)&two52 ), *(__m128d*)&two52 ), *(__m128d*)&two52 ), *(__m128d*)&two52 ); // round to integer by pushing the value through 2^52
			const __m128d largeMaskE = _mm_cmpgt_pd( b, *(__m128d*)&two52 ); // all-ones where |v| > 2^52 (already integral, rounding trick invalid)
			const __m128d g = _mm_cmplt_pd( v, d ); // the round may have gone up instead of down; detect the off-by-one
			// BUGFIX: the previous _mm_cvtepi32_pd(_mm_castpd_si128(g)) converted the two low
			// 32-bit integer lanes, which BOTH live inside g's lower 64-bit lane — so the upper
			// double lane received the lower lane's off-by-one flag. Masking -1.0 with g gives
			// the correct per-lane -1.0 / 0.0 correction.
			const __m128d h = _mm_and_pd( g, *(__m128d*)&minusOne );
			const __m128d t = _mm_add_pd( d, h ); // subtract 1 where the round went up

			// keep the original input in lanes too large to carry a fractional part
			const __m128d result = _mm_or_pd( _mm_and_pd(largeMaskE, v), _mm_andnot_pd(largeMaskE, t) );
		#endif
	
		return result;
	}

	/// Floating-point modulus a - floor(a/b)*b; lanes with b == 0 return +0.0.
	AE_FORCEINLINE static __m128 AE_CALL QuadMod(const __m128 &a, const __m128 &b)
	{
		const __m128 denomIsZero = _mm_cmpeq_ps(b,_mm_setzero_ps());
		const __m128 q = QuadFloor(_mm_div_ps(a,b));
		__m128 result = _mm_sub_ps(a, _mm_mul_ps(q,b));
		// force lanes with a zero denominator to +0.0 instead of Inf/NaN
		return _mm_andnot_ps(denomIsZero, result);
	}

	/// Double-precision version of QuadMod.
	AE_FORCEINLINE static __m128d AE_CALL TwoMod(const __m128d &a, const __m128d &b)
	{
		const __m128d denomIsZero = _mm_cmpeq_pd(b,_mm_setzero_pd());
		const __m128d q = TwoFloor(_mm_div_pd(a,b));
		__m128d result = _mm_sub_pd(a, _mm_mul_pd(q,b));
		// force lanes with a zero denominator to +0.0 instead of Inf/NaN
		return _mm_andnot_pd(denomIsZero, result);
	}

	/// Per-lane fabs(): clears the sign bit by shifting it out and back in (no memory constant).
	AE_FORCEINLINE static __m128 AE_CALL QuadFabs(const __m128 &v)
	{
		return _mm_castsi128_ps( _mm_srli_epi32( _mm_slli_epi32( _mm_castps_si128(v), 1 ), 1 ) ); 
	}

	/// Double-precision version of QuadFabs.
	AE_FORCEINLINE static __m128d AE_CALL TwoFabs(const __m128d &v)
	{
		return _mm_castsi128_pd( _mm_srli_epi64( _mm_slli_epi64( _mm_castpd_si128(v), 1 ), 1 ) ); 
	}

} // Namespace

#define AEMATH_SQRT_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Sqrt( const AEFLOAT32 fValue )
{
	// Scalar square root in the low lane; the upper lanes are irrelevant.
	return _mm_cvtss_f32( _mm_sqrt_ss( _mm_load_ss( &fValue ) ) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Sqrt( const AEDOUBLE64 dValue )
{
	/// Scalar double-precision square root of dValue.
	const __m128d s = _mm_sqrt_sd(_mm_setzero_pd(),_mm_load_sd(&dValue));
	// _mm_cvtsd_f64 extracts the low lane portably; the previous
	// s.m128d_f64[0] member access compiles only under MSVC.
	return _mm_cvtsd_f64( s );
}

AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::SqrtInverse( const AEFLOAT32 fValue )
{
	/// Returns 1/sqrt(fValue) at full precision (real divide, not _mm_rsqrt_ss).
	const __m128 sRes = _mm_sqrt_ss(_mm_load_ss( &fValue ) );
	// _mm_set_ss(1.0f) replaces the old int-array constant plus
	// *(__m128*) pointer cast, which violated strict aliasing.
	return _mm_cvtss_f32( _mm_div_ss( _mm_set_ss( 1.0f ), sRes ) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::SqrtInverse( const AEDOUBLE64 dValue )
{
	/// Returns 1/sqrt(dValue) at full precision.
	// _mm_set_sd replaces the old bit-pattern table, which was declared
	// AEUINT64 one[4] but given only two initializers, and was read through
	// an aliasing *(__m128d*) cast.
	const __m128d s = _mm_sqrt_sd(_mm_setzero_pd(),_mm_load_sd(&dValue));
	const __m128d q = _mm_div_sd(_mm_set_sd(1.0), s);
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( q );
}

#define AEMATH_ABS_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Abs( const AEFLOAT32 fValue )
{
	// |x| via the branch-free sign-bit clear in QuadFabs.
	return _mm_cvtss_f32( AEMathSSE::QuadFabs( _mm_load_ss( &fValue ) ) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Abs( const AEDOUBLE64 dValue )
{
	/// |x| via the branch-free sign-bit clear in TwoFabs.
	const __m128d abs = AEMathSSE::TwoFabs( _mm_load_sd(&dValue) );
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( abs );
}

AE_FORCEINLINE AEINT32 AE_CALL AEMath::Abs( const AEINT32 nValue )
{
	// Integer |x|. NOTE(review): like the scalar fallback, the SSSE3 PABSD
	// path returns INT_MIN unchanged for INT_MIN (no representable positive).
#if AE_SSE_VERSION >= 0x31
	return _mm_cvtsi128_si32( _mm_abs_epi32( _mm_cvtsi32_si128( nValue ) ) );
#else
	return ( nValue < 0 ? -nValue : nValue );
#endif
}

#define AEMATH_CEIL_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Ceil( const AEFLOAT32 fValue )
{
	/// Scalar ceiling of fValue.
	const __m128 v = _mm_load_ss( &fValue );

#if AE_SSE_VERSION >= 0x41
	const __m128 result = _mm_ceil_ss(v, v);
#else
	// ceil(x) == -floor(-x).  The previous fallback computed
	// floor(x) + 1, which is wrong for exact integers
	// (e.g. Ceil(2.0f) returned 3.0f).
	const __m128 zero = _mm_setzero_ps();
	const __m128 result = _mm_sub_ps( zero, AEMathSSE::QuadFloor( _mm_sub_ps( zero, v ) ) );
#endif
	
	return _mm_cvtss_f32(result);
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Ceil( const AEDOUBLE64 dValue )
{
	/// Scalar double-precision ceiling of dValue.
	const __m128d v = _mm_load_sd(&dValue);
	
	#if AE_SSE_VERSION >= 0x41
		const __m128d result = _mm_ceil_sd(v, v);
	#else
		// ceil(x) == -floor(-x).  The previous fallback computed
		// floor(x) + 1, which is wrong for exact integers.
		const __m128d zero = _mm_setzero_pd();
		const __m128d result = _mm_sub_pd( zero, AEMathSSE::TwoFloor( _mm_sub_pd( zero, v ) ) );
	#endif

	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64(result);
}

#define AEMATH_FLOOR_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Floor( const AEFLOAT32 fValue )
{
	// Scalar floor via the 4-wide SSE helper; only lane 0 is meaningful.
	return _mm_cvtss_f32( AEMathSSE::QuadFloor( _mm_load_ss(&fValue) ) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Floor( const AEDOUBLE64 dValue )
{
	/// Scalar double-precision floor via the 2-wide SSE helper.
	const __m128d result = AEMathSSE::TwoFloor( _mm_load_sd(&dValue) );
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64(result);
}

#define AEMATH_SIN_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Sin( const AEFLOAT32 fRadValue )
{
	// Scalar sine via the 4-wide approximation; only lane 0 is meaningful.
	return _mm_cvtss_f32( quadSin( _mm_load_ss(&fRadValue) ) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Sin( const AEDOUBLE64 dRadValue )
{
	/// Scalar sine via the 2-wide approximation; only lane 0 is meaningful.
	__m128d rr = _mm_load_sd(&dRadValue);
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( twoSin(rr) );
}

#define AEMATH_COS_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Cos( const AEFLOAT32 fRadValue )
{
	// Scalar cosine via the 4-wide approximation; only lane 0 is meaningful.
	return _mm_cvtss_f32( quadCos( _mm_load_ss(&fRadValue) ) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Cos( const AEDOUBLE64 dRadValue )
{
	/// Scalar cosine via the 2-wide approximation; only lane 0 is meaningful.
	__m128d rr = _mm_load_sd(&dRadValue);
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( twoCos(rr) );
}

#define AEMATH_ASIN_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Asin( const AEFLOAT32 fRadValue )
{
	// be generous about numbers outside range
	AEASSERT( Abs(fRadValue) < 1.001f ); // assert imported from default impl

	// Clamp into [-1, +1] before evaluating the approximation.
	__m128 clamped = _mm_load_ss(&fRadValue);
	clamped = _mm_max_ps(clamped, _mm_set1_ps(-1.0f));
	clamped = _mm_min_ps(clamped, _mm_set1_ps( 1.0f));

	return _mm_cvtss_f32( quadAsin(clamped) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Asin( const AEDOUBLE64 dRadValue )
{
	/// Scalar arcsine; input is clamped into [-1, +1] first.
	// be generous about numbers outside range
	AEASSERT( Abs(dRadValue) < 1.001 ); // assert imported from default impl

	// _mm_set1_pd replaces the old bit-pattern tables: 0xBFF0000000000000ull
	// does not fit in the signed AEINT64 they were declared as, and the
	// *(__m128d*) reads violated strict aliasing.
	__m128d rr = _mm_load_sd(&dRadValue);
	rr = _mm_max_pd(rr, _mm_set1_pd(-1.0));
	rr = _mm_min_pd(rr, _mm_set1_pd( 1.0));

	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( twoAsin(rr) );
}

#define AEMATH_ACOS_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Acos( const AEFLOAT32 fRadValue )
{
	// be generous about numbers outside range
	AEASSERT( Abs(fRadValue) < 1.001f ); // assert imported from default impl

	// Clamp into [-1, +1] before evaluating the approximation.
	__m128 clamped = _mm_load_ss(&fRadValue);
	clamped = _mm_max_ps(clamped, _mm_set1_ps(-1.0f));
	clamped = _mm_min_ps(clamped, _mm_set1_ps( 1.0f));

	return _mm_cvtss_f32( quadAcos(clamped) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Acos( const AEDOUBLE64 dRadValue )
{
	/// Scalar arccosine; input is clamped into [-1, +1] first.
	// be generous about numbers outside range
	AEASSERT( Abs(dRadValue) < 1.001 ); // assert imported from default impl

	// _mm_set1_pd replaces the old bit-pattern tables: 0xBFF0000000000000ull
	// does not fit in the signed AEINT64 they were declared as, and the
	// *(__m128d*) reads violated strict aliasing.
	__m128d rr = _mm_load_sd(&dRadValue);
	rr = _mm_max_pd(rr, _mm_set1_pd(-1.0));
	rr = _mm_min_pd(rr, _mm_set1_pd( 1.0));

	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( twoAcos(rr) );
}

#define AEMATH_ATAN2_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Atan2( const AEFLOAT32 fRadY, const AEFLOAT32 fRadX )
{
	// Scalar atan2 via the 4-wide approximation; only lane 0 is meaningful.
	const __m128 y = _mm_load_ss(&fRadY);
	const __m128 x = _mm_load_ss(&fRadX);
	return _mm_cvtss_f32( quadAtan2(y, x) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Atan2( const AEDOUBLE64 dRadY, const AEDOUBLE64 dRadX )
{
	/// Scalar atan2 via the 2-wide approximation; only lane 0 is meaningful.
	__m128d qy = _mm_load_sd(&dRadY);
	__m128d qx = _mm_load_sd(&dRadX);
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( twoAtan2(qy,qx) );
}

#define AEMATH_MOD_FUNCS
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Mod( const AEFLOAT32 fValue, const AEFLOAT32 fMod )
{
	// Scalar floored modulo; returns 0 when fMod == 0 (see QuadMod).
	const __m128 remainder = AEMathSSE::QuadMod( _mm_load_ss(&fValue), _mm_load_ss(&fMod) );
	return _mm_cvtss_f32(remainder);
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Mod( const AEDOUBLE64 dValue, const AEDOUBLE64 dMod )
{
	/// Scalar floored modulo; returns 0 when dMod == 0 (see TwoMod).
	const __m128d numer = _mm_load_sd(&dValue);
	const __m128d denom = _mm_load_sd(&dMod);
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( AEMathSSE::TwoMod(numer, denom) );
}

#define AEMATH_FLOATTOINT_FUNCS
AE_FORCEINLINE AEINT32 AE_CALL AEMath::Float2Int( const AEFLOAT32 fValue )
{ 
	// Truncating convert (round toward zero), matching C cast semantics.
	return _mm_cvttss_si32( _mm_load_ss( &fValue ) );
}

AE_FORCEINLINE AEINT32 AE_CALL AEMath::Float2Int( const AEDOUBLE64 dValue )
{ 
	// Truncating convert (round toward zero), matching C cast semantics.
	const __m128d v = _mm_load_sd( &dValue );
	return _mm_cvttsd_si32( v );
}

AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::HasSignBitSet( const AEFLOAT32 &fValue )
{
	// load_ss zeroes the upper lanes, so the movemask is nonzero exactly
	// when the value's sign bit is set (negatives and -0.0f).
	const __m128 v = _mm_load_ss( &fValue );
	return _mm_movemask_ps( v );
}

AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::HasSignBitSet( const AEDOUBLE64 &dValue )
{
	// load_sd zeroes the upper lane, so the movemask is nonzero exactly
	// when the value's sign bit is set (negatives and -0.0).
	const __m128d v = _mm_load_sd( &dValue );
	return _mm_movemask_pd( v );
}

/// Population count of a 32-bit value, selecting the best SIMD path at
/// compile time: hardware POPCNT, SSSE3 PSHUFB table lookup, or SSE2 SWAR.
AE_FORCEINLINE AEINT32 AE_CALL AEMath::CountBitsSet( AEUINT32 uiValue )
{
#if AE_SSE_VERSION >= 0x42
	// SSE4.2: single hardware population-count instruction.
	return  _mm_popcnt_u32( uiValue );
#elif AE_SSE_VERSION >= 0x31
	// SSSE3: PSHUFB used as a 16-entry lookup of per-nibble popcounts.
	const __m128i mask_lo = _mm_set1_epi8(0x0F);
	// bit counts for every 4-bit value 0..15
	const __m128i mask_popcnt = _mm_setr_epi8( 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 );

	__m128i cnt = _mm_cvtsi32_si128( uiValue );
	// split every byte into its low and high nibble
	__m128i lo = _mm_and_si128( cnt, mask_lo );
	__m128i hi = _mm_and_si128( _mm_srli_epi16( cnt, 4 ), mask_lo );
	// table lookup: per-nibble popcounts, then recombine per byte
	lo = _mm_shuffle_epi8( mask_popcnt, lo );
	hi = _mm_shuffle_epi8( mask_popcnt, hi );
	cnt = _mm_add_epi8( lo, hi );
	// Horizontally sum up byte counters
	__m128i sum = _mm_sad_epu8( cnt, _mm_setzero_si128() );
	// Horizontally add 32-bit accumulators
	sum = _mm_add_epi32( _mm_shuffle_epi32( sum, _MM_SHUFFLE( 1, 0, 3, 2 ) ), sum );
	return _mm_cvtsi128_si32( sum );
#else
	// SSE2: classic SWAR bit counting — pairs, then nibbles, then bytes.
	const __m128i mask1 = _mm_set1_epi8( 0x55 );
	const __m128i mask2 = _mm_set1_epi8( 0x33 );
	const __m128i mask4 = _mm_set1_epi8( 0x0F );

	__m128i cnt = _mm_cvtsi32_si128( uiValue );
	cnt = _mm_add_epi8( _mm_and_si128( cnt, mask1 ), _mm_and_si128( _mm_srli_epi16( cnt, 1 ), mask1 ) );
	cnt = _mm_add_epi8( _mm_and_si128( cnt, mask2 ), _mm_and_si128( _mm_srli_epi16( cnt, 2 ), mask2 ) );
	cnt = _mm_and_si128( _mm_add_epi8( cnt, _mm_srli_epi16( cnt, 4 ) ), mask4 );
	// Horizontally sum up byte counters
	__m128i sum = _mm_sad_epu8( cnt, _mm_setzero_si128() );
	// Horizontally add 32-bit accumulators
	sum = _mm_add_epi32( _mm_shuffle_epi32( sum, _MM_SHUFFLE( 1, 0, 3, 2 ) ), sum );
	return _mm_cvtsi128_si32( sum );
#endif
}

AE_FORCEINLINE AEQuadFloat32 AE_CALL AEMath::QuadReciprocal( const AEQuadFloat32 &qfValue )
{
	// Fast approximate 1/x (_mm_rcp_ps) refined by one Newton-Raphson
	// step: r' = r * (2 - x*r).
	const AEQuadFloat32 estimate = _mm_rcp_ps( qfValue );
	const AEQuadFloat32 correction = _mm_sub_ps( _mm_set1_ps( 2.0f ), _mm_mul_ps( qfValue, estimate ) );
	return _mm_mul_ps( estimate, correction );
}

AE_FORCEINLINE AEQuadDouble64 AE_CALL AEMath::QuadReciprocal( const AEQuadDouble64 &qdValue )
{
#if AE_SSE_VERSION >= 0x50
	return _mm256_div_pd(_mm256_set1_pd(1.0), qdValue);
#else
	// Round-trip through single precision: pack the four doubles into one
	// __m128, refine the reciprocal estimate, unpack back to doubles.
	// Accuracy is therefore limited to ~float precision.
	const __m128 loPair = _mm_cvtpd_ps(qdValue.xy);
	const __m128 hiPair = _mm_cvtpd_ps(qdValue.zw);
	const __m128 packed = _mm_shuffle_ps(loPair, hiPair, _MM_SHUFFLE(1,0,1,0));
	// r' = r * (2 - x*r), one Newton-Raphson step
	const __m128 estimate = _mm_rcp_ps(packed);
	const __m128 refined = _mm_mul_ps(estimate, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(packed, estimate)));
	AEQuadDouble64 result;
	result.xy = _mm_cvtps_pd(refined);
	result.zw = _mm_cvtps_pd(_mm_movehl_ps(refined, refined));
	return result;
#endif
}

AE_FORCEINLINE AEQuadFloat32 AE_CALL AEMath::QuadReciprocalSquareRoot( const AEQuadFloat32 &qfValue )
{
	// Fast approximate 1/sqrt(x) refined by one Newton-Raphson step:
	// e' = 0.5*e * (3 - x*e*e).
	const AEQuadFloat32 estimate = _mm_rsqrt_ps(qfValue);
	const AEQuadFloat32 xee = _mm_mul_ps(_mm_mul_ps(qfValue, estimate), estimate);
	const AEQuadFloat32 halfEst = _mm_mul_ps(_mm_set1_ps(0.5f), estimate);
	return _mm_mul_ps(halfEst, _mm_sub_ps(_mm_set1_ps(3.0f), xee));
}

AE_FORCEINLINE AEQuadDouble64 AE_CALL AEMath::QuadReciprocalSquareRoot( const AEQuadDouble64 &qdValue )
{
	// FIX: the guard previously read HK_SSE_VERSION (a leftover foreign
	// prefix, never defined in this SDK), so the AVX path was unreachable.
#if AE_SSE_VERSION >= 0x50
	return _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_sqrt_pd(qdValue));
#else
	// Round-trip through single precision (accuracy limited to ~float).
	const __m128 xy = _mm_cvtpd_ps(qdValue.xy);
	const __m128 zw = _mm_cvtpd_ps(qdValue.zw);
	const __m128 xyzw = _mm_shuffle_ps(xy,zw,_MM_SHUFFLE(1,0,1,0));
	const __m128 e = _mm_rsqrt_ps(xyzw);
	// One Newton-Raphson refinement iteration: e' = 0.5*e * (3 - x*e*e)
	const __m128 he = _mm_mul_ps(_mm_set1_ps(0.5f),e);
	const __m128 ree = _mm_mul_ps(_mm_mul_ps(xyzw,e),e);
	const __m128 re = _mm_mul_ps(he, _mm_sub_ps(_mm_set1_ps(3.0f), ree) );
	AEQuadDouble64 result;
	result.xy = _mm_cvtps_pd(re);
	result.zw = _mm_cvtps_pd(_mm_shuffle_ps(re,re,_MM_SHUFFLE(1,0,3,2)));
	return result;
#endif
}

AE_FORCEINLINE AEQuadFloat32 AE_CALL AEMath::QuadReciprocalTwoIter( const AEQuadFloat32 &qfValue )
{
	// Approximate 1/x refined by two Newton-Raphson steps: r' = r*(2 - x*r).
	const __m128 two = _mm_set1_ps(2.0f);
	__m128 est = _mm_rcp_ps(qfValue);
	est = _mm_mul_ps(est, _mm_sub_ps(two, _mm_mul_ps(qfValue, est)));
	est = _mm_mul_ps(est, _mm_sub_ps(two, _mm_mul_ps(qfValue, est)));
	return est;
}

AE_FORCEINLINE AEQuadDouble64 AE_CALL AEMath::QuadReciprocalTwoIter( const AEQuadDouble64 &qdValue )
{
#if AE_SSE_VERSION >= 0x50
	return _mm256_div_pd(_mm256_set1_pd(1.0), qdValue);
#else
	// Pack the four doubles into one __m128 (accuracy limited to ~float),
	// then refine the reciprocal estimate twice: r' = r*(2 - x*r).
	const __m128 two = _mm_set1_ps(2.0f);
	const __m128 packed = _mm_shuffle_ps(_mm_cvtpd_ps(qdValue.xy), _mm_cvtpd_ps(qdValue.zw), _MM_SHUFFLE(1,0,1,0));
	__m128 est = _mm_rcp_ps(packed);
	est = _mm_mul_ps(est, _mm_sub_ps(two, _mm_mul_ps(packed, est)));
	est = _mm_mul_ps(est, _mm_sub_ps(two, _mm_mul_ps(packed, est)));
	AEQuadDouble64 result;
	result.xy = _mm_cvtps_pd(est);
	result.zw = _mm_cvtps_pd(_mm_movehl_ps(est, est));
	return result;
#endif
}

AE_FORCEINLINE AEQuadFloat32 AE_CALL AEMath::QuadReciprocalSquareRootTwoIter( const AEQuadFloat32 &qfValue )
{
	// Approximate 1/sqrt(x) refined by two Newton-Raphson steps:
	// e' = 0.5*e * (3 - x*e*e).
	const __m128 half = _mm_set1_ps(0.5f);
	const __m128 three = _mm_set1_ps(3.0f);
	__m128 est = _mm_rsqrt_ps(qfValue);
	est = _mm_mul_ps(_mm_mul_ps(half, est), _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(qfValue, est), est)));
	est = _mm_mul_ps(_mm_mul_ps(half, est), _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(qfValue, est), est)));
	return est;
}

AE_FORCEINLINE AEQuadDouble64 AE_CALL AEMath::QuadReciprocalSquareRootTwoIter( const AEQuadDouble64 &qdValue )
{
#if AE_SSE_VERSION >= 0x50
	// FIX: the AVX path referenced an undeclared identifier 'r'; the
	// parameter is qdValue, so this branch did not compile when enabled.
	return _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_sqrt_pd(qdValue));
#else
	// Round-trip through single precision (accuracy limited to ~float).
	const __m128 half = _mm_set1_ps(0.5f);
	const __m128 three = _mm_set1_ps(3.0f);
	const __m128 xy = _mm_cvtpd_ps(qdValue.xy);
	const __m128 zw = _mm_cvtpd_ps(qdValue.zw);
	const __m128 xyzw = _mm_shuffle_ps(xy,zw,_MM_SHUFFLE(1,0,1,0));
	const __m128 e = _mm_rsqrt_ps(xyzw);
	// One Newton-Raphson refinement iteration: e' = 0.5*e * (3 - x*e*e)
	const __m128 he = _mm_mul_ps(half,e);
	const __m128 ree = _mm_mul_ps(_mm_mul_ps(xyzw,e),e);
	const __m128 e1 = _mm_mul_ps(he, _mm_sub_ps(three, ree) );
	//Another round
	const __m128 he2 = _mm_mul_ps(half,e1);
	const __m128 ree2 = _mm_mul_ps(_mm_mul_ps(xyzw,e1),e1);
	const __m128 re = _mm_mul_ps(he2, _mm_sub_ps(three, ree2) );
	AEQuadDouble64 result;
	result.xy = _mm_cvtps_pd(re);
	result.zw = _mm_cvtps_pd(_mm_shuffle_ps(re,re,_MM_SHUFFLE(1,0,3,2)));
	return result;
#endif
}

#define AEMATH_BITSFUNCS
AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::SignBitSet( const AEFLOAT32 &fValue )
{
	// load_ss zeroes the upper lanes, so the movemask is nonzero exactly
	// when the value's sign bit is set (negatives and -0.0f).
	const __m128 v = _mm_load_ss( &fValue );
	return _mm_movemask_ps( v );
}

AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::SignBitSet( const AEDOUBLE64 &dValue )
{
	// load_sd zeroes the upper lane, so the movemask is nonzero exactly
	// when the value's sign bit is set (negatives and -0.0).
	const __m128d v = _mm_load_sd( &dValue );
	return _mm_movemask_pd( v );
}

#define AEMATH_CLAMPFUNCS
template <>
AE_FORCEINLINE AEFLOAT32 AE_CALL AEMath::Clamp<AEFLOAT32, AEFLOAT32, AEFLOAT32>( AEFLOAT32 fValue, AEFLOAT32 fMin, AEFLOAT32 fMax )
{
	// Branch-free scalar clamp: max with the lower bound, then min with the upper.
	const __m128 bounded = _mm_min_ss( _mm_max_ss( _mm_load_ss( &fValue ), _mm_load_ss( &fMin ) ), _mm_load_ss( &fMax ) );
	return _mm_cvtss_f32( bounded );
}

template <>
AE_FORCEINLINE AEDOUBLE64 AE_CALL AEMath::Clamp<AEDOUBLE64, AEDOUBLE64, AEDOUBLE64>( AEDOUBLE64 dValue, AEDOUBLE64 dMin, AEDOUBLE64 dMax )
{
	/// Branch-free scalar clamp of dValue into [dMin, dMax].
	const __m128d lo = _mm_max_sd( _mm_load_sd( &dValue ), _mm_load_sd( &dMin ) );
	const __m128d hi = _mm_min_sd( lo, _mm_load_sd( &dMax ) );
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( hi );
}

#if AE_SSE_VERSION >= 0x41
	template <>
	AE_FORCEINLINE AEINT32 AE_CALL AEMath::Clamp<AEINT32>( AEINT32 nValue, AEINT32 nMin, AEINT32 nMax )
	{
		/// Branch-free clamp of a signed 32-bit value into [nMin, nMax].
		const __m128i lo = _mm_max_epi32( _mm_cvtsi32_si128( nValue ), _mm_cvtsi32_si128( nMin ) );
		const __m128i hi = _mm_min_epi32( lo, _mm_cvtsi32_si128( nMax ) );
		return _mm_cvtsi128_si32( hi );
	}
	
	template <>
	AE_FORCEINLINE AEUINT32 AE_CALL AEMath::Clamp<AEUINT32>( AEUINT32 uiValue, AEUINT32 uiMin, AEUINT32 uiMax )
	{
		/// Branch-free clamp of an unsigned 32-bit value into [uiMin, uiMax].
		// FIX: this previously used the signed _mm_max_epi32/_mm_min_epi32,
		// which order values >= 0x80000000 incorrectly for unsigned inputs;
		// the epu32 forms compare as unsigned.
		const __m128i lo = _mm_max_epu32( _mm_cvtsi32_si128( uiValue ), _mm_cvtsi32_si128( uiMin ) );
		const __m128i hi = _mm_min_epu32( lo, _mm_cvtsi32_si128( uiMax ) );
		return _mm_cvtsi128_si32( hi );
	}
#endif

#define AEMATH_FLTEQFUNC
AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::IsFloatEqual( AEFLOAT32 fValue1, AEFLOAT32 fValue2 )
{
	// True when |a - b| <= 1e-5, using an unordered scalar compare.
	const __m128 diff = _mm_sub_ss( _mm_load_ss( &fValue1 ), _mm_load_ss( &fValue2 ) );
	const __m128 absDiff = AEMathSSE::QuadFabs( diff );
	return AEBOOL32( _mm_ucomile_ss( absDiff, _mm_set1_ps( 1e-5f ) ) );
}

AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::IsFloatEqual( AEFLOAT32 fValue1, AEFLOAT32 fValue2, AEFLOAT32 fTolerance )
{
	// True when |a - b| <= fTolerance, using an unordered scalar compare.
	const __m128 diff = _mm_sub_ss( _mm_load_ss( &fValue1 ), _mm_load_ss( &fValue2 ) );
	const __m128 absDiff = AEMathSSE::QuadFabs( diff );
	return AEBOOL32( _mm_ucomile_ss( absDiff, _mm_load_ss( &fTolerance ) ) );
}

AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::IsFloatEqual( AEDOUBLE64 dValue1, AEDOUBLE64 dValue2 )
{
	// True when |a - b| <= 1e-5, using an unordered scalar compare.
	const __m128d diff = _mm_sub_sd( _mm_load_sd( &dValue1 ), _mm_load_sd( &dValue2 ) );
	const __m128d absDiff = AEMathSSE::TwoFabs( diff );
	return AEBOOL32( _mm_ucomile_sd( absDiff, _mm_set1_pd( 1e-5 ) ) );
}

AE_FORCEINLINE AEBOOL32 AE_CALL AEMath::IsFloatEqual( AEDOUBLE64 dValue1, AEDOUBLE64 dValue2, AEDOUBLE64 dTolerance )
{
	// True when |a - b| <= dTolerance, using an unordered scalar compare.
	const __m128d diff = _mm_sub_sd( _mm_load_sd( &dValue1 ), _mm_load_sd( &dValue2 ) );
	const __m128d absDiff = AEMathSSE::TwoFabs( diff );
	return AEBOOL32( _mm_ucomile_sd( absDiff, _mm_load_sd( &dTolerance ) ) );
}
#endif

/// In-place natural-log approximation: x = ln(x) for all four lanes.
/// Splits each float into exponent e and mantissa m, evaluates a rational
/// polynomial on s = 2*(m-1)/(m+1), then adds e*ln(2).
/// NOTE(review): inputs are clamped to the smallest normalized positive
/// float first, so zero/negative/denormal inputs do NOT yield -inf/NaN
/// the way ::logf would — confirm callers expect this.
AE_FORCEINLINE void AE_CALL QuadLogHelper( __m128 &x )
{
	static AE_ALIGN16 const unsigned int minNormalizedPosNumber[4]	= {  0x00800000,  0x00800000,  0x00800000,  0x00800000 }; // 1.1754943508222875E-38 (smallest normalized positive number)
	static AE_ALIGN16 const unsigned int one[4]					= {  0x3F800000,  0x3F800000,  0x3F800000,  0x3F800000 }; // 1.0
	static AE_ALIGN16 const			 int invMantissaMask[4]		= { ~0x7f800000, ~0x7f800000, ~0x7f800000, ~0x7f800000 };
	static AE_ALIGN16 const unsigned int int0x7f[4]				= {  0x0000007f,  0x0000007f,  0x0000007f,  0x0000007f };
	static AE_ALIGN16 const unsigned int logP0[4]				= {  0xBF4A21EF,  0xBF4A21EF,  0xBF4A21EF,  0xBF4A21EF }; // -7.89580278884799154124e-1
	static AE_ALIGN16 const unsigned int logQ0[4]					= {  0xC20EB06A,  0xC20EB06A,  0xC20EB06A,  0xC20EB06A }; // -3.56722798256324312549e1
	static AE_ALIGN16 const unsigned int logP1[4]					= {  0x418317E4,  0x418317E4,  0x418317E4,  0x418317E4 }; // 1.63866645699558079767e1
	static AE_ALIGN16 const unsigned int logQ1[4]					= {  0x439C0C01,  0x439C0C01,  0x439C0C01,  0x439C0C01 }; // 3.12093766372244180303e2
	static AE_ALIGN16 const unsigned int logP2[4]					= {  0xC2804831,  0xC2804831,  0xC2804831,  0xC2804831 }; // -6.41409952958715622951e1
	static AE_ALIGN16 const unsigned int logQ2[4]					= {  0xC4406C49,  0xC4406C49,  0xC4406C49,  0xC4406C49 }; // -7.69691943550460008604e2
	static AE_ALIGN16 const unsigned int logC0[4]					= {  0x3F317218,  0x3F317218,  0x3F317218,  0x3F317218 }; // 0.693147180559945

	// cut off denormalized stuff
	x = _mm_max_ps(x, *(__m128*)minNormalizedPosNumber);

	// extract the biased exponent from the IEEE-754 bit pattern
	__m128i emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);

	// keep only the fractional part
	x = _mm_or_ps(_mm_and_ps(x, *(__m128*)invMantissaMask), *(__m128*)one);

	// s = 2*(m-1)/(m+1); the _mm_rcp_ps here limits accuracy to ~12 bits
	x = _mm_mul_ps(_mm_sub_ps(x, *(__m128*)one), _mm_rcp_ps(_mm_add_ps(x, *(__m128*)one)));
	x = _mm_add_ps(x, x);
	__m128 bs = x;

	x = _mm_mul_ps(x, x);

	// evaluate numerator P(x) and denominator Q(x) in parallel (Horner form)
	__m128 x4 = _mm_mul_ps(*(__m128*)logP0, x);
	__m128 x6 = _mm_mul_ps(*(__m128*)logQ0, x);

	x4 = _mm_add_ps(x4, *(__m128*)logP1);
	x6 = _mm_add_ps(x6, *(__m128*)logQ1);

	x4 = _mm_mul_ps(x4, x);
	x6 = _mm_mul_ps(x6, x);

	x4 = _mm_add_ps(x4, *(__m128*)logP2);
	x6 = _mm_add_ps(x6, *(__m128*)logQ2);

	// rational correction term: s^2 * P(s^2) / Q(s^2), scaled by s
	x = _mm_mul_ps(x, x4);
	x6 = _mm_rcp_ps(x6);

	x = _mm_mul_ps(x, x6);
	x = _mm_mul_ps(x, bs);

	// unbias the exponent and scale by ln(2)
	emm0 = _mm_sub_epi32(emm0, *(__m128i*)int0x7f);
	__m128 x1 = _mm_mul_ps(_mm_cvtepi32_ps(emm0), *(__m128*)logC0);

	x = _mm_add_ps(x, bs);
	x = _mm_add_ps(x, x1);
}

/// Average absolute error 0.000046
/// Max absolute error 169.777725 for r=1
/// Max absolute error 0.001070 elsewhere
/// About 3x faster than ::logf for 4 simultaneous values
AE_FORCEINLINE AEQuadFloat32 AE_CALL QuadLog( const AEQuadFloat32 &r )
{
	// The in-place helper does all the work on a mutable copy.
	__m128 result = r;
	QuadLogHelper( result );
	return result;
}

AE_FORCEINLINE AEQuadDouble64 AE_CALL QuadLog( const AEQuadDouble64 &r )
{
	// Convert to single precision, run the 4-wide float approximation,
	// convert back: accuracy is therefore limited to ~float precision.
#if AE_SSE_VERSION >= 0x50
	__m128 packed = _mm256_cvtpd_ps(r);
#else
	const __m128 loPair = _mm_cvtpd_ps(r.xy);
	const __m128 hiPair = _mm_cvtpd_ps(r.zw);
	__m128 packed = _mm_shuffle_ps( loPair, hiPair, _MM_SHUFFLE( 1, 0, 1, 0 ) );
#endif
	QuadLogHelper(packed);

#if AE_SSE_VERSION >= 0x50
	return _mm256_cvtps_pd(packed);
#else
	AEQuadDouble64 result;
	result.xy = _mm_cvtps_pd(packed);
	result.zw = _mm_cvtps_pd(_mm_movehl_ps(packed, packed));
	return result;
#endif
}

AE_FORCEINLINE AEFLOAT32 AE_CALL LogApproximation( const AEFLOAT32 &r )
{
	// Broadcast the scalar, run the 4-wide approximation, take lane 0.
	const AEQuadFloat32 broadcast = _mm_load1_ps(&r);
	return _mm_cvtss_f32( QuadLog(broadcast) );
}

AE_FORCEINLINE AEDOUBLE64 AE_CALL LogApproximation( const AEDOUBLE64 &r )
{
	/// Scalar log(r) via the 4-wide approximation; accuracy is limited to
	/// ~float precision (see QuadLog).
#if AE_SSE_VERSION >= 0x50
	AEQuadDouble64 q = _mm256_set1_pd(r);
	// FIX: this called the undeclared 'quadLog' (case typo); the overload
	// defined above is QuadLog.
	AEQuadDouble64 l = QuadLog(q);
	// Portable low-lane extraction (was MSVC-only .m256d_f64[0]).
	return _mm_cvtsd_f64( _mm256_castpd256_pd128( l ) );
#else
	AEQuadDouble64 q;
	q.xy = _mm_load1_pd(&r);
	q.zw = _mm_load1_pd(&r);
	AEQuadDouble64 l = QuadLog(q);
	// Portable low-lane extraction (was MSVC-only .m128d_f64[0]).
	return _mm_cvtsd_f64( l.xy );
#endif
}


// _AEMATH_SSE_INL_