#ifndef __GLF_CORE_INTRINSIC_SSE_H_INCLUDED__
#define __GLF_CORE_INTRINSIC_SSE_H_INCLUDED__

#include <intrin.h>
#include <mmintrin.h>
#include <xmmintrin.h>

// Thin value wrapper around a 128-bit SSE register holding four packed
// single-precision floats.  Converts implicitly to/from __m128 so it can
// be passed straight to the _mm_* intrinsics.
struct f32x4
{
public:
	// Intentionally leaves Vec uninitialized: mirrors raw __m128
	// semantics and avoids a redundant store when the value is
	// overwritten immediately after construction.
	f32x4()
	{
	}

	// Implicit by design: lets intrinsic results flow directly into f32x4.
	f32x4(const __m128& vec) : Vec(vec) 
	{
	}

	// Per-lane access; index 0 is the lowest lane.  The original used
	// Vec.m128_f32[i], which is an MSVC-only union member; viewing the
	// register as an array of floats is the portable equivalent
	// (gcc/clang define __m128 with the may_alias attribute).
	float& operator[](int i) 
	{ 
		return reinterpret_cast<float*>(&Vec)[i];
	}
	
	const float& operator[](int i) const 
	{ 
		return reinterpret_cast<const float*>(&Vec)[i];
	}
	
	// Implicit conversion so a f32x4 can be handed to _mm_* intrinsics.
	operator __m128() const 
	{ 
		return Vec; 
	}

	f32x4& operator=(const __m128& vec) 
	{ 
		Vec = vec; 
		return *this; 
	}

protected:
	__m128 Vec;
};


namespace glf
{
namespace simd
{

inline
f32x4 add(const f32x4& a, const f32x4& b)
{
	// Component-wise single-precision addition.
	const __m128 sum = _mm_add_ps(a, b);
	return f32x4(sum);
}

inline
f32x4 sub(const f32x4& a, const f32x4& b)
{
	// Component-wise subtraction: a - b per lane.
	const __m128 difference = _mm_sub_ps(a, b);
	return f32x4(difference);
}

inline
f32x4 div(const f32x4& a, const f32x4& b)
{
	// Component-wise division: a / b per lane.
	const __m128 quotient = _mm_div_ps(a, b);
	return f32x4(quotient);
}

inline
f32x4 mul(const f32x4& a, const f32x4& b)
{
	// Component-wise multiplication.
	const __m128 product = _mm_mul_ps(a, b);
	return f32x4(product);
}

inline
f32x4 mul(const f32x4& a, float b)
{
	// Scale every lane of a by the scalar b (broadcast then multiply).
	const __m128 splat = _mm_set_ps1(b);
	return _mm_mul_ps(a, splat);
}

inline
f32x4 mul(float a, const f32x4& b)
{
	// Scale every lane of b by the scalar a (broadcast then multiply).
	const __m128 splat = _mm_set_ps1(a);
	return _mm_mul_ps(splat, b);
}

inline
f32x4 muladd(const f32x4& a, const f32x4& b, const f32x4& c)
{
	// Returns a + b * c per lane (separate mul and add, not a fused FMA).
	const __m128 product = _mm_mul_ps(b, c);
	return _mm_add_ps(a, product);
}

inline
f32x4 muladd(const f32x4& a, float b, const f32x4& c)
{
	// Returns a + b * c per lane, with the scalar b broadcast first.
	const __m128 splat = _mm_set_ps1(b);
	const __m128 product = _mm_mul_ps(splat, c);
	return _mm_add_ps(a, product);
}

inline
f32x4 muladd(const f32x4& a, const f32x4& b, float c)
{
	// Returns a + b * c per lane, with the scalar c broadcast first.
	const __m128 splat = _mm_set_ps1(c);
	const __m128 product = _mm_mul_ps(b, splat);
	return _mm_add_ps(a, product);
}

template <int srcmask, int destmask>
inline
f32x4 dotproduct(const f32x4& a, const f32x4& b)
{
	// Scalar fallback for _mm_dp_ps: with the MS compiler in release
	// builds the intrinsic was miscompiled (an aligned load was emitted
	// for an unaligned address), so the SSE4.1 path is avoided on purpose.
	// srcmask: bit i selects whether lane i participates in the sum.
	// destmask: bit i selects whether lane i of the result receives the
	// sum (non-selected lanes are set to 0).
	const f32x4 products = mul(a, b);
	float sum = 0.0f;
	if (srcmask & 0x1)
		sum += products[0];
	if (srcmask & 0x2)
		sum += products[1];
	if (srcmask & 0x4)
		sum += products[2];
	if (srcmask & 0x8)
		sum += products[3];
	f32x4 result;
	result[0] = (destmask & 0x1) ? sum : 0.0f;
	result[1] = (destmask & 0x2) ? sum : 0.0f;
	result[2] = (destmask & 0x4) ? sum : 0.0f;
	result[3] = (destmask & 0x8) ? sum : 0.0f;
	return result;
}

template <int srcmask>
inline
void dotproduct(const f32x4& a, const f32x4& b, float& out)
{
	// Compiler issue. In release with M$ compiler we have run time issue. Compiler use aligned load while
	// trying to load an unaligned address.
	//const int mask = ((srcmask & 0x7) << 4) | 0x1;
	//out = _mm_dp_ps(a, b, mask).m128_f32[0];
	f32x4 to_sum = mul(a, b);
	out = 0.0f;
	out += srcmask & 0x1 ? to_sum[0] : 0.0f;
	out += srcmask & 0x2 ? to_sum[1] : 0.0f;
	out += srcmask & 0x4 ? to_sum[2] : 0.0f;
	out += srcmask & 0x8 ? to_sum[3] : 0.0f;
}

inline
f32x4 sqrt(const f32x4& a)
{
	// Component-wise square root.
	const __m128 roots = _mm_sqrt_ps(a);
	return f32x4(roots);
}

inline 
f32x4 exor(const f32x4& a, const f32x4& b)
{
	// Bitwise XOR of the raw 128-bit register contents.
	const __m128 bits = _mm_xor_ps(a, b);
	return f32x4(bits);
}

inline
void load(float v, f32x4& out)
{
	out = _mm_set_ps1(v);
}

inline
f32x4 loadr(float x, float y, float z, float w)
{
	// Reversed-order set: _mm_setr_ps stores x into the lowest lane
	// (element 0) and w into the highest — the opposite of load()/_mm_set_ps.
	return _mm_setr_ps(x, y, z, w);
}

inline
f32x4 load(float x, float y, float z, float w)
{
	// _mm_set_ps stores w into the lowest lane (element 0) and x into
	// the highest.
	return f32x4(_mm_set_ps(x, y, z, w));
}

inline
void loadu(const float* v, f32x4& out)
{
	out = _mm_loadu_ps(v);
}

inline
void loada(const float* v, f32x4& out)
{
	out = _mm_load_ps(v);
}

inline
void store(const f32x4& v, float& out)
{
	_mm_store_ss(&out, v);
}

inline
void storeu(const f32x4& v, float* out)
{
	_mm_storeu_ps(out, v);
}

inline
void storea(const f32x4& v, float* out)
{
	_mm_store_ps(out, v);
}


} // namespace simd
} // namespace glf


#endif //__GLF_CORE_INTRINSIC_SSE_H_INCLUDED__