#ifndef __GLF_CORE_INTRINSIC_NEON_H_INCLUDED__
#define __GLF_CORE_INTRINSIC_NEON_H_INCLUDED__

#include <arm_neon.h>

// AArch64 arm_neon.h already provides a native vdivq_f32; only supply the
// reciprocal-based emulation on targets that lack it (e.g. 32-bit ARM).
#if !defined(__aarch64__)
inline 
float32x4_t 
vdivq_f32( float32x4_t num, float32x4_t den )
{
	// Initial reciprocal estimate of 1/den (roughly 8 bits of precision).
	float32x4_t reciprocal = vrecpeq_f32( den );
	// One Newton-Raphson refinement step: r' = r * (2 - den * r),
	// where vrecpsq_f32(den, r) computes (2 - den * r).
	// BUG FIX: the refinement must multiply by the current estimate, not by
	// the denominator — the old code computed r * (2 - den*r) * den / r,
	// which is not an approximation of 1/den at all.
	reciprocal = vmulq_f32( vrecpsq_f32( den, reciprocal ), reciprocal );
	// num / den ~= num * (1/den). A second vrecpsq step would give
	// near-full single precision if ever needed.
	return vmulq_f32( num, reciprocal );
}
#endif

// Short aliases for the 128-bit NEON vector types.
using s32x4 = int32x4_t;
using u32x4 = uint32x4_t;
using f32x4 = float32x4_t;

namespace glf
{
namespace simd
{

/// Lane-wise single-precision addition: returns { a0+b0, a1+b1, a2+b2, a3+b3 }.
inline
f32x4 add(const f32x4& a, const f32x4& b)
{
	const f32x4 sum = vaddq_f32(a, b);
	return sum;
}

/// Lane-wise single-precision subtraction: returns { a0-b0, a1-b1, a2-b2, a3-b3 }.
inline
f32x4 sub(const f32x4& a, const f32x4& b)
{
	const f32x4 difference = vsubq_f32(a, b);
	return difference;
}

/// Lane-wise single-precision division a / b.
/// Delegates to vdivq_f32 (native on AArch64, emulated elsewhere).
inline
f32x4 div(const f32x4& a, const f32x4& b)
{
	const f32x4 quotient = vdivq_f32(a, b);
	return quotient;
}

/// Lane-wise single-precision multiplication: returns { a0*b0, a1*b1, a2*b2, a3*b3 }.
inline
f32x4 mul(const f32x4& a, const f32x4& b)
{
	const f32x4 product = vmulq_f32(a, b);
	return product;
}

/// Multiply every lane of a by the scalar b.
inline
f32x4 mul(const f32x4& a, float b)
{
	const f32x4 scaled = vmulq_n_f32(a, b);
	return scaled;
}

/// Multiply every lane of b by the scalar a (commuted overload).
inline
f32x4 mul(float a, const f32x4& b)
{
	const f32x4 scaled = vmulq_n_f32(b, a);
	return scaled;
}

/// Lane-wise multiply-add: a + b * c.
/// Uses the fused vfmaq_f32 where available; the Apple build falls back to a
/// separate multiply and add (note: the fused form rounds once, the fallback
/// rounds twice, so results can differ in the last ulp between builds).
inline
f32x4 muladd(const f32x4& a, const f32x4& b, const f32x4& c)
{
#ifdef __APPLE__
	return add(a, mul(b, c));
#else
	return vfmaq_f32(a, b, c);
#endif
}

/// Lane-wise multiply-add with a scalar factor: a + c * b.
/// @param a addend vector
/// @param b scalar multiplier
/// @param c vector multiplicand
inline
f32x4 muladd(const f32x4& a, float b, const f32x4& c)
{
#ifndef __APPLE__
	// BUG FIX: vfmaq_f32 takes three float32x4_t operands; passing the float
	// b did not compile. The vector-by-scalar fused form is
	// vfmaq_n_f32(a, c, b) == a + c * b.
	return vfmaq_n_f32(a, c, b);
#else
	return add(a, mul(c, b));
#endif
}

/// Lane-wise multiply-add with a scalar factor: a + b * c.
/// @param a addend vector
/// @param b vector multiplicand
/// @param c scalar multiplier
inline
f32x4 muladd(const f32x4& a, const f32x4& b, float c)
{
#ifndef __APPLE__
	// BUG FIX: vfmaq_f32 takes three float32x4_t operands; passing the float
	// c did not compile. The vector-by-scalar fused form is
	// vfmaq_n_f32(a, b, c) == a + b * c.
	return vfmaq_n_f32(a, b, c);
#else
	return add(a, mul(b, c));
#endif
}

/// Masked dot product, SSE4 dpps-style.
/// Bit i of srcmask selects whether lane i of a*b contributes to the sum;
/// bit i of destmask selects whether lane i of the result receives the sum
/// (unselected result lanes are 0).
template <int srcmask, int destmask>
inline
f32x4 dotproduct(const f32x4& a, const f32x4& b)
{
	const f32x4 to_sum = mul(a, b);
	float sum = 0.0f;
	// vgetq_lane_f32 instead of the GCC/clang-only operator[] extension.
	if (srcmask & 0x1) sum += vgetq_lane_f32(to_sum, 0);
	if (srcmask & 0x2) sum += vgetq_lane_f32(to_sum, 1);
	if (srcmask & 0x4) sum += vgetq_lane_f32(to_sum, 2);
	if (srcmask & 0x8) sum += vgetq_lane_f32(to_sum, 3);
	// BUG FIX: start from a zeroed vector. The old code fed an uninitialized
	// f32x4 into the first vsetq_lane_f32, reading indeterminate data (UB).
	f32x4 out = vdupq_n_f32(0.0f);
	if (destmask & 0x1) out = vsetq_lane_f32(sum, out, 0);
	if (destmask & 0x2) out = vsetq_lane_f32(sum, out, 1);
	if (destmask & 0x4) out = vsetq_lane_f32(sum, out, 2);
	if (destmask & 0x8) out = vsetq_lane_f32(sum, out, 3);
	return out;
}

/// Masked dot product with a scalar destination.
/// Bit i of srcmask selects whether lane i of a*b contributes to out;
/// out is overwritten (not accumulated across calls).
template <int srcmask>
inline
void dotproduct(const f32x4& a, const f32x4& b, float& out)
{
	const f32x4 products = mul(a, b);
	float acc = 0.0f;
	if (srcmask & 0x1) acc += vgetq_lane_f32(products, 0);
	if (srcmask & 0x2) acc += vgetq_lane_f32(products, 1);
	if (srcmask & 0x4) acc += vgetq_lane_f32(products, 2);
	if (srcmask & 0x8) acc += vgetq_lane_f32(products, 3);
	out = acc;
}

// Lane-wise square root via the reciprocal-square-root estimate:
// sqrt(a) == a * rsqrt(a), with rsqrt refined by two Newton-Raphson steps
// (vrsqrtsq_f32(x, y) computes (3 - x*y) / 2, so est' = est * vrsqrtsq(a*est, est)).
// NOTE(review): a lane of 0 yields vrsqrteq == +inf and 0 * inf == NaN here,
// unlike a true sqrt(0) == 0 — confirm callers never pass zero lanes.
inline
f32x4 sqrt(const f32x4& a)
{
	// step 0: initial estimate of 1/sqrt(a), then one refinement step
	const f32x4 step0 = vrsqrteq_f32( a );
	const f32x4 step0Param = mul( a, step0 );
	const f32x4 step0Result = vrsqrtsq_f32( step0Param, step0 );
	// step 1: second refinement step for near-full single precision
	const f32x4 step1 = mul( step0, step0Result );
	const f32x4 step1Param = mul( a, step1 );
	const f32x4 step1Result = vrsqrtsq_f32( step1Param, step1 );
	// take the res: step2 ~= 1/sqrt(a), so a * step2 ~= sqrt(a)
	const f32x4 step2 = mul( step1, step1Result );
	return mul(a, step2);
}

/// Bitwise XOR of the two vectors' bit patterns, returned as floats
/// (useful e.g. for toggling sign bits).
inline 
f32x4 exor(const f32x4& a, const f32x4& b)
{
	// BUG FIX: reinterpret via vreinterpretq_* instead of pointer
	// type-punning (*(u32x4*)&a), which violates strict aliasing, and make
	// the u32x4 -> f32x4 result conversion explicit — standard C++ has no
	// implicit conversion between distinct vector types.
	const u32x4 bits = veorq_u32(vreinterpretq_u32_f32(a),
	                             vreinterpretq_u32_f32(b));
	return vreinterpretq_f32_u32(bits);
}

/// Broadcast the scalar v into all four lanes of out.
inline
void load(float v, f32x4& out)
{
	const f32x4 splat = vdupq_n_f32(v);
	out = splat;
}

/// Build a vector with x in lane 0, y in lane 1, z in lane 2, w in lane 3
/// (reversed lane order relative to load()).
inline
f32x4 loadr(float x, float y, float z, float w)
{
	// BUG FIX: the old code fed an uninitialized f32x4 into the first
	// vsetq_lane_f32, reading indeterminate data (UB). Build the vector with
	// a single load from a stack array instead.
	// (The previous "w being the lowest term" comment was copy-pasted from
	// load() and wrong for this overload: here x is the lowest lane.)
	const float lanes[4] = { x, y, z, w };
	return vld1q_f32(lanes);
}

/// Build a vector with w in lane 0, z in lane 1, y in lane 2, x in lane 3.
/// Little endian storage with w being the lowest term.
inline
f32x4 load(float x, float y, float z, float w)
{
	// BUG FIX: the old code fed an uninitialized f32x4 into the first
	// vsetq_lane_f32, reading indeterminate data (UB). Build the vector with
	// a single load from a stack array instead.
	const float lanes[4] = { w, z, y, x };
	return vld1q_f32(lanes);
}

/// Load four consecutive floats from v (no alignment requirement) into out.
inline
void loadu(const float* v, f32x4& out)
{
	const f32x4 loaded = vld1q_f32(v);
	out = loaded;
}

/// Aligned load. NEON vld1q_f32 has no stricter aligned variant here, so this
/// performs the same unaligned load as loadu().
inline
void loada(const float* v, f32x4& out)
{
	out = vld1q_f32(v);
}

/// Write lane 0 of v to the scalar out.
inline
void store(const f32x4& v, float& out)
{
	const float lane0 = vgetq_lane_f32(v, 0);
	out = lane0;
}

/// Store all four lanes of v to out[0..3] (no alignment requirement).
inline
void storeu(const f32x4& v, float* out)
{
	vst1q_f32(out, v);
}

/// Aligned store. NEON vst1q_f32 has no stricter aligned variant here, so this
/// performs the same unaligned store as storeu().
inline
void storea(const f32x4& v, float* out)
{
	vst1q_f32(out, v);
}

} // namespace simd
} // namespace glf

#endif