#pragma once
#include <intrin.h>
#include <memory>
#include <functional>
#include "glm/glm.hpp"
using namespace glm;
using namespace std;
#define align32 _declspec(align(32))

//tried out several approaches:
//classes - didn't work out; copy constructors were a black box - couldn't tell what SIMD code they generated
//settled on pure functions
//works out well - the generated asm is really clean
typedef __m256 f32block;

// pack eight scalars into one block. setr takes arguments in ascending lane
// order, so a lands in lane 0 and h in lane 7 -- the same layout the original
// produced by calling set_ps with the arguments reversed.
inline f32block as_f32block(float a, float b, float c, float d, float e, float f, float g, float h) 
{
	return _mm256_setr_ps(a, b, c, d, e, f, g, h);
}
inline f32block as_f32block(float v) { return _mm256_broadcast_ss(&v); }
inline f32block load_f32block(const float* __restrict v) { return _mm256_load_ps(v); }

static f32block neg_one = as_f32block(-1);

// element-wise arithmetic on 8-lane blocks
inline f32block operator+(f32block a, f32block b) { return _mm256_add_ps(a, b); }
inline f32block operator-(f32block a, f32block b) { return _mm256_sub_ps(a, b); }
//no division... comp reciprocal (see inverse() below)
inline f32block operator*(f32block a, f32block b) { return _mm256_mul_ps(a, b); }
//comparison
// ordered, non-signalling compares: each lane yields all-1 bits (true) or
// all-0 bits (false); combine masks with & / | and extract with mask() below
inline f32block operator<(f32block a, f32block b) { return _mm256_cmp_ps(a, b, _CMP_LT_OQ); }
inline f32block operator>(f32block a, f32block b) { return _mm256_cmp_ps(a, b, _CMP_GT_OQ); }
inline f32block operator<=(f32block a, f32block b) { return _mm256_cmp_ps(a, b, _CMP_LE_OQ); }
inline f32block operator>=(f32block a, f32block b) { return _mm256_cmp_ps(a, b, _CMP_GE_OQ); }
// bitwise ops -- primarily for combining comparison masks
inline f32block operator&(f32block a, f32block b) { return _mm256_and_ps(a, b); }
inline f32block operator|(f32block a, f32block b) { return _mm256_or_ps(a, b); }
//inline f32block operator!(f32block a) { return _mm256_xor_ps; }
// per-lane minimum
inline f32block min(f32block a, f32block b) { return _mm256_min_ps(a, b); }
//negation
inline f32block operator-(f32block v) { return neg_one * v; }
// fast reciprocal / reciprocal-sqrt approximations (~12-bit precision per
// the rcp/rsqrt instruction family) -- not exact division
inline f32block inverse(f32block v) { return _mm256_rcp_ps(v); }
inline f32block invsqrt(f32block v) { return _mm256_rsqrt_ps(v); }
inline f32block sqrt(f32block v) { return _mm256_mul_ps(invsqrt(v), v) ; }
inline int mask(f32block v) { return _mm256_movemask_ps(v) ; }

// structure-of-arrays 3D vector block: 8 x-components, 8 y, 8 z --
// i.e. eight vec3s processed in lockstep
struct v3block
{	
	f32block x;
	f32block y;
	f32block z;
};
// assemble a v3block from three component blocks
inline v3block as_v3block(f32block a, f32block b, f32block c)
{
	return v3block{ a, b, c };
}
// broadcast one glm::vec3 across a whole block (all 8 lanes identical)
inline v3block as_v3block(const glm::vec3& v)
{
	v3block r;
	r.x = as_f32block(v.x);
	r.y = as_f32block(v.y);
	r.z = as_f32block(v.z);
	return r;
}

// gather a v3block from three separate component arrays
// (each pointer must be 32-byte aligned -- see load_f32block)
inline v3block as_v3block(
	const float* __restrict a, 
	const float* __restrict b, 
	const float* __restrict c)
{
	return as_v3block(load_f32block(a), load_f32block(b), load_f32block(c));
}
// component-wise v3block arithmetic.
// BUG FIX: operator*(v3block, v3block) and operator+(v3block, v3block) both
// used b.x for the y component (a.y * b.x / a.y + b.x) -- corrected to b.y.
inline v3block operator*(v3block a, f32block b) { return as_v3block(a.x * b, a.y * b, a.z * b); }
inline v3block operator-(v3block a, v3block b) { return as_v3block(a.x - b.x, a.y - b.y, a.z - b.z); }
inline v3block operator*(v3block a, v3block b) { return as_v3block(a.x * b.x, a.y * b.y, a.z * b.z); }
inline v3block operator+(v3block a, v3block b) { return as_v3block(a.x + b.x, a.y + b.y, a.z + b.z); }

// scale each of the 8 vectors to (approximately) unit length:
// multiply every component by the fast reciprocal magnitude 1/|v|
inline v3block normalize(v3block a)
{
	f32block rlen = invsqrt(a.x * a.x + a.y * a.y + a.z * a.z);
	return as_v3block(a.x * rlen, a.y * rlen, a.z * rlen);
}

// per-lane dot product of the 8 vector pairs
inline f32block dot(v3block a, v3block b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
// 1/|a| via the rsqrt approximation
inline f32block inv_mag(v3block a) { return invsqrt(dot(a, a)); }
// |a| (uses the approximate sqrt above)
inline f32block mag(v3block a) { return sqrt(dot(a, a)); }

// Structure-of-arrays storage for block_count blocks of 3D vectors.
// x / y / z point into one contiguous 32-byte-aligned allocation laid out as
// [x-plane | y-plane | z-plane], so each component plane can be streamed
// with aligned AVX loads/stores. Copies share the underlying buffer.
struct v3stream
{
	v3stream(int p_block_count)
		: x(nullptr), y(nullptr), z(nullptr), block_count(p_block_count)
	{
		// single allocation for all three component planes
		auto dataptr = (float*)_aligned_malloc(sizeof(f32block) * block_count * 3, 32);
		// deleter must match the allocator. std::ptr_fun was deprecated in
		// C++11 and removed in C++17 -- a plain function pointer works in
		// every standard.
		data_ = shared_ptr<float>(dataptr, _aligned_free);
		x = dataptr;
		y = dataptr + 8 * block_count;
		z = dataptr + 8 * 2 * block_count;
	}	
	float* x;
	float* y;
	float* z;
	int block_count;

	// load block block_index (8 vectors) from all three planes
	v3block block(int block_index)
	{
		int offset = block_index * 8;
		return as_v3block(
			x + offset,
			y + offset,
			z + offset
		);
	}
	// store a block back with aligned stores
	void set_block(int block_index, v3block block)
	{
		int offset = block_index * 8;
		
		_mm256_store_ps(x + offset, block.x);
		_mm256_store_ps(y + offset, block.y);
		_mm256_store_ps(z + offset, block.z);
	}
private:
	shared_ptr<float> data_;  // owns the combined x/y/z buffer
};

struct f32stream
{
	f32stream(int p_block_count) : block_count(p_block_count), data_(nullptr)
	{
		auto dataptr = (float*)_aligned_malloc(sizeof(float) * 8 * block_count, 32);
		data_ = shared_ptr<float>(dataptr, ptr_fun(_aligned_free));
	}	
	//float* data;
	int block_count;

	f32block block(int block_index)
	{
		//todo: data_.get must be kinda expensive...
		return load_f32block(data_.get() + block_index * 8);
	}
	void set_block(int block_index, f32block block)
	{
		_mm256_store_ps(data_.get() + block_index * 8, block);
	}
private:
	shared_ptr<float> data_;
};

// apply a lane mask (typically a comparison result) to every component:
// & zeroes the components of lanes whose mask bits are clear,
// | merges the mask bits into every component
inline v3block operator&(v3block a, f32block b) { return as_v3block(a.x & b, a.y & b, a.z & b); }
inline v3block operator|(v3block a, f32block b) { return as_v3block(a.x | b, a.y | b, a.z | b); }

// shared broadcast constants; defined in exactly one translation unit
// (names suggest: +inf, an increasing 0..7 ramp, and the scalars 8, 4, 0,
// 2, 0.5, -1 splatted across all lanes -- TODO confirm against the
// defining .cpp, the values are not visible here)
namespace simd
{		
	extern const f32block c_inf;
	extern const f32block c_increasing;
	extern const f32block c_8;
	extern const f32block c_4;
	extern const f32block c_0;
	extern const f32block c_2;
	extern const f32block c_0_5;
	extern const f32block c_neg_1;

}
// NOTE(review): this looks unfinished -- the movemask result is discarded
// and `target` is never written, so the function currently has no observable
// effect. Presumably it was meant to left-pack the selected lanes of `block`
// into `target`; confirm the intent before relying on it.
inline void compact_and_write(f32block block, float* target)
{
	_mm256_movemask_ps(block);
}

//to do a full rotation across simd width (8 floats)
//rotate x 3 -> swap -> rotate x 3
// rotate each 128-bit lane left by one float: [a b c d] -> [b c d a]
// (permute_ps is the single-source form of shuffle_ps(x, x, imm) -- same
// in-lane selection, one operand)
inline f32block rotate_in_lane(f32block block)
{
	return _mm256_permute_ps(block, _MM_SHUFFLE(0, 3, 2, 1));
}
// exchange the two 128-bit halves of the register: [A B] -> [B A]
// (control byte 1 selects the source's upper lane for the low half and,
// via the upper nibble 0, the lower lane for the high half)
inline f32block swap_across_lanes(f32block block)
{
	return _mm256_permute2f128_ps(block, block, 1);
}
inline v3block operator*(f32block a, v3block b) { return as_v3block(b.x * a, b.y * a, b.z * a); }

// Stream descriptor: capacity in blocks plus the currently-used element
// count and row stride. count and blocks_per_row were previously left
// uninitialized (indeterminate values) -- zero-init so a fresh Stream
// starts in a defined state.
struct Stream
{
	Stream(int p_block_capacity) : block_capacity(p_block_capacity) { }
	int count = 0;
	int block_capacity;
	int blocks_per_row = 0;
};
// a contiguous window into a stream, addressed in whole blocks
struct View
{
	int block_offset;  // first block of the window
	int block_count;   // number of blocks in the window
};