#pragma once
#include <helper_cuda.h>
#include <thrust\device_vector.h>
#include <thrust\host_vector.h>
#include <base\meta.h>

namespace gc {

	// Convenience wrappers over the CUDA runtime API. None of these check the
	// returned cudaError_t -- wrap call sites in checkCudaErrors() (helper_cuda.h).
	#define GPUMalloc(ptr,size) cudaMalloc((void**)&ptr,size)
    // Host -> device copy of `size` bytes.
    #define CPU2GPU(dst,src,size) cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice)
    // Device -> host copy of `size` bytes.
    #define GPU2CPU(dst,src,size) cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost)
    // Device -> device copy of `size` bytes.
    #define GPU2GPU(dst,src,size) cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice)

    // Runs `func`, catching and reporting (but swallowing) any
    // thrust::system_error; prints "<file>:<line> <message>" to stdout.
    // NOTE(review): relies on <iostream>/<string> being pulled in transitively
    // by the thrust headers -- confirm if call sites ever fail to compile.
    #define checkThrustErrors(func) \
	try {func;}					\
	catch (thrust::system_error &e) { std::cout << std::string(__FILE__) << ":" << __LINE__ << " " << e.what() << std::endl; }


	/// CUDA atomic operations customized 
	// NOTE(review): unlike the built-in atomics, these return bool rather than
	// the previous value -- presumably whether the stored value was replaced;
	// confirm against their definitions (not visible in this file).
	__device__ bool atomicMinf(float* address, float val);
	__device__ bool atomicMaxf(float* address, float val);
	__device__ bool atomicMinD(double* address, double val);
	__device__ bool atomicMaxD(double* address, double val);

	// Type-dispatching wrappers forwarding to the float/double implementations
	// above. Only float and double are specialized; instantiating any other T
	// will fail at link time (primary template is declared but never defined).
	template<typename T>
	inline __device__ bool atomicMinCustom(T* addr, T val);
	template<>
	inline __device__ bool atomicMinCustom<float>(float* addr, float val) { return atomicMinf(addr, val); }
	template<>
	inline __device__ bool atomicMinCustom<double>(double* addr, double val) { return atomicMinD(addr, val); }
	template<typename T>
	inline __device__ bool atomicMaxCustom(T* addr, T val);
	template<>
	inline __device__ bool atomicMaxCustom<float>(float* addr, float val) { return atomicMaxf(addr, val); }
	template<>
	inline __device__ bool atomicMaxCustom<double>(double* addr, double val) { return atomicMaxD(addr, val); }


	// 3D morton code of 30 bits (10 bits per axis, interleaved)
	__host__ __device__ uint morton3f_32b(float x, float y, float z);
	// 3D morton code of 63 bits (21 bits per axis, interleaved)
	__host__ __device__ uint64 morton3f_64b(float x, float y, float z);

	// Concatenate the rows of a pitched 2D device allocation into one
	// contiguous device array (synchronous device-to-device copies).
	//   dst       - device buffer large enough to hold the sum of sizes[]
	//   src_arrs  - base of the pitched allocation; row q starts at
	//               (char*)src_arrs + q * pitch
	//   pitch     - row stride in bytes (as returned by cudaMallocPitch)
	//   num_queue - number of rows to merge
	//   sizes     - host-side array: element count of each row
	template<typename T>
	void cudaMergeQueue(T* dst, T* src_arrs, size_t pitch, int num_queue, int *sizes) {
		T* out = dst;
		for (int q = 0; q < num_queue; ++q) {
			const T* row = (const T*)((const char*)src_arrs + q * pitch);
			checkCudaErrors(GPU2GPU(out, row, sizeof(T) * sizes[q]));
			out += sizes[q];
		}
	}

	// View a mutable pointer as pointer-to-const (the qualification
	// conversion is implicit; this just makes it explicit at call sites).
	template<typename T>
	__inline__ __host__ const T* getConstPtr(T* ptr) {
		const T* view = ptr;
		return view;
	}

	// thrust utils
	// Extract the raw device pointer backing a thrust::device_vector.
	template<class T>
	__inline__ __host__ T* getRawPtr(thrust::device_vector<T> &V) {
		thrust::device_ptr<T> dptr = V.data();
		return thrust::raw_pointer_cast(dptr);
	}

	// Wrap a raw device pointer so thrust algorithms treat it as device data.
	// Equivalent to constructing thrust::device_ptr<T> directly.
	template<class T>
	__inline__ __host__ thrust::device_ptr<T> getDevicePtr(T* V) {
		return thrust::device_pointer_cast(V);
	}

	// alloc memory in GPU, can only be freed in device
	// Single-thread kernel (launch as <<<1,1>>>): performs a device-heap
	// new[] of `size` elements and publishes the pointer through `ptr`.
	// Pair every call with deleteInGPU -- device-heap allocations cannot be
	// released with cudaFree.
	template<class T>
	__global__ void newInGPU(T** ptr, int size) {
		T* storage = new T[size];
		*ptr = storage;
	}

	// in pair with newInGPU
	// Single-thread kernel (launch as <<<1,1>>>): releases the device-heap
	// array previously allocated by newInGPU through the same handle.
	template<class T>
	__global__ void deleteInGPU(T** ptr) {
		T* storage = *ptr;
		delete[] storage;
	}

	// vector in device
	// Dynamically-growing array usable from device code. Host-side
	// construction allocates the backing store on the device heap via
	// newInGPU/deleteInGPU so a host-built instance can be memcpy'd to the
	// GPU and used there (hence the double indirection: *m_begin is the
	// device array).
	// NOTE(review): no copy control is defined -- copying an instance and
	// destroying both copies double-frees the backing store; callers must
	// keep single ownership.
	template<typename T>
	class LocalVector
	{
	public:   // member variables must be public to enable CPU-GPU data transfer
		T ** m_begin;   // handle; *m_begin is the element storage
		uint capacity;  // allocated element count
		uint length;    // used element count
		int mLock;      // spin lock used by atomicPush/atomicPop

		// Grow the backing store. Not thread-safe by itself; concurrent
		// callers must hold mLock (push via atomicPush).
		__device__ void expand() {
			// BUGFIX: the original did `capacity >>= 1`, *shrinking* the
			// capacity on growth so push() wrote out of bounds. Double it
			// instead, with a floor for the capacity==0 case (see
			// LocalVector(false)).
			capacity = (capacity == 0) ? 16u : (capacity << 1);
			T* newBegin = new T[capacity];
			memcpy(newBegin, *m_begin, length * sizeof(T));
			delete[] *m_begin;
			*m_begin = newBegin;
		}
	public:
		// Default: empty vector with room for 16 elements.
		__host__ __device__ LocalVector() : m_begin(nullptr), capacity(16), length(0), mLock(0) {
#ifdef __CUDA_ARCH__
			m_begin = new T*;
			*m_begin = new T[capacity];
#else
			GPUMalloc(m_begin, sizeof(T*));
			newInGPU<T> << <1, 1 >> > (m_begin, capacity);
			cudaDeviceSynchronize();
			getLastCudaError("LocalVector construction failure");
#endif
		}

		// Alloc==true behaves like the default constructor; Alloc==false
		// yields an empty shell with no backing store (m_begin == nullptr;
		// the destructor skips deallocation in that case).
		__host__ __device__ LocalVector(bool Alloc) : m_begin(nullptr), capacity(0), length(0), mLock(0) {
			// BUGFIX: the original called `LocalVector();`, which constructs
			// and immediately destroys a temporary, leaving *this
			// uninitialized (and m_begin dangling). Replicate the default
			// construction inline instead.
			if (Alloc) {
				capacity = 16;
#ifdef __CUDA_ARCH__
				m_begin = new T*;
				*m_begin = new T[capacity];
#else
				GPUMalloc(m_begin, sizeof(T*));
				newInGPU<T> << <1, 1 >> > (m_begin, capacity);
				cudaDeviceSynchronize();
				getLastCudaError("LocalVector construction failure");
#endif
			}
		}

		// Vector of `len` elements (uninitialized contents).
		// BUGFIX: the original reserved `len >> 1` -- *half* the requested
		// length -- guaranteeing out-of-bounds element access. Reserve twice
		// the length (`<<` was clearly intended), with a floor for len==0.
		__host__ __device__ LocalVector(uint len) : m_begin(nullptr), capacity(len > 0 ? (len << 1) : 16), length(len), mLock(0) {
#ifdef __CUDA_ARCH__
			m_begin = new T*;
			*m_begin = new T[capacity];
#else
			GPUMalloc(m_begin, sizeof(T*));
			newInGPU<T> << <1, 1>> >(m_begin, capacity);
			cudaDeviceSynchronize();
			getLastCudaError("LocalVector construction failure");
#endif
		}

		// Vector of `len` elements with an explicit reserve. Caller is
		// responsible for passing capacity >= len.
		__host__ __device__ LocalVector(uint len, uint capacity) : m_begin(nullptr), capacity(capacity), length(len), mLock(0) {
#ifdef __CUDA_ARCH__
			m_begin = new T*;
			*m_begin = new T[capacity];
#else
			// BUGFIX: was sizeof(T**); same byte count, but the allocation
			// holds a T*, so spell it that way (consistent with the other
			// constructors).
			GPUMalloc(m_begin, sizeof(T*));
			newInGPU<T> << <1, 1>> >(m_begin, capacity);
			cudaDeviceSynchronize();
			getLastCudaError("LocalVector construction failure");
#endif
		}

		// Release the backing store on whichever side allocated it.
		// Guarded so an unallocated shell (LocalVector(false)) destructs
		// safely.
		__host__ __device__  ~LocalVector() {
			if (m_begin == nullptr) return;
#ifdef __CUDA_ARCH__
			delete[] *m_begin;
			delete m_begin;
			m_begin = nullptr;
#else
			deleteInGPU<T> << <1, 1>> >(m_begin);
			cudaDeviceSynchronize();
			cudaFree(m_begin);
			m_begin = nullptr;
#endif
		}

		// Element access. Out-of-range indices are reported via printf but
		// the (invalid) reference is still returned -- callers must respect
		// size().
		__device__ T& operator[] (uint index) {
			if (index >= length) printf("illegal memory access in local vector\n");
			return *(*m_begin + index);
		}
		__device__ T* begin() {
			return *m_begin;
		}
		__device__ T* end() {
			return *m_begin + length;
		}

		// Append one element, growing the storage when full. Not
		// thread-safe; use atomicPush for concurrent producers.
		__device__ void push(T t) {
			if (length >= capacity) {
				expand();
			}
			(*m_begin)[length++] = t;
		}

		// Spin-lock protected push.
		// NOTE(review): lock acquisition via atomicCAS in a loop can
		// live-lock when threads of the same warp contend on pre-Volta
		// hardware (no independent thread scheduling) -- confirm target
		// architecture.
		__device__ void atomicPush(T t) {
			bool blocked = true;
			while (blocked) {
				if (atomicCAS(&mLock, 0, 1) == 0) {
					push(t);
					atomicExch(&mLock, 0);
					blocked = false;
				}
			}
		}

		// Remove and return the last element; returns a default-constructed
		// T when empty.
		__device__ T pop() {
			if (length == 0) return T();
			T endElement = (*m_begin)[length - 1];
			--length;
			return endElement;
		}

		// Spin-lock protected pop (same contention caveat as atomicPush).
		__device__ T atomicPop() {
			T elem;
			bool blocked = true;
			while (blocked) {
				if (atomicCAS(&mLock, 0, 1) == 0) {
					elem = pop();
					atomicExch(&mLock, 0);
					blocked = false;
				}
			}
			return elem;
		}

		// Number of elements currently stored.
		__device__ uint size() {
			return length;
		}
	};
}
