#pragma once

#include <vector>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cassert>

namespace CH //CudaHelper
{
	///RAII wrapper around a linear block of CUDA device (global) memory.
	///
	///Owns a device allocation of size() elements of T and releases it in the
	///destructor.  Copy construction and copy assignment perform deep
	///device-to-device copies, so each instance always owns its buffer.
	///
	///NOTE: all CUDA API results are checked with assert(), matching the
	///file's convention — the checks disappear when compiled with NDEBUG.
	template <class T>
	class GlobalMemory
	{
	public:
		///Default constructor: owns no device memory (size() == 0).
		GlobalMemory()
			: device_memory_ptr_(0), size_(0)
		{
		}

		///Allocate `size` elements on the device, zero-filled via cudaMemset.
		GlobalMemory(size_t size)
			: device_memory_ptr_(0), size_(size)
		{
			allocate_();

			if(size_ > 0)
			{
				cudaError_t e = cudaMemset(device_memory_ptr_, 0, sizeof(T)*size_);
				assert( e == cudaSuccess );
			}
		}

		///Construct from a host-side std::vector (host -> device copy).
		GlobalMemory(const std::vector<T>& cpu_vec)
			: device_memory_ptr_(0), size_(cpu_vec.size())
		{
			allocate_();
			copy_from_host_(cpu_vec);
		}

		///Copy constructor: deep device-to-device copy of gpu_vec's buffer.
		GlobalMemory(const GlobalMemory<T>& gpu_vec)
			: device_memory_ptr_(0), size_(gpu_vec.size())
		{
			allocate_();
			copy_from_device_(gpu_vec.get_memory_ptr());
		}

		///Replace the contents with host data (host -> device copy),
		///reallocating the device buffer only if the element count changed.
		void set_gpu_data(const std::vector<T>& cpu_vec)
		{
			resize_(cpu_vec.size());
			copy_from_host_(cpu_vec);
		}

		///Copy the device data into a host-side std::vector
		///(device -> host copy); the vector is resized to fit.
		void get_gpu_data(std::vector<T>& cpu_vec) const
		{
			if(size_ != cpu_vec.size())
				cpu_vec.resize(size_);

			if(size_ > 0)
			{
				cudaError_t e = cudaMemcpy(&cpu_vec[0], device_memory_ptr_, sizeof(T)*size_, cudaMemcpyDeviceToHost);
				assert( e == cudaSuccess );
			}
		}

		///Copy assignment: deep device-to-device copy.
		///Self-assignment safe: without the guard we could free our own
		///buffer and then cudaMemcpy over identical (overlapping) ranges,
		///which the CUDA runtime forbids.
		GlobalMemory<T>& operator = (const GlobalMemory<T>& gpu_vec)
		{
			if(this != &gpu_vec)
			{
				resize_(gpu_vec.size());
				copy_from_device_(gpu_vec.get_memory_ptr());
			}

			return *this;
		}

		///Number of elements in the device allocation.
		size_t size() const
		{
			return size_;
		}

		///Raw device pointer (read-only view).
		///Only dereferenceable in device code, never on the host.
		const T* get_memory_ptr() const
		{
			return device_memory_ptr_;
		}

		///Raw device pointer.
		///Only dereferenceable in device code, never on the host.
		T* get_memory_ptr()
		{
			return device_memory_ptr_;
		}

		///Copy the device data to the host and print it to std::cout,
		///space-separated, followed by a newline (debugging aid).
		void dump_to_screen() const
		{
			std::vector<T> cpu_vec;
			this->get_gpu_data(cpu_vec);

			// size_t index: avoids signed/unsigned mismatch with vector::size()
			for(size_t i = 0; i < cpu_vec.size(); i++)
				std::cout << cpu_vec[i] << " ";
			std::cout << "\n";
		}

		///Bind a texture reference to this global-memory buffer.
		///NOTE(review): the texture-reference API (cudaBindTexture) is
		///deprecated since CUDA 11 and removed in CUDA 12 — callers should
		///migrate to texture objects (cudaCreateTextureObject).
		void bind_texture(const struct textureReference &texture_ref)
		{
			cudaError_t e = cudaBindTexture(0, &texture_ref, device_memory_ptr_, &texture_ref.channelDesc, sizeof(T)*size_);
			assert( e == cudaSuccess );
		}

		///Destructor: releases the device allocation, if any.
		virtual ~GlobalMemory()
		{
			release_();
		}

	protected:
		///Allocate size_ elements on the device into device_memory_ptr_.
		///Leaves the pointer null for size_ == 0 so the empty state is
		///consistent across all constructors.
		void allocate_()
		{
			if(size_ == 0)
			{
				device_memory_ptr_ = 0;
				return;
			}

			cudaError_t e = cudaMalloc((void **) &device_memory_ptr_, sizeof(T)*size_);
			assert( e == cudaSuccess );
		}

		///Free the current allocation and null the pointer (no-op when empty).
		void release_()
		{
			if(device_memory_ptr_)
			{
				cudaError_t e = cudaFree(device_memory_ptr_);
				assert( e == cudaSuccess );
				device_memory_ptr_ = 0;
			}
		}

		///Reallocate to new_size elements; keeps the existing buffer when the
		///size already matches (contents are then overwritten by the caller).
		void resize_(size_t new_size)
		{
			if(size_ != new_size)
			{
				release_();
				size_ = new_size;
				allocate_();
			}
		}

		///Host -> device copy of size_ elements (no-op when empty).
		void copy_from_host_(const std::vector<T>& cpu_vec)
		{
			if(size_ > 0)
			{
				// &cpu_vec[0] is only valid on a non-empty vector, hence the guard
				cudaError_t e = cudaMemcpy(device_memory_ptr_, &cpu_vec[0], sizeof(T)*size_, cudaMemcpyHostToDevice);
				assert( e == cudaSuccess );
			}
		}

		///Device -> device copy of size_ elements from src (no-op when empty).
		void copy_from_device_(const T* src)
		{
			if(size_ > 0)
			{
				cudaError_t e = cudaMemcpy(device_memory_ptr_, src, sizeof(T)*size_, cudaMemcpyDeviceToDevice);
				assert( e == cudaSuccess );
			}
		}

		T* device_memory_ptr_;	// owning device pointer; 0 when size_ == 0
		size_t size_;		// element count of the allocation

	};

}