#include <cuda_runtime.h>
// #include <cublas.h>

#include <boost/shared_ptr.hpp>
#include <boost/utility.hpp>

#include <cassert>
#include <exception>
#include <stdexcept>
#include <vector>

#ifndef _KUAI_CUDA_H_
#define _KUAI_CUDA_H_

//Fast integer multiplication
#define THREAD_ID (__umul24(blockIdx.x, blockDim.x) + threadIdx.x)
#define MUL(a, b) __umul24(a, b)
#define THREADS_IN_BLOCK 128
#define MAX_BLOCKS 128
#define self (*this)

namespace kuai { 

	// Number of thread blocks needed to cover n work items with
	// tinb threads per block (ceiling division).
	inline int block_size(int n, int tinb = THREADS_IN_BLOCK) {
		const int rounded_up = n + tinb - 1;
		return rounded_up / tinb;
	}

	// True iff x is a power of two.
	// Fixed: the old test `(x & (x-1)) == 0` wrongly reported true for
	// x == 0, which is not a power of two.
	inline bool isPow2(unsigned int x)
	{
		return x != 0 && ((x&(x-1))==0);
	}


	// Smallest power of two >= x. Works by smearing the highest set bit
	// of x-1 into every lower position, then adding one.
	// Note: returns 0 when x == 0 (wraps) and when x > 2^31.
	inline unsigned int nextPow2( unsigned int x ) {
		x -= 1;
		for (unsigned int shift = 1; shift <= 16; shift *= 2) {
			x |= x >> shift;
		}
		return x + 1;
	}
	
	typedef float RealNumber;
	typedef std::vector<RealNumber> RealArray;

	typedef int	Index;

	// Exception type used throughout kuai; carries a human-readable
	// description retrievable via what().
	class ErrorMessage 
		: public std::runtime_error
	{
	public:
		// message: NUL-terminated description; copied by runtime_error.
		ErrorMessage(const char* message)
			: std::runtime_error(message)
		{
		}
	};


	namespace cuda {

	// Check the result of a CUDA runtime call and convert a failure code
	// into an ErrorMessage exception carrying CUDA's error text.
	inline void CUDA_SAFE_CALL( cudaError_t code ) {
		if (code == cudaSuccess)
			return;
		throw ErrorMessage(cudaGetErrorString(code));
	}

	////////////////////////////////////////////////////////////////////////////////
	// Compute the number of threads and blocks to use for the reduction 
	// We set threads / block to the minimum of maxThreads and n/2. 
	////////////////////////////////////////////////////////////////////////////////
	void getNumBlocksAndThreads(int n, int &blocks, int &threads);

	// RAII owner of a raw device (GPU) allocation; the memory is released
	// when the object is destroyed. Noncopyable -- share ownership through
	// BufferPointer (boost::shared_ptr<MemoryBuffer>) instead.
	class MemoryBuffer 
		: boost::noncopyable
	{
	public: 
		// Allocate nItems * nBytePerItem bytes of device memory.
		// Throws ErrorMessage if cudaMalloc fails.
		MemoryBuffer(int nItems, int nBytePerItem) {
			// Widen to size_t BEFORE multiplying: the old int*int product
			// could overflow for large buffers (> 2 GB).
			CUDA_SAFE_CALL( cudaMalloc(&_p,
				static_cast<size_t>(nItems) * static_cast<size_t>(nBytePerItem)) );
		}

		~MemoryBuffer() {
			// Result deliberately ignored: destructors must not throw.
			cudaFree(_p);
		}

	public:
		// Raw device pointer; not dereferenceable on the host.
		void* pointer() const {
			return _p;
		}

	public:
		void* _p;	// device pointer; kept public for legacy callers -- prefer pointer()
	};

	typedef boost::shared_ptr<MemoryBuffer> BufferPointer;

	template<typename PODT>
		class Array
	{
	public:
		explicit Array(int n, bool zero=false) 
			: _buffer(new MemoryBuffer(n, sizeof(PODT)))
		{ 
			_start = (PODT*)_buffer->pointer();
			_finish = _start+n;
			_skip = 1;

			if (zero) {
				CUDA_SAFE_CALL( cudaMemset(_buffer->pointer(), 0, sizeof(PODT)*n) ) ;
			}
		}

		explicit Array(const std::vector<PODT>& data, bool async=false, cudaStream_t stream=0) 
			: _buffer(new MemoryBuffer(data.size(), sizeof(PODT)))
		{
			_start = (PODT*)_buffer->pointer();
			_finish = _start+data.size();
			_skip = 1;

			if (async) {
				CUDA_SAFE_CALL( 
					cudaMemcpyAsync(
						_buffer->pointer(), &data[0], sizeof(PODT)*data.size(), 
						cudaMemcpyHostToDevice, stream
					)
				);
			}
			else {
				CUDA_SAFE_CALL( cudaMemcpy(_buffer->pointer(),
					&data[0], sizeof(PODT)*data.size(), cudaMemcpyHostToDevice) );
			}
		}

		explicit Array(Array& source, int start, int finish, int skip=1) 
			: _buffer(source._buffer)
		{
			int n = (finish-start+skip-1)/skip;
			_start = source.at(start);
			_finish = _start+n*skip;
			_skip = source.skip()*skip;
		}

		explicit Array(PODT* start, int n, int skip=1) 
		{
			_start = _start;
			_finish = _start + n*skip;
			_skip = skip;
		}


		PODT* at(int i) const {
			return _start+i*_skip;
		}

	public:
		int size() const {
			return (_finish-_start)/_skip;
		}

		int skip() const {
			return _skip;
		}

		PODT* c_pointer() {
			return _start;
		}
		const PODT* c_pointer() const {
			return _start;
		}
		PODT* f_pointer() {
			return _start-1;
		}
		const PODT* f_pointer() const {
			return _start-1;
		}

	public:
		void assign(const Array& source) {
			assert (this->size() == source.size());
			cublasScopy(size(), source.f_pointer(), source.skip(),
				this->f_pointer(), this->skip());
		}

		void get(std::vector<PODT>& result) const {
			result.resize(self.size());
			if (skip() == 1) {
				CUDA_SAFE_CALL( cudaMemcpy(&result[0], c_pointer(),
					sizeof(PODT)*self.size(), cudaMemcpyDeviceToHost) );
			}
		}

	public:
		Array clone() const {
			Array result(size());
			return result;
		}
	private:
		boost::shared_ptr<MemoryBuffer> _buffer;
		PODT* _start;
		PODT* _finish;
		int _skip;
	};

	typedef Array<Index> IndexArray;

	// Device-resident float vector with elementwise arithmetic. The kernels
	// behind add/sub/mul/div/rev/add_ax/sum are defined in a separate
	// translation unit; only the trivial forwarders live here.
	class FloatArray
		: public Array<float>
	{
	public:
		explicit FloatArray(int n, bool zero=false) 
			: Array<float>(n, zero)
		{ }

		explicit FloatArray(const std::vector<float>& data, bool async=false, cudaStream_t stream=0) 
			: Array<float>(data, async, stream)
		{ }

		explicit FloatArray(FloatArray& source, int start, int finish, int skip=1)
			: Array<float>(source, start, finish, skip)
		{ }

		explicit FloatArray(float* start, int n, int skip=1)
			: Array<float>(start, n, skip)
		{ }

	public:
		// (*this) = x + y, elementwise; returns *this for chaining.
		FloatArray& add(const FloatArray& x, const FloatArray& y);
		FloatArray& operator+=(const FloatArray& v0) {
			return (*this).add(*this, v0);
		}

		// (*this) = x - y, elementwise.
		FloatArray& sub(const FloatArray& x, const FloatArray& y);
		FloatArray& operator-=(const FloatArray& v0) {
			return (*this).sub(*this, v0);
		}

		// (*this) = x * y, elementwise.
		FloatArray& mul(const FloatArray& x, const FloatArray& y);
		FloatArray& operator*=(const FloatArray& v0) {
			return (*this).mul(*this, v0);
		}
		FloatArray& operator*=(const float v0);

		// (*this) = x / y, elementwise.
		FloatArray& div(const FloatArray& x, const FloatArray& y);
		FloatArray& operator/=(const FloatArray& v0) {
			return (*this).div(*this, v0);
		}
		// Scalar division, implemented as multiplication by the reciprocal.
		FloatArray& operator/=(const float v0) {
			return (*this) *= 1/v0;
		}

		// (*this) = 1 / x, elementwise.
		FloatArray& rev(const FloatArray& x);
		FloatArray& rev() {
			return rev(*this);
		}

		// (*this) += a * x (SAXPY-style update).
		FloatArray& add_ax(float a, const FloatArray& x);

		// Sum of all elements (reduction on the device).
		float sum() const;
	};

} }


#endif

