#pragma once
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <string.h>
#include <vector>

#include <Utilities/cuda_utilities.h>

namespace PhysLeo {

/**
 * A buffer class which manages a CPU buffer and a GPU buffer together.
 * With this class, you can transfer data between CPU and GPU quickly. It also paves the way to combining GPU and CPU modules: in a simulation flow,
 * you can use a GPU module to compute internal forces quickly and a CPU module to solve the linear system.
 */
template<typename T>
class BufferData {
public:
    BufferData() = delete;

    /**
     * Explicit constructor.
     * Neither the CPU buffer nor the GPU buffer is allocated at construction
     * time; both are allocated lazily on first use.
     * @param size  number of elements of type T this buffer holds; fixed for
     *              the lifetime of the object (must be non-negative)
     */
    explicit BufferData(const int size)
        : vec_(), ptr_cpu_(nullptr), ptr_gpu_(nullptr), size_(size)
    {
        assert(size >= 0);  // a negative size would corrupt every byte-count computation
    }

    /**
     * Copying is disabled: the object owns raw GPU memory, so a default
     * member-wise copy would double-free on destruction.
     */
    BufferData(const BufferData<T>&) = delete;

    /**
     * Moving is disabled.
     */
    BufferData(BufferData<T>&&) = delete;

    /**
     * Copy assignment is disabled.
     */
    BufferData<T>& operator=(const BufferData<T>&) = delete;

    /**
     * Move assignment is disabled.
     */
    BufferData<T>& operator=(BufferData<T>&&) = delete;

    /**
     * Copy the contents of a std::vector into the CPU buffer.
     * The vector must hold exactly as many elements as this buffer; the CPU
     * buffer is allocated on demand. The GPU buffer is NOT updated — call
     * cpu2Gpu() afterwards if needed.
     * @param vec  source vector, taken by const reference (the previous
     *             by-value parameter copied the whole payload on every call)
     * @return *this
     */
    BufferData<T>& operator=(const std::vector<T>& vec)
    {
        // size_ is int while vec.size() is size_t: widen explicitly to avoid
        // a signed/unsigned comparison.
        assert(static_cast<size_t>(size_) == vec.size());
        if (ptr_cpu_ == nullptr) allocateCpu();
        memcpy(static_cast<void*>(ptr_cpu_), vec.data(), byteCount());
        return *this;
    }

    /**
     * Destructor: releases the CPU and GPU buffers if they were allocated.
     */
    ~BufferData()
    {
        if (ptr_cpu_ != nullptr) releaseCpu();
        if (ptr_gpu_ != nullptr) releaseGpu();
    }

    /**
     * @return the element count of this buffer (set at construction)
     */
    int size() const { return size_; }

    /**
     * @return the raw pointer to the CPU buffer, or nullptr if it has not
     *         been allocated yet
     */
    T* ptrCpu()
    {
        return ptr_cpu_;
    }

    /**
     * Raw pointer to the GPU buffer. If the GPU buffer has not been allocated
     * but the CPU buffer holds data, the GPU buffer is allocated and filled
     * from the CPU side automatically (via cpu2Gpu()).
     * @return the raw device pointer, or nullptr if nothing could be allocated
     */
    T* ptrGpu()
    {
        if (ptr_gpu_ == nullptr && ptr_cpu_ != nullptr)
        {
            allocateGpu();
            cpu2Gpu();
        }
        return ptr_gpu_;
    }

    /**
     * Allocate the CPU buffer.
     * Does nothing if the buffer is already allocated or size is zero.
     */
    void allocateCpu()
    {
        if (ptr_cpu_ == nullptr && size_ > 0)
        {
            vec_.resize(size_);
            ptr_cpu_ = vec_.data();
            assert(ptr_cpu_ != nullptr);
        }
    }

    /**
     * Allocate the GPU buffer.
     * Does nothing if the buffer is already allocated or size is zero;
     * allocation failure is reported through cudaCheck.
     */
    void allocateGpu()
    {
        if (ptr_gpu_ == nullptr && size_ > 0)
            cudaCheck(cudaMalloc(&ptr_gpu_, byteCount()));
    }

    /**
     * Zero-fill the CPU buffer.
     * Does nothing if the CPU buffer has not been allocated.
     */
    void setZeroCpu()
    {
        if (ptr_cpu_ != nullptr) memset(ptr_cpu_, 0, byteCount());
    }

    /**
     * Zero-fill the GPU buffer.
     * Does nothing if the GPU buffer has not been allocated.
     */
    void setZeroGpu()
    {
        if (ptr_gpu_ != nullptr) cudaCheck(cudaMemset(ptr_gpu_, 0, byteCount()));
    }

    /**
     * Release the CPU buffer if it has been allocated.
     */
    void releaseCpu()
    {
        // Swap with an empty vector so the capacity is actually returned to
        // the allocator — clear() alone keeps the memory reserved.
        std::vector<T>().swap(vec_);
        ptr_cpu_ = nullptr;
    }

    /**
     * Release the GPU buffer if it has been allocated.
     */
    void releaseGpu()
    {
        if (ptr_gpu_ != nullptr) cudaCheck(cudaFree(ptr_gpu_));
        ptr_gpu_ = nullptr;
    }

    /**
     * Copy the CPU buffer to the GPU buffer, allocating the GPU buffer on
     * demand. Aborts the process if the CPU buffer is empty.
     */
    void cpu2Gpu()
    {
        if (ptr_cpu_ == nullptr)
        {
            // ':' separator keeps the location readable (was "fileNNN").
            std::cerr << "ERROR: can't copy null data from CPU to GPU "
                      << __FILE__ << ":" << __LINE__ << std::endl;
            std::exit(EXIT_FAILURE);  // was exit(0): a failure must not report success
        }
        if (ptr_gpu_ == nullptr) allocateGpu();
        cudaCheck(cudaMemcpy(ptr_gpu_, ptr_cpu_, byteCount(), cudaMemcpyHostToDevice));
    }

    /**
     * Copy the GPU buffer to the CPU buffer, allocating the CPU buffer on
     * demand. Aborts the process if the GPU buffer is empty.
     */
    void gpu2Cpu()
    {
        if (ptr_gpu_ == nullptr)
        {
            std::cerr << "ERROR: can't copy null data from GPU to CPU "
                      << __FILE__ << ":" << __LINE__ << std::endl;
            std::exit(EXIT_FAILURE);  // was exit(0): a failure must not report success
        }
        if (ptr_cpu_ == nullptr) allocateCpu();
        cudaCheck(cudaMemcpy(ptr_cpu_, ptr_gpu_, byteCount(), cudaMemcpyDeviceToHost));
    }

private:
    /**
     * Buffer size in bytes, widened to size_t BEFORE the multiplication so
     * large buffers cannot overflow int arithmetic.
     */
    size_t byteCount() const { return static_cast<size_t>(size_) * sizeof(T); }

    /**
     * CPU buffer backing store, implemented with std::vector.
     * Kept as a vector (rather than a raw allocation) so the data can be
     * inspected conveniently in a debugger.
     */
    std::vector<T> vec_;

    /**
     * Raw pointer into vec_'s storage; nullptr while unallocated.
     */
    T* ptr_cpu_;

    /**
     * Raw device pointer from cudaMalloc; nullptr while unallocated.
     */
    T* ptr_gpu_;

    /**
     * Element count, fixed at construction.
     */
    int size_;
};

}