#include "my_mixmemory.hpp"
#include "cuda_tools.hpp"
#include <string.h> //cpu mem func

namespace MMem
{
    // Default-construct bound to device 0 (no validation of the id is done here).
    MixMemory::MixMemory()
    {
        device_id_ = 0;
    }

    // Construct bound to a specific GPU; CUDATools::get_device() resolves the
    // requested id into the device id actually stored.
    MixMemory::MixMemory(int device_id)
    {
        device_id_ = CUDATools::get_device(device_id);
    }

    // Release both host and device buffers on destruction.
    MixMemory::~MixMemory()
    {
        release_all();
    }

    // Rebind this object to another GPU. Note that this only updates the stored
    // id; any buffer already allocated on the previous device is not moved or freed.
    void MixMemory::set_device(int device_id)
    {
        device_id_ = CUDATools::get_device(device_id);
    }

    // Free the pinned host buffer and the device buffer.
    void MixMemory::release_all()
    {
        release_cpu();
        release_gpu();
    }

    // Free the pinned host buffer (if any) and reset the recorded size to 0.
    void MixMemory::release_cpu()
    {
        if (cpu_ != nullptr)
        {
            CUDATools::AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(cudaFreeHost(cpu_));
            cpu_ = nullptr;
        }
        cpu_size_ = 0;
    }

    // Free the device buffer (if any) and reset the recorded size to 0.
    // NOTE(review): new_gpu_mem may allocate with cudaMallocAsync when a stream
    // is attached; freeing such a pointer with cudaFree relies on the
    // stream-ordered allocator accepting it (CUDA 11.2+) — confirm against the
    // CUDA runtime documentation.
    void MixMemory::release_gpu()
    {
        if (gpu_ != nullptr)
        {
            CUDATools::AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(cudaFree(gpu_));
            gpu_ = nullptr;
        }
        gpu_size_ = 0;
    }

    // Attach an externally owned stream; subsequent allocations, copies and
    // synchronize() become asynchronous on it. Only the raw pointer is stored,
    // so the stream object must outlive this MixMemory.
    void MixMemory::set_custream(cudaStream_t* stream)
    {
        CUDATools::AutoDevice auto_device_exchange(device_id_);
        custream_ptr_ = stream;
    }

    // Block the host until all work queued on the attached stream has finished.
    // No-op when no stream has been attached via set_custream().
    void MixMemory::synchronize()
    {
        if (custream_ptr_ != nullptr)
        {
            CUDATools::AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(cudaStreamSynchronize(*custream_ptr_));
        }
    }

    // Ensure the device buffer holds at least `size` bytes.
    // Memory is reused: a new allocation happens only when the current buffer
    // is too small. With a stream attached, the stream-ordered allocator
    // (cudaMallocAsync) is used, so the memory is only valid in that stream's
    // order; otherwise a plain synchronous cudaMalloc is issued.
    void MixMemory::new_gpu_mem(size_t size)
    {
        if (this->gpu_size_ < size)
        {
            this->release_gpu();
            CUDATools::AutoDevice auto_device_exchange(this->device_id_);
            if (!this->custream_ptr_)
            {
                checkCudaRuntime(cudaMalloc(&this->gpu_, size));
                // checkCudaRuntime(cudaMemset(this->gpu_, 0, size));
            }
            else
            {
                checkCudaRuntime(cudaMallocAsync(&this->gpu_, size, *this->custream_ptr_));
            }
            // Record the new capacity only AFTER the allocation succeeded, so a
            // failing/throwing checkCudaRuntime cannot leave gpu_size_ > 0
            // while gpu_ is still nullptr.
            this->gpu_size_ = size;
        }
    }

    // Ensure the pinned host buffer holds at least `size` bytes.
    // Memory is reused: a new allocation happens only when the current buffer
    // is too small. cudaMallocHost gives page-locked memory so async
    // device<->host copies on a stream are legal.
    void MixMemory::new_cpu_mem(size_t size)
    {
        if (this->cpu_size_ < size)
        {
            this->release_cpu();
            CUDATools::AutoDevice auto_device_exchange(this->device_id_);
            checkCudaRuntime(cudaMallocHost(&this->cpu_, size));
            Assert(this->cpu_ != nullptr);
            // memset(this->cpu_, 0, size);
            // Record the new capacity only AFTER the allocation succeeded, so a
            // failing/throwing checkCudaRuntime cannot leave cpu_size_ > 0
            // while cpu_ is still nullptr.
            this->cpu_size_ = size;
        }
    }

    // Validate that [offset_byte, offset_byte + num_byte) lies inside the host
    // buffer. This only LOGS a violation and returns — callers must not rely on
    // it aborting the copy.
    void MixMemory::check_copy_feasible_cpu(size_t offset_byte, size_t num_byte)
    {
        if (offset_byte >= this->cpu_size_)
        {
            // Cast to long long: passing size_t straight through %lld is
            // undefined behavior on platforms where the two types differ.
            INFOE("Offset location[%lld] >= bytes_[%lld], out of range",
                  (long long)offset_byte, (long long)this->cpu_size_);
            return;
        }

        size_t remain_bytes = this->cpu_size_ - offset_byte;
        if (num_byte > remain_bytes)
        {
            INFOE("Copyed bytes[%lld] > remain bytes[%lld], out of range",
                  (long long)num_byte, (long long)remain_bytes);
            return;
        }
    }

    // Validate that [offset_byte, offset_byte + num_byte) lies inside the
    // device buffer. This only LOGS a violation and returns — callers must not
    // rely on it aborting the copy.
    void MixMemory::check_copy_feasible_gpu(size_t offset_byte, size_t num_byte)
    {
        if (offset_byte >= this->gpu_size_)
        {
            // Cast to long long: passing size_t straight through %lld is
            // undefined behavior on platforms where the two types differ.
            INFOE("Offset location[%lld] >= bytes_[%lld], out of range",
                  (long long)offset_byte, (long long)this->gpu_size_);
            return;
        }

        size_t remain_bytes = this->gpu_size_ - offset_byte;
        if (num_byte > remain_bytes)
        {
            INFOE("Copyed bytes[%lld] > remain bytes[%lld], out of range",
                  (long long)num_byte, (long long)remain_bytes);
            return;
        }
    }

    // Copy num_byte bytes from host memory `src` into the pinned host buffer at
    // offset_byte.
    // A plain memcpy is used (not cudaMemcpyAsync with HostToHost) because the
    // caller's `src` is not guaranteed to be page-locked memory allocated via
    // cudaMallocHost.
    void MixMemory::copy_mem_cpu2cpu(size_t offset_byte, const void* src, size_t num_byte)
    {
        this->check_copy_feasible_cpu(offset_byte, num_byte);
        // check_copy_feasible_cpu only logs — re-test here so an out-of-range
        // request cannot corrupt memory past the buffer.
        if (offset_byte >= this->cpu_size_ || num_byte > this->cpu_size_ - offset_byte)
            return;
        memcpy(this->cpu_ + offset_byte, src, num_byte);
    }

    // Copy num_byte bytes from device pointer `src` (resident on `device_id`)
    // into this object's device buffer at offset_byte.
    // A peer copy is issued when the source lives on a different GPU; the
    // async variants are used when a stream has been attached.
    void MixMemory::copy_mem_gpu2gpu(size_t offset_byte, const void* src, size_t num_byte, int device_id)
    {
        this->check_copy_feasible_gpu(offset_byte, num_byte);
        // check_copy_feasible_gpu only logs — re-test here so an out-of-range
        // request cannot write past the buffer.
        if (offset_byte >= this->gpu_size_ || num_byte > this->gpu_size_ - offset_byte)
            return;

        CUDATools::AutoDevice auto_device_exchange(this->device_id_);
        int src_device_id = CUDATools::get_device(device_id);
        if (src_device_id != this->device_id_)
        {
            // Cross-device transfer: let the runtime route it peer-to-peer.
            if (!this->custream_ptr_)
            {
                checkCudaRuntime(cudaMemcpyPeer(this->gpu_ + offset_byte, this->device_id_, src, src_device_id, num_byte));
            }
            else
            {
                checkCudaRuntime(cudaMemcpyPeerAsync(this->gpu_ + offset_byte, this->device_id_, src, src_device_id, num_byte, *this->custream_ptr_));
            }
        }
        else
        {
            if (!this->custream_ptr_)
            {
                checkCudaRuntime(cudaMemcpy(this->gpu_ + offset_byte, src, num_byte, cudaMemcpyDeviceToDevice));
            }
            else
            {
                checkCudaRuntime(cudaMemcpyAsync(this->gpu_ + offset_byte, src, num_byte, cudaMemcpyDeviceToDevice, *this->custream_ptr_));
            }
        }
    }

    // Copy num_byte bytes from host memory `src` into the device buffer at
    // offset_byte. Uses an async copy when a stream is attached.
    // `device_id` is currently unused (the source is host memory); it is kept
    // to match the signatures of the other copy_mem_* methods.
    void MixMemory::copy_mem_cpu2gpu(size_t offset_byte, const void* src, size_t num_byte, int device_id)
    {
        this->check_copy_feasible_gpu(offset_byte, num_byte);
        // check_copy_feasible_gpu only logs — re-test here so an out-of-range
        // request cannot write past the buffer.
        if (offset_byte >= this->gpu_size_ || num_byte > this->gpu_size_ - offset_byte)
            return;

        CUDATools::AutoDevice auto_device_exchange(this->device_id_);
        if (!this->custream_ptr_)
        {
            checkCudaRuntime(cudaMemcpy(this->gpu_ + offset_byte, src, num_byte, cudaMemcpyHostToDevice));
        }
        else
        {
            // NOTE(review): async H2D is only truly asynchronous when `src` is
            // pinned memory — confirm callers' buffers if overlap matters.
            checkCudaRuntime(cudaMemcpyAsync(this->gpu_ + offset_byte, src, num_byte, cudaMemcpyHostToDevice, *this->custream_ptr_));
        }
    }

    // Copy num_byte bytes from device pointer `src` into the pinned host buffer
    // at offset_byte. The destination is cudaMallocHost memory (see
    // new_cpu_mem), so the async D2H variant is legal when a stream is attached.
    // `device_id` is currently unused; it is kept to match the signatures of
    // the other copy_mem_* methods.
    void MixMemory::copy_mem_gpu2cpu(size_t offset_byte, const void* src, size_t num_byte, int device_id)
    {
        this->check_copy_feasible_cpu(offset_byte, num_byte);
        // check_copy_feasible_cpu only logs — re-test here so an out-of-range
        // request cannot write past the buffer.
        if (offset_byte >= this->cpu_size_ || num_byte > this->cpu_size_ - offset_byte)
            return;

        CUDATools::AutoDevice auto_device_exchange(this->device_id_);
        if (!this->custream_ptr_)
        {
            checkCudaRuntime(cudaMemcpy(this->cpu_ + offset_byte, src, num_byte, cudaMemcpyDeviceToHost));
        }
        else
        {
            checkCudaRuntime(cudaMemcpyAsync(this->cpu_ + offset_byte, src, num_byte, cudaMemcpyDeviceToHost, *this->custream_ptr_));
        }
    }

    // Mirror the device buffer into the pinned host buffer, growing the host
    // buffer as needed. No-op when no device buffer exists. When a stream is
    // attached the underlying copy is asynchronous — call synchronize() before
    // reading the host buffer.
    void MixMemory::to_cpu()
    {
        if (this->gpu_ == nullptr)
            return;

        CUDATools::AutoDevice auto_device_exchange(this->device_id_);
        this->new_cpu_mem(this->gpu_size_);
        this->copy_mem_gpu2cpu(0, this->gpu_, this->gpu_size_, this->device_id_);
    }

    // Mirror the pinned host buffer into the device buffer, growing the device
    // buffer as needed. No-op when no host buffer exists. When a stream is
    // attached the underlying copy is asynchronous on that stream.
    void MixMemory::to_gpu()
    {
        if (this->cpu_ == nullptr)
            return;

        CUDATools::AutoDevice auto_device_exchange(this->device_id_);
        this->new_gpu_mem(this->cpu_size_);
        this->copy_mem_cpu2gpu(0, this->cpu_, this->cpu_size_, this->device_id_);
    }
}
