#include "tensor.h"
#include <cassert>
#include <cstring>
#include <cuda_runtime_api.h>

namespace uzu
{
    // Build a rank-1 tensor of `size` elements, backed by host (CPU) memory.
    Tensor::Tensor(uint32_t size)
    {
        m_DeviceType = DeviceType::CPU;
        m_Shape = { size };
        Alloc(m_Shape);
    }

    // Build a rank-2 tensor of width x height elements, backed by host memory.
    Tensor::Tensor(uint32_t width, uint32_t height)
    {
        m_DeviceType = DeviceType::CPU;
        m_Shape = { width, height };
        Alloc(m_Shape);
    }

    // Build a rank-3 tensor of width x height x channels elements, backed by
    // host memory.
    Tensor::Tensor(uint32_t width, uint32_t height, uint32_t channels)
    {
        m_DeviceType = DeviceType::CPU;
        m_Shape = { width, height, channels };
        Alloc(m_Shape);
    }

    // Release both buffers. `delete[]` on nullptr is a no-op, so the host
    // buffer needs no guard; cudaFree is only issued when a device
    // allocation actually exists.
    Tensor::~Tensor()
    {
        delete[] m_Data;
        if (m_DataGpu != nullptr)
            cudaFree(m_DataGpu);
    }

    // Copy Size() floats from `data` into the tensor's host buffer.
    //
    // The caller must supply at least Size() valid floats. Only the host
    // buffer is written — the device copy (if any) is NOT updated; call
    // ToDevice() afterwards to upload.
    void Tensor::CopyFromHost(float* data)
    {
        uint32_t size = ShapeSize(m_Shape);
        assert(m_Data != nullptr && "host memory is not allocated");
        // memcpy from a null source is undefined behavior — reject it here
        // instead of corrupting memory later.
        assert(data != nullptr && "source pointer must not be null");
        memcpy(m_Data, data, size * sizeof(float));
    }

    // Total number of elements: the product of all shape dimensions.
    // NOTE(review): ShapeSize folds with a multiplicative identity, so an
    // empty shape yields 1 — confirm callers expect that for a
    // default/unsized tensor.
    uint32_t Tensor::Size()
    {
        return ShapeSize(m_Shape);
    }

    // (Re)allocate storage for `shape` on the requested device and record
    // the device type. Be careful: only the buffer for `type` is (re)sized
    // here — host and device buffers are not kept in sync.
    void Tensor::Create(const std::vector<uint32_t>& shape, DeviceType type)
    {
        m_DeviceType = type;
        if (type == DeviceType::CPU)
        {
            Alloc(shape);
        }
        else
        {
            AllocGpu(shape);
        }
    }

    // Migrate the tensor to the GPU: lazily allocate the device buffer and
    // upload the host contents. No-op when already resident on the device.
    //
    // NOTE(review): reuses an existing m_DataGpu without checking its size —
    // Alloc/AllocGpu must keep any existing device buffer sized for the
    // current shape.
    void Tensor::ToDevice()
    {
        if (m_DeviceType == DeviceType::CUDA)
            return;
        assert(m_Data != nullptr && "host memory is not valid");
        uint32_t size = ShapeSize(m_Shape);
        if (m_DataGpu == nullptr)
        {
            // cudaMalloc can fail (e.g. out of device memory); the original
            // ignored the status and would memcpy into a null pointer.
            cudaError_t allocStatus = cudaMalloc((void**)&m_DataGpu, size * sizeof(float));
            (void)allocStatus; // referenced only by assert in release builds
            assert(allocStatus == cudaSuccess && "cudaMalloc failed");
        }
        cudaError_t copyStatus = cudaMemcpy(m_DataGpu, m_Data, size * sizeof(float), cudaMemcpyHostToDevice);
        (void)copyStatus;
        assert(copyStatus == cudaSuccess && "host->device copy failed");
        m_DeviceType = DeviceType::CUDA;
    }

    // Migrate the tensor to the CPU: lazily allocate the host buffer and
    // download the device contents. No-op when already resident on the host.
    //
    // NOTE(review): reuses an existing m_Data without checking its size —
    // Alloc/AllocGpu must keep any existing host buffer sized for the
    // current shape.
    void Tensor::ToHost()
    {
        if (m_DeviceType == DeviceType::CPU)
            return;
        assert(m_DataGpu != nullptr && "device memory is not valid");
        uint32_t size = ShapeSize(m_Shape);
        if (m_Data == nullptr)
            m_Data = new float[size];
        // The original ignored the copy status, silently leaving m_Data
        // stale/uninitialized on failure.
        cudaError_t copyStatus = cudaMemcpy(m_Data, m_DataGpu, size * sizeof(float), cudaMemcpyDeviceToHost);
        (void)copyStatus; // referenced only by assert in release builds
        assert(copyStatus == cudaSuccess && "device->host copy failed");
        m_DeviceType = DeviceType::CPU;
    }

    // Ensure the host buffer can hold `shape` and record the new shape.
    //
    // Fixes two latent bugs in the original:
    //  * a same-element-count reshape (e.g. {2,3} -> {3,2}) left m_Shape
    //    stale, so the tensor reported the old dimensions;
    //  * changing the element count left a wrong-sized device buffer
    //    behind, which ToDevice() would later overflow.
    void Tensor::Alloc(const std::vector<uint32_t>& shape)
    {
        uint32_t size = ShapeSize(shape);
        if (size != Size())
        {
            // Element count changed: both existing buffers are wrong-sized.
            delete[] m_Data;
            m_Data = nullptr;
            if (m_DataGpu != nullptr)
            {
                // Drop the stale device buffer so ToDevice() re-allocates it
                // at the correct size instead of writing past its end.
                cudaFree(m_DataGpu);
                m_DataGpu = nullptr;
            }
        }
        // Allocate the host buffer if it does not exist (fresh tensor or
        // freed above).
        if (m_Data == nullptr)
            m_Data = new float[size];
        // Always record the dims: a same-size reshape must still update
        // m_Shape. Self-assignment is safe (constructors pass m_Shape).
        m_Shape = shape;
    }

    // Ensure the device buffer can hold `shape` and record the new shape.
    //
    // Fixes three latent bugs in the original:
    //  * a same-element-count reshape left m_Shape stale;
    //  * changing the element count left a wrong-sized host buffer behind,
    //    which ToHost() would later overflow;
    //  * cudaMalloc's status was ignored.
    void Tensor::AllocGpu(const std::vector<uint32_t>& shape)
    {
        uint32_t size = ShapeSize(shape);
        if (size != Size())
        {
            // Element count changed: both existing buffers are wrong-sized.
            if (m_DataGpu != nullptr)
            {
                cudaFree(m_DataGpu);
                m_DataGpu = nullptr;
            }
            // Drop the stale host buffer so ToHost() re-allocates it at the
            // correct size instead of writing past its end.
            delete[] m_Data;
            m_Data = nullptr;
        }
        // Allocate the device buffer if it does not exist (fresh tensor or
        // freed above).
        if (m_DataGpu == nullptr)
        {
            cudaError_t allocStatus = cudaMalloc((void**)&m_DataGpu, size * sizeof(float));
            (void)allocStatus; // referenced only by assert in release builds
            assert(allocStatus == cudaSuccess && "cudaMalloc failed");
        }
        // Always record the dims: a same-size reshape must still update
        // m_Shape.
        m_Shape = shape;
    }

    // Product of all dimensions. An empty shape yields 1 (the
    // multiplicative identity), matching the original behavior.
    uint32_t Tensor::ShapeSize(const std::vector<uint32_t>& shape)
    {
        uint32_t count = 1;
        for (size_t i = 0; i < shape.size(); ++i)
            count *= shape[i];
        return count;
    }

}
