#ifndef __VISION_TENSOR_H__
#define __VISION_TENSOR_H__

#include "common/status.h"
#include "common/device.h"

#include <cassert>
#include <cstring>
#include <memory>
#include <vector>

namespace vision
{

/**
 * @brief Memory layout of tensor data (row/column major, channel ordering).
 */
enum FORMAT {
    MAJOR_ROW_NCHW = 0,  ///< row-major, channels-first
    MAJOR_ROW_NHWC = 1,  ///< row-major, channels-last
    MAJOR_COL_NCWH = 2,  ///< column-major, channels-first
    MAJOR_COL_NHWC = 3   ///< column-major, channels-last
};

/**
 * @brief common type use in this project
 *
 * @tparam TYPE 
 */
template <typename TYPE>
class Tensor
{
public:
    ~Tensor()
    {
        // 如果是自己申请的，才需要释放空间
        if (!m_allocOutside) {
            m_device.Free(m_buff);
        }
    }

    using ptr = std::shared_ptr<Tensor<TYPE>>;
    using mem_ptr = std::shared_ptr<char*>;

    /**
     * @brief tensor的创建，这里只提供这么一个函数来创建Tensor
     * 
     * @param shape shape
     * @param format 格式
     * @param buff 外部输入的数据指针
     * @param device 指定设备，默认为Device
     * @param lazyMode 指定lazy mode，默认为真
     * @return ptr 返回shared_ptr
     */
    static ptr Create(const std::vector<size_t>& shape, const FORMAT format, Device& device,
        char* buff = nullptr, bool lazyMode=true)
    {
        return std::shared_ptr<Tensor<TYPE>>(new Tensor<TYPE>(shape, format, device, lazyMode, buff));
    }

    /**
     * @brief tensor中元素的个数
     * 
     * @param start 开始计算的位置
     * @param end 结束，前闭后开
     * @return size_t 
     */
    size_t Eln(const size_t end=SupportTensorDims, const size_t start=0) const
    {
        assert (m_shape.size() == SupportTensorDims);
        assert (end <= SupportTensorDims and (start >= 0));
        size_t count = 1;
        for (int i = start; i < end; ++i) {
            count *= m_shape[i];
        }
        return count;
    }

    /**
     * @brief Tensor中字节的长度
     * 
     * @param start 同上
     * @param end 
     * @return size_t 
     */
    size_t Bytes(const size_t end=SupportTensorDims, const size_t start=0) const
    {
        size_t cnt = Eln(end, start);
        return cnt * sizeof(TYPE);
    }

    inline const FORMAT Format() const {return m_format;}
    inline const Device& GetDevice() const {return m_device;}
    inline const std::vector<size_t>& Shape() const {return m_shape;}
    /**
     * @brief 申请CPU内存
     * 
     * @return Status 
     */
    Status Alloc()
    {
        size_t bytes = Bytes();
        if (bytes <= 0 or (bytes > MaxMemorySize)) {
            return INPUT_PTR_IS_NULL;
        }

        m_buff = (char*)m_device.Alloc(Bytes());
        if (!m_buff) {
            return MEMORY_ALLOC_FAILED;
        }
        m_allocOutside = false;
        return SUCCESS;
    }

    Status CopyFromAddr(const char* src, const size_t bytes,
        const size_t srcOffset=0, const size_t dstOffset=0)
    {
        if (src == nullptr or (m_buff == nullptr)) {
            return INPUT_PTR_IS_NULL;
        }
        if (dstOffset + bytes > Bytes()) {
            return OUT_OF_RANGE;
        }
        char* dst = m_buff + dstOffset;
        (void)memcpy(dst, src + srcOffset, bytes);
        return SUCCESS;
    }

    Tensor::ptr CloneTo(Device& device)
    {
        Tensor::ptr out = Create(m_shape, m_format, device, m_lazyMode, m_buff);
        if (!out) {
            return nullptr;
        }
        out->Alloc();
        Status ret = m_device.CopyFromTo(device, m_buff, out->Buffer(), Bytes());
        if (ret != SUCCESS) {
            return nullptr;
        }
        return out;
    }

    Tensor::ptr MapTo(const Device& device)
    {
        Tensor::ptr out = Create(m_shape, m_format, device, m_lazyMode, m_buff);
        if (!out) {
            return MEMORY_ALLOC_FAILED;
        }
        // 这里需要注意映射时，原来Tensor智能指针的释放问题
        Status ret = m_device.MapFromTo(device, m_buff, out->Buffer(), Bytes());
        if (ret != SUCCESS) {
            return nullptr;
        }
        
        return out;
    }

protected:
    Tensor(const std::vector<size_t> shape, const FORMAT format,
        Device& device, bool lazyMode=true, char* buff = nullptr): m_device(device)
    {
        m_shape.insert(m_shape.end(), shape.begin(), shape.end());
        m_format = format;
        m_device = device;
        m_lazyMode = lazyMode;
        m_buff = buff;

        // 如果不需要申请空间
        if (m_buff) {
            m_allocOutside = true;
        // 如果需要空间，需要判断是否是lazymode,如果不是，立刻申请空间
        } else if (!lazyMode) {
            Alloc();
        }
    }

private:
    FORMAT m_format = MAJOR_ROW_NCHW;
    Device& m_device;
    bool m_lazyMode = true; // lazy mode, will not alloc in construct function
    bool m_allocOutside = false; // alloc memory outside

    char* m_buff = nullptr;
    std::vector<size_t> m_shape = {};

    static size_t SupportTensorDims;
    static size_t MaxMemorySize;

    inline char* Buffer() const {return m_buff;}

}; // class Tensor

template <typename T> size_t Tensor<T>::SupportTensorDims = 4;
template <typename T> size_t Tensor<T>::MaxMemorySize = 1024 * 1024 * 1024;

} // namespace vision

#endif // __VISION_TENSOR_H__