#ifndef __LLM_CORE_TENSOR_H__
#define __LLM_CORE_TENSOR_H__

#include "core/device.h"

#include <cstddef>
#include <cstdlib>
#include <memory>
#include <vector>

namespace llm_core
{
/**
 * @brief Base tensor descriptor: device binding, layout, data type and shape.
 *
 * Concrete backends (e.g. CPUTensor) derive from this class and add the
 * actual storage management.
 */
class Tensor
{
public:
    using Shape = std::vector<size_t>;

    /// Dimension ordering of the tensor payload.
    enum Layout {
        LAYOUT_NCHW,
        LAYOUT_NHWC
    };

    /// Element data types supported by the runtime.
    enum DataType {
        DATA_TYPE_FP32 = 0,
        DATA_TYPE_FP16,
        DATA_TYPE_INT8,
        DATA_TYPE_INT16,
        DATA_TYPE_INT32,
        DATA_TYPE_UINT8,
        DATA_TYPE_UINT16,
        DATA_TYPE_UINT32,

        DATA_TYPE_COUNT
    };

    /// Descriptor shared by all tensor backends.
    struct Params {
        Layout m_layout = LAYOUT_NCHW;
        DataType m_data_type = DATA_TYPE_FP16;
        Shape m_dims;

        size_t alloc_bytes = 0;  // bytes requested for the backing buffer
    };

    // NOTE: the device is stored BY VALUE. The previous const-reference
    // member dangled whenever the defaulted temporary Device() was bound
    // to it (lifetime extension does not apply to reference members).
    Tensor(const Device& device = Device()): m_device(device) {}

    /// Construct a descriptor for the given shape/layout/type.
    /// (The previous version silently discarded all of these arguments.)
    Tensor(const Shape& shape, const Device& device = Device(),
        const Layout& layout = LAYOUT_NCHW, const DataType& data_type = DATA_TYPE_FP16):
        m_device(device)
    {
        m_params.m_layout = layout;
        m_params.m_data_type = data_type;
        m_params.m_dims = shape;
    }

    // Virtual: CPUTensor derives from Tensor, so deleting a derived object
    // through a Tensor* must invoke the derived destructor.
    virtual ~Tensor() {}

    /// Size in bytes of one element; defined out of line (depends on m_data_type).
    size_t GetElnBytes();

    /// Total payload size in bytes.
    inline size_t GetBytes() { return GetElnBytes() * GetElnCount(); }

    /// Number of elements from dimension `dim` (inclusive) to the last one.
    /// Returns 1 for an empty shape (scalar convention).
    inline size_t GetElnCount(size_t dim = 0) const
    {
        size_t count = 1;
        const auto& dims = m_params.m_dims;
        for (size_t i = dim; i < dims.size(); ++i) {
            count *= dims[i];
        }
        return count;
    }

protected:
    Device m_device;               // owned copy; safe with temporary defaults
    size_t* m_ref_count = nullptr; // shared refcount; nullptr until set up
    Params m_params{};
    static size_t MAX_ALLOC_BYTES; // upper bound for a single allocation
}; // class Tensor

class CPUTensor: public Tensor
{
public:
    CPUTensor() {}

    struct CPUParams: public Params {
        char* data;
        size_t align_bytes;
    };

    CPUTensor(const Shape& shape, const Device& device=Device(),
        const Layout& layout=LAYOUT_NCHW, const DataType& data_type=DATA_TYPE_FP16)
    {
    }
    ~CPUTensor() {}

    Status FastAlloc()
    {
        if(m_params.data) {
            return LLM_REALLOC_BUFFER;
        }

        CPUParams& params = m_params;
        size_t bytes = params.alloc_bytes;
        if (bytes > Tensor::MAX_ALLOC_BYTES) {
            return LLM_OUT_OF_SIZE;
        }
        // 保存对齐前的指针
        bytes += sizeof(void*);
        // 内存对齐
        bytes += params.align_bytes;
        // 申请内存
        char* buff = (char*)malloc(bytes);
        if (buff == NULL) {
            return LLM_MALLOC_FAILED;
        }

        char** tmp = alignPtr((char**)tmp + 1, params.align_bytes);
        tmp[-1] = buff;
        params.data = (char*)tmp;
        return LLM_SUCCESS;
    }

    Status FastFree()
    {
        CPUParams& params = m_params;
        if (params.data == NULL) {
            return LLM_INVALID_PARAM;
        }
        if (!(params.data)) {
            return LLM_INVALID_PARAM;
        }
        char* buff = ((char**)(params.data))[-1];
        free(buff);
        params.data = nullptr;
        return LLM_SUCCESS;
    }

private:
    CPUParams m_params;
}; // class CPUTensor


/**
 * @brief ref no allowed to change
 * 
 */
class TensorRef
{
public:
    using Origin = std::vector<size_t>;

    TensorRef(const Tensor& tensor, const Origin& origin, const Tensor::Shape& shape): 
        m_tensor(tensor), m_shape(shape.begin(),
        shape.end()), m_origin(origin.begin(), origin.end()) {}
    ~TensorRef() {}

private:
    const Tensor& m_tensor;
    Tensor::Shape m_shape;
    Origin m_origin;
}; // class TensorRef


} // namespace llm_core


#endif // __LLM_CORE_TENSOR_H__