#ifndef _MY_TENSOR_HPP_
#define _MY_TENSOR_HPP_

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

#include "my_mixmemory.hpp"

namespace MTsr
{
    // DataHead records which class of device (device/GPU vs. host/CPU) last
    // touched the data. Init follows caffe's lazy-allocation strategy: the
    // tensor has just been constructed and holds no data yet.
    enum class DataHead : int{
        Init   = 0,
        Device = 1,
        Host   = 2
    };

    // Element data types supported by MyTensor; Unknow marks an
    // unrecognized/unset type.
    enum class DataType : int {
        Unknow = -1,
        Float32 = 0,
        UInt8 = 1
    };

    // Byte size of one element of the given DataType (the sizeof equivalent).
    //
    // NOTE(review): `static` on namespace-scope function declarations in a
    // header gives each including translation unit its own internal-linkage
    // declaration — any TU that includes this header without defining them
    // gets "declared static but never defined" warnings/errors. Consider
    // declaring them without `static` (with one definition in a .cpp) or
    // making them `inline` with bodies here — confirm against the .cpp first.
    static int data_type_size(DataType dt);
    // Human-readable names for the DataHead / DataType enumerators.
    static const char* data_head_string(DataHead dh);
    static const char* data_type_string(DataType dt);

    class MyTensor : public MMem::MixMemory
    {
    public:
        MyTensor(int device_id, DataType dtype);
        MyTensor(int device_id, DataType dtype, int n, int c, int h, int w, bool default_gpu = true);
        MyTensor(int device_id, DataType dtype, const std::vector<int>& dims, bool default_gpu = true);
        virtual ~MyTensor();

        void release_tensor();
        void new_dims_mem(int n, int c, int h, int w, bool default_gpu = true);
        void new_dims_mem(const std::vector<int>& dims, bool default_gpu = true);

        int numel() const;//shape_各个值相乘
        inline int ndims()          const{return shape_.size();}
        inline int size(int index)  const{return shape_[index];}
        inline int shape(int index) const{return shape_[index];}

        //针对四维张量的函数
        inline int batch()   const{return shape_[0];}
        inline int channel() const{return shape_[1];}
        inline int height()  const{return shape_[2];}
        inline int width()   const{return shape_[3];}

        inline DataType type()                      const { return dtype_; }
        inline const std::vector<int>& dims()       const { return shape_; }
        inline DataHead head()                      const { return head_; }

        //对某个特定的索引位置来说计算偏移位置，如Tensor.offset(x1, x2, x3, x4)
        template<typename ... _Args>
        int offset(int index, _Args ... index_args) const{
            const int index_array[] = {index, index_args...};
            return offset_array(sizeof...(index_args) + 1, index_array);
        }
        int offset_array(const std::vector<int>& index) const;

        template<typename DType, typename ... _Args> 
        inline DType& cpu_at(int i, _Args&& ... args) { return *((DType*)cpu() + offset(i, args...)); }

        template<typename DType, typename ... _Args> 
        inline DType& gpu_at(int i, _Args&& ... args) { return *((DType*)gpu() + offset(i, args...)); }
    private:
        void new_mem(bool default_gpu);
        int offset_array(size_t size, const int* index_array) const;

    private:
        //数据最新一次使用是在哪类设备
        DataHead head_   = DataHead::Init;
        std::vector<int> shape_;
        DataType dtype_  = DataType::Float32;
    };
}

#endif