#ifndef _DATA_H_
#define _DATA_H_

#include "common.h"

#include <cstdint>
#include <string>
#include <vector>

namespace sp{

/**
 * @brief Supported Data payload types: UNKNOW, HEAD, MAT, RECT, TENSOR
 *        (plus GPU/batched tensor and string variants — see the union below).
 */
class Data{
private:
    // Non-copyable: a Data carries a manual reference count and an intrusive
    // `next` link, so an implicit copy would corrupt ownership bookkeeping.
    Data(const Data&) = delete;
    Data& operator= (const Data&) = delete;
public:
    Data();
    // --- Primary constructors: caller supplies routing ids explicitly. ---
    // Every Data is tagged with (app_id, flow_id, request_id); the remaining
    // arguments select and fill one variant of the `context` union below.
    Data(short app_id, short flow_id, short request_id, uint8_t type);
    Data(short app_id, short flow_id, short request_id, int channels, int rows, int cols, unsigned char* data);
    Data(short app_id, short flow_id, short request_id, float x, float y, float width, float height);
    Data(short app_id, short flow_id, short request_id, int C, int H, int W, float* data);
    Data(short app_id, short flow_id, short request_id, int C, int H, int W, short stream_id, short block_id);
    Data(short app_id, short flow_id, short request_id, int B, int C, int H, int W, float* data, Data* tensor_head);
    // NOTE(review): fixed former parameter-name typo `flow_is` -> `flow_id`.
    Data(short app_id, short flow_id, short request_id, int B, int C, int H, int W, short stream_id, short block_id, Data* tensor_head);
    Data(short app_id, short flow_id, short request_id, const char* str);
    // Shallow copy: shares payload pointers with `da_in` (no deep copy of data).
    Data(Data* da_in);
    // --- Derived constructors: routing ids are taken from `da_in`. ---
    Data(Data* da_in, uint8_t type);
    Data(Data* da_in, int channels, int rows, int cols, unsigned char* data);       // MAT
    Data(Data* da_in, float x, float y, float width, float height);                 // RECT
    Data(Data* da_in, int C, int H, int W, float* data);                            // TENSOR
    Data(Data* da_in, int C, int H, int W, short stream_id, short block_id);        // GPU_TENSOR
    Data(Data* da_in, int B, int C, int H, int W, float* data, std::vector<char*> ptr_vec);                     // BATCH_TENSOR
    Data(Data* da_in, int B, int C, int H, int W, short stream_id, short block_id, std::vector<char*> ptr_vec); // BATCH_GPU_TENSOR
    Data(std::vector<Data*> da_ins, int B, int C, int H, int W, float* data, std::vector<char*> ptr_vec);                     // vec -> BATCH_TENSOR
    Data(std::vector<Data*> da_ins, int B, int C, int H, int W, short stream_id, short block_id, std::vector<char*> ptr_vec); // vec -> BATCH_GPU_TENSOR
    Data(Data* da_in, const char* str); // STRING
    ~Data();
    // --- Re-initialize an existing Data in place (payload setters). ---
    void set(int channels, int rows, int cols, unsigned char* data);
    void set(float x, float y, float width, float height);
    void set(int C, int H, int W, float* data);
    void set(int C, int H, int W, short stream_id, short block_id);
    void set(int B, int C, int H, int W, float* data, Data* tensor_head);
    void set(int B, int C, int H, int W, short stream_id, short block_id, Data* tensor_head);
    void set(const char* str);
    // --- Routing-id accessors. ---
    short getAppId();
    short getFlowId();
    short getRequestId();
    // STRING variant: copy out the stored string.
    std::string getStr();
    // --- Manual reference counting for shared payload storage. ---
    void setRefCount(int ref_count);
    int subRefCount();   // decrement and return the new count
    int getRefCount();
    // --- HEAD variant: length of, and access into, the `next`-linked list. ---
    int getLength();
    Data* getItem(int i);
    // --- BATCH TENSOR variant: access the per-sample tensor list. ---
    int getTensorLength();
    Data* getTensorHead();
    Data* getTensorItem(int i);
    // --- Public state. ---
    short app_id;       // which app this data belongs to
    short flow_id;      // which flow this data belongs to
    short request_id;   // which request (ordinal) this data belongs to
    uint8_t type;       // discriminator selecting the active union member below
    int ref_count;      // manual reference count (see setRefCount/subRefCount)
    Data* next;         // intrusive singly-linked list pointer (HEAD lists)
    // Tagged union of all payload variants; `type` tells which member is live.
    union{
        struct{
            int channels;
            int rows;
            int cols;
            unsigned char* data;    // non-owning? lifetime governed by ref_count — TODO confirm
        } mat;
        struct{
            float x;
            float y;
            float width;
            float height;
        } rect;
        struct{
            int C;
            int H;
            int W;
            float* data;
        } tensor;
        struct{
            char str[20];           // fixed-size inline buffer; getStr() copies out
        } string;
        struct{
            int B;
            int C;
            int H;
            int W;
            float* data;
            Data* tensor_head;      // keeps the original head of each per-sample tensor
        } batch_tensor;
        struct{
            int C;
            int H;
            int W;
            short stream_id;        // GPU stream the tensor lives on — presumably; verify against definitions
            short block_id;
        } gpu_tensor;
        struct{
            int B;
            int C;
            int H;
            int W;
            short stream_id;
            short block_id;
            Data* tensor_head;
        } batch_gpu_tensor;
    } context;
};

};

#endif