#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION

#include <ctime>
#include <vector>
#include <string>
#include <memory>
#include <cassert>
#include <functional>
#include <sys/time.h>
#include <opencv2/opencv.hpp>
#include <numpy/arrayobject.h> 
#include "data.h"
#include "common.h"
#include "function.h"
#include "CvxText.h"
#include "util.h"

namespace sp{

// Initialize the numpy C-API for the current interpreter (imports the
// numpy package). With Python 2.7 import_array() expands for a void
// function; with Python 3+ it contains `return NULL;` on failure, so the
// enclosing function must have a pointer return type.
// Fix: the original fell off the end of a non-void function on success,
// which is undefined behavior — return nullptr explicitly.
int* init_numpy(){
    import_array();
    return nullptr;
}

/**
 * @brief Construct a new Function:: Function object
 * 
 */

// Monotonically increasing id handed to each Function at construction.
static short ID = 0;
// Registry: class name -> factory taking string params (used by Create(name, params)).
std::map<std::string, std::function<Function*(std::vector<std::string>)>> Function::classes_map;
// Registry: class name -> parameterless factory (used by Create(name)).
std::map<std::string, std::function<Function*()>> Function::classes_map2;

// Default-construct a Function: take the next global id and reset every
// flag and counter to its inactive state.
Function::Function(){
    id = ID++;
    name = "Undefined";
    type = NORMAL;
    batch_size = 1;
    cnt = 0;
    is_end = false;
    executor = nullptr;
    Python_Function = false;
    Gpu_Function = false;
    Request_Group_Function = false;
    Flow_Split_Function = false;
    Separate_Function = false;
}

Function::~Function(){
    // This function has finished: push one DATA_END marker per parallel
    // consumer into every outgoing queue so downstream functions terminate.
    for(auto q : next_Qs){
        for(int i = 0; i < MAX_PARALLEL; i++)
            q->enqueue(DATA_END);
    }
    // FIXME: freeing the incoming queues here can double-free when several
    // functions share the same predecessor (only one queue instance exists
    // per edge in the system, so each process only frees its pre-side Qs).
    for(auto i : pre_Qs){
        // Latent double-free with multiple instances; a per-queue reference
        // count (free when it drops to 0) would fix this.
        delete i;
    }
    std::cout << name << " function ended." << std::endl;
}

// Display name of this function ("Undefined" until a subclass sets it).
std::string Function::getName(){
    return name;
}

// Registered class name used by the factory maps.
std::string Function::getClassName(){
    return class_name;
}

// Number of times process() has run (incremented in handle()).
int Function::getCnt(){
    return cnt;
}

short Function::getId(){
    return id;
}

void Function::setId(short id){
    this->id = id;
}

// Number of input groups gathered per process() call.
void Function::setBatchSize(int batch_size){
    this->batch_size = batch_size;
}

// Replace both queue sets at once (vectors are copied; queues themselves
// are shared pointers owned per the FIXME in the destructor).
void Function::bindQueues(std::vector<BlockingQueue<Data*>*> pre_Qs, std::vector<BlockingQueue<Data*>*> next_Qs){
    this->pre_Qs = pre_Qs;
    this->next_Qs = next_Qs;
}

// Executor supplying memory, profiling and GPU resources (not owned).
void Function::bindExecutor(Executor* executor){
    this->executor = executor;
}

void Function::addToPreQs(BlockingQueue<Data*>* q){
    pre_Qs.push_back(q);
}

void Function::addToNextQs(BlockingQueue<Data*>* q){
    next_Qs.push_back(q);
}

void Function::clearPreQs(){
    pre_Qs.clear();
}

void Function::clearNextQs(){
    next_Qs.clear();
}

std::vector<BlockingQueue<Data*>*> Function::getPreQs(){
    return pre_Qs;
}

std::vector<BlockingQueue<Data*>*> Function::getNextQs(){
    return next_Qs;
}

void Function::setPreQs(std::vector<BlockingQueue<Data*>*> pre_Qs){
    this->pre_Qs = pre_Qs;
}

void Function::setNextQs(std::vector<BlockingQueue<Data*>*> next_Qs){
    this->next_Qs = next_Qs;
}

// Gather one batch of inputs from the incoming queues into `data`.
// A "group" is one Data* taken from every queue in pre_Qs; a batch is
// `batch_size` groups. Returns true with `data` filled when a full batch
// is ready, or when an upstream DATA_END terminates the stream early
// (partial batch). Returns false when inputs or resources are not ready —
// partial progress is kept in recv_buffer across calls.
bool Function::recvFromQueues(std::vector<std::vector<Data*>>& data){
    if(pre_Qs.size() == 0)
        return true;
    // Resume filling recv_buffer from where the previous call stopped.
    // NOTE(review): `i` starts at recv_buffer.size()-1, i.e. -1 when the
    // buffer is empty (handled by the i == -1 branch below).
    for(int i = recv_buffer.size()-1; i < batch_size; i++){
        if(is_end == true){ // stream already terminated
            if(!waitForResource()) // resources not ready yet
                return false;
            assert(recv_buffer.size() >= i);
            // Hand over the first i complete groups.
            data.assign(recv_buffer.begin(), recv_buffer.begin() + i);
            recv_buffer.clear();
            return true;
        }
        if(i == -1){ // recv_buffer is empty: open the first group
            std::vector<Data*> cur;
            recv_buffer.push_back(cur);
            continue;
        }
        assert(i >= 0 && i < recv_buffer.size());
        // Only take a group once EVERY queue can contribute.
        for(int j = recv_buffer[i].size(); j < pre_Qs.size(); j++){ // first pass: ensure each queue's front is a usable (non-EMPTY) element
            if(pre_Qs[j]->empty())  // queue currently empty: report not ready
                return false;
            while(!pre_Qs[j]->empty()){
                if(pre_Qs[j]->front_value() == DATA_END){
                    break;
                }else if(pre_Qs[j]->front_value()->type == EMPTY){
                    if(Request_Group_Function){
                        break; // group functions treat EMPTY as a normal element
                    }else{
                        Data* data_ptr = nullptr;
                        pre_Qs[j]->dequeue(data_ptr);
                        // Forward EMPTY markers straight to the first downstream queue.
                        assert(next_Qs.size() > 0);
                        data_ptr->setRefCount(1);
                        next_Qs[0]->wait_enqueue(data_ptr);
                    }
                }else{
                    break;
                }
            }
            if(pre_Qs[j]->empty())  // drained while skipping EMPTYs: not ready
                return false;
        }
        // Second pass: actually dequeue one element from each queue into group i.
        for(int j = recv_buffer[i].size(); j < pre_Qs.size(); j++){
            Data* data_ptr = nullptr;
            assert(j >= 0 && j < pre_Qs.size());
            // Checked above, so the dequeue should always succeed here.
            if(!pre_Qs[j]->dequeue(data_ptr)){
                return false;
            }else{
                // Upstream finished: flush the complete groups gathered so far.
                if(data_ptr == DATA_END){
                    is_end = true;
                    if(!waitForResource())
                        return false;
                    // Hand the first i complete rows of recv_buffer to the caller.
                    assert(recv_buffer.size() >= i);
                    data.assign(recv_buffer.begin(), recv_buffer.begin() + i);
                    recv_buffer.clear();
                    return true;
                }else{ // normal element: append to the current group
                    recv_buffer[i].push_back(data_ptr);
                }
            }
        }
        if(i < batch_size - 1){
            // Open the next group for the following iteration.
            std::vector<Data*> cur;
            recv_buffer.push_back(cur);
        }
    }
    // Full batch gathered: wait for resources, then hand it all over.
    if(!waitForResource())
        return false;
    assert(recv_buffer.size() == batch_size);
    data.assign(recv_buffer.begin(), recv_buffer.end());
    recv_buffer.clear();
    return true;
}

// Dispatch processed Data to the outgoing queues.
// DATA_END markers are broadcast to every queue; Flow_Split functions route
// each item to the queue selected by its flow id; otherwise every item is
// shared with all downstream queues (ref count = number of queues).
void Function::sendToQueues(std::vector<Data*>& data){
    if(next_Qs.size() == 0)
        return;
    for(int i = 0; i < data.size(); i++){
        if(data[i] == DATA_END){
            // Broadcast the end marker to every successor Function.
            for(auto q : next_Qs)
                q->wait_enqueue(data[i]);
        }else if(Flow_Split_Function){
            // Route to exactly one queue chosen by the data's flow id.
            data[i]->setRefCount(1);
            short flow_id = data[i]->getFlowId();
            assert(flow_id >= 0 && flow_id < next_Qs.size());
            next_Qs[flow_id]->wait_enqueue(data[i]);
        }else{
            // Replicate to every queue; each consumer drops one reference.
            data[i]->setRefCount(next_Qs.size());
            for(auto q : next_Qs)
                q->wait_enqueue(data[i]);
        }
    }
}

// Run one processing step: synthesize an input for head functions, record
// profiling timestamps around process(), free outputs for tail functions,
// then release any per-step resources.
void Function::handle(std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output){
    // Head functions have no upstream: feed an empty Data into process().
    if(isHeadFunction()){
        char* ptr = executor->malloc(Data_Memory);
        Data* data = new (ptr)Data();
        std::vector<Data*> v;
        v.push_back(data);
        data_input.push_back(v);
    }
    struct timeval t;
    // Profiling timestamp before processing.
    gettimeofday(&t, NULL);
    short data_id = -1;
    if(PROFILE && executor->getProfile() != nullptr){
        assert(data_input.size() > 0);
        if(data_input[0][0] != DATA_END){
            data_id = data_input[0][0]->request_id;
            executor->getProfile()->update_data_lifecycle_detail_map(data_id, executor->getId(), "before " + std::to_string(id) + "-" + name, t);
        }
    }
    // The subclass-specific work.
    this->process(data_input, data_output);
    // Bump the invocation counter.
    cnt++;
    // Profiling timestamp after processing.
    gettimeofday(&t, NULL);
    if(data_id != -1){
        executor->getProfile()->update_data_lifecycle_detail_map(data_id, executor->getId(), "after " + std::to_string(id) + "-" + name, t);
    }
    // Tail functions own their outputs: release the Data memory here.
    if(isTailFunction()){
        for(int i = 0; i < data_output.size(); i++){
            assert(data_output[i]->type == UNKNOW);
            executor->free((char*)(data_output[i]));
        }
    }
    // Release resources reserved by waitForResource().
    releaseResource();
}

// Fallback for the parameterless process(): subclasses that run as
// separate functions must override it; reaching this is a programming error.
// Fix: the original had no return statement, which is UB for a bool
// function when asserts are compiled out (NDEBUG).
bool Function::process(){
    std::cout << name << " do not provide process(no parameters) method." << std::endl;
    assert(false);
    return false; // unreachable with asserts enabled
}

// Copy the negotiated data-type selection from another instance.
void Function::defaultCopy(Function* other){
    this->data_type_current = other->data_type_current;
}

// --- simple state predicates -------------------------------------------

// True when this function runs embedded Python code.
bool Function::isPythonFunction(){
    return Python_Function;
}

// True when this function uses GPU resources.
bool Function::isGpuFunction(){
    return Gpu_Function;
}

// A head function has no incoming queues: it generates data itself.
bool Function::isHeadFunction(){
    return pre_Qs.empty();
}

// A tail function has no outgoing queues: it consumes data.
bool Function::isTailFunction(){
    return next_Qs.empty();
}

// True when this function drives itself via run() instead of the pipeline.
bool Function::isSeparateFunction(){
    return Separate_Function;
}

// True once an upstream DATA_END has been observed.
bool Function::isEnd(){
    return is_end;
}

// Print every supported input-type combination and its output type.
// Fix: iterate data_type_support by const reference — the old value loop
// copied a pair<vector<uint8_t>, uint8_t> on every iteration.
void Function::printSupportTypeMsg(){
    std::cout << name << " support: " << std::endl;
    for(const auto& i : data_type_support){
        std::cout << "    ";
        for(auto j : i.first){
            std::cout << Util::DataTypeToString(j) << " ";
        }
        // UNKNOW output means "pass-through / unspecified".
        if(i.second == UNKNOW)
            std::cout << "-> " << " " << std::endl;
        else
            std::cout << "-> " << Util::DataTypeToString(i.second) << std::endl;
    }
    std::cout << std::endl;
}

// Print the currently-negotiated input types and output type.
void Function::printCurrentTypeMsg(){
    std::cout << "    " << name << ": ";
    for(auto i : data_type_current.first){
        std::cout << Util::DataTypeToString(i) << " ";
    }
    // UNKNOW output means "pass-through / unspecified".
    if(data_type_current.second == UNKNOW)
        std::cout << "-> " << " " << std::endl;
    else
        std::cout << "-> " << Util::DataTypeToString(data_type_current.second) << std::endl;
}

// All (inputs -> output) type combinations this function supports.
std::vector<std::pair<std::vector<uint8_t>, uint8_t>> Function::getDataTypeSupport(){
    return data_type_support;
}

// Record the negotiated type: flag == 0 appends an input type,
// any other flag sets the output type.
void Function::setDataTypeCurrent(int flag, uint8_t type){
    if(flag == 0){
        data_type_current.first.push_back(type);
    }else{
        data_type_current.second = type;
    }
}

// Verify that the negotiated input types (data_type_current.first) match
// one of the supported combinations; abort with diagnostics otherwise.
// Fixes: iterate by const reference (the old loop copied each candidate
// pair) and use an unsigned index to silence signed/unsigned comparison.
void Function::check(){
    for(const auto& i : data_type_support){
        if(i.first.size() != data_type_current.first.size()){
            continue;
        }
        bool isMatch = true;
        for(size_t j = 0; j < i.first.size(); j++){
            if(i.first[j] != data_type_current.first[j]){
                isMatch = false;
                break;
            }
        }
        if(isMatch)
            return;
    }
    std::cout << name << " data type not match. " << std::endl;
    printSupportTypeMsg();
    printCurrentTypeMsg();
    assert(false);
}

// Register a factory that builds a Function from string parameters.
// Fix: move the std::function into the map instead of copying it
// (a std::function copy may allocate).
void Function::Register(std::string class_name, std::function<Function*(std::vector<std::string>)> create_function){
    std::cout << "register " << class_name << " to map." << std::endl;
    classes_map[class_name] = std::move(create_function);
}

// Register a parameterless factory (second registry).
void Function::Register(std::string class_name, std::function<Function*()> create_function){
    std::cout << "register " << class_name << " to map2." << std::endl;
    classes_map2[class_name] = std::move(create_function);
}

// Instantiate a registered Function subclass by name, forwarding params
// to its factory. Aborts when the name is unknown.
// Fixes: use map::find (O(log n)) instead of a linear scan over the whole
// map; correct the "cnnot" typo in the error message; return after
// assert(false) to avoid UB in NDEBUG builds.
Function* Function::Create(std::string class_name, std::vector<std::string> params){
    auto it = classes_map.find(class_name);
    if(it != classes_map.end()){
        // Invoke the factory and hand its result to the caller.
        return (it->second)(params);
    }
    std::cout << "cannot find class by name: " << class_name << std::endl;
    assert(false);
    return nullptr; // unreachable with asserts enabled
}

// Instantiate a registered Function subclass by name using the
// parameterless registry. Aborts when the name is unknown.
// Fixes: map::find instead of a linear scan; "cnnot" typo; explicit
// return after assert(false) (UB in NDEBUG builds otherwise).
Function* Function::Create(std::string class_name){
    auto it = classes_map2.find(class_name);
    if(it != classes_map2.end()){
        // Invoke the factory and hand its result to the caller.
        return (it->second)();
    }
    std::cout << "cannot find class by name: " << class_name << std::endl;
    assert(false);
    return nullptr; // unreachable with asserts enabled
}

// Drive a separate (self-contained) function: start(), loop process()
// until it reports no more results, then finish(). Aborts when called on
// a non-separate function.
void Function::run(){
    if(isSeparateFunction()){
        start();
        while(true){
            if(!process())  // no result produced: stream exhausted
                break;
        }
        finish();
    }else{
        std::cout << "only the separate function can run. " << std::endl;
        assert(false);
    }
}

/**
 * @brief Construct a new Python Function:: Python Function object
 * 
 * @param pModulePath 
 * @param pModuleHomePath 
 * @param pModuleName 
 * @param pClassName 
 * @param args 
 */

// Construct a PythonFunction with no Python module bound yet.
PythonFunction::PythonFunction(){
    Python_Function = true;
}

// Construct a PythonFunction bound to a Python class.
// pModulePath / pModuleHomePath are appended to sys.path in defaultStart();
// pModuleName.pClassName identifies the class to instantiate; args are
// forwarded to its constructor.
PythonFunction::PythonFunction(std::string pModulePath, std::string pModuleHomePath, std::string pModuleName, std::string pClassName, std::vector<std::string> args){
    Python_Function = true;
    this->pModulePath = pModulePath;
    this->pModuleHomePath = pModuleHomePath;
    this->pModuleName = pModuleName;
    this->pClassName = pClassName;
    this->args = args;
}

// Nothing to release here; Python references are dropped in defaultFinish().
PythonFunction::~PythonFunction(){

}

// Set up the embedded Python environment for this function: initialize
// numpy, extend sys.path with the configured module paths and, when a
// module/class is configured, resolve module -> dict -> class ->
// constructor. Instance/args/return pointers start out null.
void PythonFunction::defaultStart(){
    // Initialize the numpy C-API for this interpreter.
    init_numpy();
    // Make the module locations importable.
    PyRun_SimpleString("import sys");
    std::string command = "";
    if(pModulePath != ""){
        command = "sys.path.append('" + pModulePath + "')";
        PyRun_SimpleString(command.c_str());
    }
    if(pModuleHomePath != ""){
        command = "sys.path.append('" + pModuleHomePath + "')";
        PyRun_SimpleString(command.c_str());
    }
    // Resolve and sanity-check the target class.
    if(pModuleName != "" && pClassName != ""){
        // Import the module object (new reference).
        pModule = PyImport_ImportModule(pModuleName.c_str());
        assert(pModule != nullptr);
        // Module dictionary (borrowed reference per the C-API).
        pDict = PyModule_GetDict(pModule);
        assert(pDict != nullptr);
        // Class object from the dict (borrowed reference per the C-API).
        pClass = PyDict_GetItemString(pDict, pClassName.c_str());
        assert(pClass != nullptr);
        // Wrap the class as a callable constructor.
        pConstruct = PyInstanceMethod_New(pClass);
        assert(pConstruct != nullptr);
    }
    // Not created until first use.
    pInstance = nullptr;
    pArgs = nullptr;
    pReturn = nullptr;
}

// Drop the Python object references acquired in defaultStart().
// Fix: pDict (from PyModule_GetDict) and pClass (from PyDict_GetItemString)
// are BORROWED references per the CPython C-API — the previous version
// Py_DECREF'd them too, corrupting the interpreter's reference counts.
// Only the owned references are released here.
void PythonFunction::defaultFinish(){
    if(pModule != nullptr)
        Py_DECREF(pModule);
    // pDict and pClass are borrowed references: do NOT Py_DECREF them.
    if(pConstruct != nullptr)
        Py_DECREF(pConstruct);
    if(pInstance != nullptr)
        Py_DECREF(pInstance);
    if(pArgs != nullptr)
        Py_DECREF(pArgs);
    if(pReturn != nullptr)
        Py_DECREF(pReturn);
}

// Convert a batch of C++ Data inputs into a Python list for the Python
// worker. Each batch entry becomes a single numpy array / tuple (single
// Data, getLength() == -1) or a nested list (linked list of Datas).
// GPU-resident tensors are encoded as tuples of dimensions plus the
// device-memory position/size so the Python side can reconstruct them.
PyObject* PythonFunction::convertToPython(std::vector<std::vector<Data*>>& data_input){
    assert(data_input.size() != 0);
    assert(data_input[0].size() == 1);
    // pList has exactly one slot per batch entry.
    PyObject* pList = PyList_New(data_input.size());
    for(int i = 0; i < data_input.size(); i++){
        for(int j = 0; j < data_input[i].size(); j++){
            Data* d = data_input[i][j];
            int length = d->getLength(); // -1: single element; otherwise list length
            PyObject* ppList;            // nested list; only created when length != -1
            if(length != -1)
                ppList = PyList_New(length);
            int index = 0;
            // Walk the Data linked list (single node when length == -1).
            while(d){
                if(d->type == HEAD){
                    // List head carries no payload: skip it.
                }else if(d->type == MAT){
                    // Wrap the image bytes in an HxWxC uint8 numpy array.
                    // NOTE(review): this cv::Mat is never deleted and its
                    // own allocation is overwritten by d's buffer — looks
                    // like a leak; confirm ownership of mat.data.
                    cv::Mat* img = new cv::Mat(d->context.mat.rows, d->context.mat.cols, CV_8UC3);
                    img->data = d->context.mat.data;
                    img->datastart = img->data;
                    img->dataend = img->datastart + d->context.mat.channels*d->context.mat.rows*d->context.mat.cols;
                    int m = img->rows, n = img->cols, c = img->channels();
                    npy_intp Dims[3] = {m,n,c};
                    PyObject* pArray = PyArray_SimpleNewFromData(3, Dims, NPY_UBYTE, (void*)(img->data));
                    if(length == -1)
                        PyList_SET_ITEM(pList, i, pArray);
                    else
                        PyList_SET_ITEM(ppList, index++, pArray);
                }else if(d->type == RECT){
                    // Rectangles are not forwarded to Python.
                }else if(d->type == TENSOR){
                    // CxHxW float32 numpy array sharing the tensor's buffer.
                    int c = d->context.tensor.C, h = d->context.tensor.H, w = d->context.tensor.W;
                    npy_intp Dims[3] = {c,h,w};
                    PyObject* pArray = PyArray_SimpleNewFromData(3, Dims, NPY_FLOAT, (void*)(d->context.tensor.data));
                    if(length == -1)
                        PyList_SET_ITEM(pList, i, pArray);
                    else
                        PyList_SET_ITEM(ppList, index++, pArray);
                }else if(d->type == BATCH_TENSOR){
                    // BxCxHxW float32 numpy array sharing the batch buffer.
                    int b = d->context.batch_tensor.B, c = d->context.batch_tensor.C, h = d->context.batch_tensor.H, w = d->context.batch_tensor.W;
                    npy_intp Dims[4] = {b,c,h,w};
                    PyObject* pArray = PyArray_SimpleNewFromData(4, Dims, NPY_FLOAT, (void*)(d->context.batch_tensor.data));
                    if(length == -1)
                        PyList_SET_ITEM(pList, i, pArray);
                    else
                        PyList_SET_ITEM(ppList, index++, pArray);
                }else if(d->type == GPU_TENSOR){
                    // GPU tensors are passed as a 5-tuple (C, H, W, pos, size);
                    // the Python side rebuilds a GPU tensor from it.
                    int c = d->context.gpu_tensor.C, h = d->context.gpu_tensor.H, w = d->context.gpu_tensor.W, stream_id = d->context.gpu_tensor.stream_id, block_id = d->context.gpu_tensor.block_id;
                    PyObject* pTuple = PyTuple_New(5);
                    PyTuple_SET_ITEM(pTuple, 0, Py_BuildValue("i", c));
                    PyTuple_SET_ITEM(pTuple, 1, Py_BuildValue("i", h));
                    PyTuple_SET_ITEM(pTuple, 2, Py_BuildValue("i", w));
                    // The executor's GPU shared memory maps stream_id to a
                    // size_t position and size, which are what Python needs.
                    size_t src_pos = executor->getDeviceMemoryPos(stream_id);
                    size_t src_size = executor->getDeviceMemorySize(stream_id);
                    PyTuple_SET_ITEM(pTuple, 3, Py_BuildValue("l", src_pos));
                    PyTuple_SET_ITEM(pTuple, 4, Py_BuildValue("l", src_size));
                    if(length == -1)
                        PyList_SET_ITEM(pList, i, pTuple);
                    else
                        PyList_SET_ITEM(ppList, index++, pTuple);
                }else if(d->type == BATCH_GPU_TENSOR){
                    // Batched GPU tensors are passed as a 6-tuple
                    // (B, C, H, W, pos, size), rebuilt on the Python side.
                    int b = d->context.batch_gpu_tensor.B, c = d->context.batch_gpu_tensor.C, h = d->context.batch_gpu_tensor.H, w = d->context.batch_gpu_tensor.W, stream_id = d->context.batch_gpu_tensor.stream_id, block_id = d->context.batch_gpu_tensor.block_id;
                    PyObject* pTuple = PyTuple_New(6);
                    PyTuple_SET_ITEM(pTuple, 0, Py_BuildValue("i", b));
                    PyTuple_SET_ITEM(pTuple, 1, Py_BuildValue("i", c));
                    PyTuple_SET_ITEM(pTuple, 2, Py_BuildValue("i", h));
                    PyTuple_SET_ITEM(pTuple, 3, Py_BuildValue("i", w));
                    // Translate stream_id to device-memory position/size.
                    size_t src_pos = executor->getDeviceMemoryPos(stream_id);
                    size_t src_size = executor->getDeviceMemorySize(stream_id);
                    PyTuple_SET_ITEM(pTuple, 4, Py_BuildValue("l", src_pos));
                    PyTuple_SET_ITEM(pTuple, 5, Py_BuildValue("l", src_size));
                    if(length == -1)
                        PyList_SET_ITEM(pList, i, pTuple);
                    else
                        PyList_SET_ITEM(ppList, index++, pTuple);
                }else{
                    assert(false);
                }
                d = d->next;
            }
            // Multi-element input: install the nested list in the batch slot.
            if(length != -1)
                PyList_SET_ITEM(pList, i, ppList);
        }
    }
    return pList;
}

// DONE: 支持linkPos，以及上述所有情况。
// Convert the Python worker's return value (a list, up to three levels
// deep) back into Data objects in data_output. linkPos selects which list
// level gets collapsed into a linked list (1 = first level, 2 = second).
// GATHER-type functions accumulate inputs across calls in data_ins.
void PythonFunction::convertToCpp(PyObject* pReturn, std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output, int linkPos){
    // The return value must be a Python list.
    assert(PyList_Check(pReturn) != 0);
    // Batch size of the returned value.
    int batch_size = PyList_Size(pReturn);
    if(type == GATHER && data_input.size() == 0 && is_end){
        // A terminating GATHER may legitimately have no input left.
    }else{
        // Otherwise the returned batch must mirror the input batch.
        assert(data_input.size() == batch_size);
    }
    // Process each batch entry.
    for(int no = 0; no < batch_size; no++){
        // The no-th element of the returned list.
        PyObject* pObj = PyList_GetItem(pReturn, no);
        Data* da_in;
        if(type == GATHER && data_input.size() == 0 && is_end){
            // No reference input in the GATHER-termination case.
        }else{
            da_in = data_input[no][0]; // reference input for the conversion
            if(type == GATHER){
                Data* data = new Data(da_in);  // shallow copy
                data_ins.push_back(data);      // remember inputs seen so far (vec -> batch_tensor)
            }
        }
        if(PyList_Check(pObj)){
            int size = PyList_Size(pObj);
            std::vector<Data*> datas;
            for(int i = 0; i < size; i++){
                PyObject* ppObj = PyList_GetItem(pObj, i);
                if(PyList_Check(ppObj)){
                    int size = PyList_Size(ppObj);
                    std::vector<Data*> datas;
                    for(int j = 0; j < size; j++){
                        PyObject* pppObj = PyList_GetItem(ppObj, j);
                        // No deeper nesting: at most three list levels supported.
                        assert(PyList_Check(pppObj) == false);
                        // 1:m*n parse for two-dimensional lists.
                        Data* data = parsePyObj(da_in, pppObj, i, j);
                        datas.push_back(data);
                    }
                    if(linkPos == 2){
                        // Collapse this level into a linked list.
                        data_output.push_back(to_list(da_in, datas));
                    }else{
                        // Append the elements individually.
                        data_output.insert(data_output.end(), datas.begin(), datas.end());
                    }
                }else{
                    // 1:n parse for one-dimensional lists.
                    Data* data;
                    if(type == GATHER){
                        // GATHER parses every input collected since the last call.
                        data = parsePyObj(data_ins, ppObj);
                        // Reclaim and reset the accumulated shallow copies.
                        for(Data* data : data_ins)
                            delete data;
                        data_ins.clear();
                    }else{
                        data = parsePyObj(da_in, ppObj, i);
                    }
                    datas.push_back(data);
                }
            }
            if(linkPos == 1){
                // Collapse this level into a linked list.
                data_output.push_back(to_list(da_in, datas));
            }else{
                // Append the elements individually.
                data_output.insert(data_output.end(), datas.begin(), datas.end());
            }
        }else{
            // 1:1 parse for a bare (non-list) element.
            Data* data = parsePyObj(da_in, pObj); 
            data_output.push_back(data);
        }
    }
}

// Build a linked list: a HEAD node followed by every Data in data_vec.
// The head is modeled on data_vec[0] when the vector is non-empty,
// otherwise on `data` (and the list is just the bare head).
Data* PythonFunction::to_list(Data* data, std::vector<Data*> data_vec){
    char* ptr = executor->malloc(Data_Memory);
    // Choose the template Data for the head node.
    Data* model = data_vec.size() == 0 ? data : data_vec[0];
    Data* head = new (ptr)Data(model, HEAD);
    // Chain every element behind the head.
    Data* tail = head;
    for(Data* item : data_vec){
        tail->next = item;
        tail = item;
    }
    return head;
}

/** 最外层直接解析： 1:1，符合情况的如下：
    Mat -> Tensor
    Mat -> Rect
    Tensor -> Tensor
    Tensor -> Rect
    Tensor -> String
    Batch_Tensor -> Batch_Tensor
    Tensor -> Gpu_Tensor
    Gpu_Tensor -> Tensor
    Gpu_Tensor -> Gpu_Tensor
    Batch_Tensor -> Batch_Gpu_Tensor
    Batch_Gpu_Tensor -> Batch_Tensor
**/
// 1:1 parse of a single Python return element into a Data, using
// data_input as the reference for lineage/metadata.
// Tuples encode rectangles (4), gpu_tensors (5) or batch_gpu_tensors (6);
// numpy arrays encode tensors (3-D) or batch_tensors (4-D); unicode
// strings become STRING Data.
// NOTE(review): the assert(false) arms have no return — UB if reached in
// an NDEBUG build; confirm asserts stay enabled in release.
Data* PythonFunction::parsePyObj(Data* data_input, PyObject* obj){
    assert(data_input != nullptr);
    assert(data_input->type == MAT || data_input->type == TENSOR || data_input->type == GPU_TENSOR || data_input->type == BATCH_TENSOR || data_input->type == BATCH_GPU_TENSOR);
    if(PyTuple_Check(obj)){
        int length = PyTuple_GET_SIZE(obj);
        if(length == 4){       // rectangle (x, y, w, h as floats)
            float params[4];
            int ok = PyArg_ParseTuple(obj, "ffff", &params[0], &params[1], &params[2], &params[3]);
            assert(ok == 1);
            char* ptr = executor->malloc(Data_Memory);
            Data* data = new (ptr)Data(data_input, params[0], params[1], params[2], params[3]);
            return data;
        }else if(length == 5){ // gpu_tensor (C, H, W, pos, block)
            size_t params[5];
            int ok = PyArg_ParseTuple(obj, "iiili", &params[0], &params[1], &params[2], &params[3], &params[4]);
            assert(ok == 1);
            char* ptr = executor->malloc(Data_Memory);
            // Translate the device-memory position back to a stream id.
            short stream_id = executor->getStreamId(params[3]);
            Data* data = new (ptr)Data(data_input, params[0], params[1], params[2], stream_id, (short)(params[4]));
            return data;
        }else if(length == 6){ // batch_gpu_tensor (B, C, H, W, pos, block)
            int length = data_input->getTensorLength();
            size_t params[6];
            int ok = PyArg_ParseTuple(obj, "iiiili", &params[0], &params[1], &params[2], &params[3], &params[4], &params[5]);
            assert(ok == 1);
            char* ptr = executor->malloc(Data_Memory);
            std::vector<char*> ptr_vec; // length+1 Data slots for the batch tensor's tensor heads
            for(int i = 0; i < length+1; i++){ 
                char* ptr = executor->malloc(Data_Memory);
                ptr_vec.push_back(ptr);
            }
            short stream_id = executor->getStreamId(params[4]);
            Data* data = new (ptr)Data(data_input, params[0], params[1], params[2], params[3], stream_id, (short)(params[5]), ptr_vec);
            return data;
        }else{
            assert(false);
        }
    }else if(PyArray_Check(obj)){ // tensor / batch_tensor
        // Copy the array contents into executor-managed memory.
        PyArrayObject* pArray = (PyArrayObject*)PyArray_GETCONTIGUOUS((PyArrayObject*)obj);
        int nDim = PyArray_NDIM(pArray);
        npy_intp* dims = PyArray_DIMS(pArray);
        size_t size = PyArray_SIZE(pArray);
        float* arrayData = (float*)PyArray_BYTES(pArray);
        char* array_ptr = executor->malloc(size*sizeof(float));
        memcpy(array_ptr, arrayData, size*sizeof(float));
        // Build the Data wrapper.
        char* ptr = executor->malloc(Data_Memory);
        Data* data;
        if(nDim == 3){        // tensor 
            data = new (ptr)Data(data_input, dims[0], dims[1], dims[2], (float*)array_ptr);
        }else if(nDim == 4){  // batch_tensor 
            int length = data_input->getTensorLength();
            std::vector<char*> ptr_vec; // length+1 Data slots for the batch tensor's tensor heads
            for(int i = 0; i < length+1; i++){ 
                char* ptr = executor->malloc(Data_Memory);
                ptr_vec.push_back(ptr);
            }
            data = new (ptr)Data(data_input, dims[0], dims[1], dims[2], dims[3], (float*)array_ptr, ptr_vec);
        }else{
            assert(false);
        }
        return data;
    }else if(PyUnicode_Check(obj)){ // STRING
            const char* str = PyUnicode_AsUTF8(obj);
            // Build the Data wrapper around the string.
            char* data_ptr = executor->malloc(Data_Memory);
            Data* data = new (data_ptr)Data(data_input, str);
            return data;
    }else{
        assert(false);
    }
}

/* 第一层列表解析 1:n
    NORMAL:
    Mat -> [Rect,Rect,...]
    Tensor -> [Rect,Rect,...]
    Tensor -> [String,String,...]
    Tensor -> [Tensor,Tensor,Tensor]
    Batch_Tensor -> [Batch_Tensor,Batch_Tensor,Batch_Tensor]
    Head*->Tensor*->Tensor*->Tensor* -> [Rect,Rect,...]
    Head*->Tensor*->Tensor*->Tensor* -> [Gpu_Tensor,Gpu_Tensor,Gpu_Tensor]
    Head*->Gpu_Tensor->Gpu_Tensor->Gpu_Tensor -> [Tensor,Tensor,Tensor]
    Head*->Batch_Tensor*->Batch_Tensor*->Batch_Tensor* -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor]
    Head*->Batch_Gpu_Tensor->Batch_Gpu_Tensor->Batch_Gpu_Tensor -> [Batch_Tensor,Batch_Tensor,Batch_Tensor]
    SCATTER:
    Batch_Tensor -> [Tensor,Tensor,...]
*/
// 1:n parse (first list level): choose the reference Data for the i-th
// returned element, then delegate to the 1:1 parse.
Data* PythonFunction::parsePyObj(Data* data_input, PyObject* obj, int i){
    assert(data_input != nullptr);
    assert(data_input->type == MAT || data_input->type == TENSOR || data_input->type == GPU_TENSOR || data_input->type == BATCH_TENSOR || data_input->type == BATCH_GPU_TENSOR || data_input->type == HEAD);
    if(data_input->type == HEAD){
        // Linked-list input: pick the i-th element as reference.
        if(i < data_input->getLength()){
            return parsePyObj(data_input->getItem(i), obj); // one-to-one correspondence
        }else{
            return parsePyObj(data_input->getItem(0), obj); // no correspondence: fall back to the first element
        }
    }else if(type == SCATTER){
        // SCATTER splits 1:n — Batch_Tensor -> Tensor, Batch_Gpu_Tensor -> Gpu_Tensor.
        assert(data_input->type == BATCH_TENSOR || data_input->type == BATCH_GPU_TENSOR);
        assert(i < data_input->getTensorLength());
        return parsePyObj(data_input->getTensorItem(i), obj);
    }else{
        // Plain 1:n replication: same reference for every element.
        return parsePyObj(data_input, obj);
    }
}

/* 第二层列表解析 1:[m*n]
    Head*->Batch_Tensor*->Batch_Tensor*->Batch_Tensor* -> [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...] 
    Head*->Batch_Gpu_Tensor*->Batch_Gpu_Tensor*->Batch_Gpu_Tensor* -> [[Batch_Gpu_Tensor, Batch_Gpu_Tensor, Batch_Gpu_Tensor],[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...]
*/
// 1:[m*n] parse (second list level): i indexes the batch element, j one of
// the three entries of the HEAD list; delegates to the 1:1 parse.
Data* PythonFunction::parsePyObj(Data* data_input, PyObject* obj, int i, int j){
    assert(data_input->type == HEAD);
    // First pick the j-th element from the HEAD list...
    assert(data_input->getLength() == 3);
    assert(j < 3);
    Data* cur_data = data_input->getItem(j);
    // ...then the i-th tensor from that element's TENSOR head.
    assert(i < cur_data->getTensorLength());
    return parsePyObj(cur_data->getTensorItem(i), obj);
}

/*
    GATHER:
    Tensor -> [] | [Batch_Tensor]
    GPU_Tensor -> [] | [Batch_Gpu_Tensor]
*/
// GATHER parse: combine the accumulated inputs (da_ins) with the Python
// result into a single batched Data — a numpy array becomes a
// Batch_Tensor, a 6-tuple becomes a Batch_Gpu_Tensor.
// NOTE(review): the final assert(false) arm has no return — UB if reached
// in an NDEBUG build.
Data* PythonFunction::parsePyObj(std::vector<Data*> da_ins, PyObject* obj){
    assert(type == GATHER);
    if(PyArray_Check(obj)){ // Batch_Tensor
        int length = da_ins.size();
        char* ptr = executor->malloc(Data_Memory);
        std::vector<char*> ptr_vec; // length+1 Data slots for the batch tensor's tensor heads
        for(int i = 0; i < length+1; i++){ 
            char* ptr = executor->malloc(Data_Memory);
            ptr_vec.push_back(ptr);
        }
        // Copy the array contents into executor-managed memory.
        PyArrayObject* pArray = (PyArrayObject*)PyArray_GETCONTIGUOUS((PyArrayObject*)obj);
        npy_intp* dims = PyArray_DIMS(pArray);
        size_t size = PyArray_SIZE(pArray);
        float* arrayData = (float*)PyArray_BYTES(pArray);
        char* array_ptr = executor->malloc(size*sizeof(float));
        memcpy(array_ptr, arrayData, size*sizeof(float));
        Data* data = new (ptr)Data(da_ins, dims[0], dims[1], dims[2], dims[3], (float*)array_ptr, ptr_vec);
        return data;
    }else if(PyTuple_Check(obj)){ // Batch_Gpu_Tensor (B, C, H, W, pos, block)
        assert(PyTuple_GET_SIZE(obj) == 6);
        int length = da_ins.size();
        size_t params[6];
        int ok = PyArg_ParseTuple(obj, "iiiili", &params[0], &params[1], &params[2], &params[3], &params[4], &params[5]);
        assert(ok == 1);
        char* ptr = executor->malloc(Data_Memory);
        std::vector<char*> ptr_vec; // length+1 Data slots for the batch tensor's tensor heads
        for(int i = 0; i < length+1; i++){ 
            char* ptr = executor->malloc(Data_Memory);
            ptr_vec.push_back(ptr);
        }
        // Translate the device-memory position back to a stream id.
        short stream_id = executor->getStreamId(params[4]);
        Data* data = new (ptr)Data(da_ins, params[0], params[1], params[2], params[3], stream_id, (short)(params[5]), ptr_vec);
        return data;
    }else{
        assert(false);
    }
}

/**
 * @brief Construct a new Gpu Function:: Gpu Function object
 * 
 */

// A GpuFunction starts owning no stream: both ids hold the "none" sentinel (-1).
GpuFunction::GpuFunction(){
    Gpu_Function = true;
    targetStreamId = -1;
    occuStreamId = -1;
}

// No GPU state to tear down here; streams are released via defaultReleaseResource().
GpuFunction::~GpuFunction(){
    
}

// Record which device stream the incoming data occupies (occuStreamId) so
// it can be freed after processing. The input must be a single GPU-resident
// Data, or a HEAD list whose first item is GPU-resident.
void GpuFunction::setOccuStreamIdByDataInput(std::vector<std::vector<Data*>>& data_input){
    assert(data_input.size() == 1);
    assert(data_input[0].size() == 1);
    Data* data = data_input[0][0];
    assert(data->type == HEAD || data->type == GPU_TENSOR || data->type == BATCH_GPU_TENSOR);
    if(data->type == HEAD){
        // Linked list: the first item carries the stream id.
        assert(data->getLength() >= 1);
        if(data->getItem(0)->type == GPU_TENSOR){
            occuStreamId = data->getItem(0)->context.gpu_tensor.stream_id;
        }else if(data->getItem(0)->type == BATCH_GPU_TENSOR){
            occuStreamId = data->getItem(0)->context.batch_gpu_tensor.stream_id;
        }else{
            assert(false);
        }
    }else if(data->type == GPU_TENSOR){
        occuStreamId = data->context.gpu_tensor.stream_id;
    }else if(data->type == BATCH_GPU_TENSOR){
        occuStreamId = data->context.batch_gpu_tensor.stream_id;
    }else{
        assert(false);
    }
}

// DONE: 完善函数
bool GpuFunction::defaultWaitForResource(){
    // 如果有空闲的stream，则申请到空闲的stream，并且返回true
    short stream_id = executor->mallocDeviceMemory();
    if(stream_id == -1)
        return false;
    targetStreamId = stream_id;
    return true;
}

// DONE: 完善函数
bool GpuFunction::defaultReleaseResource(){
    assert(occuStreamId != -1);
    executor->freeDeviceMemory(occuStreamId);
    occuStreamId = -1;
    return true;
}

/**
 * @brief 方法，提供给api
 * 
 */

// Match a's supported OUTPUT types against b's supported INPUT types and
// record the first matching pair on both sides. Aborts with diagnostics
// when nothing matches.
// Fixes: iterate the support lists by const reference (the old loops
// copied a pair<vector,uint8_t> per iteration) and use an unsigned index.
void check(Function& a, Function& b){
    for(const auto& i : a.getDataTypeSupport()){
        uint8_t output_data_type = i.second;
        for(const auto& j : b.getDataTypeSupport()){
            for(size_t k = 0; k < j.first.size(); k++){
                uint8_t input_data_type = j.first[k];
                if(input_data_type == output_data_type){
                    // Record the negotiated types on both functions.
                    a.setDataTypeCurrent(1, output_data_type);
                    b.setDataTypeCurrent(0, input_data_type);
                    return;
                }
            }
        }
    }
    // No compatible pairing found.
    std::cout << a.getName() << " " << b.getName() << " not match. " << std::endl;
    a.printSupportTypeMsg();
    b.printSupportTypeMsg();
    assert(false);
}

// Pointer overload of check(): match a's supported OUTPUT types against
// b's supported INPUT types and record the first match on both sides.
// Aborts with diagnostics when nothing matches.
// Fixes: const-reference iteration (avoids per-iteration pair copies) and
// an unsigned index.
void check(Function* a, Function* b){
    for(const auto& i : a->getDataTypeSupport()){
        uint8_t output_data_type = i.second;
        for(const auto& j : b->getDataTypeSupport()){
            for(size_t k = 0; k < j.first.size(); k++){
                uint8_t input_data_type = j.first[k];
                if(input_data_type == output_data_type){
                    // Record the negotiated types on both functions.
                    a->setDataTypeCurrent(1, output_data_type);
                    b->setDataTypeCurrent(0, input_data_type);
                    return;
                }
            }
        }
    }
    // No compatible pairing found.
    std::cout << a->getName() << " and " << b->getName() << " not match. " << std::endl;
    a->printSupportTypeMsg();
    b->printSupportTypeMsg();
    assert(false);
}

// Connect a -> b after verifying their data types are compatible.
// A new bounded queue (capacity 5000) is shared between them:
// it becomes one of a's next queues and one of b's pre queues.
void linkFunctions(Function& a, Function& b){
    // Verify type compatibility, then create the connecting BlockingQueue.
    check(a, b);
    BlockingQueue<Data*>* q = new BlockingQueue<Data*>(5000);
    a.addToNextQs(q);
    b.addToPreQs(q);
}

// Pointer overload: connect a -> b with a new shared bounded queue
// (capacity 5000) after verifying type compatibility.
void linkFunctions(Function* a, Function* b){
    check(a, b);
    // Create the connecting BlockingQueue shared by both functions.
    BlockingQueue<Data*>* q = new BlockingQueue<Data*>(5000);
    a->addToNextQs(q);
    b->addToPreQs(q);
}

};