#include "retinanet_inference.h"

namespace sp::Model::Retinanet{

// Free-function entry point that registers this Function type with the
// global Function registry (see retinanet_inference::registe below).
void registe_retinanet_inference(){
    retinanet_inference::registe();
}

// Parameterized constructor. `v` is the parameter list forwarded by the
// factory; this function accepts no extra parameters, hence the assert.
// NOTE(review): the module/repo paths are hard-coded absolute developer
// paths ("/home/lx/...") — consider moving them to configuration.
retinanet_inference::retinanet_inference(std::vector<std::string> v): PythonFunction("/home/lx/SmartPipe/src/core/functions/Model/Retinanet/retinanet/", "/home/lx/SmartPipe/src/core/functions/Model/Retinanet/retinanet/repo/Retinanet/", "retinanet", "retinanet", v), GpuFunction(){
    assert(v.size() == 0);
    name = "retinanet_inference";
    class_name = "Model-Retinanet-retinanet_inference";
    // Supported (input-types, output-type) pairs; output type mirrors the
    // single input type. emplace_back constructs the pair in place instead
    // of copying a temporary std::pair.
    data_type_support.emplace_back(std::vector<uint8_t>{TENSOR}, TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_TENSOR}, BATCH_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{GPU_TENSOR}, GPU_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_GPU_TENSOR}, BATCH_GPU_TENSOR);
}

// Default constructor. Sets the same Python module metadata as the
// parameterized constructor, but via member assignment (the default
// PythonFunction base is used).
// NOTE(review): the module/repo paths are hard-coded absolute developer
// paths ("/home/lx/...") — consider moving them to configuration.
retinanet_inference::retinanet_inference(): PythonFunction(), GpuFunction(){
    name = "retinanet_inference";
    class_name = "Model-Retinanet-retinanet_inference";
    pModulePath = "/home/lx/SmartPipe/src/core/functions/Model/Retinanet/retinanet/";
    pModuleHomePath = "/home/lx/SmartPipe/src/core/functions/Model/Retinanet/retinanet/repo/Retinanet/";
    pModuleName = "retinanet";
    pClassName = "retinanet";
    // Supported (input-types, output-type) pairs; output type mirrors the
    // single input type. emplace_back constructs the pair in place instead
    // of copying a temporary std::pair.
    data_type_support.emplace_back(std::vector<uint8_t>{TENSOR}, TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_TENSOR}, BATCH_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{GPU_TENSOR}, GPU_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_GPU_TENSOR}, BATCH_GPU_TENSOR);
}

// Destructor: nothing to release here; Python-side resources are torn down
// in finish() / PythonFunction::defaultFinish(), not in the destructor.
retinanet_inference::~retinanet_inference(){

}

// Registers both factory functions (with and without a parameter list)
// under the same class-name key.
// NOTE(review): both calls use the identical key; this presumably targets
// two overloads of Function::Register keyed by functor signature — if it is
// a single map, the second call would overwrite the first. Verify.
void retinanet_inference::registe(){
    Function::Register("Model-Retinanet-retinanet_inference", createObject);
    Function::Register("Model-Retinanet-retinanet_inference", createObject2);
}

// Factory: builds a retinanet_inference from a parameter list. Ownership of
// the returned object passes to the caller (the Function registry machinery).
Function* retinanet_inference::createObject(std::vector<std::string> params){
    Function* instance = new retinanet_inference(params);
    return instance;
}

// Factory: builds a default-constructed retinanet_inference. Ownership of
// the returned object passes to the caller (the Function registry machinery).
Function* retinanet_inference::createObject2(){
    Function* instance = new retinanet_inference();
    return instance;
}


void retinanet_inference::start(){
    // 构建实例
    PythonFunction::defaultStart();
    pInstance = PyObject_CallObject(pConstruct, nullptr);
    assert(pInstance != nullptr);
    PyObject_CallMethod(pInstance, "start", "");
}

// No GPU/host resources are gated for this function; always ready.
bool retinanet_inference::waitForResource(){
    return true;
}

/*
    Supported data layouts (input -> Python -> output):

    vector<vector<Tensor*>> -> [Tensor,Tensor,...]
        retinanet_inference (batched inside the Function, host memory):
        [Tensor,...] -> [[Tensor,Tensor,Tensor],...]
        -> vector<Head*> (Head* -> Tensor* -> Tensor* -> Tensor*)

    vector<vector<Gpu_Tensor*>> -> [Gpu_Tensor,Gpu_Tensor,...]
        retinanet_inference (batched inside the Function, device memory):
        [Gpu_Tensor,...] -> [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...]
        -> vector<Head*> (Head* -> Gpu_Tensor* -> Gpu_Tensor* -> Gpu_Tensor*)

    vector<vector<Batch_Tensor*>> -> [Batch_Tensor,Batch_Tensor,...]
        retinanet_inference (batched outside the Function, host memory):
        [Batch_Tensor,...] -> [[Batch_Tensor,Batch_Tensor,Batch_Tensor],...]
        -> vector<Head*> (Head* -> Batch_Tensor -> Batch_Tensor -> ...)

    vector<vector<Batch_Gpu_Tensor*>> -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
        retinanet_inference (batched outside the Function, device memory):
        [Batch_Gpu_Tensor,...] -> [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...]
        -> vector<Head*> (Head* -> Batch_Gpu_Tensor -> Batch_Gpu_Tensor -> ...)
*/
void retinanet_inference::process(std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output){
    assert(data_input.size() != 0);
    assert(data_input[0].size() == 1);  // exactly one Data per input slot
    // Convert the in-memory tensor data to Python (numpy) objects.
    PyObject* data = convertToPython(data_input);
    // Build the argument tuple; PyTuple_SetItem STEALS the reference to
    // `data`, so the tuple owns it from here on.
    pArgs = PyTuple_New(1);
    PyTuple_SetItem(pArgs, 0, data);
    // Release the previous call's result before overwriting the member —
    // otherwise every process() call leaks one PyObject.
    // NOTE(review): assumes PythonFunction initializes pReturn to nullptr
    // before the first call — verify in the base class.
    Py_XDECREF(pReturn);
    // NOTE(review): the "O" format wraps pArgs in another 1-tuple, so the
    // Python `inference` method receives (data,) as its single argument.
    // Preserved as-is since the Python side apparently expects this shape.
    pReturn = PyObject_CallMethod(pInstance, "inference", "O", pArgs);
    // The argument tuple (and `data`, which it owns) is no longer needed;
    // the original leaked it on every call.
    Py_DECREF(pArgs);
    pArgs = nullptr;
    assert(pReturn != nullptr);  // nullptr means the Python call raised
    // Parse the Python result into Data objects appended to data_output.
    convertToCpp(pReturn, data_input, data_output, 1);
    // Drop this function's reference on each consumed input and free it
    // once no other consumer holds a reference.
    for(size_t i = 0; i < data_input.size(); i++){
        assert(data_input[i][0]->type == TENSOR || data_input[i][0]->type == GPU_TENSOR || data_input[i][0]->type == BATCH_TENSOR || data_input[i][0]->type == BATCH_GPU_TENSOR);
        if(data_input[i][0]->subRefCount() == 0){
            executor->freeAll(data_input[i][0]);
        }
    }
}

// Counterpart of waitForResource(); nothing was acquired, so nothing to
// release — always succeeds.
bool retinanet_inference::releaseResource(){
    return true;
}

void retinanet_inference::finish(){
    PyObject_CallMethod(pInstance, "finish", "");
    PythonFunction::defaultFinish();
}

// Copies shared Function state from `other` via the base-class default;
// no retinanet-specific state needs copying here.
void retinanet_inference::copy(Function* other){
    Function::defaultCopy(other);
}

};