#include "lprnet_inference.h"

#include <cassert>
#include <utility>

namespace sp::Model::LPRnet{

// Free-function entry point that registers the lprnet_inference factories
// with the global Function registry.
void registe_lprnet_inference(){
    lprnet_inference::registe();
}

// Parameterized constructor: forwards the (expected-empty) parameter list to
// the PythonFunction base, which binds the Python class "lprnet" from the
// given module/home paths, then declares the tensor types this function
// accepts and produces.
lprnet_inference::lprnet_inference(std::vector<std::string> v): PythonFunction("/home/lx/SmartPipe/src/core/functions/Model/LPRnet/lprnet/", "/home/lx/SmartPipe/src/core/functions/Model/LPRnet/lprnet/repo/plate/", "lprnet", "lprnet", v), GpuFunction(){
    // This function takes no extra configuration parameters.
    assert(v.empty());
    name = "lprnet_inference";
    class_name = "Model-LPRnet-lprnet_inference";
    // Supported (inputs -> output) type pairs; the output type always mirrors
    // the single input type.
    data_type_support.emplace_back(std::vector<uint8_t>{TENSOR}, TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_TENSOR}, BATCH_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{GPU_TENSOR}, GPU_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_GPU_TENSOR}, BATCH_GPU_TENSOR);
}

// Default constructor: instead of routing the configuration through the
// parameterized base constructor, it assigns the Python module locations
// directly onto the base-class fields, then declares the supported tensor
// types.
lprnet_inference::lprnet_inference(): PythonFunction(), GpuFunction(){
    name = "lprnet_inference";
    class_name = "Model-LPRnet-lprnet_inference";
    pModulePath = "/home/lx/SmartPipe/src/core/functions/Model/LPRnet/lprnet/";
    pModuleHomePath = "/home/lx/SmartPipe/src/core/functions/Model/LPRnet/lprnet/repo/plate/";
    pModuleName = "lprnet";
    pClassName = "lprnet";
    // Supported (inputs -> output) type pairs; the output type always mirrors
    // the single input type.
    data_type_support.emplace_back(std::vector<uint8_t>{TENSOR}, TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_TENSOR}, BATCH_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{GPU_TENSOR}, GPU_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_GPU_TENSOR}, BATCH_GPU_TENSOR);
}

// No resources owned directly by this subclass; base classes handle cleanup.
lprnet_inference::~lprnet_inference() = default;

// Registers both factory callbacks in the global Function registry.
// NOTE(review): both calls use the identical key string; this relies on
// Function::Register being overloaded on the factory signature (with-params
// vs. parameterless). If Register is a plain single-map insert, the second
// call would overwrite or be ignored — confirm against Function::Register.
void lprnet_inference::registe(){
    Function::Register("Model-LPRnet-lprnet_inference", createObject);
    Function::Register("Model-LPRnet-lprnet_inference", createObject2);
}

// Factory: builds an lprnet_inference configured from a parameter list.
// `params` arrives by value and the constructor also takes its vector by
// value, so move it into the constructor to avoid a second copy of the
// string vector.
Function* lprnet_inference::createObject(std::vector<std::string> params){
    return new lprnet_inference(std::move(params));
}

// Factory: builds an lprnet_inference with the built-in default
// configuration (no parameters).
Function* lprnet_inference::createObject2(){
    return new lprnet_inference();
}

void lprnet_inference::start(){
    // 构建实例
    PythonFunction::defaultStart();
    pInstance = PyObject_CallObject(pConstruct, nullptr);
    assert(pInstance != nullptr);
    PyObject_CallMethod(pInstance, "start", "");
}

// No extra resources need to be acquired before processing; always ready.
bool lprnet_inference::waitForResource(){
    return true;
}

/*
    Supported data flows (output container type mirrors the input type):

    vector<vector<Tensor*>> -> [Tensor,Tensor,...]
        lprnet_inference (batching done inside the Function, host memory):
        [Tensor,...] -> [Tensor,...]
    [Tensor,Tensor,...] -> vector<Tensor*>

    vector<vector<Gpu_Tensor*>> -> [Gpu_Tensor,Gpu_Tensor,...]
        lprnet_inference (batching done inside the Function, device memory):
        [Gpu_Tensor,...] -> [Gpu_Tensor,...]
    [Gpu_Tensor,Gpu_Tensor,...] -> vector<Gpu_Tensor*>

    vector<vector<Batch_Tensor*>> -> [Batch_Tensor,Batch_Tensor,...]
        lprnet_inference (batching done outside the Function, host memory):
        [Batch_Tensor,...] -> [Batch_Tensor,...]
    [Batch_Tensor,Batch_Tensor,...] -> vector<Batch_Tensor*>

    vector<vector<Batch_Gpu_Tensor*>> -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
        lprnet_inference (batching done outside the Function, device memory):
        [Batch_Gpu_Tensor,...] -> [Batch_Gpu_Tensor,...]
    [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> vector<Batch_Gpu_Tensor*>
*/
// Runs one inference step: converts the C++ input tensors to Python objects,
// calls the Python instance's inference() method, converts the result back
// into data_output, and frees inputs whose reference count has dropped to 0.
void lprnet_inference::process(std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output){
    assert(data_input.size() != 0);
    assert(data_input[0].size() == 1);
    // Convert the input tensors into a Python object.
    PyObject* data = convertToPython(data_input);
    // Invoke the Python inference method.
    // NOTE(review): `data` is wrapped in a 1-element tuple which is then
    // passed as a single "O" argument, so Python receives inference((data,)).
    // Presumably the Python side unpacks that tuple — confirm; otherwise
    // `data` should be passed directly. Also, PyTuple_SetItem steals the
    // reference to `data`, but the new references held in `pArgs`/`pReturn`
    // are never DECREF'd in this function — verify the PythonFunction base
    // releases them, else this leaks on every call.
    pArgs = PyTuple_New(1);
    PyTuple_SetItem(pArgs, 0, data);
    pReturn = PyObject_CallMethod(pInstance, "inference", "O", pArgs);
    // Parse the Python result into Data objects and append to data_output.
    convertToCpp(pReturn, data_input, data_output, 0);
    // Release inputs that are no longer referenced by any consumer.
    for(int i = 0; i < data_input.size(); i++){
        assert(data_input[i][0]->type == TENSOR || data_input[i][0]->type == GPU_TENSOR || data_input[i][0]->type == BATCH_TENSOR || data_input[i][0]->type == BATCH_GPU_TENSOR);
        if(data_input[i][0]->subRefCount() == 0){
            executor->freeAll(data_input[i][0]);
        }
    }
}

// No per-call resources to release; always succeeds.
bool lprnet_inference::releaseResource(){
    return true;
}

void lprnet_inference::finish(){
    PyObject_CallMethod(pInstance, "finish", "");
    PythonFunction::defaultFinish();
}

// Copies shared Function state from `other` using the default base-class
// field-wise copy; no subclass-specific state to duplicate.
void lprnet_inference::copy(Function* other){
    Function::defaultCopy(other);
}

};