#include "openpose_preprocess.h"

// TODO: incomplete — to be finished
namespace sp::Model::Openpose{

// Free-function entry point: registers the openpose_preprocess factory
// functions with the global Function registry so instances can later be
// created by name.
void registe_openpose_preprocess(){
    openpose_preprocess::registe();
}

// Parameterized constructor: forwards hard-coded Python script/repo paths
// plus the module and class names ("openpose") to the PythonFunction base,
// along with the caller-supplied parameter list `v`.
// NOTE(review): the absolute /home/lx/... paths are machine-specific —
// presumably placeholders; confirm before deploying elsewhere.
openpose_preprocess::openpose_preprocess(std::vector<std::string> v): PythonFunction("/home/lx/SmartPipe/src/core/functions/Model/Openpose/openpose/", "/home/lx/SmartPipe/src/core/functions/Model/Openpose/openpose/repo/pytorch-openpose/", "openpose", "openpose", v){
    name = "openpose_preprocess";
    class_name = "Model-Openpose-openpose_preprocess";
    assert(false);  // unfinished scaffolding — aborts in debug builds (no-op under NDEBUG)
}

// Default constructor: default-constructs the PythonFunction base, then
// assigns the same Python paths/module/class fields that the parameterized
// constructor passes to the base — keep the two in sync.
// NOTE(review): the absolute /home/lx/... paths are machine-specific —
// presumably placeholders; confirm before deploying elsewhere.
openpose_preprocess::openpose_preprocess(): PythonFunction(){
    name = "openpose_preprocess";
    class_name = "Model-Openpose-openpose_preprocess";
    pModulePath = "/home/lx/SmartPipe/src/core/functions/Model/Openpose/openpose/";
    pModuleHomePath = "/home/lx/SmartPipe/src/core/functions/Model/Openpose/openpose/repo/pytorch-openpose/";
    pModuleName = "openpose";
    pClassName = "openpose";
    assert(false);  // unfinished scaffolding — aborts in debug builds (no-op under NDEBUG)
}

// Destructor. The assert marks the class as unfinished scaffolding; it
// aborts in debug builds and is a no-op under NDEBUG (no cleanup is done).
openpose_preprocess::~openpose_preprocess(){
    assert(false);
}

// Registers both factory overloads (with-params and no-params) in the
// global Function registry.
// NOTE(review): both calls use the identical key
// "Model-Openpose-openpose_preprocess". This is only correct if
// Function::Register keeps separate tables per factory signature;
// otherwise the second call silently overwrites the first — verify
// against Function::Register's implementation.
void openpose_preprocess::registe(){
    Function::Register("Model-Openpose-openpose_preprocess", createObject);
    Function::Register("Model-Openpose-openpose_preprocess", createObject2);
}

// Factory used by the Function registry: builds an openpose_preprocess
// configured with the given parameter strings. Ownership of the returned
// heap object passes to the caller (the registry/pipeline).
Function* openpose_preprocess::createObject(std::vector<std::string> params){
    openpose_preprocess* instance = new openpose_preprocess(params);
    return instance;
}

// Parameterless factory counterpart of createObject: builds a
// default-constructed openpose_preprocess. Ownership of the returned
// heap object passes to the caller (the registry/pipeline).
Function* openpose_preprocess::createObject2(){
    openpose_preprocess* instance = new openpose_preprocess();
    return instance;
}

// Start-up hook: delegates to the base class to construct the Python-side
// instance, then aborts — this method is still a stub.
void openpose_preprocess::start(){
    // Construct the Python instance via the base-class default start-up.
    PythonFunction::defaultStart();
    assert(false);  // unfinished scaffolding — aborts in debug builds
}

// Resource-acquisition hook: this function needs no exclusive resources,
// so it always reports success.
bool openpose_preprocess::waitForResource(){
    return true;
}

/*
    NOTE(review): the "lprnet_inference" names below look copy-pasted from the
    LPRNet function's file — presumably they should read "openpose_preprocess";
    verify against the original template.

    vector<vector<Tensor*>> -> [Tensor,Tensor,...]
        lprnet_inference (batched inside the Function, Host Memory): [Tensor,Tensor,...] -> [Tensor,Tensor,...]
    [Tensor,Tensor,...] -> vector<Tensor*>

    vector<vector<Gpu_Tensor*>> -> [Gpu_Tensor,Gpu_Tensor,...]
        lprnet_inference (batched inside the Function, Device Memory): [Gpu_Tensor,Gpu_Tensor,...] -> [Gpu_Tensor,Gpu_Tensor,...]
    [Gpu_Tensor,Gpu_Tensor,...] -> vector<Gpu_Tensor*>

    vector<vector<Batch_Tensor*>> -> [Batch_Tensor,Batch_Tensor,...]
        lprnet_inference (batched outside the Function, Host Memory): [Batch_Tensor,Batch_Tensor,...] -> [Batch_Tensor,Batch_Tensor,...]
    [Batch_Tensor,Batch_Tensor,...] -> vector<Batch_Tensor*>

    vector<vector<Batch_Gpu_Tensor*>> -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
        lprnet_inference (batched outside the Function, Device Memory): [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
    [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> vector<Batch_Gpu_Tensor*>
*/
// Main processing entry point: consumes batches of upstream Data and is
// meant to emit preprocessed outputs (see the I/O sketch above).
// Not implemented yet — unconditionally asserts.
void openpose_preprocess::process(std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output){
    assert(false);  // TODO: implement OpenPose preprocessing
}

// Resource-release hook: mirrors waitForResource — nothing was acquired,
// so releasing always succeeds.
bool openpose_preprocess::releaseResource(){
    return true;
}

// Shutdown hook.
// NOTE(review): assert(false) runs BEFORE defaultFinish(), so in debug
// builds the base-class teardown is never reached; start() uses the
// opposite order (call base, then assert). Presumably a stub marker —
// confirm the intended ordering before finishing this method.
void openpose_preprocess::finish(){
    assert(false);
    PythonFunction::defaultFinish();
}

// Copies shared Function-level state from `other` using the framework's
// default copy routine; no openpose-specific state is copied here.
void openpose_preprocess::copy(Function* other){
    Function::defaultCopy(other);
}

};