#include "transferToDeviceMemory.h"

namespace sp::Model::Trans{

// Free-function registration hook: installs transferToDeviceMemory's
// factory callbacks into the global Function registry.
void registe_transferToDeviceMemory(){
    transferToDeviceMemory::registe();
}

// Parameterized constructor: binds the Python module/class that performs the
// host-to-device transfer and advertises the supported type conversions.
// `v` is forwarded to the PythonFunction base; this function itself takes no
// extra parameters, so the list must be empty.
transferToDeviceMemory::transferToDeviceMemory(std::vector<std::string> v): PythonFunction("/home/lx/SmartPipe/src/core/functions/Model/Trans/", "", "transferToDeviceMemory", "transferToDeviceMemory", v), GpuFunction(){
    assert(v.size() == 0); // no parameters are accepted
    class_name = "Model-Trans-transferToDeviceMemory";
    name = "transferToDeviceMemory";
    assert(batch_size == 1); // only batch size 1 is supported for now
    // Conversions handled: TENSOR -> GPU_TENSOR and BATCH_TENSOR -> BATCH_GPU_TENSOR.
    data_type_support.emplace_back(std::vector<uint8_t>{TENSOR}, GPU_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_TENSOR}, BATCH_GPU_TENSOR);
}

// Default constructor: configures the Python binding fields manually instead
// of going through the parameterized PythonFunction base constructor.
transferToDeviceMemory::transferToDeviceMemory(): PythonFunction(), GpuFunction(){
    class_name = "Model-Trans-transferToDeviceMemory";
    name = "transferToDeviceMemory";
    assert(batch_size == 1); // only batch size 1 is supported for now
    // Location and identity of the Python implementation.
    pModulePath = "/home/lx/SmartPipe/src/core/functions/Model/Trans/";
    pModuleHomePath = "";
    pModuleName = "transferToDeviceMemory";
    pClassName = "transferToDeviceMemory";
    // Conversions handled: TENSOR -> GPU_TENSOR and BATCH_TENSOR -> BATCH_GPU_TENSOR.
    data_type_support.emplace_back(std::vector<uint8_t>{TENSOR}, GPU_TENSOR);
    data_type_support.emplace_back(std::vector<uint8_t>{BATCH_TENSOR}, BATCH_GPU_TENSOR);
}

// Destructor: nothing to release beyond what the bases clean up.
transferToDeviceMemory::~transferToDeviceMemory() = default;

// Registers both factory callbacks for this class in the global Function
// registry. NOTE(review): both calls use the identical key
// "Model-Trans-transferToDeviceMemory" — presumably Function::Register is
// overloaded on the creator signature (with-params vs. no-params); confirm
// the second call does not silently overwrite the first registration.
void transferToDeviceMemory::registe(){
    Function::Register("Model-Trans-transferToDeviceMemory", createObject);
    Function::Register("Model-Trans-transferToDeviceMemory", createObject2);
}

// Registry factory: builds an instance from a parameter list.
// The caller owns the returned object.
Function* transferToDeviceMemory::createObject(std::vector<std::string> params){
    Function* instance = new transferToDeviceMemory(params);
    return instance;
}

// Registry factory: builds a default-configured instance.
// The caller owns the returned object.
Function* transferToDeviceMemory::createObject2(){
    Function* instance = new transferToDeviceMemory();
    return instance;
}

void transferToDeviceMemory::start(){
    PythonFunction::defaultStart();
    pInstance = PyObject_CallObject(pConstruct, nullptr);
    assert(pInstance != nullptr);
    PyObject_CallMethod(pInstance, "start", "");
}

// Acquires GPU resources via the GpuFunction default policy.
// Returns true once the resource is available.
bool transferToDeviceMemory::waitForResource(){
    const bool acquired = GpuFunction::defaultWaitForResource();
    return acquired;
}

/**
    Data flow handled by this function (host -> device transfer):

    vector<vector<Tensor*>> -> [Tensor,Tensor,...]
        transferToDeviceMemory: [Tensor,Tensor,...] -> [Gpu_Tensor,Gpu_Tensor,...]
    [Gpu_Tensor,Gpu_Tensor,...] -> vector<Gpu_Tensor*>

    vector<vector<Batch_Tensor*>> -> [Batch_Tensor,Batch_Tensor,...]
        transferToDeviceMemory: [Batch_Tensor,Batch_Tensor,...] -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
    [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> vector<Batch_Gpu_Tensor*>

    vector<vector<Head*>> -> [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...]
        transferToDeviceMemory: [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...] -> [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...]
    [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...] -> vector<Head*> (Head* -> Gpu_Tensor* -> Gpu_Tensor* -> Gpu_Tensor*)

    vector<vector<Head*>> -> [[Batch_Tensor,Batch_Tensor,Batch_Tensor],[Batch_Tensor,Batch_Tensor,Batch_Tensor],...]
        transferToDeviceMemory: [[Batch_Tensor,Batch_Tensor,Batch_Tensor],...] -> [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...]
    [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...] -> vector<Head*> (Head* -> Batch_Gpu_Tensor* -> Batch_Gpu_Tensor* -> Batch_Gpu_Tensor*)
**/
void transferToDeviceMemory::process(std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output){
    assert(data_input.size() != 0);
    assert(data_input[0].size() == 1);
    // Convert the input tensors to Python objects.
    PyObject* data = convertToPython(data_input);
    // Call the Python process() with the device-memory window (offset + size)
    // reserved for the target stream, then invalidate the stream id.
    assert(targetStreamId != -1);
    size_t pos = executor->getDeviceMemoryPos(targetStreamId);
    size_t size = executor->getDeviceMemorySize(targetStreamId);
    pReturn = PyObject_CallMethod(pInstance, "process", "Oll", data, (long)pos, (long)size);
    assert(pReturn != nullptr); // fail fast if the Python call raised
    targetStreamId = -1;
    // NOTE(review): `data` (and any previous `pReturn`) is never Py_DECREF'ed
    // here — confirm ownership is handled inside convertToPython/convertToCpp,
    // otherwise one Python object leaks per call.
    // Convert the result back into C++ Data and append to data_output;
    // HEAD inputs use conversion mode 1, flat tensors mode 0.
    if(data_input[0][0]->type == HEAD)
        convertToCpp(pReturn, data_input, data_output, 1);
    else
        convertToCpp(pReturn, data_input, data_output, 0);
    // Release each input chain once no consumer holds a reference to it.
    for(size_t i = 0; i < data_input.size(); i++){
        // Only flat tensors, batch tensors, or a HEAD followed by exactly
        // three (batch) tensors are valid inputs.
        assert(data_input[i][0]->type == BATCH_TENSOR || data_input[i][0]->type == TENSOR || (data_input[i][0]->type == HEAD && data_input[i][0]->next->type == TENSOR && data_input[i][0]->next->next->type == TENSOR && data_input[i][0]->next->next->next->type == TENSOR) || (data_input[i][0]->type == HEAD && data_input[i][0]->next->type == BATCH_TENSOR && data_input[i][0]->next->next->type == BATCH_TENSOR && data_input[i][0]->next->next->next->type == BATCH_TENSOR));
        if(data_input[i][0]->subRefCount() == 0){
            executor->freeAll(data_input[i][0]);
        }
    }
}

// No per-call GPU resources are held by this function, so releasing
// always succeeds.
bool transferToDeviceMemory::releaseResource(){
    return true;
}

void transferToDeviceMemory::finish(){
    PyObject_CallMethod(pInstance, "finish", "");
    PythonFunction::defaultFinish();
}

// Copies shared Function state from `other` using the framework default.
void transferToDeviceMemory::copy(Function* other){
    Function::defaultCopy(other);
}

};