#include "transferToHostMemory.h"

#include <utility>

namespace sp::Model::Trans{

/**
 * Free registration hook: registers the transferToHostMemory factory
 * callbacks with the global Function registry (delegates to the static
 * transferToHostMemory::registe()).
 */
void registe_transferToHostMemory(){
    transferToHostMemory::registe();
}

/**
 * Parameterized constructor.
 * @param v extra string parameters forwarded to the PythonFunction base.
 *          This function accepts none, so the vector must be empty (asserted).
 *
 * Declares the supported conversions:
 *   {GPU_TENSOR} -> TENSOR and {BATCH_GPU_TENSOR} -> BATCH_TENSOR.
 */
transferToHostMemory::transferToHostMemory(std::vector<std::string> v): PythonFunction("/home/lx/SmartPipe/src/core/functions/Model/Trans/", "", "transferToHostMemory", "transferToHostMemory", v), GpuFunction(){
    assert(v.size() == 0);
    name = "transferToHostMemory";
    class_name = "Model-Trans-transferToHostMemory";
    assert(batch_size == 1); // currently only batch size 1 is supported
    data_type_support.push_back(std::pair<std::vector<uint8_t>, uint8_t>({GPU_TENSOR}, TENSOR));
    data_type_support.push_back(std::pair<std::vector<uint8_t>, uint8_t>({BATCH_GPU_TENSOR}, BATCH_TENSOR));
}

/**
 * Default constructor: sets the embedded-Python module location/name fields
 * directly instead of passing them through the PythonFunction constructor.
 *
 * Declares the same supported conversions as the parameterized constructor:
 *   {GPU_TENSOR} -> TENSOR and {BATCH_GPU_TENSOR} -> BATCH_TENSOR.
 */
transferToHostMemory::transferToHostMemory(): PythonFunction(), GpuFunction(){
    name = "transferToHostMemory";
    class_name = "Model-Trans-transferToHostMemory";
    assert(batch_size == 1); // currently only batch size 1 is supported
    pModulePath = "/home/lx/SmartPipe/src/core/functions/Model/Trans/";
    pModuleHomePath = "";
    pModuleName = "transferToHostMemory";
    pClassName = "transferToHostMemory";
    data_type_support.push_back(std::pair<std::vector<uint8_t>, uint8_t>({GPU_TENSOR}, TENSOR));
    data_type_support.push_back(std::pair<std::vector<uint8_t>, uint8_t>({BATCH_GPU_TENSOR}, BATCH_TENSOR));
}

// Destructor: nothing to release beyond what the base classes clean up.
transferToHostMemory::~transferToHostMemory() = default;

/**
 * Registers both factory callbacks with the global Function registry.
 *
 * NOTE(review): both calls use the identical key
 * "Model-Trans-transferToHostMemory". createObject and createObject2 have
 * different signatures, so presumably Function::Register is overloaded and
 * stores them in separate maps — verify; otherwise the second registration
 * would overwrite the first.
 */
void transferToHostMemory::registe(){
    Function::Register("Model-Trans-transferToHostMemory", createObject);
    Function::Register("Model-Trans-transferToHostMemory", createObject2);
}

/**
 * Factory callback: builds an instance from string parameters.
 * @param params forwarded to the parameterized constructor (which asserts
 *               the vector is empty — this function takes no parameters).
 * @return heap-allocated instance; ownership passes to the caller/registry.
 */
Function* transferToHostMemory::createObject(std::vector<std::string> params){
    // Move instead of copy: the constructor takes its vector by value,
    // so the original code copied the parameter list a second time.
    return new transferToHostMemory(std::move(params));
}

/**
 * Factory callback for the parameterless variant.
 * @return heap-allocated instance; ownership passes to the caller/registry.
 */
Function* transferToHostMemory::createObject2(){
    return new transferToHostMemory();
}

void transferToHostMemory::start(){
    PythonFunction::defaultStart();
    pInstance = PyObject_CallObject(pConstruct, nullptr);
    assert(pInstance != nullptr);
    PyObject_CallMethod(pInstance, "start", "");
}

/**
 * Resource gate called before process(); this transfer step needs no extra
 * resources of its own, so it always reports ready.
 */
bool transferToHostMemory::waitForResource(){
    return true;
}

/**
    vector<vector<Gpu_Tensor*>> -> [Gpu_Tensor,Gpu_Tensor,...]
        transferToHostMemory: [Gpu_Tensor,Gpu_Tensor,...] -> [Tensor,Tensor,...]
    [Tensor,Tensor,...] -> vector<Tensor*>

    vector<vector<Batch_Gpu_Tensor*>> -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
        transferToHostMemory: [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [Batch_Tensor,Batch_Tensor,...]
    [Batch_Tensor,Batch_Tensor,...] -> vector<Batch_Tensor*>

    vector<vector<Head*>> -> [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...]
        transferToHostMemory: [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...] -> [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...]
    [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...] -> vector<Head*> (Head* -> Tensor* -> Tensor* -> Tensor*)

    vector<vector<Head*>> -> [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...]
        transferToHostMemory: [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...] -> [[Batch_Tensor,Batch_Tensor,Batch_Tensor],[Batch_Tensor,Batch_Tensor,Batch_Tensor],...]
    [[Batch_Tensor,Batch_Tensor,Batch_Tensor],[Batch_Tensor,Batch_Tensor,Batch_Tensor],...] -> vector<Head*> (Head* -> Batch_Tensor* -> Batch_Tensor* -> Batch_Tensor*)
**/
/**
 * Moves GPU-resident tensors to host memory by delegating to the Python-side
 * process() method, then frees the consumed GPU inputs.
 *
 * @param data_input  non-empty; each inner vector must hold exactly one Data*
 *                    of type GPU_TENSOR, BATCH_GPU_TENSOR, or a HEAD chain of
 *                    three (BATCH_)GPU_TENSORs (see the assertions below and
 *                    the type-flow comment above this function).
 * @param data_output receives the converted host-side results.
 */
void transferToHostMemory::process(std::vector<std::vector<Data*>>& data_input, std::vector<Data*>& data_output){
    // struct timeval t1, t2, t3, t4;
    // gettimeofday(&t1, NULL);
    assert(data_input.size() != 0);
    assert(data_input[0].size() == 1);
    // Derive the occupied stream id from the input data.
    setOccuStreamIdByDataInput(data_input);
    // Convert the input tensors into Python objects.
    PyObject* data = convertToPython(data_input);
    // gettimeofday(&t2, NULL);
    // Invoke the Python-side process() method.
    // NOTE(review): `data` is wrapped in a 1-tuple (pArgs) which is then
    // passed as the single "O" argument, so Python receives the tuple itself,
    // not `data` — confirm the Python side expects this.
    // PyTuple_SetItem steals the reference to `data`; pArgs and pReturn are
    // not decref'd here — presumably members released elsewhere (verify).
    pArgs = PyTuple_New(1);
    PyTuple_SetItem(pArgs, 0, data);
    pReturn = PyObject_CallMethod(pInstance, "process", "O", pArgs);
    // gettimeofday(&t3, NULL);
    // Convert the result back to C++ Data and append to data_output;
    // mode 1 rebuilds HEAD chains, mode 0 plain (batch) tensors.
    if(data_input[0][0]->type == HEAD)
        convertToCpp(pReturn, data_input, data_output, 1);
    else
        convertToCpp(pReturn, data_input, data_output, 0);
    // Free the consumed GPU-side inputs once their refcount drops to zero.
    for(int i = 0; i < data_input.size(); i++){
        assert(data_input[i][0]->type == GPU_TENSOR || data_input[i][0]->type == BATCH_GPU_TENSOR || (data_input[i][0]->type == HEAD && data_input[i][0]->next->type == GPU_TENSOR && data_input[i][0]->next->next->type == GPU_TENSOR && data_input[i][0]->next->next->next->type == GPU_TENSOR) || (data_input[i][0]->type == HEAD && data_input[i][0]->next->type == BATCH_GPU_TENSOR && data_input[i][0]->next->next->type == BATCH_GPU_TENSOR && data_input[i][0]->next->next->next->type == BATCH_GPU_TENSOR));
        if(data_input[i][0]->subRefCount() == 0){
            executor->freeAll(data_input[i][0]);
        }
    }
    // gettimeofday(&t4, NULL);
    // std::cout << (t4.tv_sec + ((double)t4.tv_usec/1000000)) - (t1.tv_sec + ((double)t1.tv_usec/1000000)) << " ";
    // std::cout << (t2.tv_sec + ((double)t2.tv_usec/1000000)) - (t1.tv_sec + ((double)t1.tv_usec/1000000)) << " ";
    // std::cout << (t3.tv_sec + ((double)t3.tv_usec/1000000)) - (t2.tv_sec + ((double)t2.tv_usec/1000000)) << " ";
    // std::cout << (t4.tv_sec + ((double)t4.tv_usec/1000000)) - (t3.tv_sec + ((double)t3.tv_usec/1000000)) << " ";
    // std::cout << std::endl;
}

/**
 * Releases GPU-side resources via the GpuFunction default implementation.
 * @return whatever defaultReleaseResource() reports.
 */
bool transferToHostMemory::releaseResource(){
    return GpuFunction::defaultReleaseResource();
}

void transferToHostMemory::finish(){
    PyObject_CallMethod(pInstance, "finish", "");
    PythonFunction::defaultFinish();
}

/**
 * Copies shared Function state from another instance using the default
 * Function implementation.
 * @param other source instance to copy from.
 */
void transferToHostMemory::copy(Function* other){
    Function::defaultCopy(other);
}

};