#include "Trt.h"

// Run one synchronous inference pass: copy the (already filled) host input
// buffer to the GPU, execute the engine, and copy the three output heads back.
// NOTE(review): buffer sizes are hard-coded and must match allocate_buffer()'s
// binding dimensions — presumably a 3x640x640 float input and three
// 18-channel feature maps at strides 8/16/32 (YOLO-style); confirm against the
// actual engine. cudaMemcpy / executeV2 return codes are not checked beyond
// the log line below.
void Trt::infer(){
    // transfer input buffer from host(CPU) to device(GPU)
    cudaMemcpy(m_buffers_gpu_[0], m_buffers_cpu_[0], 3 * 640 * 640 * sizeof(float), cudaMemcpyHostToDevice);

    // infer model
    if(!m_context_->executeV2(m_buffers_gpu_)) {
        std::cout<<"infer failed"<<std::endl;
    }

    // transfer output buffer from device(GPU) to host(CPU)
    cudaMemcpy(m_buffers_cpu_[1], m_buffers_gpu_[1], 18 * 80 * 80 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(m_buffers_cpu_[2], m_buffers_gpu_[2], 18 * 40 * 40 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(m_buffers_cpu_[3], m_buffers_gpu_[3], 18 * 20 * 20 * sizeof(float), cudaMemcpyDeviceToHost);
}

// Prepare the engine for inference: reuse a cached serialized engine next to
// the ONNX file ("<stem>_fp16.trt") if present, otherwise build it first,
// then deserialize it and allocate the I/O buffers.
void Trt::create_engine(const std::string& onnx_file){
    std::string engine_file = onnx_file.substr(0, onnx_file.rfind(".")) + "_fp16.trt";

    // Cheap existence check: an ifstream that fails to open means no cache yet.
    std::ifstream probe(engine_file, std::ios::in);
    bool has_engine_file = static_cast<bool>(probe);
    probe.close();

    if(!has_engine_file) {
        // BUG FIX: the build result was previously ignored — a failed build
        // fell through to deserializing a missing/garbage file.
        if(serialize(onnx_file, engine_file) != 0) {
            std::cout<<"serialize failed, engine not created"<<std::endl;
            return;
        }
    }
    deserialize(engine_file);
    allocate_buffer();
}

// Expose the host-side I/O buffer array so callers can fill the input
// (index 0) before infer() and read the outputs (indices 1..3) after.
// The returned pointers are owned by this object — do not free them.
void** Trt::get_io(){
    return m_buffers_cpu_;
}

// Debug helper: dump name, data type, and dimensions of every engine binding.
void Trt::print_info(){
    int binding_num = m_engine_->getNbBindings();
    std::cout << "binding_num: " << binding_num << std::endl;
    for(int i = 0; i < binding_num; i++) {
        std::cout << "name of bind " + std::to_string(i) << " is: " << m_engine_->getBindingName(i) << std::endl;
        // BUG FIX: the data type of binding i was never checked — the loop
        // queried binding 0 on every iteration.
        if(m_engine_->getBindingDataType(i) != nvinfer1::DataType::kFLOAT) {
            std::cout<<"[FAULT] data type of binding is not kfloat!! " << "is " << (int)m_engine_->getBindingDataType(i) << std::endl;
        }
        nvinfer1::Dims dim = m_engine_->getBindingDimensions(i);
        std::cout << "dims info: ";
        // BUG FIX: iterate the binding's actual rank instead of a hard-coded 4
        // (the inner index also no longer shadows the outer `i`).
        for(int d = 0; d < dim.nbDims; d++) {
            std::cout << dim.d[d] << " ";
        }
        std::cout << std::endl;
    }
}

int Trt::serialize(const std::string& onnx_file, const std::string& engine_file)
{
    IBuilder* builder = createInferBuilder(m_glogger_);
    INetworkDefinition* network = builder->createNetworkV2(1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
    if (!network) {
        std::cout<<"createNetworkV2 false!!"<<std::endl;
        return false;
    }
    auto config = builder->createBuilderConfig();
    if (!config) {
        std::cout<<"createBuilderConfig false!!"<<std::endl;
        return false;
    }
    auto parser = nvonnxparser::createParser(*network, m_glogger_);
    if (!parser) {
        std::cout<<"createParser false!!"<<std::endl;
        return false;
    }

    auto parsed = parser->parseFromFile(onnx_file.c_str(), 0);
    if (!parsed) {
        std::cout<<"parseFromFile false!!"<<std::endl;
        return false;
    }

    config->setFlag(BuilderFlag::kFP16);

    auto plan = builder->buildSerializedNetwork(*network, *config);

    std::cout << "allocate memory size: " << plan->size() << "bytes \n";
    std::ofstream outfile(engine_file.c_str(), std::ios::out | std::ios::binary);
    if (!outfile.is_open()) {
        std::cout << "fail to open file to write: " << engine_file.c_str() << std::endl;;
        return -1;
    }
    unsigned char* p = (unsigned char*)plan->data();
    outfile.write((char*)p, plan->size());
    outfile.close();
    return 0;
}

// Load a serialized engine from disk into m_engine_ and create the execution
// context m_context_. Returns 0 on success, -1 on failure.
// NOTE(review): `runtime` is intentionally not destroyed — in TensorRT the
// runtime must outlive the engine it deserialized; it leaks for the process
// lifetime, matching the original behavior.
int Trt::deserialize(const std::string& engine_file){
    auto runtime = createInferRuntime(m_glogger_);
    if (!runtime) {
        std::cout<<"createInferRuntime false!!"<<std::endl;
        return -1;
    }

    ifstream file(engine_file.c_str(), std::ios::binary);
    // BUG FIX: a missing/unreadable file previously produced size == (size_t)-1
    // and a crash; fail fast instead.
    if (!file) {
        std::cout<<"fail to open engine file: "<<engine_file<<std::endl;
        return -1;
    }
    file.seekg(0, file.end);
    size_t size = file.tellg();
    file.seekg(0, file.beg);
    vector<char> trtModelStream(size);
    file.read(trtModelStream.data(), size);
    file.close();

    m_engine_ = runtime->deserializeCudaEngine(trtModelStream.data(), size, nullptr);
    // BUG FIX: a corrupt plan returned nullptr here and the next line
    // dereferenced it.
    if (!m_engine_) {
        std::cout<<"deserializeCudaEngine false!!"<<std::endl;
        return -1;
    }

    m_context_ = m_engine_->createExecutionContext();
    if (!m_context_) {
        std::cout<<"createExecutionContext false!!"<<std::endl;
        return -1;
    }
    return 0;
}

// Allocate one GPU buffer and one pinned-size-matched host buffer per engine
// binding, sized from the binding's dimensions (float elements).
void Trt::allocate_buffer(){
    int binding_num = m_engine_->getNbBindings();
    for(int i = 0; i < binding_num; i++) {
        nvinfer1::Dims dim = m_engine_->getBindingDimensions(i);
        // BUG FIX: the volume was computed as d[0]*d[1]*d[2]*d[3] regardless
        // of the binding's rank, reading uninitialized dims for rank < 4.
        size_t volume = 1;
        for(int d = 0; d < dim.nbDims; d++) {
            volume *= static_cast<size_t>(dim.d[d]);
        }
        // cuda buffer
        cudaMalloc(&m_buffers_gpu_[i], volume * sizeof(float));
        // cpu buffer
        m_buffers_cpu_[i] = malloc(volume * sizeof(float));
    }
}