#include "TrtEngine.h"

#include <cassert>
#include <chrono>
#include <iostream>
#include <stdio.h>
#include <unistd.h>

// Abort with the offending path printed when `x` does not exist on disk.
// Wrapped in do { } while (0) so the macro expands safely inside an
// unbraced if/else (the old brace-block form broke `if (c) ACC(p); else ...`).
#define ACC(x)                        \
    do                                \
    {                                 \
        if (access(x, F_OK) != 0)     \
        {                             \
            printf("%s\n", x);        \
            assert(0);                \
        }                             \
    } while (0)

using namespace std;

/**
 * @brief Load detector configuration (model path, NMS and box thresholds)
 *        from the fixed SDK config file. Aborts via ACC if the file is
 *        missing so deployment errors surface immediately.
 */
TrtEngine::TrtEngine()
{
    // Plain literal — cv::format added nothing here.
    const string fpath = "/project/train/mysdk/config/config.yaml";
    ACC(fpath.c_str()); // abort early with a readable message if absent
    cv::FileStorage fs(fpath, cv::FileStorage::READ);
    if (!fs.isOpened())
    {
        // File exists but is unreadable/malformed: keep member defaults.
        cerr << "ERROR: could not open config file " << fpath << endl;
        return;
    }
    fs["mode_path"] >> path;
    fs["nmsThreshold"] >> nmsThreshold;
    fs["boxThreshold"] >> boxThreshold;
}

/**
 * @brief Read binding dimensions from the deserialized engine and allocate
 *        the managed input/output buffers. Must run after trtengine exists
 *        (loadTRTEngine calls this right after deserialization).
 */
void TrtEngine::init()
{
    // Collect the dimensions of every binding, split into inputs and outputs.
    vector<nvinfer1::Dims> input_dims;
    vector<nvinfer1::Dims> output_dims;
    for (int i = 0; i < trtengine->getNbBindings(); ++i) // int: getNbBindings() is signed
    {
        if (trtengine->bindingIsInput(i))
            input_dims.emplace_back(trtengine->getBindingDimensions(i));
        else
            output_dims.emplace_back(trtengine->getBindingDimensions(i));
    }
    if (input_dims.empty() || output_dims.empty())
    {
        cerr << "Expect at least one input and one output for network\n";
        return;
    }
    // NCHW input layout and (batch, rows, cols) output layout, from binding 0.
    batch_size = input_dims[0].d[0];
    channel = input_dims[0].d[1];
    netHeight = input_dims[0].d[2];
    netWidth = input_dims[0].d[3];
    output_size = output_dims[0].d[1];
    output_width = output_dims[0].d[2];

    cout << "batch_size:" << batch_size << endl;
    cout << "netHeight:" << netHeight << endl;
    cout << "channel:" << channel << endl;
    cout << "netWidth:" << netWidth << endl;
    cout << "output_width:" << output_width << endl;
    cout << "output_size:" << output_size << endl;

    // blob_kernel fills 3 planar float channels of netWidth x netWidth
    // (square input is assumed throughout this file), so size the blob
    // for 3 channels instead of the previous magic "* 4" over-allocation,
    // and check every CUDA allocation (the first two were unchecked).
    CHECK(cudaMallocManaged((void **)&buffers[0], netWidth * netWidth * 3 * sizeof(float)));
    CHECK(cudaMallocManaged((void **)&d_img, netWidth * netWidth * 3 * sizeof(uint8_t)));
    CHECK(cudaMallocManaged(&buffers[1], batch_size * output_size * output_width * sizeof(float)));
}
/**
 * @brief TrtEngine::loadOnnxEngine 加载onnx文件并转换为cuda engine
 * @param onnx_filename onnx模型路径
 * @return
 */

/**
 * @brief Build a TensorRT engine from an ONNX model and serialize it next to
 *        the source file with a ".trt" extension.
 * @param onnx_filename path to the .onnx model
 */
void TrtEngine::loadOnnxEngine(const string onnx_filename)
{
    // 1. Create builder / network / parser (explicit batch is mandatory for ONNX).
    nvinfer1::IBuilder *builder{nvinfer1::createInferBuilder(gLogger)};
    const auto flag = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    nvinfer1::INetworkDefinition *network = builder->createNetworkV2(flag);
    nvonnxparser::IParser *parser = nvonnxparser::createParser(*network, gLogger);
    // Report reduced-precision support for future FP16/INT8 builds.
    cout << "support FP16--------" << builder->platformHasFastFp16() << endl;
    cout << "support INT8--------" << builder->platformHasFastInt8() << endl;

    if (!parser->parseFromFile(onnx_filename.c_str(), static_cast<int>(Logger::Severity::kWARNING)))
    {
        cerr << "ERROR:Could not parse onnx engine \n";
        for (int i = 0; i < parser->getNbErrors(); ++i)
            std::cout << parser->getError(i)->desc() << std::endl;
        // fix: release everything on the error path instead of leaking
        parser->destroy();
        network->destroy();
        builder->destroy();
        return;
    }
    for (int i = 0; i < parser->getNbErrors(); ++i)
    { // print any non-fatal parser diagnostics
        std::cout << parser->getError(i)->desc() << std::endl;
    }
    cout << "successfully load the onnx model" << endl;

    // 2. Build the engine.
    unsigned int maxBatchSize = 1;
    builder->setMaxBatchSize(maxBatchSize);
    nvinfer1::IBuilderConfig *config = builder->createBuilderConfig();
    config->setMaxWorkspaceSize(1 << 30); // 1 GiB per-layer scratch limit

    nvinfer1::ICudaEngine *engine = builder->buildEngineWithConfig(*network, *config);
    if (engine == nullptr)
    {
        // fix: previously dereferenced a null engine on build failure
        cerr << "ERROR: could not build engine from " << onnx_filename << endl;
        config->destroy();
        parser->destroy();
        network->destroy();
        builder->destroy();
        return;
    }
    cout << "successfully create the engine" << endl;

    // 3. Serialize the engine to "<model>.trt" next to the ONNX file.
    nvinfer1::IHostMemory *gieModelStream = engine->serialize();
    size_t lastindex = onnx_filename.find_last_of(".");
    string trtfile = onnx_filename.substr(0, lastindex) + ".trt";
    ofstream engieFile(trtfile, ios::binary);
    engieFile.write(static_cast<char *>(gieModelStream->data()), gieModelStream->size());
    if (!engieFile)
        cerr << "ERROR: failed to write " << trtfile << endl;

    // fix: release builder-side objects (pre-8.x destroy() API) — all leaked before
    gieModelStream->destroy();
    engine->destroy();
    config->destroy();
    parser->destroy();
    network->destroy();
    builder->destroy();
}
/**
 * @brief TrtEngine::loadTRTEngine 加载trt模型
 * @param enginePath trt模型路径
 * @return
 */
/**
 * @brief Load a serialized TensorRT engine ("<name>.trt"); if it does not
 *        exist yet, first build it from "<name>.onnx" via loadOnnxEngine.
 * @param name model path without extension
 * @return the deserialized engine (also stored in trtengine), or nullptr on failure
 */
nvinfer1::ICudaEngine *TrtEngine::loadTRTEngine(string name)
{
    string enginePath = name + ".trt";
    if (access(enginePath.c_str(), F_OK) != 0)
    {
        // Engine cache miss: convert the ONNX model, which writes the .trt file.
        string onnx_path = name + ".onnx";
        loadOnnxEngine(onnx_path);
        printf("%s not exist!!\n", enginePath.c_str());
    }
    cout << enginePath << endl;

    ifstream gieModelStream(enginePath, ios::binary);
    if (!gieModelStream.good())
    {
        cerr << "ERROR: Could not read engine! \n";
        gieModelStream.close();
        return nullptr;
    }
    // Measure the file size, then rewind and read the whole blob.
    gieModelStream.seekg(0, ios::end);
    size_t modelSize = gieModelStream.tellg();
    gieModelStream.seekg(0, ios::beg);

    void *modelData = malloc(modelSize);
    if (!modelData)
    {
        cerr << "ERROR: Could not allocate memory for onnx engine! \n";
        gieModelStream.close();
        return nullptr;
    }
    gieModelStream.read((char *)modelData, modelSize);
    gieModelStream.close();

    nvinfer1::IRuntime *runtime = nvinfer1::createInferRuntime(gLogger);
    if (runtime == nullptr)
    {
        cerr << "ERROR: Could not create InferRuntime! \n";
        free(modelData); // fix: blob was leaked on this path
        return nullptr;
    }
    trtengine = runtime->deserializeCudaEngine(modelData, modelSize);
    // The engine keeps its own copy of the weights, so the raw stream and the
    // runtime can be released now (pre-8.x TensorRT destroy() semantics) —
    // both were previously leaked.
    free(modelData);
    runtime->destroy();
    cout << "sucessful get trt engine................." << endl;
    assert(trtengine);
    init(); // read binding shapes and allocate device buffers

    mp_context = trtengine->createExecutionContext();
    assert(mp_context != nullptr);
    CHECK(cudaStreamCreate(&stream));

    return trtengine;
}
/**
 * NOTE(review): stale doc block — TrtEngine::getClassNames ("read the class
 * name file, imagenet_classes") is not defined in this file. Remove this
 * comment or relocate it next to the function's actual definition.
 */

/**
 * @brief TrtEngine::preprocess_img 图片预处理过程
 * @param img Mat对象
 * @return
 */
/**
 * @brief Aspect-ratio-preserving resize onto a gray (128) canvas of the
 *        network input size, centering the scaled image.
 * @param img source image (BGR, 8-bit, 3 channels)
 * @return netHeight x netWidth canvas with the resized image centered
 */
cv::Mat TrtEngine::preprocess_img(cv::Mat &img)
{
    const float scale_w = netWidth / (img.cols * 1.0);
    const float scale_h = netHeight / (img.rows * 1.0);

    // The tighter scale wins; the other axis gets centered padding.
    int new_w, new_h, off_x, off_y;
    if (scale_h > scale_w)
    {
        new_w = netWidth;
        new_h = scale_w * img.rows;
        off_x = 0;
        off_y = (netHeight - new_h) / 2;
    }
    else
    {
        new_w = scale_h * img.cols;
        new_h = netHeight;
        off_x = (netWidth - new_w) / 2;
        off_y = 0;
    }

    cv::Mat resized(new_h, new_w, CV_8UC3);
    cv::resize(img, resized, resized.size(), 0, 0, cv::INTER_LINEAR);

    cv::Mat canvas(netHeight, netWidth, CV_8UC3, cv::Scalar(128, 128, 128));
    resized.copyTo(canvas(cv::Rect(off_x, off_y, resized.cols, resized.rows)));
    return canvas;
}
/**
 * @brief TrtEngine::letterbox 实现yolov5图像的letterbox
 * @param img
 * @return
 */
/**
 * @brief YOLOv5-style letterbox: scale to fit netWidth x netHeight keeping
 *        aspect ratio, then pad the borders with gray (114,114,114).
 * @param img source image
 * @return padded image of the network input size
 */
cv::Mat TrtEngine::letterbox(cv::Mat &img)
{
    const int src_w = img.cols;
    const int src_h = img.rows;
    // Use the smaller ratio so the whole image fits inside the target.
    const float ratio = min(float(netHeight) / src_h, float(netWidth) / src_w);
    const int scaled_w = std::round(src_w * ratio);
    const int scaled_h = std::round(src_h * ratio);

    cv::Mat scaled;
    cv::resize(img, scaled, cv::Size(scaled_w, scaled_h));

    // Halve the leftover and distribute it to both sides; the +/-0.1 rounding
    // mirrors the reference yolov5 implementation.
    const int half_pad_w = (netWidth - scaled_w) / 2;
    const int half_pad_h = (netHeight - scaled_h) / 2;
    const int top = int(std::round(half_pad_h - 0.1));
    const int bottom = int(std::round(half_pad_h + 0.1));
    const int left = int(std::round(half_pad_w - 0.1));
    const int right = int(std::round(half_pad_w + 0.1));
    copyMakeBorder(scaled, scaled, top, bottom, left, right, cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));
    return scaled;
}

/**
 * NOTE(review): this doc block described TrtEngine::getTRTResult (run the TRT
 * engine on an image path), which is not defined below — the next definition
 * is the free function letterbox(Mat&). Remove or move with its function.
 */

/**
 * @brief Standalone letterbox helper. Duplicates TrtEngine::letterbox but
 *        with a configurable target size; the defaults preserve the old
 *        hard-coded 1280x1280 behavior for existing callers.
 * @param src   input image
 * @param tar_w target width in pixels (default 1280, as before)
 * @param tar_h target height in pixels (default 1280, as before)
 * @return tar_w x tar_h image with 114-gray borders
 */
Mat letterbox(Mat &src, int tar_w = 1280, int tar_h = 1280)
{
    int in_w = src.cols;
    int in_h = src.rows;
    // Pick the smaller scale so the whole image fits inside the target.
    float r = min(float(tar_h) / in_h, float(tar_w) / in_w);
    int inside_w = std::round(in_w * r);
    int inside_h = std::round(in_h * r);
    int padd_w = tar_w - inside_w;
    int padd_h = tar_h - inside_h;

    Mat resize_img;
    resize(src, resize_img, Size(inside_w, inside_h));

    // Split the padding between both sides; the +/-0.1 rounding mirrors the
    // reference yolov5 letterbox implementation.
    padd_w = padd_w / 2;
    padd_h = padd_h / 2;
    int top = int(round(padd_h - 0.1));
    int bottom = int(round(padd_h + 0.1));
    int left = int(round(padd_w - 0.1));
    int right = int(round(padd_w + 0.1));
    copyMakeBorder(resize_img, resize_img, top, bottom, left, right, BORDER_CONSTANT, Scalar(114, 114, 114));
    cout << resize_img.size() << " --" << __LINE__ << endl; // debug trace kept from original
    return resize_img;
}

/**
 * @brief Convert an interleaved BGR uint8 image (width x width, square
 *        assumed) into a planar RGB float blob normalized to [0,1].
 *        Expected launch: 2-D grid/block covering at least width x width.
 * @param rgb    device pointer to BGR8 pixels, row stride = width * 3 bytes
 * @param output device pointer to 3 * width * width floats (R, G, B planes)
 * @param width  square image side in pixels
 */
__global__ void blob_kernel(uint8_t *rgb, float *output, int width)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= width)
        return;
    // Fix: the row stride was hard-coded to 1280; use `width` so the kernel
    // is correct for any network input size.
    const uint8_t *pixel = &rgb[(y * width + x) * 3];
    const int plane = width * width;
    output[y * width + x] = pixel[2] / 255.0f;             // R (from BGR)
    output[y * width + x + plane] = pixel[1] / 255.0f;     // G
    output[y * width + x + plane * 2] = pixel[0] / 255.0f; // B
}

/**
 * @brief Letterbox the image, convert it to a normalized planar blob on the
 *        GPU, and run TensorRT inference.
 * @param img source image (BGR, 8-bit)
 * @return pointer to the managed output buffer (buffers[1]); valid until the
 *         next call — do not free.
 */
float *TrtEngine::ProcessImage(cv::Mat &img)
{
    auto netInputImg = letterbox(img); // member letterbox -> netHeight x netWidth
    CHECK(cudaMemcpy(d_img, netInputImg.ptr<uint8_t>(),
                     netInputImg.rows * netInputImg.cols * 3 * sizeof(uint8_t),
                     cudaMemcpyHostToDevice));
    // Size the grid from the actual network width (ceil-div) instead of the
    // previous fixed 64x64 grid, which silently assumed width <= 2048.
    const dim3 block(32, 32);
    const dim3 grid((netWidth + block.x - 1) / block.x,
                    (netWidth + block.y - 1) / block.y); // square input, matches kernel
    blob_kernel<<<grid, block>>>(d_img, (float *)buffers[0], netWidth);
    CHECK(cudaGetLastError());      // catch bad launch configuration
    CHECK(cudaDeviceSynchronize()); // blob must be complete before inference
    mp_context->enqueue(1, buffers, stream, nullptr);
    CHECK(cudaStreamSynchronize(stream)); // was followed by a redundant device sync
    return (float *)buffers[1];
}

/**
 * @brief Map a detection box (center-x, center-y, w, h) from letterboxed
 *        network coordinates back to pixel coordinates of the source image.
 * @param img  original (un-letterboxed) image, used only for its dimensions
 * @param bbox box in network space: {cx, cy, w, h}
 * @return rectangle in original-image coordinates
 */
cv::Rect TrtEngine::get_rect(cv::Mat &img, float bbox[4])
{
    const float ratio_w = netWidth / (img.cols * 1.0);
    const float ratio_h = netHeight / (img.rows * 1.0);

    // Truncate to int first, then divide by the scale and truncate again —
    // this reproduces the original two-step integer conversion exactly.
    auto remap = [](float v, float ratio) { return (int)((int)v / ratio); };

    int left, top, right, bottom;
    if (ratio_h > ratio_w)
    {
        // Width-limited: vertical gray padding was added during letterboxing.
        const float pad_y = (netHeight - ratio_w * img.rows) / 2;
        left = remap(bbox[0] - bbox[2] / 2.f, ratio_w);
        right = remap(bbox[0] + bbox[2] / 2.f, ratio_w);
        top = remap(bbox[1] - bbox[3] / 2.f - pad_y, ratio_w);
        bottom = remap(bbox[1] + bbox[3] / 2.f - pad_y, ratio_w);
    }
    else
    {
        // Height-limited: horizontal gray padding was added.
        const float pad_x = (netWidth - ratio_h * img.cols) / 2;
        left = remap(bbox[0] - bbox[2] / 2.f - pad_x, ratio_h);
        right = remap(bbox[0] + bbox[2] / 2.f - pad_x, ratio_h);
        top = remap(bbox[1] - bbox[3] / 2.f, ratio_h);
        bottom = remap(bbox[1] + bbox[3] / 2.f, ratio_h);
    }
    return cv::Rect(left, top, right - left, bottom - top);
}
