#include "YoloV5Detection.h"

#include <chrono>
#include <fstream>
#include <iostream>
#include <vector>

#include "cuda_utils.h"
#include "logging.h"
#include "common.hpp"
#include "utils.h"

using namespace std;

#define USE_FP32  // set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0  // GPU id

// Fully-parenthesized min/max macros (project-local names avoid clashes with
// std::min/max or the Windows min/max macros).
#define syshen_min(a,b) ((a)<(b) ? (a) : (b))
#define syshen_max(a,b) ((a)>(b) ? (a) : (b))

//#define NMS_THRESH 0.4
//#define CONF_THRESH 0.5
//#define BATCH_SIZE 1
// Runtime configuration mirrored from the Yolo namespace (declared in the headers above).
static const int BATCH_SIZE = Yolo::BATCH_SIZE;       // static-shape batch capacity
static const float NMS_THRESH = Yolo::NMS_THRESH;     // IoU threshold for NMS
static const float CONF_THRESH = Yolo::CONF_THRESH;   // confidence threshold for NMS
static const char MODESIZE = Yolo::MODESIZE;          // model size: 's' / 'm' / 'l' / 'x'
//static const bool IS_DYNAMICSHAPE = Yolo::IS_DYNAMICSHAPE;

// stuff we know about the network and the input/output blobs
static const int INPUT_H = Yolo::INPUT_H;
static const int INPUT_W = Yolo::INPUT_W;
static const int CLASS_NUM = Yolo::CLASS_NUM;
// Output buffer size in floats per batch item: the detection records plus one
// leading float (the detection count written by the YOLO layer).
static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1;  // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1
const char* INPUT_BLOB_NAME = "data";
const char* OUTPUT_BLOB_NAME = "prob";
static Logger gLogger;

// Scale a channel count `x` by the width multiplier `gw`, rounding the result
// up to the next multiple of `divisor` (mirrors Python's ceil(x / d) * d).
static int get_width(int x, float gw, int divisor = 8) {
    const int scaled = static_cast<int>(x * gw);
    if (scaled % divisor == 0) {
        return scaled;
    }
    return (static_cast<int>(x * gw / divisor) + 1) * divisor;
}

// Scale a layer repeat count `x` by the depth multiplier `gd`; the result is
// rounded to the nearest integer and never drops below 1.
static int get_depth(int x, float gd) {
    if (x == 1) {
        return 1;
    }
    const int scaled = static_cast<int>(round(x * gd));
    return scaled > 1 ? scaled : 1;
}

// Build a static-shape YOLOv5 TensorRT engine (input fixed at 1 x 3 x INPUT_H x INPUT_W)
// from a .wts weight file.
//   maxBatchSize - value forwarded to builder->setMaxBatchSize()
//   dataType     - 0: FP16, 1: INT8 (calibrator currently disabled), other: FP32
//   dt           - TensorRT element type of the input tensor
//   gd, gw       - YOLOv5 depth/width multipliers (model size s/m/l/x)
//   wts_name     - path of the .wts weight file
// Returns the built engine (caller owns and must destroy it). The network
// definition and the host-side weight copies are released before returning.
ICudaEngine* build_engine(unsigned int maxBatchSize, uint8_t dataType, IBuilder* builder, IBuilderConfig* config, DataType dt, const float& gd, const float& gw, const std::string& wts_name) {
    INetworkDefinition* network = builder->createNetworkV2(1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
    //INetworkDefinition* network = builder->createNetworkV2(0U);

    // Create input tensor of shape {1, 3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
    // NOTE: does not support dynamic batch size due to the limitation of device memory
    // ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims4{ 1, 3, -1, -1 });
    ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims4{1, 3, INPUT_H, INPUT_W });
    assert(data);

    std::map<std::string, Weights> weightMap = loadWeights(wts_name);

    /* ------ yolov5 backbone------ */
    auto focus0 = focus(network, weightMap, *data/*, *slice_size*/, 3, get_width(64, gw), 3, "model.0");
    //auto focus0 = focus_plugin(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
    auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
    auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
    auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
    auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
    auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
    auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
    auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
    auto spp8 = SPP(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, 9, 13, "model.8");

    ///* ------ yolov5 head ------ */
    auto bottleneck_csp9 = C3(network, weightMap, *spp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.9");
    auto conv10 = convBlock(network, weightMap, *bottleneck_csp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");

    // Resize Method 1: static shapes known at build time, so the upsample target
    // dims can be copied directly from the skip-connection tensor.
    auto upsample11 = network->addResize(*conv10->getOutput(0));
    assert(upsample11);
    upsample11->setResizeMode(ResizeMode::kNEAREST);
    upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
    // NOTE (translated from garbled comment): if setInput(1, shape-tensor) were used
    // instead, the resize layers would report output dims of [-1] during build, which
    // reportedly has no effect on results - shape-tensor values are only bound at
    // inference time, not at build time.
    //upsample11->setInput(1, *(network->addShape(*bottleneck_csp6->getOutput(0))->getOutput(0)));
    // Resize Method 2:
    /*auto upsample11 = addResizeLayer(network, conv10->getOutput(0), bottleneck_csp6->getOutput(0));
    assert(upsample11);*/

    ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
    auto cat12 = network->addConcatenation(inputTensors12, 2);
    auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
    auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");

    // Resize Method 1:
    auto upsample15 = network->addResize(*conv14->getOutput(0));
    assert(upsample15);
    upsample15->setResizeMode(ResizeMode::kNEAREST);
    upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
    //upsample15->setInput(1, *network->addShape(*bottleneck_csp4->getOutput(0))->getOutput(0));
    // Resize Method 2:
    /*auto upsample15 = addResizeLayer(network, conv14->getOutput(0), bottleneck_csp4->getOutput(0));
    assert(upsample15);*/

    ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
    auto cat16 = network->addConcatenation(inputTensors16, 2);
    auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");

    ///* ------ detect ------ */
    // Three 1x1 detection heads (P3/P4/P5), each emitting 3 anchors x (classes + 5) channels.
    IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
    auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
    ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
    auto cat19 = network->addConcatenation(inputTensors19, 2);
    auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
    IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
    auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
    ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
    auto cat22 = network->addConcatenation(inputTensors22, 2);
    auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
    IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);

    // Custom YOLO decode layer fuses the three heads into the single "prob" output.
    auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2});
    //auto yolo = addYoLoDynamicLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2}, data);
    yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
    network->markOutput(*yolo->getOutput(0));

    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(16 * (1 << 20));  // 16MB
    switch (dataType) {
        case 0:
            config->setFlag(BuilderFlag::kFP16);
            break;
        case 1:
            std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
            assert(builder->platformHasFastInt8());
            config->setFlag(BuilderFlag::kINT8);
            // NOTE(review): kINT8 is set without an Int8 calibrator (below is commented
            // out) - accuracy will likely be poor unless dynamic ranges come from
            // elsewhere; confirm before using dataType == 1 in production.
//            Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
//            config->setInt8Calibrator(calibrator);
            break;

        default:
            std::cout << "detection using fp32" << std::endl;
            break;
    }

    std::cout << "Building engine, please wait for a while..." << std::endl;
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "Build engine successfully!" << std::endl;

    // Don't need the network any more
    network->destroy();

    // Release host memory (loadWeights malloc'd each blob's values)
    for (auto& mem : weightMap)
    {
        free((void*)(mem.second.values));
    }

    return engine;
}


// Build a dynamic-shape YOLOv5 TensorRT engine: batch is fixed at 1 but H/W are
// dynamic (-1), with an optimization profile covering 128x128 .. 1024x1024
// (optimum 640x640). Differences from the static build:
//   - the focus layer uses the plugin variant (slice layers need static shapes),
//   - resize layers take their output dims from runtime shape tensors,
//   - the YOLO decode layer is the dynamic variant.
// Parameters/ownership are the same as build_engine().
// NOTE(review): maxBatchSize is forwarded to setMaxBatchSize() but the input's
// batch dim is hard-coded to 1 - confirm multi-image batches are intended to be
// unsupported on this path.
ICudaEngine* build_engine_dynamic(unsigned int maxBatchSize, uint8_t dataType, IBuilder* builder, IBuilderConfig* config, DataType dt, const float& gd, const float& gw, const std::string& wts_name) {
    INetworkDefinition* network = builder->createNetworkV2(1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));

    // Create input tensor of shape {1, 3, -1, -1} with name INPUT_BLOB_NAME
    // NOTE: does not support dynamic batch size due to the limitation of device memory
    ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims4{ 1, 3, -1, -1 });
    assert(data);

    std::map<std::string, Weights> weightMap = loadWeights(wts_name);

    /* ------ yolov5 backbone------ */
    //auto focus0 = focus(network, weightMap, *data/*, *slice_size*/, 3, get_width(64, gw), 3, "model.0");
    auto focus0 = focus_plugin(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
    auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
    auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
    auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
    auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
    auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
    auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
    auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
    auto spp8 = SPP(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, 9, 13, "model.8");

    ///* ------ yolov5 head ------ */
    auto bottleneck_csp9 = C3(network, weightMap, *spp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.9");
    auto conv10 = convBlock(network, weightMap, *bottleneck_csp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");

    // Resize Method 1: dynamic shapes, so the upsample target comes from a shape
    // tensor computed at runtime from the skip-connection tensor.
    auto upsample11 = network->addResize(*conv10->getOutput(0));
    assert(upsample11);
    upsample11->setResizeMode(ResizeMode::kNEAREST);
    //upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
    // NOTE (translated from garbled comment): with this approach the resize layers
    // report output dims of [-1] during build, which reportedly has no effect on
    // results - shape-tensor values are only bound at inference time, not at build time.
    upsample11->setInput(1, *(network->addShape(*bottleneck_csp6->getOutput(0))->getOutput(0)));
    // Resize Method 2:
    /*auto upsample11 = addResizeLayer(network, conv10->getOutput(0), bottleneck_csp6->getOutput(0));
    assert(upsample11);*/

    ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
    auto cat12 = network->addConcatenation(inputTensors12, 2);
    auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
    auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");

    // Resize Method 1:
    auto upsample15 = network->addResize(*conv14->getOutput(0));
    assert(upsample15);
    upsample15->setResizeMode(ResizeMode::kNEAREST);
    //upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
    upsample15->setInput(1, *network->addShape(*bottleneck_csp4->getOutput(0))->getOutput(0));
    // Resize Method 2:
    /*auto upsample15 = addResizeLayer(network, conv14->getOutput(0), bottleneck_csp4->getOutput(0));
    assert(upsample15);*/

    ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
    auto cat16 = network->addConcatenation(inputTensors16, 2);
    auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");

    ///* ------ detect ------ */
    // Three 1x1 detection heads (P3/P4/P5), each emitting 3 anchors x (classes + 5) channels.
    IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
    auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
    ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
    auto cat19 = network->addConcatenation(inputTensors19, 2);
    auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
    IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
    auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
    ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
    auto cat22 = network->addConcatenation(inputTensors22, 2);
    auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
    IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);

    //auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2});
    auto yolo = addYoLoDynamicLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2}, data);
    yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
    network->markOutput(*yolo->getOutput(0));

    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(16 * (1 << 20));  // 16MB

    switch (dataType) {
        case 0:
            config->setFlag(BuilderFlag::kFP16);
            break;
        case 1:
            std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
            assert(builder->platformHasFastInt8());
            config->setFlag(BuilderFlag::kINT8);
            // NOTE(review): kINT8 is set without an Int8 calibrator (below is commented
            // out) - confirm this is intended before using dataType == 1.
//            Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
//            config->setInt8Calibrator(calibrator);
            break;

        default:
            std::cout << "detection using fp32" << std::endl;
            break;
    }

    // add dynamic shape profile: min/opt/max H and W the engine must support
    IOptimizationProfile* profile = builder->createOptimizationProfile();
    profile->setDimensions(INPUT_BLOB_NAME, OptProfileSelector::kMIN, Dims4(1, 3, 128, 128));
    profile->setDimensions(INPUT_BLOB_NAME, OptProfileSelector::kOPT, Dims4(1, 3, 640, 640));
    profile->setDimensions(INPUT_BLOB_NAME, OptProfileSelector::kMAX, Dims4(1, 3, 1024, 1024));
    config->addOptimizationProfile(profile);

    std::cout << "Building engine, please wait for a while..." << std::endl;
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "Build engine successfully!" << std::endl;

    // Don't need the network any more
    network->destroy();

    // Release host memory (loadWeights malloc'd each blob's values)
    for (auto& mem : weightMap)
    {
        free((void*)(mem.second.values));
    }

    return engine;
}


// Build a YOLOv5-P6 (four detection heads P3..P6) TensorRT engine from a .wts file.
// Unlike build_engine()/build_engine_dynamic(), this uses an implicit-batch
// network (createNetworkV2(0U)) with a Dims3{3, H, W} input, so maxBatchSize
// governs the runtime batch capacity. Parameters/ownership otherwise match
// build_engine().
ICudaEngine* build_engine_p6(unsigned int maxBatchSize, uint8_t dataType, IBuilder* builder, IBuilderConfig* config, DataType dt, const float& gd, const float& gw, const std::string& wts_name) {
    INetworkDefinition* network = builder->createNetworkV2(0U);

    // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
    ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
    assert(data);

    std::map<std::string, Weights> weightMap = loadWeights(wts_name);

    /* ------ yolov5 backbone------ */
    auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
    auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
    auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
    auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
    auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
    auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
    auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
    auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
    auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
    auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
    auto spp10 = SPP(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), 3, 5, 7, "model.10");
    auto c3_11 = C3(network, weightMap, *spp10->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.11");

    /* ------ yolov5 head: top-down FPN path (upsample + concat skip) ------ */
    auto conv12 = convBlock(network, weightMap, *c3_11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
    auto upsample13 = network->addResize(*conv12->getOutput(0));
    assert(upsample13);
    upsample13->setResizeMode(ResizeMode::kNEAREST);
    upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
    ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
    auto cat14 = network->addConcatenation(inputTensors14, 2);
    auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");

    auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
    auto upsample17 = network->addResize(*conv16->getOutput(0));
    assert(upsample17);
    upsample17->setResizeMode(ResizeMode::kNEAREST);
    upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
    ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
    auto cat18 = network->addConcatenation(inputTensors18, 2);
    auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");

    auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
    auto upsample21 = network->addResize(*conv20->getOutput(0));
    assert(upsample21);
    upsample21->setResizeMode(ResizeMode::kNEAREST);
    upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
    ITensor* inputTensors21[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
    auto cat22 = network->addConcatenation(inputTensors21, 2);
    auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");

    /* ------ bottom-up PAN path (stride-2 conv + concat) ------ */
    auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
    ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
    auto cat25 = network->addConcatenation(inputTensors25, 2);
    auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");

    auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
    ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
    auto cat28 = network->addConcatenation(inputTensors28, 2);
    auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");

    auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
    ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
    auto cat31 = network->addConcatenation(inputTensors31, 2);
    auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");

    /* ------ detect: four 1x1 heads, 3 anchors x (classes + 5) channels each ------ */
    IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
    IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
    IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
    IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);

    auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{det0, det1, det2, det3});
    yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
    network->markOutput(*yolo->getOutput(0));

    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(16 * (1 << 20));  // 16MB

    switch (dataType) {
        case 0:
            config->setFlag(BuilderFlag::kFP16);
            break;
        case 1:
            std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
            assert(builder->platformHasFastInt8());
            config->setFlag(BuilderFlag::kINT8);
            // NOTE(review): kINT8 is set without an Int8 calibrator (below is commented
            // out) - confirm this is intended before using dataType == 1.
//            Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
//            config->setInt8Calibrator(calibrator);
            break;

        default:
            std::cout << "detection using fp32" << std::endl;
            break;
    }

    std::cout << "Building engine, please wait for a while..." << std::endl;
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "Build engine successfully!" << std::endl;

    // Don't need the network any more
    network->destroy();

    // Release host memory (loadWeights malloc'd each blob's values)
    for (auto& mem : weightMap)
    {
        free((void*)(mem.second.values));
    }

    return engine;
}


// Build a YOLOv5 TensorRT engine (static, dynamic-shape, or P6 variant) from a
// .wts weight file and serialize it to `engine_name`.
//   maxBatchSize - forwarded to the builder
//   dataType     - 0: FP16, 1: INT8, other: FP32 (see build_engine)
//   use_dynamic  - build the dynamic-H/W variant (ignored when is_p6 is true)
//   is_p6        - build the four-head P6 variant
//   gd, gw       - depth/width multipliers for the chosen model size
// Returns 0 on success, -1 if the plan file cannot be opened.
// Fix: the old error path returned -1 without destroying modelStream/engine/
// config/builder, leaking them; all resources are now released on every path.
int BuildEngine(
    unsigned int maxBatchSize,
    uint8_t dataType,
    bool use_dynamic,
    const bool& is_p6, 
    const float& gd, 
    const float& gw, 
    const std::string& wts_name, 
    const std::string& engine_name) 
{
    // Create builder and its configuration object
    IBuilder* builder = createInferBuilder(gLogger);
    IBuilderConfig* config = builder->createBuilderConfig();

    // Create model to populate the network, then set the outputs and create an engine
    ICudaEngine* engine = nullptr;
    if (is_p6) {
        engine = build_engine_p6(maxBatchSize, dataType, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    }
    else if (use_dynamic) {
        engine = build_engine_dynamic(maxBatchSize, dataType, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    }
    else {
        engine = build_engine(maxBatchSize, dataType, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    }
    assert(engine != nullptr);

    // Serialize the engine and write the plan file
    int ret = 0;
    IHostMemory* modelStream = engine->serialize();
    assert(modelStream != nullptr);
    std::ofstream p(engine_name, std::ios::binary);
    if (!p) {
        std::cerr << "could not open plan output file" << std::endl;
        ret = -1;  // fall through so every TensorRT object is still released
    }
    else {
        p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
    }
    modelStream->destroy();

    // Close everything down (engine first, then config, then builder)
    engine->destroy();
    config->destroy();
    builder->destroy();

    return ret;
}


// Construct the detector wrapper. Only records configuration and nulls out all
// resource handles; GPU/engine setup happens later in init().
// Fix: `stream` was never initialized here, yet free() tests `stream != 0`
// before cudaStreamDestroy - if init()/DeserializeEngine() was never called the
// destructor could destroy a garbage stream handle. Zero it explicitly
// (cudaStreamCreate assigns the real handle in DeserializeEngine()).
YoloV5Detection::YoloV5Detection(int device_id, uint8_t dataType, bool use_dynamic, bool alway_serialize):
        device_id(device_id), dataType(dataType), use_dynamic(use_dynamic), alway_serialize(alway_serialize), runtime(nullptr), engine(nullptr), context(nullptr)
{
    stream = 0;
    device_buffers[0] = nullptr;
    device_buffers[1] = nullptr;
    host_buffers[0] = nullptr;
    host_buffers[1] = nullptr;
}


// Destructor: releases every CUDA/TensorRT resource via free()
// (stream, device/host buffers, context, engine, runtime).
YoloV5Detection::~YoloV5Detection()
{
    free();
}


// Release all CUDA/TensorRT resources. Safe to call more than once: each
// handle is reset after destruction so a second call is a no-op.
// NOTE(review): assumes `stream` is zero before cudaStreamCreate() ran -
// confirm the header (or constructor) zero-initializes it.
int YoloV5Detection::free()
{
    cudaSetDevice(device_id);
	// Release stream and buffers
	if (stream != 0)
	{
		cudaStreamDestroy(stream); stream = 0;
	}
	free_buffers();
	// Destroy TensorRT objects in reverse creation order:
	// context depends on engine, engine depends on runtime.
	if (context != nullptr)
	{
		context->destroy(); context = nullptr;
	}
	if (engine != nullptr)
	{
		engine->destroy(); engine = nullptr;
	}
	if (runtime != nullptr)
	{
		runtime->destroy(); runtime = nullptr;
	}
	return 0;
}


// Release the input/output buffer pair on both the device (cudaFree) and the
// host (delete[]). Each pointer is reset to nullptr afterwards, so calling
// this again - or on never-allocated buffers - is harmless.
int YoloV5Detection::free_buffers()
{
    cudaSetDevice(device_id);
    for (int slot = 0; slot < 2; ++slot) {
        if (device_buffers[slot] != nullptr) {
            CUDA_CHECK(cudaFree(device_buffers[slot]));
            device_buffers[slot] = nullptr;
        }
        if (host_buffers[slot] != nullptr) {
            delete[] host_buffers[slot];
            host_buffers[slot] = nullptr;
        }
    }
    return 0;
}


// Allocate the input (slot 0) and output (slot 1) buffers on both device and
// host, sized for `batchSize` images of 3 x input_h x input_w floats and
// batchSize * OUTPUT_SIZE result floats respectively. The device-id prints are
// diagnostic output kept from the original implementation.
int YoloV5Detection::init_buffers(int batchSize, int input_h, int input_w)
{
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    int id;
    cudaGetDevice(&id);
    std::cout << "=>>>>>>>>>>>> total id: " << device_count << std::endl;
    std::cout << "=>>>>>>>>>>>> before id: " << id << std::endl;

    cudaSetDevice(device_id);

    cudaGetDevice(&id);
    std::cout << "=>>>>>>>>>>>> after id: " << id << std::endl;

    // Element counts for the CHW input tensor and the detection output tensor.
    const size_t input_count = (size_t)batchSize * 3 * input_h * input_w;
    const size_t output_count = (size_t)batchSize * OUTPUT_SIZE;

    // Create GPU buffers on device
    CUDA_CHECK(cudaMalloc(&device_buffers[0], input_count * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&device_buffers[1], output_count * sizeof(float)));
    // Create CPU buffers on host
    host_buffers[0] = new float[input_count];
    host_buffers[1] = new float[output_count];

    return 0;
}


// Load a serialized TensorRT plan from `engine_name` and create the runtime,
// engine and execution context. For static shapes the host/device buffers are
// allocated here; for dynamic shapes predict() allocates them per batch.
// Returns 0 on success, -1 if the engine file cannot be read.
// Fix: the plan bytes now live in a std::vector<char> (RAII) instead of a raw
// new[]/delete[] pair, so they cannot leak on any early exit.
int YoloV5Detection::DeserializeEngine(const std::string& engine_name)
{
    std::ifstream file(engine_name, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << engine_name << " error!" << std::endl;
        return -1;
    }
    // Determine the plan size, then read the whole file into memory.
    file.seekg(0, file.end);
    const size_t size = static_cast<size_t>(file.tellg());
    file.seekg(0, file.beg);
    std::vector<char> trtModelStream(size);
    file.read(trtModelStream.data(), size);
    file.close();

    runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    engine = runtime->deserializeCudaEngine(trtModelStream.data(), size);
    assert(engine != nullptr);
    context = engine->createExecutionContext();
    assert(context != nullptr);
    assert(engine->getNbBindings() == 2);

    // Sanity-check the binding layout: doInference() passes device_buffers with
    // the input at index 0 and the output at index 1.
    const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME);
    //context->setBindingDimensions(inputIndex, Dims4{ BATCH_SIZE, 3, INPUT_H, INPUT_W });
    assert(inputIndex == 0);
    assert(outputIndex == 1);

    if (!use_dynamic)
    {
        // Static shapes: buffers can be sized once up front.
        init_buffers(BATCH_SIZE, INPUT_H, INPUT_W);
    }

    CUDA_CHECK(cudaStreamCreate(&stream));
    return 0;
}


// One-time setup: pick the GPU, derive "<name>.engine" from "<name>.wts",
// build+serialize the engine when no cached .engine exists (or alway_serialize
// is set), then deserialize it. Returns 0 on success, -1 on failure.
// Fixes:
//  - weight_path without ".wts" used to make find() return npos and
//    string::replace(npos, ...) throw std::out_of_range; now fails gracefully.
//  - DeserializeEngine()'s status is propagated instead of always returning 0.
int YoloV5Detection::init(const std::string& weight_path)
{
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    assert(device_count > 0);
    if ((device_id + 1) > device_count)
    {
        // Requested GPU does not exist on this machine; fall back to GPU 0.
        device_id = 0;
    }
    std::cout << "GPU_nums: " << device_count << std::endl;
    std::cout << "YoloV5Detection GPU ID: " << device_id << std::endl;
    cudaSetDevice(device_id);

    // Derive the engine path by swapping the ".wts" suffix for ".engine".
    const std::string key_name = ".wts";
    const std::string::size_type pos = weight_path.find(key_name);
    if (pos == std::string::npos) {
        std::cerr << "YoloV5Detection::init: weight path '" << weight_path
                  << "' does not contain '" << key_name << "'" << std::endl;
        return -1;
    }
    std::string engine_path = weight_path;
    engine_path.replace(pos, key_name.size(), ".engine");

    // Depth/width multipliers per YOLOv5 model size (s/m/l/x); default is 's'.
    bool is_p6 = false;
    float gd = 0.33f, gw = 0.50f;
    if (MODESIZE == 's') {
        gd = 0.33f;
        gw = 0.50f;
    }
    else if (MODESIZE == 'm') {
        gd = 0.67f;
        gw = 0.75f;
    }
    else if (MODESIZE == 'l') {
        gd = 1.0f;
        gw = 1.0f;
    }
    else if (MODESIZE == 'x') {
        gd = 1.33f;
        gw = 1.25f;
    }

    // Build (and serialize) the engine only when no cached .engine file exists,
    // unless a rebuild is explicitly forced.
    fstream fs;
    fs.open(engine_path, ios::in);
    if (!fs.is_open() || alway_serialize) {
        // build engine and serialize from weight file
        BuildEngine(BATCH_SIZE, dataType, use_dynamic, is_p6, gd, gw, weight_path, engine_path);
    }
    // deserialize engine from engine file
    return DeserializeEngine(engine_path);
}


// Copy the preprocessed batch to the device, run the engine asynchronously on
// `stream`, copy the output back, and block until everything completes.
// Returns 0 on success, -1 if TensorRT fails to enqueue the inference.
// Fix: the bool returned by enqueueV2 and the cudaStreamSynchronize status were
// silently ignored, so inference failures produced stale/garbage output.
int YoloV5Detection::doInference(const float* input, float* output, int batchSize, int input_h, int input_w)
{
    cudaSetDevice(device_id);
    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CUDA_CHECK(cudaMemcpyAsync(device_buffers[0], input, (size_t)batchSize * 3 * input_h * input_w * sizeof(float), cudaMemcpyHostToDevice, stream));
    if (!context->enqueueV2(device_buffers, stream, nullptr)) {
        std::cerr << "YoloV5Detection::doInference: enqueueV2 failed" << std::endl;
        return -1;
    }
    CUDA_CHECK(cudaMemcpyAsync(output, device_buffers[1], (size_t)batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    CUDA_CHECK(cudaStreamSynchronize(stream));

    return 0;
}


// Run batched YOLOv5 detection on `vec_imgs` and fill `final_res` with one
// detection list per processed image. predict_h/predict_w are only used on the
// dynamic-shape path; the static path always infers at INPUT_H x INPUT_W.
// Boxes are rescaled to original image coordinates, clipped to the image, and
// boxes smaller than 16x16 px are discarded. Returns 0.
// Fix: on the static-shape path, more than BATCH_SIZE input images used to
// overrun the host/device buffers (the preprocessing loop indexed by image
// count into buffers sized for BATCH_SIZE); excess images are now skipped with
// a warning.
int YoloV5Detection::predict(const vector<cv::Mat>& vec_imgs, std::vector<std::vector<Detection>>& final_res, int predict_h, int predict_w)
{
    cudaSetDevice(device_id);

    int batchsize = BATCH_SIZE;
    int input_w = INPUT_W;
    int input_h = INPUT_H;
    if (use_dynamic)
    {
        // Dynamic shapes: reallocate buffers for this batch/resolution and bind
        // the actual input dimensions to the execution context.
        free_buffers();
        batchsize = static_cast<int>(vec_imgs.size());
        input_h = predict_h;
        input_w = predict_w;
        init_buffers(batchsize, input_h, input_w);
        context->setBindingDimensions(0, Dims4{ batchsize, 3, input_h, input_w });
    }

    int img_num = static_cast<int>(vec_imgs.size());
    if (img_num > batchsize) {
        // Static-shape buffers hold at most `batchsize` images; writing past
        // them was undefined behavior. Process only the first batchsize images.
        std::cerr << "YoloV5Detection::predict: got " << img_num << " images but batch capacity is "
                  << batchsize << "; extra images are ignored" << std::endl;
        img_num = batchsize;
    }

    float* data = (float*)host_buffers[0];
    float* prob = (float*)host_buffers[1];

    // Preprocess: letterbox each image to input_w x input_h, convert BGR->RGB,
    // normalize to [0,1], and write planar CHW floats into the input buffer.
    const int plane = input_h * input_w;  // elements per channel plane
    for (int b = 0; b < img_num; b++) {
        const cv::Mat& img = vec_imgs[b];
        cv::Mat pr_img = preprocess_img(img, input_w, input_h); // letterbox BGR to RGB
        float* dst = data + (size_t)b * 3 * plane;
        int i = 0;
        for (int row = 0; row < input_h; ++row) {
            uchar* uc_pixel = pr_img.data + row * pr_img.step;
            for (int col = 0; col < input_w; ++col) {
                dst[i] = (float)uc_pixel[2] / 255.0;             // R plane
                dst[i + plane] = (float)uc_pixel[1] / 255.0;     // G plane
                dst[i + 2 * plane] = (float)uc_pixel[0] / 255.0; // B plane
                uc_pixel += 3;
                ++i;
            }
        }
    }

    // Run inference
    doInference(data, prob, batchsize, input_h, input_w);

    // Postprocess: per-image non-maximum suppression on the raw network output.
    std::vector<std::vector<Yolo::Detection>> batch_res(img_num);
    for (int b = 0; b < img_num; b++) {
        nms(batch_res[b], &prob[b * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH);
    }

    // Rescale boxes to original image coordinates, clip to the image bounds,
    // and drop boxes smaller than 16x16 pixels.
    final_res.clear();
    final_res.resize(img_num);
    for (int b = 0; b < img_num; b++) {
        const cv::Mat& img = vec_imgs[b];
        auto& res = batch_res[b];
        auto& _res = final_res[b];
        for (size_t j = 0; j < res.size(); j++) {
            cv::Rect r = get_rect(img, res[j].bbox, input_h, input_w);
            int x1 = syshen_min(syshen_max(0, r.x), img.cols - 1);
            int y1 = syshen_min(syshen_max(0, r.y), img.rows - 1);
            int x2 = syshen_min(syshen_max(0, r.x + r.width), img.cols - 1);
            int y2 = syshen_min(syshen_max(0, r.y + r.height), img.rows - 1);
            if ((x2 - x1 < 16) || (y2 - y1 < 16))
                continue;

            // Convert the internal Yolo::Detection into the public Detection record.
            Detection _box;
            _box.name = class_names[res[j].class_id];
            _box.cls = res[j].class_id;
            _box.score = res[j].conf;
            _box.tracking_id = -1;  // tracking ids are assigned downstream, if at all
            _box.box = cv::Rect(x1, y1, x2 - x1, y2 - y1);
            _res.push_back(_box);
        }
    }

    return 0;
}
