#include <iostream>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "chrono"
#include "vector"
#include "fstream"
#include "utils.h"
#include "math.h"
#include "opencv2/opencv.hpp"
#include "sstream"
#include "string"

using namespace cv;
using namespace std;
using namespace nvinfer1;

static Logger gLogger;  // shared TensorRT logger, used by builder and runtime

// Network input geometry (the channel count of 3 is hard-coded at addInput).
static const int INPUT_H = 32;
static const int INPUT_W = 130;
// Host-side output buffer element count (32 * 5 * 5 = 800); presumably sized
// to the flattened FC output -- TODO confirm against the built engine binding.
static const int OUTPUT_SIZE = 32 * 5 * 5;

const char *INPUT_BLOB_NAME = "data";  // input tensor binding name
const char *OUTPUT_BLOB_NAME = "D";    // output binding name (assigned to the FC layer output)


std::vector<float> loadWeights(const std::string &filePath) {
    // Reads one exported weight blob: a 4-byte element count followed by
    // `size` float32 values (the exporter's fixed format).
    //
    // Returns an empty vector when the file is missing, the count is
    // non-positive, or the payload is truncated -- the original returned
    // uninitialized garbage on a short read.
    std::ifstream file(filePath, std::ios::binary);
    if (!file) {
        std::cerr << "loadWeights: cannot open " << filePath << std::endl;
        return {};
    }

    int size = 0;
    file.read(reinterpret_cast<char *>(&size), sizeof(size));
    if (!file || size <= 0) {
        return {};
    }

    // Read directly into the vector's storage; no intermediate char buffer.
    std::vector<float> weights(static_cast<size_t>(size));
    file.read(reinterpret_cast<char *>(weights.data()),
              static_cast<std::streamsize>(size) * sizeof(float));
    if (!file) {
        // Short read: do not hand back a partially-initialized buffer.
        weights.clear();
    }
    return weights;
}

// Split a PyTorch LSTM parameter blob (the four gates concatenated along the
// first dimension, in i/f/g/o order) into four equal TensorRT Weights chunks.
//
// Each chunk gets its own heap copy because TensorRT requires Weights memory
// to remain valid until the engine is built; the allocations are intentionally
// never freed. Takes the blob by const reference (was by value: a needless
// deep copy of tensors that can be hundreds of KB).
vector<Weights> splitLstmWeights(const vector<float> &weights) {
    vector<Weights> vc;
    vc.reserve(4);

    const int chunk = static_cast<int>(weights.size()) / 4;
    for (int i = 0; i < 4; i++) {
        Weights wt{nvinfer1::DataType::kFLOAT, nullptr, chunk};
        float *val = new float[chunk];  // leaked on purpose: must outlive engine build
        for (int j = 0; j < chunk; j++) {
            val[j] = weights[i * chunk + j];
        }
        wt.values = val;
        vc.push_back(wt);
    }
    return vc;
}

IActivationLayer *
CBR(INetworkDefinition *network, string wgt_number, ITensor &input, int out_channel, int hstride = 1, int wstride = 1,
    int pad = 1) {
    vector<float> conv1_weights;

    conv1_weights = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/conv2d_" + wgt_number + ".weight.wgt");

    int w_size = conv1_weights.size();

    Weights convWeights{nvinfer1::DataType::kFLOAT, nullptr, w_size};
    Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, 0};
    float *val_wt = new float[w_size];

    for (int i = 0; i < w_size; i++) {
        val_wt[i] = conv1_weights[i];
    }
    convWeights.values = val_wt;


    IConvolutionLayer *conv1 = network->addConvolutionNd(input, out_channel, DimsHW{3, 3}, convWeights, convBias);
    assert(conv1);
    conv1->setStrideNd(DimsHW{hstride, wstride});
    conv1->setPaddingNd(DimsHW{pad, pad});


    float eps = 1e-5;
    vector<float> bn_1_bias;
    vector<float> bn_1_mean;
    vector<float> bn_1_var;
    vector<float> bn_1_weight;
    bn_1_bias = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".bias.wgt");
    bn_1_mean = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".running_mean.wgt");
    bn_1_var = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".running_var.wgt");
    bn_1_weight = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".weight.wgt");
    int bn_size = bn_1_weight.size();

    float *scval = reinterpret_cast<float *>(malloc(sizeof(float) * bn_size));
    for (int i = 0; i < bn_size; i++) {
        scval[i] = bn_1_weight[i] / sqrt(bn_1_var[i] + eps);
    }

    float *shval = reinterpret_cast<float *>(malloc(sizeof(float) * bn_size));
    for (int i = 0; i < bn_size; i++) {
        shval[i] = bn_1_bias[i] - bn_1_mean[i] * bn_1_weight[i] / sqrt(bn_1_var[i] + eps);
    }
    float *pval = reinterpret_cast<float *>(malloc(sizeof(float) * bn_size));
    for (int i = 0; i < bn_size; i++) {
        pval[i] = 1.0;
    }
    Weights shift{nvinfer1::DataType::kFLOAT, shval, bn_size};
    Weights scale{nvinfer1::DataType::kFLOAT, scval, bn_size};
    Weights power{nvinfer1::DataType::kFLOAT, pval, bn_size};

    IScaleLayer *scale1 = network->addScale(*conv1->getOutput(0), ScaleMode::kCHANNEL, shift, scale, power);
    assert(scale1);

    IActivationLayer *relu = network->addActivation(*scale1->getOutput(0), ActivationType::kRELU);
    assert(relu);

    return relu;
}


// Conv(3x3) + folded BatchNorm + ReLU, defaulting to a height stride of 2
// (used by the downsampling blocks).
//
// The body was a byte-for-byte copy of CBR differing only in the default
// hstride; delegate instead of duplicating it.
IActivationLayer *
CBRV2(INetworkDefinition *network, string wgt_number, ITensor &input, int out_channel, int hstride = 2, int wstride = 1,
      int pad = 1) {
    return CBR(network, wgt_number, input, out_channel, hstride, wstride, pad);
}


// Conv + folded BatchNorm (no activation).
//
// Loads conv2d_<wgt_number>.weight plus the four BN_<wgt_number> tensors from
// disk, folds the batch norm into a per-channel IScaleLayer
// (y = (x * scale + shift) ^ power), and returns that scale layer.
//
// NOTE: the heap buffers below (val_wt, scval, shval, pval) are deliberately
// never freed -- TensorRT requires Weights memory to stay valid until the
// engine is built.
IScaleLayer *
CB(INetworkDefinition *network, string wgt_number, ITensor &input, int out_channel, int hstride = 1, int wstride = 1,
   int pad = 1, int kernel_size = 3) {
    vector<float> conv1_weights;

    conv1_weights = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/conv2d_" + wgt_number + ".weight.wgt");

    int w_size = conv1_weights.size();

    // Convolution has no bias (count 0): the BN shift below absorbs it.
    Weights convWeights{nvinfer1::DataType::kFLOAT, nullptr, w_size};
    Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, 0};
    float *val_wt = new float[w_size];

    for (int i = 0; i < w_size; i++) {
        val_wt[i] = conv1_weights[i];
    }
    convWeights.values = val_wt;


    IConvolutionLayer *conv1 = network->addConvolutionNd(input, out_channel, DimsHW{kernel_size, kernel_size},
                                                         convWeights, convBias);
    assert(conv1);
    conv1->setStrideNd(DimsHW{hstride, wstride});
    conv1->setPaddingNd(DimsHW{pad, pad});


    // Fold BatchNorm into a scale layer: scale = gamma / sqrt(var + eps),
    // shift = beta - mean * scale, power = 1 (identity exponent).
    float eps = 1e-5;
    vector<float> bn_1_bias;
    vector<float> bn_1_mean;
    vector<float> bn_1_var;
    vector<float> bn_1_weight;
    bn_1_bias = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".bias.wgt");
    bn_1_mean = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".running_mean.wgt");
    bn_1_var = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".running_var.wgt");
    bn_1_weight = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/BN_" + wgt_number + ".weight.wgt");
    int bn_size = bn_1_weight.size();

    float *scval = reinterpret_cast<float *>(malloc(sizeof(float) * bn_size));
    for (int i = 0; i < bn_size; i++) {
        scval[i] = bn_1_weight[i] / sqrt(bn_1_var[i] + eps);
    }

    float *shval = reinterpret_cast<float *>(malloc(sizeof(float) * bn_size));
    for (int i = 0; i < bn_size; i++) {
        shval[i] = bn_1_bias[i] - bn_1_mean[i] * bn_1_weight[i] / sqrt(bn_1_var[i] + eps);
    }
    float *pval = reinterpret_cast<float *>(malloc(sizeof(float) * bn_size));
    for (int i = 0; i < bn_size; i++) {
        pval[i] = 1.0;
    }
    Weights shift{nvinfer1::DataType::kFLOAT, shval, bn_size};
    Weights scale{nvinfer1::DataType::kFLOAT, scval, bn_size};
    Weights power{nvinfer1::DataType::kFLOAT, pval, bn_size};

    IScaleLayer *scale1 = network->addScale(*conv1->getOutput(0), ScaleMode::kCHANNEL, shift, scale, power);
    assert(scale1);


    return scale1;
}


// Residual block with a projection shortcut: conv-BN-ReLU -> conv-BN on the
// main path, 1x1 conv-BN on the shortcut, summed and passed through ReLU.
// wgt_number is the index of the first conv; the block consumes three
// consecutive weight indices.
IActivationLayer *BasicBlock(INetworkDefinition *network, string wgt_number, ITensor &input, int out_channel) {
    const int base = atoi(wgt_number.c_str());

    auto *mainPath = CBR(network, wgt_number, input, out_channel);
    assert(mainPath);
    auto *mainOut = CB(network, to_string(base + 1), *mainPath->getOutput(0), out_channel);
    assert(mainOut);

    // Shortcut: 1x1 kernel, no padding, so channel counts match for the sum.
    auto *shortcut = CB(network, to_string(base + 2), input, out_channel, 1, 1, 0, 1);
    assert(shortcut);

    auto *merged = network->addElementWise(*mainOut->getOutput(0), *shortcut->getOutput(0),
                                           ElementWiseOperation::kSUM);
    assert(merged);

    auto *activated = network->addActivation(*merged->getOutput(0), ActivationType::kRELU);
    assert(activated);
    return activated;
}

// Downsampling residual block: CBRV2 (default height stride 2) -> conv-BN on
// the main path; on the shortcut, a 2x1 average pool followed by a 1x1 conv-BN
// so the two branches agree spatially and in channels before the sum + ReLU.
// Consumes three consecutive weight indices starting at wgt_number.
IActivationLayer *BasicBlockV3(INetworkDefinition *network, string wgt_number, ITensor &input, int out_channel) {
    auto *relu1 = CBRV2(network, wgt_number, input, out_channel);
    assert(relu1);

    auto *cb1 = CB(network, to_string(atoi(wgt_number.c_str()) + 1), *relu1->getOutput(0), out_channel);
    assert(cb1);

    // NOTE(review): pool stride is left at the layer default here, unlike the
    // pools in createEngine which set it explicitly -- confirm this matches
    // the exporting model's AvgPool configuration.
    auto *ap = network->addPoolingNd(input, PoolingType::kAVERAGE, DimsHW{2, 1});
    assert(ap);

    auto *cb2 = CB(network, to_string(atoi(wgt_number.c_str()) + 2), *ap->getOutput(0), out_channel, 1, 1, 0, 1);
    assert(cb2);  // was the only unchecked layer result in these blocks

    IElementWiseLayer *ew1 = network->addElementWise(*cb2->getOutput(0), *cb1->getOutput(0),
                                                     ElementWiseOperation::kSUM);
    assert(ew1);

    IActivationLayer *relu = network->addActivation(*ew1->getOutput(0), ActivationType::kRELU);
    assert(relu);

    return relu;
}


// Identity-shortcut residual block: conv-BN-ReLU -> conv-BN, summed with the
// unmodified input, then ReLU. Consumes two consecutive weight indices
// starting at wgt_number; input and output channel counts must match.
IActivationLayer *BasicBlockv2(INetworkDefinition *network, string wgt_number, ITensor &input, int out_channel) {
    const int base = atoi(wgt_number.c_str());

    auto *stage1 = CBR(network, wgt_number, input, out_channel);
    assert(stage1);

    auto *stage2 = CB(network, to_string(base + 1), *stage1->getOutput(0), out_channel);
    assert(stage2);

    // Identity skip connection: add the block input directly.
    auto *merged = network->addElementWise(*stage2->getOutput(0), input, ElementWiseOperation::kSUM);
    assert(merged);

    auto *activated = network->addActivation(*merged->getOutput(0), ActivationType::kRELU);
    assert(activated);
    return activated;
}

// Builds the 2-layer bidirectional LSTM neck (hidden size 256) from
// PyTorch-exported weights.
//
// PyTorch stores the four LSTM gates concatenated in i, f, g(cell), o order;
// splitLstmWeights() separates them and each chunk is installed on the
// matching TensorRT RNNv2 gate. Each bidirectional PyTorch layer becomes one
// addRNNv2 layer whose TensorRT pseudo-layer 0 is the forward direction and
// pseudo-layer 1 the reverse direction. The original body spelled all 128
// assignments out by hand; this data-driven form installs them in the same
// order with the same file names.
IRNNv2Layer *AddLSTM(INetworkDefinition *network, ITensor &input) {
    const string base = "/home/luotianhang/CLionProjects/crnn/weights/neck.encoder.lstm.";
    const RNNGateType gateOrder[4] = {RNNGateType::kINPUT, RNNGateType::kFORGET,
                                      RNNGateType::kCELL, RNNGateType::kOUTPUT};

    // Load one exported tensor, split it into per-gate chunks, and install
    // them on `layer`. trtLayerIndex: 0 = forward, 1 = reverse direction.
    // isInputTransform selects the W (input) vs R (recurrent) parameter set.
    auto installGates = [&](IRNNv2Layer *layer, int trtLayerIndex, const string &name,
                            bool isWeight, bool isInputTransform) {
        vector<Weights> parts = splitLstmWeights(loadWeights(base + name + ".wgt"));
        for (int g = 0; g < 4; g++) {
            if (isWeight) {
                layer->setWeightsForGate(trtLayerIndex, gateOrder[g], isInputTransform, parts[g]);
            } else {
                layer->setBiasForGate(trtLayerIndex, gateOrder[g], isInputTransform, parts[g]);
            }
        }
    };

    // Install the forward and reverse parameter sets of one PyTorch layer
    // ("l0" or "l1") onto a bidirectional RNNv2 layer.
    auto installLayer = [&](IRNNv2Layer *layer, const string &torchLayer) {
        layer->setDirection(RNNDirection::kBIDIRECTION);
        const string suffix[2] = {torchLayer, torchLayer + "_reverse"};
        for (int dir = 0; dir < 2; dir++) {
            installGates(layer, dir, "weight_ih_" + suffix[dir], true, true);
            installGates(layer, dir, "weight_hh_" + suffix[dir], true, false);
            installGates(layer, dir, "bias_ih_" + suffix[dir], false, true);
            installGates(layer, dir, "bias_hh_" + suffix[dir], false, false);
        }
    };

    auto lstm1 = network->addRNNv2(input, 1, 256, input.getDimensions().d[1], RNNOperation::kLSTM);
    assert(lstm1);  // was unchecked before the setter calls
    installLayer(lstm1, "l0");

    auto lstm2 = network->addRNNv2(*lstm1->getOutput(0), 1, 256,
                                   lstm1->getOutput(0)->getDimensions().d[1], RNNOperation::kLSTM);
    assert(lstm2);
    installLayer(lstm2, "l1");

    return lstm2;
}


// Defines the full CRNN graph (ResNet-style backbone -> reshape -> 2-layer
// BiLSTM neck -> fully-connected head) with the TensorRT layer API and builds
// an engine from it. The caller owns the returned engine; returns the
// builder's result directly (may be nullptr on build failure).
ICudaEngine *createEngine(unsigned int maxBatchSize, IBuilder *builder, IBuilderConfig *config, nvinfer1::DataType dt) {
    INetworkDefinition *network = builder->createNetworkV2(0U);
    //x
    // Implicit-batch network: dims are CHW without the batch dimension.
    ITensor *data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{3, INPUT_H, INPUT_W});
    assert(data);
    //backbone
#pragma region
    //conv1
    auto *RELU1 = CBR(network, "1", *data, 32);
    assert(RELU1);
    auto *RELU2 = CBR(network, "2", *RELU1->getOutput(0), 32);
    assert(RELU2);
    auto *RELU3 = CBR(network, "3", *RELU2->getOutput(0), 64);
    assert(RELU3);
    //pool1
    auto *MAXPOOL1 = network->addPoolingNd(*RELU3->getOutput(0), PoolingType::kMAX, DimsHW{3, 3});
    assert(MAXPOOL1);
    MAXPOOL1->setPaddingNd(DimsHW{1, 1});
    MAXPOOL1->setStrideNd(DimsHW{2, 2});
    //stage 1
    // Weight indices ("4", "7", ...) advance by the number of convs each
    // block consumes: BasicBlock/BasicBlockV3 use 3, BasicBlockv2 uses 2.
    auto *BASIC1 = BasicBlock(network, "4", *MAXPOOL1->getOutput(0), 64);
    assert(BASIC1);

    auto *BASIC2 = BasicBlockv2(network, "7", *BASIC1->getOutput(0), 64);
    assert(BASIC2);

    auto *BASIC3 = BasicBlockv2(network, "9", *BASIC2->getOutput(0), 64);
    assert(BASIC3);
    //stage2
    auto *BASIC4 = BasicBlockV3(network, "11", *BASIC3->getOutput(0), 128);
    assert(BASIC4);
    auto *BASIC5 = BasicBlockv2(network, "14", *BASIC4->getOutput(0), 128);
    assert(BASIC5);
    auto *BASIC6 = BasicBlockv2(network, "16", *BASIC5->getOutput(0), 128);
    assert(BASIC6);
    auto *BASIC7 = BasicBlockv2(network, "18", *BASIC6->getOutput(0), 128);
    assert(BASIC7);
    //stage3
    auto *BASIC8 = BasicBlockV3(network, "20", *BASIC7->getOutput(0), 256);
    assert(BASIC8);
    auto *BASIC9 = BasicBlockv2(network, "23", *BASIC8->getOutput(0), 256);
    assert(BASIC9);
    auto *BASIC10 = BasicBlockv2(network, "25", *BASIC9->getOutput(0), 256);
    assert(BASIC10);
    auto *BASIC11 = BasicBlockv2(network, "27", *BASIC10->getOutput(0), 256);
    assert(BASIC11);
    auto *BASIC12 = BasicBlockv2(network, "29", *BASIC11->getOutput(0), 256);
    assert(BASIC12);
    auto *BASIC13 = BasicBlockv2(network, "31", *BASIC12->getOutput(0), 256);
    assert(BASIC13);
    //stage4
    auto *BASIC14 = BasicBlockV3(network, "33", *BASIC13->getOutput(0), 512);
    assert(BASIC14);
    auto *BASIC15 = BasicBlockv2(network, "36", *BASIC14->getOutput(0), 512);
    assert(BASIC15);
    auto *BASIC16 = BasicBlockv2(network, "38", *BASIC15->getOutput(0), 512);
    assert(BASIC16);
    //pool2
    auto *MAXPOOL2 = network->addPoolingNd(*BASIC16->getOutput(0), PoolingType::kMAX, DimsHW{2, 2});
    assert(MAXPOOL2);
    MAXPOOL2->setPaddingNd(DimsHW{0, 0});
    MAXPOOL2->setStrideNd(DimsHW{2, 2});
#pragma endregion
    //neck
#pragma region
//    cout<<MAXPOOL2->getOutput(0)->getDimensions().d[0]<<endl;
//    cout<<MAXPOOL2->getOutput(0)->getDimensions().d[1]<<endl;
//    cout<<MAXPOOL2->getOutput(0)->getDimensions().d[2]<<endl;
    // Collapse the CHW feature map to (1, W', 512) so width becomes the LSTM
    // sequence axis: reshape to {512, 1, -1}, then permute to put channels last.
    auto *SHUFFLE = network->addShuffle(*MAXPOOL2->getOutput(0));
    assert(SHUFFLE);
    Dims3 dims{512, 1, -1};
    SHUFFLE->setReshapeDimensions(dims);
    Permutation permutation{1, 2, 0};
    SHUFFLE->setSecondTranspose(permutation);
//    cout<<SHUFFLE->getOutput(0)->getDimensions().d[0]<<endl;
//    cout<<SHUFFLE->getOutput(0)->getDimensions().d[1]<<endl;
//    cout<<SHUFFLE->getOutput(0)->getDimensions().d[2]<<endl;
    auto *LSTM = AddLSTM(network, *SHUFFLE->getOutput(0));
    assert(LSTM);


#pragma endregion
    //head
    // Rearrange the LSTM output into NCHW-like 4D form so the FC layer can
    // consume it per time step.
    auto *SHUFFLE2 = network->addShuffle(*LSTM->getOutput(0));
    assert(SHUFFLE2);
    Dims4 dims4{1, 1, -1, 512};
    SHUFFLE2->setReshapeDimensions(dims4);
    Permutation permutation2{2, 0, 1, 3};
    SHUFFLE2->setSecondTranspose(permutation2);
//    Dims2 dims2{-1,512};
//    SHUFFLE2->setReshapeDimensions(dims2);

    // Debug prints of the head input shape (kept intentionally).
    cout << SHUFFLE2->getOutput(0)->getDimensions().d[0] << endl;
    cout << SHUFFLE2->getOutput(0)->getDimensions().d[1] << endl;
    cout << SHUFFLE2->getOutput(0)->getDimensions().d[2] << endl;
    cout << SHUFFLE2->getOutput(0)->getDimensions().d[3] << endl;

    vector<float> fc_weights;
    vector<float> fc_bias;
    fc_weights = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/linear_1.weight.wgt");
    fc_bias = loadWeights("/home/luotianhang/CLionProjects/crnn/weights/linear_1.bias.wgt");


    int fc_w_size = fc_weights.size();
    int fc_b_size = fc_bias.size();
    Weights groot{nvinfer1::DataType::kFLOAT, nullptr, fc_w_size};
    Weights racoon{nvinfer1::DataType::kFLOAT, nullptr, fc_b_size};

    // Heap copies are never freed: TensorRT needs them alive until build.
    float *grootWt = new float[fc_w_size];
    for (int i = 0; i < fc_w_size; ++i) {
        grootWt[i] = fc_weights.at(i);
    }
    groot.values = grootWt;
    float *racoonWt = new float[fc_b_size];
    if (fc_bias.size() == fc_b_size) {
        for (int j = 0; j < fc_b_size; ++j) {
            racoonWt[j] = fc_bias.at(j);
        }
    } else {
        for (int j = 0; j < fc_b_size; ++j) {
            racoonWt[j] = 0.0;
        }
    }
    racoon.values = racoonWt;

    // Output channel count = bias length (number of classes).
    IFullyConnectedLayer *fc1 = network->addFullyConnected(*SHUFFLE2->getOutput(0), fc_b_size, groot, racoon);

    assert(fc1);


    fc1->getOutput(0)->setName(OUTPUT_BLOB_NAME);

    network->markOutput(*fc1->getOutput(0));

    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(200 * (1 << 20));
    ICudaEngine *engine = builder->buildEngineWithConfig(*network, *config);

    // The network definition is no longer needed once the engine is built.
    network->destroy();

    return engine;


}


// Builds the CRNN engine with the TensorRT API and serializes it into
// *modelStream. The caller owns the returned IHostMemory and must destroy it.
void APIToModel(unsigned int maxBatchSize, IHostMemory **modelStream) {
    IBuilder *builder = createInferBuilder(gLogger);
    assert(builder != nullptr);
    IBuilderConfig *config = builder->createBuilderConfig();
    assert(config != nullptr);

    ICudaEngine *engine = createEngine(maxBatchSize, builder, config, nvinfer1::DataType::kFLOAT);
    assert(engine != nullptr);

    (*modelStream) = engine->serialize();

    // Destroy in reverse order of creation: config was created from the
    // builder, so release it before the builder (original destroyed the
    // builder first).
    engine->destroy();
    config->destroy();
    builder->destroy();
}

// Runs one synchronous inference pass: copies `input` (batchSize x 3 x H x W
// floats) to the device, enqueues the engine on a private stream, and copies
// batchSize * OUTPUT_SIZE floats back into `output`. Allocates and frees the
// device buffers on every call (fine for benchmarking, wasteful for a
// production loop).
void doInference(IExecutionContext &context, float *input, float *output, int batchSize) {
    const ICudaEngine &engine = context.getEngine();

    // Exactly one input and one output binding are expected.
    assert(engine.getNbBindings() == 2);
    void *buffers[2];

    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);

    CHECK(cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_W * INPUT_H * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));

    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_W * INPUT_H * sizeof(float),
                          cudaMemcpyHostToDevice, stream));
    // enqueue() returns false on failure; the original ignored the result.
    if (!context.enqueue(batchSize, buffers, stream, nullptr)) {
        std::cerr << "doInference: TensorRT enqueue failed" << std::endl;
    }
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost,
                          stream));
    // Wrap in CHECK like every other CUDA call (was unchecked): this is also
    // where asynchronous kernel errors surface.
    CHECK(cudaStreamSynchronize(stream));

    CHECK(cudaStreamDestroy(stream));
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}


int main() {
    char *trtModelStream{nullptr};
    size_t size{0};

    IHostMemory *modelStream{nullptr};
    APIToModel(1, &modelStream);
    assert(modelStream != nullptr);

    std::ofstream p("crnn.engine");
    if (!p) {
        std::cerr << "can not open plan output file" << std::endl;
        return -1;
    }

    p.write(reinterpret_cast<const char *>(modelStream->data()), modelStream->size());
    modelStream->destroy();

    std::ifstream file("crnn.engine", std::ios::binary);
    if (file.good()) {
        file.seekg(0, file.end);
        size = file.tellg();
        file.seekg(0, file.beg);
        trtModelStream = new char[size];
        assert(trtModelStream);
        file.read(trtModelStream, size);
        file.close();
    } else {
        return -1;
    }

//    float data[3*INPUT_H*INPUT_W];
//    for (int i=0;i<3*INPUT_H*INPUT_W;i++){
//        data[i]=1;
//    }
    static float data[3 * INPUT_H * INPUT_W];
//    cv::Mat img = cv::imread("1.jpg");
//    if (img.empty()) {
//        std::cerr << "demo.png not found !!!" << std::endl;
//        return -1;
//    }
//    cv::cvtColor(img, img, COLOR_BGR2RGB);
//    cv::resize(img, img, cv::Size(INPUT_W, INPUT_H));
    for (int i = 0; i < INPUT_H * INPUT_W * 3; i++) {
//        data[i] = ((float)img.at<uchar>(i) / 255.0 - 0.5) * 2.0;
//            data[i]= (float ) img.at<uchar>(i);
        data[i] = 1;
    }

    IRuntime *runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    ICudaEngine *engine = runtime->deserializeCudaEngine(trtModelStream, size, nullptr);
    assert(engine != nullptr);
    IExecutionContext *context = engine->createExecutionContext();
    assert(context != nullptr);

    float prob[OUTPUT_SIZE];
    doInference(*context, data, prob, 1);
    auto start = std::chrono::system_clock::now();
    for (int i = 0; i < 100; i++) {

        doInference(*context, data, prob, 1);

    }
    auto end = std::chrono::system_clock::now();

    std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << " ms" << std::endl;
    context->destroy();
    engine->destroy();
    runtime->destroy();

//    std::cout << "\n OUTPUT:\n";
//    for (unsigned int i = 0; i < OUTPUT_SIZE; i++) {
//        std::cout << prob[i] << ",";
//        if ((i + 1) % 5 == 0)std::cout << std::endl;
//    }

    std::cout << std::endl;

    return 0;

}






