//
// Created by zhangcc on 2020/7/4.
//
#include "crnn.h"
#include "utils/RrConfig.h"
#include "utils/utils.h"

#include <algorithm>
#include <fstream>
#include <sstream>
#include <vector>

//#define DEBUG

int CrnnPaddle::serialization(const std::string &model_file, const std::string &engine_file){
    // Builds a TensorRT engine from the ONNX model at `model_file` and writes
    // the serialized engine to `engine_file`.
    // Returns 0 on success, -1 on any failure.
    Logger gLogger(nvinfer1::ILogger::Severity::kERROR);//kWARNING, kINFO, kINTERNAL_ERROR
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    assert(device_count > 0);
    if ((device_id + 1) > device_count) {
        device_id = 0;  // requested GPU not present; fall back to device 0
    }
    std::cout << "GPU_nums: " << device_count << std::endl;
    std::cout << "CraftNet GPU ID: " << device_id << std::endl;
    cudaSetDevice(device_id);

    std::unique_ptr <nvinfer1::IBuilder, nvinfer1::Destroy<nvinfer1::IBuilder>> builder{
            nvinfer1::createInferBuilder(gLogger)};
    if (!builder) {
        return -1;
    }

    // The ONNX parser requires an explicit-batch network definition.
    const auto explicitBatch =
            1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    std::unique_ptr <nvinfer1::INetworkDefinition, nvinfer1::Destroy<nvinfer1::INetworkDefinition>> network{
            builder->createNetworkV2(explicitBatch)};
    if (!network) {
        return -1;
    }

    std::unique_ptr <nvonnxparser::IParser, nvinfer1::Destroy<nvonnxparser::IParser>> parser{
            nvonnxparser::createParser(*network, gLogger)};
    if (!parser) {
        return -1;
    }

    std::unique_ptr <nvinfer1::IBuilderConfig, nvinfer1::Destroy<nvinfer1::IBuilderConfig>> config{
            builder->createBuilderConfig()};
    if (!config) {
        return -1;
    }

    auto parsed = parser->parseFromFile(model_file.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kINFO));
    if (!parsed) {
        std::cout << "ERROR: could not parse input engine." << std::endl;
        return -1;
    }

    builder->setFp16Mode(true);   // legacy flag; BuilderFlag::kFP16 is also set on config below
    builder->setMaxBatchSize(1);  // ignored for explicit-batch networks, kept for older TRT
    // Dynamic input width 32..1920, optimized for 320 — matches the trtexec
    // shape arguments used by the fallback path in init().
    auto profile = builder->createOptimizationProfile();
    profile->setDimensions(network->getInput(0)->getName(), nvinfer1::OptProfileSelector::kMIN,
                           nvinfer1::Dims4{1, 3, 32, 32});
    profile->setDimensions(network->getInput(0)->getName(), nvinfer1::OptProfileSelector::kOPT,
                           nvinfer1::Dims4{1, 3, 32, 320});
    profile->setDimensions(network->getInput(0)->getName(), nvinfer1::OptProfileSelector::kMAX,
                           nvinfer1::Dims4{1, 3, 32, 1920});
    config->addOptimizationProfile(profile);
    config->setFlag(nvinfer1::BuilderFlag::kFP16);

    // BUG FIX: the workspace was set to 1 KiB (1ULL << 10) in two places,
    // far too small for tactic selection; use 1 GiB to match the trtexec
    // fallback in init() (--workspace=1024, in MiB).
    config->setMaxWorkspaceSize(1ULL << 30);
    config->setFlag(nvinfer1::BuilderFlag::kDISABLE_TIMING_CACHE);
    uint32_t cublas = 1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUBLAS);
    uint32_t cublasLt = 1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUBLAS_LT);
    config->setTacticSources(cublas | cublasLt);
    config->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
    config->setFlag(nvinfer1::BuilderFlag::kSTRICT_TYPES);

    std::unique_ptr <nvinfer1::ICudaEngine, nvinfer1::Destroy<nvinfer1::ICudaEngine>> serialize_engine{
            builder->buildEngineWithConfig(*network, *config)};
    if (!serialize_engine) {
        // BUG FIX: previously dereferenced the engine without checking the
        // build succeeded.
        std::cout << "ERROR: engine build failed." << std::endl;
        return -1;
    }

    // Serialize the engine and persist it to disk.
    std::unique_ptr <nvinfer1::IHostMemory, nvinfer1::Destroy<nvinfer1::IHostMemory>> trtModelStream{
            serialize_engine->serialize()};
    if (!trtModelStream) {
        return -1;
    }
    nvinfer1::writeBuffer(trtModelStream->data(), trtModelStream->size(), engine_file.c_str());
    return 0;  // BUG FIX: function previously fell off the end without returning (UB)
}

int CrnnPaddle::init(const std::string &model_file) {
    // Loads (or builds, if absent) the serialized TensorRT engine that sits
    // next to `model_file`, creates the runtime / engine / execution context,
    // and loads the recognition alphabet.
    // Returns 0 on success, -1 on failure.
    Logger gLogger(nvinfer1::ILogger::Severity::kERROR);//kWARNING, kINFO, kINTERNAL_ERROR, kERROR
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    assert(device_count > 0);
    if ((device_id + 1) > device_count) {
        device_id = 0;  // requested GPU not present; fall back to device 0
    }
    std::cout << "GPU_nums: " << device_count << std::endl;
    std::cout << "CrnnNet GPU ID: " << device_id << std::endl;
    cudaSetDevice(device_id);

    // Derive the engine path by swapping the model file's extension.
    // BUG FIX: use rfind — find(".") matches the FIRST dot, so a relative
    // path such as "../models/crnn.onnx" was truncated at the "..".
    size_t dot_pos = model_file.rfind('.');
    std::string engine_file =
            (dot_pos == std::string::npos ? model_file : model_file.substr(0, dot_pos)) + ".engine";

    // Deserialize a cached engine if one exists; otherwise build it either
    // with the external trtexec tool or with serialization().
    std::string buffer = nvinfer1::readBuffer(engine_file.c_str());
    if (buffer.empty()) {
        if (trt_serialize) {
            // NOTE(review): this shell command is assembled from config and
            // caller-supplied paths with no escaping — confirm those sources
            // are trusted.
            std::string TrtRoot = RrConfigMgr::GetInstance().ReadString("ocrConfig", "TrtRoot", "");
            std::string name = TrtRoot + "/bin/trtexec --onnx=";
            name += model_file + " --tacticSources=-cublasLt,+cublas --workspace=1024 ";
            name += "--minShapes=input_0:1x3x32x32 --optShapes=input_0:1x3x32x320 --maxShapes=input_0:1x3x32x1920 --shapes=input_0:1x3x32x1024 ";
            name += "--fp16 --saveEngine=" + engine_file;
            system(name.c_str());
        } else {
            serialization(model_file, engine_file);
        }
    }

    std::unique_ptr <nvinfer1::IRuntime, nvinfer1::Destroy<nvinfer1::IRuntime>> mRuntime{
            nvinfer1::createInferRuntime(gLogger)};
    buffer = nvinfer1::readBuffer(engine_file.c_str());
    if (buffer.empty()) {
        // BUG FIX: previously deserialized an empty buffer and dereferenced
        // the resulting null engine when the build step had failed.
        std::cout << "ERROR: engine file missing or empty: " << engine_file << std::endl;
        return -1;
    }
    mEngine.reset(mRuntime->deserializeCudaEngine(buffer.data(), buffer.size(), nullptr));
    if (!mEngine) {
        return -1;
    }
    context.reset(mEngine->createExecutionContext());
    if (!context) {
        return -1;
    }
    assert(mEngine->getNbBindings() == MAX_BINDING_NUMS);
    cudaStreamCreate(&stream_t);

    // Load the character alphabet used by strDecode().
    getchar("../src/utils/ocr_keys.txt");

    return 0;
}

//bool CrnnPaddle::getchar() {
//    std::ifstream fin("../src/utils/ocr_keys.txt");   // filename: xly2016I.txt
//    std::stringstream buffer;            // stringstream object
//    if(fin) // 有该文件
//    {
//        std::string line;
//        while (getline (fin, line)) // line中不包括每行的换行符
//        {
//            line.erase(std::remove(line.begin(), line.end(), '\n'), line.end());
//            line.erase(std::remove(line.begin(), line.end(), '\r'), line.end());
//            alphabet.push_back(line);
//            //std::cout << "@@@@@@ " << line << std::endl;
//        }
//        alphabet.push_back("");
//    }
//    return true;
//}

bool CrnnPaddle::Input(const cv::Mat &src, float *data) {
    // Converts a 3-channel 8-bit image (interleaved HWC, OpenCV BGR order)
    // into planar CHW float data normalized to [-1, 1], with the channel
    // planes written in R, G, B order (pixel indices 2, 1, 0).
    // `data` must hold at least 3 * src.rows * src.cols floats.
    const int plane = src.rows * src.cols;  // hoisted: size of one channel plane
    int i = 0;
    for (int row = 0; row < src.rows; ++row) {
        uchar* uc_pixel = src.data + row * src.step;  // src.step handles row padding
        for (int col = 0; col < src.cols; ++col) {
            data[i]             = 2.0f * ((float)uc_pixel[2] / 255.0f - 0.5f);
            data[i + plane]     = 2.0f * ((float)uc_pixel[1] / 255.0f - 0.5f);
            data[i + 2 * plane] = 2.0f * ((float)uc_pixel[0] / 255.0f - 0.5f);
            uc_pixel += 3;
            ++i;
        }
    }
    return true;  // BUG FIX: function previously fell off the end without returning (UB)
}

//std::vector<std::string> CrnnPaddle::strDecode(std::vector<int> &preds, bool raw) {
//    std::vector<std::string> str;
//    if (raw) {
//        for (auto v: preds) {
//            str.push_back(alphabet[v]);
//        }
//    } else {
//        for (size_t i = 0; i < preds.size(); i++) {
//            if (preds[i] == 0 || (i > 0 && preds[i - 1] == preds[i])) continue;
//            str.push_back(alphabet[preds[i] - 1]);
//        }
//    }
//    return str;
//}

std::vector<std::string> CrnnPaddle::extract(const cv::Mat &inputs) {
    // Runs CRNN recognition on a single text-line crop and returns the
    // decoded characters.
    cudaSetDevice(device_id);

    // --- Preprocess: resize to height 32, width scaled by the same ratio,
    // rounded down to a multiple of 32 and clamped to [32, 1920]. ---
    const int max_width = 1920, imgh = 32, imgw = 320;
    cv::Mat process, padded_im;
    const int h = inputs.rows, w = inputs.cols;
    const float ratio = float(h) / float(imgh);
    int resize_w = w / ratio;  // float division, truncated on assignment
    if (resize_w > max_width) resize_w = max_width;
    resize_w = resize_w / 32 * 32;
    if (resize_w < 32) resize_w = 32;

    cv::resize(inputs, process, cv::Size(resize_w, imgh));
    if (process.cols < imgw) {
        // Pad narrow crops on the right up to the profile's optimal width.
        cv::copyMakeBorder(
                process, padded_im, 0, 0, 0, imgw - resize_w,
                cv::BORDER_CONSTANT,
                cv::Scalar::all(0));
    } else {
        padded_im = process;
    }

    // --- Allocate device bindings sized from the dynamic input shape. ---
    context->setOptimizationProfile(0);
    nvinfer1::Dims4 inputDims{1, 3, padded_im.rows, padded_im.cols};
    context->setBindingDimensions(0, inputDims);
    const int inputBufferSize = getMemorySize(inputDims);
    CHECK(cudaMalloc(&bindings[0], inputBufferSize * sizeof(float)));

    const auto outputDims = context->getBindingDimensions(1);
    const int outputBufferSize = getMemorySize(outputDims);
    CHECK(cudaMalloc(&bindings[1], outputBufferSize * sizeof(float)));
    alpha_len = outputDims.d[1];//padded_im.cols / 4;
    feats_dim = outputDims.d[2];//alpha_len * alpha_size;

    // Host staging buffers. BUG FIX: the originals were raw new[] arrays
    // released with the scalar `delete` (UB) and leaked on any exception;
    // std::vector frees them correctly on every exit path.
    std::vector<float> host_input(inputBufferSize);
    std::vector<float> host_output(outputBufferSize);
    Input(padded_im, host_input.data());

    CHECK(cudaMemcpyAsync(bindings[0], host_input.data(), \
        inputBufferSize * sizeof(float), cudaMemcpyHostToDevice, stream_t));

    bool status = context->enqueueV2(bindings, stream_t, nullptr);
    if (!status) {
        std::cout << "Enqueue failed" << std::endl;
    }
    CHECK(cudaMemcpyAsync(host_output.data(), bindings[1], \
                outputBufferSize * sizeof(float), cudaMemcpyDeviceToHost, stream_t));

    cudaStreamSynchronize(stream_t);

    // --- Greedy (argmax) decode per time step. The per-step std::vector
    // copy of the original is avoided; max_element runs on the buffer. ---
    std::vector<int> preds;
    preds.reserve(alpha_len);
    const float *step = host_output.data();
    for (int icc = 0; icc < alpha_len; icc++) {
        const float *best = std::max_element(step, step + alpha_size);
        preds.push_back(static_cast<int>(std::distance(step, best)));
        step += alpha_size;
    }

    std::vector<std::string> aa = strDecode(preds, false);

    // Release the per-call device buffers; null them so a stale pointer is
    // never freed twice.
    for (void *&ptr : bindings) {
        cudaFree(ptr);
        ptr = nullptr;
    }

    return aa;
}

// Releases the CUDA stream created in init(). Device buffers are allocated
// and freed per call inside extract(); mEngine and context are reset via
// smart-pointer members (see init()) and clean up on their own.
CrnnPaddle::~CrnnPaddle() {
    cudaStreamDestroy(stream_t);
}

// Project macro (defined elsewhere) — presumably registers CrnnPaddle with a
// CRNN factory so it can be instantiated by name; verify against crnn.h.
REGISTER_CRNN_CLASS(CrnnPaddle);