#include <iostream>
#include <chrono>
#include "cuda_runtime_api.h"
#include "logging.h"
#include <opencv2/opencv.hpp>
#include <dirent.h>
#include "NvInfer.h"
#include <fstream>

// Abort on any non-zero CUDA runtime status, printing a human-readable
// error string with the failing file/line instead of only the raw code.
// Usage: CHECK(cudaMalloc(...));
#define CHECK(status) \
    do\
    {\
        auto ret = (status);\
        if (ret != 0)\
        {\
            std::cerr << "Cuda failure at " << __FILE__ << ":" << __LINE__\
                      << ": " << cudaGetErrorString(ret) << std::endl;\
            abort();\
        }\
    } while (0)

using namespace nvinfer1;

#define USE_FP16  // comment out this if want to use FP32; NOTE(review): not referenced in this file — presumably consumed by the engine-build translation unit, confirm
#define DEVICE 0  // GPU id passed to cudaSetDevice
#define NMS_THRESH 0.5   // NOTE(review): unused in this file — likely a leftover from a detection pipeline
#define CONF_THRESH 0.3  // NOTE(review): unused in this file
#define BATCH_SIZE 1  // batch size used for allocation and inference

// stuff we know about the network and the input/output blobs
static const int INPUT_H = 224;  // network input height (pixels)
static const int INPUT_W = 224;  // network input width (pixels)
static const int OUTPUT_SIZE = 6;  // floats per batch item in the output binding (classifier scores)
const char* INPUT_BLOB_NAME = "input";    // engine input tensor name
const char* OUTPUT_BLOB_NAME = "output";  // engine output tensor name
static Logger gLogger;  // TensorRT logger (declared in logging.h)



// Append the names (not full paths) of all entries of directory p_dir_name
// to file_names, skipping the "." and ".." pseudo-entries.
// Returns 0 on success, -1 if the directory cannot be opened.
int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
    DIR *dir = opendir(p_dir_name);
    if (!dir) {
        return -1;
    }

    for (struct dirent *entry = readdir(dir); entry != nullptr; entry = readdir(dir)) {
        const char *name = entry->d_name;
        // Skip the self/parent directory entries.
        if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
            continue;
        }
        file_names.emplace_back(name);
    }

    closedir(dir);
    return 0;
}

// Flatten an HWC cv::Mat (CV_32F planes expected) into planar CHW order,
// appending the values to input_vectors. Returns true (kept for interface
// compatibility; the conversion itself cannot fail here).
//
// Bug fix: the previous version split into a fixed cv::Mat[3] array while
// looping over src_image.channels() — a >3-channel image overflowed the
// stack array. A std::vector sized by cv::split removes that hazard.
bool cv2vec(const cv::Mat src_image, std::vector<float> &input_vectors) {
    // Split interleaved channels so each plane is contiguous (HWC -> CHW).
    std::vector<cv::Mat> channels;
    cv::split(src_image, channels);

    // Reserve once to avoid repeated reallocation while appending planes.
    input_vectors.reserve(input_vectors.size() +
                          src_image.total() * static_cast<size_t>(src_image.channels()));

    for (const cv::Mat &plane : channels) {
        // split() output is continuous, so reshape to one column is a view,
        // not a copy; copy the plane's floats straight into the output.
        cv::Mat flat = plane.reshape(1, static_cast<int>(plane.total()));
        input_vectors.insert(input_vectors.end(), flat.begin<float>(), flat.end<float>());
    }

    return true;
}

// Resize src_image to (width, height) with bicubic interpolation, convert
// BGR -> RGB, normalize each channel as (pixel/255 - mean) / std (means and
// stds given in RGB order), and append the result in CHW order to
// input_vectors via cv2vec.
// Returns false if the input is empty or not a 3-channel image — previously
// an empty Mat (e.g. a failed imread) made cv::resize throw.
bool image_preprocess(const cv::Mat &src_image, int width, int height, std::vector<float> &input_vectors, std::vector<float>&means, std::vector<float>&stds) {
    if (src_image.empty() || src_image.channels() != 3) {
        return false;
    }

    // 1. resize by CUBIC mode, then promote to float for the normalization
    //    math (the redundant intermediate copy of the old code is dropped).
    cv::Mat resized, dst_image;
    cv::resize(src_image, resized, cv::Size(width, height), 0, 0, cv::INTER_CUBIC);
    resized.convertTo(dst_image, CV_32F);

    // 2. convert BGR (OpenCV default) to RGB so channel order matches means/stds.
    cv::cvtColor(dst_image, dst_image, cv::COLOR_BGR2RGB);

    // 3. per-pixel, per-channel normalization: x -> (x/255 - mean) / std.
    for (int i = 0; i < dst_image.rows; i++) {
        for (int j = 0; j < dst_image.cols; j++) {
            cv::Vec3f px = dst_image.at<cv::Vec3f>(i, j);
            for (int c = 0; c < 3; c++) {
                px[c] = ((px[c] / 255.0f) - means[c]) / stds[c];
            }
            dst_image.at<cv::Vec3f>(i, j) = px;
        }
    }

    // 4. flatten HWC -> CHW into the output vector.
    cv2vec(dst_image, input_vectors);

    return true;
}

  

// Run one synchronous inference pass: copy input_vectors (batchSize * 3 *
// INPUT_H * INPUT_W floats, CHW) to the GPU, enqueue the engine on a private
// stream, and copy OUTPUT_SIZE floats per batch item back into `output`.
// NOTE(review): device buffers are allocated and freed on every call — fine
// for a one-shot demo, but a hot loop should allocate once and reuse.
void doInference(IExecutionContext& context, std::vector<float> &input_vectors, float* output, int batchSize) {
    const ICudaEngine& engine = context.getEngine();

    // Engine is expected to expose exactly one input and one output binding.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // Binding indices are looked up by tensor name; both are guaranteed to be
    // less than getNbBindings().
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    std::cout<<"inputIndex:"<<inputIndex<<", outputIndex"<<outputIndex<<std::endl;

    const size_t inputCount = (size_t)batchSize * 3 * INPUT_H * INPUT_W;
    // Guard: the H2D copy below reads inputCount floats from the host vector.
    assert(input_vectors.size() >= inputCount);

    // Create GPU buffers on device.
    CHECK(cudaMalloc(&buffers[inputIndex], inputCount * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));

    // Create a private stream so the copies and the inference are ordered.
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input to device, infer asynchronously, DMA output back to host.
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input_vectors.data(), inputCount * sizeof(float), cudaMemcpyHostToDevice, stream));
    if (!context.enqueue(batchSize, buffers, stream, nullptr)) {
        // enqueue's bool result was previously ignored — a failed launch
        // would have returned stale/garbage output silently.
        std::cerr << "TensorRT enqueue failed" << std::endl;
    }
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    // Was unchecked: asynchronous copy/execution errors surface here.
    CHECK(cudaStreamSynchronize(stream));

    // Release stream and buffers.
    CHECK(cudaStreamDestroy(stream));
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}

int main(int argc, char** argv) {
    cudaSetDevice(DEVICE);
    // create a model using the API directly and serialize it to a stream
    char *trtModelStream{nullptr};
    size_t size{0};

    if (argc == 3 && std::string(argv[1]) == "-d") {
        std::ifstream file("/home/admincaode/code/yolov5_tensorrt-master/yolov5s/yolov5s.engine", std::ios::binary);
        if (file.good()) {
            file.seekg(0, file.end);
            size = file.tellg();
            file.seekg(0, file.beg);
            trtModelStream = new char[size];
            assert(trtModelStream);
            file.read(trtModelStream, size);
            file.close();
        }
    } else {
        std::cerr << "arguments not right!" << std::endl;
        std::cerr << "./yolov5s -s  // serialize model to plan file" << std::endl;
        std::cerr << "./yolov5s -d ../samples  // deserialize plan file and run inference" << std::endl;
        return -1;
    }

    std::vector<std::string> file_names;
    if (read_files_in_dir(argv[2], file_names) < 0) {
        std::cout << "read_files_in_dir failed." << std::endl;
        return -1;
    }

    // prepare input data ---------------------------
    static float data[BATCH_SIZE * 3 * INPUT_H * INPUT_W];
    static float prob[BATCH_SIZE * OUTPUT_SIZE];
    IRuntime* runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
    assert(engine != nullptr);
    IExecutionContext* context = engine->createExecutionContext();
    nvinfer1::Dims input_dims = nvinfer1::Dims4(1, 3, 224, 224);
    context->setBindingDimensions(0, input_dims);
    nvinfer1::Dims out_dims = nvinfer1::Dims2(1, 6);
    context->setBindingDimensions(1, out_dims);

    assert(context != nullptr);
    delete[] trtModelStream;
    
    std::cout<<"1"<<std::endl;
    cv::Mat img = cv::imread("/home/admincaode/code/yolov5_tensorrt-master/yolov5s/part.jpg");
    
    // cv::Mat pr_img; 
    // cv::resize(img, pr_img, cv::Size(INPUT_H,INPUT_W),0, 0, cv::INTER_CUBIC);
    // std::cout<<"3"<<std::endl;
    // if (pr_img.empty()) 
    //     std::cout<<"empty"<<std::endl;
    // std::cout<<"width:"<<pr_img.rows<<", heights:"<<pr_img.cols<<", channels:"<< pr_img.channels()<<std::endl; 
    std::vector<float> input_vectors;

    std::vector<float> means =  {0.48145466, 0.4578275, 0.40821073};
    std::vector<float> stds = {0.26862954, 0.26130258, 0.27577711};
    image_preprocess(img, INPUT_H, INPUT_W, input_vectors, means, stds);
    for(int i=0; i<input_vectors.size(); i++){
        std::cout<<input_vectors[i]<<" ";
        if (i > 100){
            break;
        }
    }

    std::cout<<"2"<<std::endl;
    for (int f = 0; f < 1; f++) {

        auto start = std::chrono::system_clock::now();

        doInference(*context, input_vectors, prob, BATCH_SIZE);
        for (int i=0;i <OUTPUT_SIZE; i++){
            std::cout<<prob[i]<<" ";
        }

        auto end = std::chrono::system_clock::now();
        
        std::cout << "inference time:" << std::endl;
        std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
        std::cout<<"f1:"<<std::endl;
        
    }

    // Destroy the engine
    context->destroy();
    engine->destroy();
    runtime->destroy();

    return 0;
}
