#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <time.h>

#include <cuda_runtime_api.h>
#include <NvInferRuntime.h>
#include <NvInfer.h>

#include "logging.h"
#include "utils.h"
#include "argsParser.h"

// DMA the input batch to the device, run inference on `stream`, and DMA the
// classifier output back to the host. Blocks (stream-synchronizes) before
// returning, so `output` is valid on return.
//
// Preconditions (as set up by main): buffers[0] is the device input binding
// holding batch*3*input_h*input_w floats; buffers[1] is the device output
// binding holding batch*1000 floats.
void doInference(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void **buffers, 
                float* input, float* output, infer_utils::Args& args) {
    // Compute byte counts in size_t to avoid int overflow for large batches/resolutions.
    const size_t input_bytes  = (size_t)args.batch * 3 * args.input_h * args.input_w * sizeof(float);
    const size_t output_bytes = (size_t)args.batch * 1000 * sizeof(float);

    CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, input_bytes, cudaMemcpyHostToDevice, stream));

    // enqueue() is only valid for implicit-batch engines; explicit-batch engines
    // (the path main configures via setBindingDimensions) must use enqueueV2,
    // otherwise TensorRT rejects the call at runtime.
    bool ok;
    if (context.getEngine().hasImplicitBatchDimension()) {
        ok = context.enqueue(args.batch, buffers, stream, nullptr);
    } else {
        ok = context.enqueueV2(buffers, stream, nullptr);
    }
    if (!ok) {
        std::cerr << "TensorRT enqueue failed." << std::endl;
    }

    CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], output_bytes, cudaMemcpyDeviceToHost, stream));
    CUDA_CHECK(cudaStreamSynchronize(stream));
}

// Benchmark two serialized TensorRT engines (args.engine1, args.engine2) on a
// directory of images: load each engine, preprocess images into NCHW batches,
// run inference per batch, and report per-batch and total wall-clock timings.
int main(int argc, char* argv[]){
    cudaSetDevice(0);
    if(argc == 1){infer_utils::printUsage(); return -1;}
    infer_utils::Args args;
    infer_utils::parseArgs(args, argc, argv);
    infer_utils::printArgs(args);

    Logger gLogger;
    for (int engine_idx = 1; engine_idx <= 2; engine_idx++){
        double full_time{0};
        std::string engine_name = (engine_idx == 1)? args.engine1 : args.engine2;
        std::cout << "current engine " << engine_name << std::endl;

        // Read the serialized engine plan into host memory.
        std::ifstream file(engine_name, std::ios::binary);
        if (!file.good()) {
            std::cout << "can not open " << engine_name << std::endl;
            // Bail out: the original fell through with a null buffer and
            // crashed on the deserialize assert below.
            return -1;
        }
        file.seekg(0, file.end);
        size_t size = file.tellg();
        file.seekg(0, file.beg);
        std::vector<char> trtModelStream(size);   // RAII instead of raw new[]/delete[]
        file.read(trtModelStream.data(), size);
        file.close();

        nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(gLogger);
        assert(runtime != nullptr);
        nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream.data(), size);
        assert(engine != nullptr);
        nvinfer1::IExecutionContext* context = engine->createExecutionContext();
        assert(context != nullptr);

        // Report bindings / optimization profiles.
        const int nb_bindings = engine->getNbBindings();
        std::cout << "NbOptimizationProfiles: "<< engine->getNbOptimizationProfiles() << std::endl;
        std::cout << "NbEngineBindings: " << nb_bindings << std::endl;
        for (int binding = 0; binding < nb_bindings; binding++) {
            std::cout << "Binding " << binding << ": " << engine->getBindingName(binding) << std::endl;
        }

        if (!engine->hasImplicitBatchDimension()){
            // Explicit-batch engine: the runtime input shape must be set on the context.
            // (Message fixed — the original printed "implicit" on this branch.)
            std::cout << "engine has explicit batch dimensions." << std::endl;
            nvinfer1::Dims4 inputdims{args.batch, 3, args.input_h, args.input_w};
            int input_index = engine->getBindingIndex(args.input_blob_name.c_str());
            if (!context->setBindingDimensions(input_index, inputdims)){
                std::cerr << "Invalid " << args.input_blob_name << " binding dimensions. \n";}
            if(!context->allInputDimensionsSpecified()) {
                std::cerr << "Not all input dimensions are specified. " << std::endl;
                return -1;  // was `return false` (== 0, i.e. success) on this error path
            }
        }

        // Host staging buffers and per-binding device buffers.
        const int image_size = 3 * args.input_h * args.input_w;   // floats per image (CHW)
        std::vector<float> data((size_t)args.batch * image_size);
        std::vector<float> prob((size_t)args.batch * 1000);
        std::vector<void*> buffers(nb_bindings, nullptr);          // VLA replaced (non-standard C++)
        CUDA_CHECK(cudaMalloc(&buffers[0], data.size() * sizeof(float)));
        for (int bi = 1; bi < nb_bindings; bi++){
            CUDA_CHECK(cudaMalloc(&buffers[bi], prob.size() * sizeof(float)));
        }
        cudaStream_t stream;
        CUDA_CHECK(cudaStreamCreate(&stream));

        std::vector<std::string> file_names;
        if (read_files_in_dir(args.datadir.c_str(), file_names) < 0) {
            std::cout << "read_files_in_dir failed." << std::endl;
            return -1;
        }

        int fcount = 0;
        for (int f = 0; f < (int)file_names.size(); f++) {
            fcount++;
            // Accumulate images until a full batch (or the tail of the list).
            if (fcount < args.batch && f + 1 != (int)file_names.size()) continue;

            // Fill the host batch buffer: HWC BGR uchar -> CHW RGB float in [0,1].
            for (int b = 0; b < fcount; b++) {
                cv::Mat img = cv::imread(std::string(args.datadir) + file_names[f - fcount + 1 + b]);
                if (img.empty()) continue;
                cv::Mat pr_img = preprocess_img(img, args.input_w, args.input_h); // letterbox BGR to RGB
                int px = 0;
                for (int row = 0; row < args.input_h; ++row) {
                    uchar* uc_pixel = pr_img.data + row * pr_img.step;
                    for (int col = 0; col < args.input_w; ++col) {
                        data[b * image_size + px] = (float)uc_pixel[2] / 255.0f;
                        data[b * image_size + px + args.input_h * args.input_w] = (float)uc_pixel[1] / 255.0f;
                        data[b * image_size + px + 2 * args.input_h * args.input_w] = (float)uc_pixel[0] / 255.0f;
                        uc_pixel += 3;
                        ++px;
                    }
                }
            } //--one batch preprocessed

            // BUG FIX: inference and the fcount reset were previously inside the
            // per-image loop above, so `fcount = 0` terminated the loop after the
            // first image and each batch inferred only one preprocessed image.
            clock_t s_time = clock();
            doInference(*context, stream, buffers.data(), data.data(), prob.data(), args);
            double infer_time = (double)(clock() - s_time)/CLOCKS_PER_SEC * 1000;
            full_time += infer_time;
            std::cout << "batch " << (int)(f/args.batch) << "/" << (int)(file_names.size()/args.batch)
                      << " | time : " << infer_time << std::endl;

            // TODO: save or validate the `prob` results.
            fcount = 0;
        }
        std::cout << "finished full time : " << full_time << " ms" << std::endl;
        std::cout << "==========================================\n";

        // Release per-engine resources; free EVERY binding buffer, not just the
        // first two (the original leaked device memory for engines with >2 bindings).
        CUDA_CHECK(cudaStreamDestroy(stream));
        for (int bi = 0; bi < nb_bindings; bi++){
            CUDA_CHECK(cudaFree(buffers[bi]));
        }
        context->destroy();
        engine->destroy();
        runtime->destroy();
    } // engine1, engine2
    return 0;
}