#include <iostream>
#include<opencv2/opencv.hpp>

// TensorRT headers
#include <NvInfer.h>  // build-time (engine construction) API
#include <NvInferRuntime.h>  // runtime (inference) API

// cuda include
#include <cuda_runtime.h>  



#include <stdio.h>
#include <math.h>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <functional>
#include <unistd.h>
#include <chrono>

using namespace std;


// Evaluate a CUDA runtime call; on failure, report the failing expression
// with its source location and return false.
#define checkRuntime(op)  check_cuda_runtime((op), #op, __FILE__, __LINE__)

// Returns true when `code` is cudaSuccess; otherwise prints the CUDA error
// name and message along with the stringified expression `op` and the
// file/line of the call site, then returns false.
// Renamed from __check_cuda_runtime: identifiers beginning with a double
// underscore are reserved for the implementation in C++.
bool check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code != cudaSuccess){
        const char* err_name = cudaGetErrorName(code);
        const char* err_message = cudaGetErrorString(code);
        printf("runtime error %s:%d  %s failed. \n  code = %s, message = %s\n", file, line, op, err_name, err_message);
        return false;
    }
    return true;
}


// Map a TensorRT logger severity to a short lowercase label for printing.
inline const char* severity_string(nvinfer1::ILogger::Severity t) {
	using S = nvinfer1::ILogger::Severity;
	if (t == S::kINTERNAL_ERROR) return "internal_error";
	if (t == S::kERROR)          return "error";
	if (t == S::kWARNING)        return "warning";
	if (t == S::kINFO)           return "info";
	if (t == S::kVERBOSE)        return "verbose";
	return "unknown";
}

// Logger handed to the TensorRT runtime. Messages at WARNING severity or
// worse are printed (WARNING in yellow, ERROR in red via ANSI escapes,
// INTERNAL_ERROR uncolored); INFO/VERBOSE messages are suppressed.
class TRTLogger : public nvinfer1::ILogger {
public:
	virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override {
		// Severity enum orders more-severe values lower, so <= kWARNING
		// selects INTERNAL_ERROR, ERROR, and WARNING.
		if (severity <= Severity::kWARNING) {
			if (severity == Severity::kWARNING) {
				printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
			} else if (severity == Severity::kERROR) {
				// Fixed: was "\031[33m" — octal 031 is not the ESC character,
				// so no valid ANSI sequence was emitted. "\033[31m" is red.
				printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
			} else {
				printf("%s: %s\n", severity_string(severity), msg);
			}
		}
	}
};

// Manage raw pointers returned by NVIDIA APIs through a smart pointer
// so they are released automatically and cannot leak.
// Wrap a TensorRT-created object in a shared_ptr whose deleter invokes the
// object's destroy() method, so NV-allocated objects are released
// automatically when the last owner goes away.
// Renamed the template parameter from _T: identifiers starting with an
// underscore followed by an uppercase letter are reserved in C++.
// NOTE(review): obj->destroy() is deprecated in TensorRT 8+ in favor of
// plain `delete` — confirm the TensorRT version in use.
template<typename T> std::shared_ptr<T> make_nvshared(T* ptr)
{
    return std::shared_ptr<T>(ptr, [](T* p){ p->destroy(); });
}


// Read the whole file at `file` into memory as raw bytes.
// Returns an empty vector when the file cannot be opened, is empty, or its
// size cannot be determined.
std::vector<unsigned char> load_file(const std::string& file) {
	std::ifstream in(file, std::ios::in | std::ios::binary);
	if (!in.is_open()) return {};

	in.seekg(0, std::ios::end);
	std::streampos end = in.tellg();
	// tellg() reports -1 on failure; without this guard a negative value
	// would convert to a huge size_t and the resize would throw/abort.
	if (end <= 0) return {};

	std::vector<unsigned char> data(static_cast<size_t>(end));
	in.seekg(0, std::ios::beg);
	in.read(reinterpret_cast<char*>(data.data()), data.size());
	// stream closes itself in the destructor; no explicit close() needed
	return data;
}

void inference(const string& image_path) {
	TRTLogger logger;
  // 加载模型
	auto engine_data = load_file("../engine/40-400-16.engine");
  
  /*
    创建runtime对象反序列化
    执行推理前，需要创建一个推理的runtime接口实例。与builer一样，runtime需要logger
  */
 
	auto runtime = make_nvshared(nvinfer1::createInferRuntime(logger));
	auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
	
    if (engine == nullptr) {
		printf("Deserialize cuda engine failed.\n");
		runtime->destroy();
		return;
	}

	if (engine->getNbBindings() != 2) {
		printf("Must be single input, single Output, got %d output.\n", engine->getNbBindings() - 1);
		return;
	}
    
  // 创建CUDA流，以确定这个batch的推理是独立的
	cudaStream_t stream = nullptr;
	checkRuntime(cudaStreamCreate(&stream));
    
    // 获得上下文
	auto execution_context = make_nvshared(engine->createExecutionContext());
    // getBindingDimensions()查看engine的输入输出维度

    int input_size=1;
    int output_size=1;
    
    for (int i = 0; i < engine->getNbBindings(); i++)
    {
        
        nvinfer1::Dims dims = execution_context->getBindingDimensions(i);
        printf("index %d, dims: (");
        for (int d = 0; d < dims.nbDims; d ++)
        {
            // std::cout << d << endl;
             if (d < dims.nbDims -1) 
             {
                 printf("%d,", dims.d[d]);
                 input_size *= dims.d[d];
             }
                
             else
             {
                printf("%d", dims.d[d]);
                output_size *= dims.d[d];
                 
             }
               
        }   
        printf(")\n");
    }
    std::cout << input_size << endl;
    std::cout << output_size << endl;
	int input_batch = execution_context->getBindingDimensions(0).d[0];
	int input_channel = execution_context->getBindingDimensions(0).d[1];
	int input_height = execution_context->getBindingDimensions(0).d[2];
	int input_width = execution_context->getBindingDimensions(0).d[3];

  // 准备好input_data_host和input_data_device，分别表示内存中的数据指针和显存中的数据指针
  // 一会儿将预处理过的图像数据搬运到GPU
	// int input_numel = input_batch * input_channel * input_height * input_width;
	
    float* input_data_host = nullptr;
	float* input_data_device = nullptr;
	checkRuntime(cudaMallocHost(&input_data_host,  input_size * sizeof(float)));
	checkRuntime(cudaMalloc(&input_data_device, input_size * sizeof(float)));

  // 图片读取与预处理，与之前python中的预处理方式一致：
  // BGR->RGB、归一化/除均值减标准差
    float mean[] = {0.406, 0.456, 0.485};
	float std[] = {0.225, 0.224, 0.229};
  
	auto image = cv::imread(image_path);
	cv::resize(image, image, cv::Size(input_width, input_height));

	int image_area = image.cols * image.rows;
	unsigned char* pimage = image.data;
	float* phost_b = input_data_host + image_area * 0;
	float* phost_g = input_data_host + image_area * 1;
	float* phost_r = input_data_host + image_area * 2;
	for (int i=0; i<image_area; ++i, pimage += 3) {
		 *phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
		 *phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
		 *phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
	 }

  // 进行推理
	checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_size *sizeof(float), cudaMemcpyHostToDevice, stream));

	// const int num_classes = 1000;
	// 输出分配内存
    float output_data_host[output_size];
    float* output_data_device = nullptr;
	checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));

	auto input_dims = engine->getBindingDimensions(0);  // nvinfer1::Dims dims
	input_dims.d[0] = input_batch;

	execution_context->setBindingDimensions(0, input_dims);
  // 用一个指针数组bindings指定input和output在gpu中的指针。
	float* bindings[] = {input_data_device, output_data_device};
	
    bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);

	checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
	checkRuntime(cudaStreamSynchronize(stream));

	float* prob = output_data_host;

    std::cout << prob[0] << endl;
    std::cout << prob[1] << endl;
    std::cout << prob[2] << endl;
    std::cout << prob[409599] << endl;
	// int predict_label = max_element(prob, prob + num_classes) - prob;
	// float conf = prob[predict_label];
	// printf("test_image: %s, max_idx: %d, probability: %f", image_path.c_str(), predict_label, conf);

  // 释放显存
	checkRuntime(cudaStreamDestroy(stream));
	checkRuntime(cudaFreeHost(input_data_host));
	checkRuntime(cudaFree(input_data_device));
	checkRuntime(cudaFree(output_data_device));
}

// Entry point. An optional first command-line argument overrides the
// default test-image path. (Removed the unused local TRTLogger —
// inference() constructs its own.)
int main(int argc, char* argv[])
{
    const char* default_image = "/home/lin/code/cv/1.png";
    inference(argc > 1 ? argv[1] : default_image);
    return 0;
}