// trt_util.hpp

#ifndef TRT_UTIL_HPP
#define TRT_UTIL_HPP

#include <NvInfer.h>
#include <opencv2/opencv.hpp>
#include <NvOnnxParser.h>
#include <cuda_runtime_api.h>
#include <string>
#include <vector>
#include <memory>
#include <mutex>
#include <condition_variable>
#include <chrono>
#include <filesystem>
#include "camera_control.hpp"
#include <iostream>
#include <fstream>
#include <exception>
#include <cstdlib> // for the system() function

using namespace std::chrono;
using namespace nvinfer1;
using namespace nvonnxparser;

// Global exit flag — defined elsewhere (e.g. trt.hpp or the main translation unit).
extern bool g_bExit;



class TensorRTModel
{
public:
	TensorRTModel(const std::string& modelPath)
	{
		initialize(modelPath);
	}

	~TensorRTModel()
	{
		// 显式释放资源
		context.reset();
		engine.reset();
	}

	std::vector<float> infer(const std::vector<float>& inputData)
	{
		// 获取输入和输出的索引
		int inputIndex = engine->getBindingIndex("input.1");
		int outputIndex = engine->getBindingIndex("495");

		// 设置输入维度并分配内存
		Dims inputDims = engine->getBindingDimensions(inputIndex);
		inputDims.d[0] = 1;
		context->setBindingDimensions(inputIndex, inputDims);

		void* buffers[2];
		cudaMalloc(&buffers[inputIndex], inputData.size() * sizeof(float));
		cudaMalloc(&buffers[outputIndex], 2 * sizeof(float));

		// 将输入数据从主机传输到设备
		cudaMemcpy(buffers[inputIndex], inputData.data(), inputData.size() * sizeof(float), cudaMemcpyHostToDevice);

		// 执行推理
		void* bindings[] = { buffers[inputIndex], buffers[outputIndex] };
		context->enqueueV2(bindings, 0, nullptr);

		// 从设备获取输出数据
		std::vector<float> outputData(2);
		cudaMemcpy(outputData.data(), buffers[outputIndex], 2 * sizeof(float), cudaMemcpyDeviceToHost);

		// 释放 GPU 内存
		cudaFree(buffers[inputIndex]);
		cudaFree(buffers[outputIndex]);

		return outputData;
	}

private:


    // TensorRT 日志类
    class Logger : public ILogger
    {
        void log(Severity severity, const char* msg) noexcept override
        {
            // 仅输出 kERROR 和更严重级别的信息
            if (severity == Severity::kERROR || severity == Severity::kINTERNAL_ERROR)
            {
                std::cout << msg << std::endl;
            }else{
				std::cout << msg << std::endl;
			}
        }
    } logger;

	std::shared_ptr<nvinfer1::ICudaEngine> engine = nullptr;
	std::unique_ptr<IExecutionContext> context = nullptr;

	void initialize(const std::string& modelPath)
	{
		// 加载并构建引擎
		engine = loadModel(modelPath);

		std::cerr << "加载并构建引擎" << std::endl;
		if (!engine)
		{
			std::cerr << "引擎构建失败" << std::endl;
			exit(-1);
		}
		std::cout << "引擎构建成功" << std::endl;
		// 创建推理上下文
		context = std::unique_ptr<IExecutionContext>(engine->createExecutionContext());
		if (!context)
		{
			std::cerr << "创建推理上下文失败" << std::endl;
			exit(-1);
		}
		std::cout << "创建推理上下文成功" << std::endl;
	}

	std::shared_ptr<nvinfer1::ICudaEngine> loadModel(const std::string& modelPath)
	{
		try{
			std::cout<<nvinfer1::kNV_TENSORRT_VERSION_IMPL<<std::endl;
			// 创建 TensorRT Builder
			auto builder = std::unique_ptr<IBuilder>(nvinfer1::createInferBuilder(logger));
			auto network = std::unique_ptr<INetworkDefinition>(builder->createNetworkV2(1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
			auto parser = std::unique_ptr<IParser>(nvonnxparser::createParser(*network, logger));

			// 解析 ONNX 文件
			if (!parser->parseFromFile(modelPath.c_str(), static_cast<int>(ILogger::Severity::kERROR)))
			{
				std::cerr << "解析 ONNX 文件失败" << std::endl;
				return nullptr;
			}
			std::cout << "解析 ONNX 文件成功" << std::endl;

			// 创建 Builder 配置并设置最大工作空间
			auto config = std::unique_ptr<IBuilderConfig>(builder->createBuilderConfig());

			std::cout << "创建 Builder 配置并设置最大工作空间 成功" << std::endl;
			size_t free,total;
			cudaMemGetInfo(&free,&total);

			std::cout << "free:" <<free<<"   total:"<<total<< std::endl;
			// config->setMaxWorkspaceSize(4*(1 << 30));  // 设置最大工作空间为 1 GB
			config->setMaxWorkspaceSize(free);  // 设置最大工作空间为 1 GB
			std::cout << "设置最大工作空间为 1 GB 成功" << std::endl;
			// 构建 TensorRT 引擎
			return std::shared_ptr<nvinfer1::ICudaEngine>(builder->buildEngineWithConfig(*network, *config));
		}catch (const std::exception&e){
			std::cerr << "引擎构建失败"<<e.what() << std::endl;
		}
		
	}
};

// Arguments handed to the inference worker thread.
struct InferenceThreadArgs {
    TensorRTModel* trtModel;                   // TensorRT model instance (not owned)
    SharedResourcesWrapper* sharedResWrapper;  // shared state exchanged with other threads
};

// Image preprocessing: converts `image` into the flat float buffer expected by the network input (inputHeight x inputWidth).
std::vector<float> preprocessImage(const cv::Mat& image, int inputHeight, int inputWidth);

// Declaration of addInferenceResultToSharedResources: takes an InferenceResult reference and an InferenceSharedResources reference.
void addInferenceResultToSharedResources(InferenceResult& result, InferenceSharedResources& sharedRes);

// Inference thread entry point.
void* InferenceThread(void* pUser);

// Inference-result display thread entry point.
void* InferDisplayThread(void* pUser);

#endif // TRT_UTIL_HPP