#include "NvInfer.h"
#include "NvOnnxParser.h"

#include <cuda_runtime.h>

#include <opencv2/opencv.hpp>

#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

class TRTEngine {
public:
    TRTEngine(const std::string& engine_path) {
        loadEngine(engine_path);
        createContext();
    }

    void infer(cv::Mat& input, std::vector<float>& output) {
        // 预处理
        cv::Mat resized; 
        cv::resize(input, resized, cv::Size(640, 640));
        std::vector<float> blob = normalize(resized);

        // 创建buffer
        void* buffers[2];
        cudaMalloc(&buffers[0], inputSize * sizeof(float));
        cudaMalloc(&buffers[1], outputSize * sizeof(float));

        // 执行推理
        cudaMemcpy(buffers[0], blob.data(), inputSize * sizeof(float), cudaMemcpyHostToDevice);
        context->executeV2(buffers);
        cudaMemcpy(output.data(), buffers[1], outputSize * sizeof(float), cudaMemcpyDeviceToHost);

        // 后处理
        processOutput(output);
    }

private:
    void loadEngine(const std::string& path) {
        std::ifstream engineFile(path, std::ios::binary);
        engineFile.seekg(0, std::ios::end);
        size_t size = engineFile.tellg();
        engineFile.seekg(0, std::ios::beg);
        engineData.resize(size);
        engineFile.read(engineData.data(), size);
    }

    void createContext() {
        runtime = nvinfer1::createInferRuntime(logger);
        engine = runtime->deserializeCudaEngine(engineData.data(), engineData.size());
        context = engine->createExecutionContext();
    }

    std::vector<float> normalize(cv::Mat& img) {
        // 归一化处理
        cv::Mat floatImg;
        img.convertTo(floatImg, CV_32FC3, 1.0 / 255.0);
        return std::vector<float>(floatImg.data, floatImg.data + floatImg.total() * 3);
    }

    nvinfer1::ILogger logger;
    std::vector<char> engineData;
    nvinfer1::IRuntime* runtime;
    nvinfer1::ICudaEngine* engine;
    nvinfer1::IExecutionContext* context;
    const int inputSize = 640*640*3;
    const int outputSize = 8400*6;
};
