#pragma once

#include "NvInfer.h"
#include "NvOnnxParser.h"

#include <algorithm>
#include <array>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

#include <cuda_fp16.h>
#include <cuda_runtime.h>

#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/opencv.hpp>

// Utility methods
namespace Util {
// Returns true if a file exists (and is readable) at the given path.
inline bool doesFileExist(const std::string &filepath) {
    std::ifstream f(filepath.c_str());
    return f.good();
}

// Logs and throws std::runtime_error when a CUDA runtime call fails.
// Wrap every CUDA API call with this so errors surface immediately.
inline void checkCudaErrorCode(cudaError_t code) {
    // Compare against cudaSuccess rather than the magic constant 0
    if (code != cudaSuccess) {
        std::string errMsg = "CUDA operation failed with code: " + std::to_string(code) + "(" + cudaGetErrorName(code) +
                             "), with message: " + cudaGetErrorString(code);
        std::cout << errMsg << std::endl;
        throw std::runtime_error(errMsg);
    }
}

// Lists the files contained in a directory (defined in the implementation file).
std::vector<std::string> getFilesInDirectory(const std::string &dirPath);
} // namespace Util
// Utility Timer
// Measures wall-clock time elapsed since construction, using the supplied
// Clock (defaults to std::chrono::high_resolution_clock).
template <typename Clock = std::chrono::high_resolution_clock> class Stopwatch {
    typename Clock::time_point start_point;

public:
    Stopwatch() : start_point(Clock::now()) {}

    // Returns the elapsed time since construction, converted to Units
    // (a std::chrono duration type) and cast to Rep.
    // Note: the original surrounded the clock read with
    // std::atomic_thread_fence(std::memory_order_relaxed); a relaxed fence
    // has no ordering effect ([atomics.fences]) and relied on <atomic>
    // without including it, so it has been removed.
    template <typename Rep = typename Clock::duration::rep, typename Units = typename Clock::duration> Rep elapsedTime() const {
        const auto counted_time = std::chrono::duration_cast<Units>(Clock::now() - start_point).count();
        return static_cast<Rep>(counted_time);
    }
};

using preciseStopwatch = Stopwatch<>;
// Numeric precision used when building/running the GPU inference engine.
enum class Precision {
    // 32-bit IEEE floating point (full precision).
    FP32,
    // 16-bit floating point (half precision).
    FP16,
    // 8-bit integer quantization.
    // Reduced dynamic range; may cost a small amount of accuracy.
    // Selecting INT8 requires a calibration dataset directory.
    INT8,
};

// Configuration options for building and running the network.
struct Options {
    // Precision used for GPU inference.
    Precision precision = Precision::FP16;
    // Directory holding the calibration images (required when INT8 is selected).
    std::string calibrationDataDirectoryPath;
    // Batch size used while computing INT8 calibration data; pick the
    // largest value the GPU can handle.
    int32_t calibrationBatchSize = 128;
    // Batch size the engine should be optimized for.
    int32_t optBatchSize = 1;
    // Largest batch size the engine will accept.
    int32_t maxBatchSize = 16;
    // Index of the CUDA device to run on.
    int deviceIndex = 0;
};

// Class used for int8 calibration
// Feeds batches of preprocessed calibration images to TensorRT while an INT8
// engine is built, and caches the resulting calibration table on disk.
class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 {
public:
    // calibDataDirPath: directory containing the calibration images.
    // calibTableName: file used to read/write the calibration cache.
    // inputBlobName: name of the model's input tensor.
    // subVals/divVals/normalize: preprocessing applied to each image
    // (presumably must match the inference-time preprocessing -- confirm).
    Int8EntropyCalibrator2(int32_t batchSize, int32_t inputW, int32_t inputH, const std::string &calibDataDirPath,
                           const std::string &calibTableName, const std::string &inputBlobName,
                           const std::array<float, 3> &subVals = {0.f, 0.f, 0.f}, const std::array<float, 3> &divVals = {1.f, 1.f, 1.f},
                           bool normalize = true, bool readCache = true);
    virtual ~Int8EntropyCalibrator2();
    // Abstract base class methods which must be implemented
    int32_t getBatchSize() const noexcept override;
    bool getBatch(void *bindings[], char const *names[], int32_t nbBindings) noexcept override;
    void const *readCalibrationCache(std::size_t &length) noexcept override;
    void writeCalibrationCache(void const *ptr, std::size_t length) noexcept override;

private:
    const int32_t m_batchSize;
    const int32_t m_inputW;
    const int32_t m_inputH;
    int32_t m_imgIdx;                    // index of the next calibration image to load
    std::vector<std::string> m_imgPaths; // paths of all calibration images
    size_t m_inputCount;                 // element count of one input batch
    const std::string m_calibTableName;
    const std::string m_inputBlobName;
    const std::array<float, 3> m_subVals;
    const std::array<float, 3> m_divVals;
    const bool m_normalize;
    const bool m_readCache;
    void *m_deviceInput;                 // device buffer handed to TensorRT via bindings
    std::vector<char> m_calibCache;      // in-memory copy of the calibration table
};

// Class to extend TensorRT logger
// Receives TensorRT log messages; the implementation lives in the source file.
class Logger : public nvinfer1::ILogger {
    void log(Severity severity, const char *msg) noexcept override;
};

// Engine class template: generic TensorRT inference-engine wrapper.
// T is the element type of the model's outputs (e.g. float, __half).
template <typename T> 
class Engine {
public:
    // Constructor: stores the supplied engine options.
    Engine(const Options &options);

    // Destructor: releases the GPU output buffers.
    ~Engine();

    // Builds the ONNX model into a TensorRT engine file, caches it on disk,
    // then loads the engine into memory.
    // By default values are normalized to [0.f, 1.f]; set normalize to false
    // to keep values in [0.f, 255.f].
    // If the model expects values normalized to [-1.f, 1.f], use:
    //    subVals = {0.5f, 0.5f, 0.5f};
    //    divVals = {0.5f, 0.5f, 0.5f};
    //    normalize = true;
    bool buildLoadNetwork(std::string onnxModelPath, const std::array<float, 3> &subVals = {0.f, 0.f, 0.f},
                          const std::array<float, 3> &divVals = {1.f, 1.f, 1.f}, bool normalize = true);

    // Loads a TensorRT engine file from disk into memory.
    // Same normalization semantics as buildLoadNetwork; for [-1.f, 1.f] use:
    //    subVals = {0.5f, 0.5f, 0.5f};
    //    divVals = {0.5f, 0.5f, 0.5f};
    //    normalize = true;
    bool loadNetwork(std::string trtModelPath, const std::array<float, 3> &subVals = {0.f, 0.f, 0.f},
                     const std::array<float, 3> &divVals = {1.f, 1.f, 1.f}, bool normalize = true);

    // Runs inference.
    // Input format:  [input][batch][cv::cuda::GpuMat]
    // Output format: [batch][output][feature_vector]
    bool runInference(const std::vector<std::vector<cv::cuda::GpuMat>> &inputs, std::vector<std::vector<std::vector<T>>> &featureVectors);

    // Utility: resizes an image while keeping the aspect ratio by scaling and
    // then padding. Letterboxing usually pads top/bottom or left/right; this
    // version pads only the right/bottom so detected coordinates (e.g. from a
    // YOLO model) map back to the original frame more easily.
    static cv::cuda::GpuMat resizeKeepAspectRatioPadRightBottom(const cv::cuda::GpuMat &input, size_t height, size_t width,
                                                                const cv::Scalar &bgcolor = cv::Scalar(0, 0, 0));

    // Returns the input tensor dimensions (CHW; the batch dim is tracked separately).
    const std::vector<nvinfer1::Dims3> &getInputDims() const { return m_inputDims; };

    // Returns the output tensor dimensions.
    const std::vector<nvinfer1::Dims> &getOutputDims() const { return m_outputDims; };

    // Utility: flattens the triple-nested output into a 2D array.
    // Use when the batch size is 1 but there are multiple output vectors.
    static void transformOutput(std::vector<std::vector<std::vector<T>>> &input, std::vector<std::vector<T>> &output);

    // Utility: flattens the triple-nested output into a single array.
    // Use when the batch size is 1 and there is a single output vector.
    static void transformOutput(std::vector<std::vector<std::vector<T>>> &input, std::vector<T> &output);

    // Converts NHWC to NCHW and applies scaling and mean subtraction.
    static cv::cuda::GpuMat blobFromGpuMats(const std::vector<cv::cuda::GpuMat> &batchInput, const std::array<float, 3> &subVals,
                                            const std::array<float, 3> &divVals, bool normalize);

private:
    // Builds the TensorRT engine from an ONNX file and writes it to disk.
    bool build(std::string onnxModelPath, const std::array<float, 3> &subVals, const std::array<float, 3> &divVals, bool normalize);

    // Serializes the engine options into an engine-file name.
    std::string serializeEngineOptions(const Options &options, const std::string &onnxModelPath);

    // Fills deviceNames with the names of all CUDA devices.
    void getDeviceNames(std::vector<std::string> &deviceNames);

    // Frees the GPU output buffers.
    void clearGpuBuffers();

    // Normalization, scaling and mean subtraction applied to the input.
    std::array<float, 3> m_subVals{};
    std::array<float, 3> m_divVals{};
    bool m_normalize;

    // Pointers to the input and output GPU buffers.
    std::vector<void *> m_buffers;
    std::vector<uint32_t> m_outputLengths{};
    std::vector<nvinfer1::Dims3> m_inputDims;
    std::vector<nvinfer1::Dims> m_outputDims;
    std::vector<std::string> m_IOTensorNames;
    int32_t m_inputBatchSize;

    // The IRuntime must be kept alive for inference to remain valid.
    // https://forums.developer.nvidia.com/t/is-it-safe-to-deallocate-nvinfer1-iruntime-after-creating-an-nvinfer1-icudaengine-but-before-running-inference-with-said-icudaengine/255381/2?u=cyruspk4w6
    std::unique_ptr<nvinfer1::IRuntime> m_runtime = nullptr;
    std::unique_ptr<Int8EntropyCalibrator2> m_calibrator = nullptr;
    std::unique_ptr<nvinfer1::ICudaEngine> m_engine = nullptr;
    std::unique_ptr<nvinfer1::IExecutionContext> m_context = nullptr;
    const Options m_options;
    Logger m_logger;
};


// Engine constructor: stores the options used to build/run the engine.
template <typename T> 
Engine<T>::Engine(const Options &options) : m_options(options) {}

// Engine destructor: calls clearGpuBuffers to release the GPU output buffers.
template <typename T> 
Engine<T>::~Engine() { 
    clearGpuBuffers(); 
}

// Frees the GPU output buffers allocated by loadNetwork.
// Input slots are not freed here: inputs borrow OpenCV GpuMat memory.
// NOTE(review): checkCudaErrorCode can throw and this runs from the
// destructor -- consider a non-throwing variant for that path.
template <typename T> 
void Engine<T>::clearGpuBuffers() {
    if (!m_buffers.empty()) {
        // Number of input tensors; inputs occupy the first entries of m_buffers
        const auto numInputs = m_inputDims.size();
        // Free GPU memory starting at the first output binding
        // (assumes m_engine is valid whenever m_buffers is non-empty, which
        // holds because only loadNetwork populates m_buffers)
        for (int32_t outputBinding = numInputs; outputBinding < m_engine->getNbIOTensors(); ++outputBinding) {
            Util::checkCudaErrorCode(cudaFree(m_buffers[outputBinding]));
        }
        // Drop the (now dangling) pointers
        m_buffers.clear();
    }
}


// Builds (if needed) and then loads the TensorRT engine.
// The engine is only regenerated when no cached engine file matching the
// current options exists on disk; otherwise the cached version is loaded.
template <typename T>
bool Engine<T>::buildLoadNetwork(std::string onnxModelPath, const std::array<float, 3> &subVals, const std::array<float, 3> &divVals,
                                 bool normalize) {
    // Derive the engine-file name from the options and model path
    const auto engineName = serializeEngineOptions(m_options, onnxModelPath);
    std::cout << "正在搜索名为 " << engineName << " 的引擎文件..." << std::endl;

    if (Util::doesFileExist(engineName)) {
        std::cout << "找到引擎文件，不重新生成..." << std::endl;
    } else {
        if (!Util::doesFileExist(onnxModelPath)) {
            throw std::runtime_error("找不到路径上的 ONNX 模型: " + onnxModelPath);
        }

        // No engine file found: generate a new one (this can take a while)
        std::cout << "未找到引擎文件，正在生成。这可能需要一些时间..." << std::endl;

        // Compile the ONNX model into a TensorRT engine
        auto ret = build(onnxModelPath, subVals, divVals, normalize);
        if (!ret) {
            return false;
        }
    }

    // Load the (possibly freshly built) TensorRT engine file into memory
    return loadNetwork(engineName, subVals, divVals, normalize);
}


// Loads a serialized TensorRT engine from disk and prepares it for inference:
// deserializes the engine, creates an execution context, records tensor
// names/shapes, and allocates GPU memory for the output buffers.
template <typename T>
bool Engine<T>::loadNetwork(std::string trtModelPath, const std::array<float, 3> &subVals, const std::array<float, 3> &divVals,
                            bool normalize) {
    m_subVals = subVals;
    m_divVals = divVals;
    m_normalize = normalize;

    // Read the serialized model from disk
    if (!Util::doesFileExist(trtModelPath)) {
        std::cout << "错误，无法读取路径上的 TensorRT 模型: " + trtModelPath << std::endl;
        return false;
    } else {
        std::cout << "加载 TensorRT 引擎文件，路径为: " << trtModelPath << std::endl;
    }

    // NOTE(review): the stream's open state is not re-checked here; a race
    // after doesFileExist would make tellg() return -1 -- consider checking
    // file.good() before reading.
    std::ifstream file(trtModelPath, std::ios::binary | std::ios::ate);
    std::streamsize size = file.tellg();
    file.seekg(0, std::ios::beg);

    std::vector<char> buffer(size);
    if (!file.read(buffer.data(), size)) {
        throw std::runtime_error("无法读取引擎文件");
    }

    // Create the runtime used to deserialize the engine file
    m_runtime = std::unique_ptr<nvinfer1::IRuntime>{nvinfer1::createInferRuntime(m_logger)};
    if (!m_runtime) {
        return false;
    }

    // Select the requested GPU
    auto ret = cudaSetDevice(m_options.deviceIndex);
    if (ret != 0) {
        int numGPUs;
        cudaGetDeviceCount(&numGPUs);
        auto errMsg = "无法设置 GPU 设备索引为: " + std::to_string(m_options.deviceIndex) + "。注意，你的设备有 " +
                      std::to_string(numGPUs) + " 个 CUDA 兼容 GPU。";
        throw std::runtime_error(errMsg);
    }

    // Create the engine, i.e. the optimized representation of the model
    m_engine = std::unique_ptr<nvinfer1::ICudaEngine>(m_runtime->deserializeCudaEngine(buffer.data(), buffer.size()));
    if (!m_engine) {
        return false;
    }

    // The execution context holds all state associated with a particular invocation
    m_context = std::unique_ptr<nvinfer1::IExecutionContext>(m_engine->createExecutionContext());
    if (!m_context) {
        return false;
    }

    // Storage for the input and output buffer pointers
    // that will be passed to TensorRT for inference
    clearGpuBuffers();
    m_buffers.resize(m_engine->getNbIOTensors());

    m_outputLengths.clear();
    m_inputDims.clear();
    m_outputDims.clear();
    m_IOTensorNames.clear();

    // CUDA stream used for the async allocations below
    // NOTE(review): if an allocation below throws, this stream leaks --
    // consider an RAII wrapper for the stream.
    cudaStream_t stream;
    Util::checkCudaErrorCode(cudaStreamCreate(&stream));

    // Allocate GPU memory for the input and output buffers
    m_outputLengths.clear();
    for (int i = 0; i < m_engine->getNbIOTensors(); ++i) {
        const auto tensorName = m_engine->getIOTensorName(i);
        m_IOTensorNames.emplace_back(tensorName);
        const auto tensorType = m_engine->getTensorIOMode(tensorName);
        const auto tensorShape = m_engine->getTensorShape(tensorName);
        const auto tensorDataType = m_engine->getTensorDataType(tensorName);

        if (tensorType == nvinfer1::TensorIOMode::kINPUT) {
            // This implementation currently only supports float inputs
            if (m_engine->getTensorDataType(tensorName) != nvinfer1::DataType::kFLOAT) {
                throw std::runtime_error("错误，该实现目前仅支持 float 类型的输入");
            }

            // No memory is allocated for inputs: the OpenCV GpuMat buffers are used directly

            // Record the input dimensions (CHW) for later use
            m_inputDims.emplace_back(tensorShape.d[1], tensorShape.d[2], tensorShape.d[3]);
            m_inputBatchSize = tensorShape.d[0];
        } else if (tensorType == nvinfer1::TensorIOMode::kOUTPUT) {
            // Ensure the model's output data type matches the template parameter T
            if (tensorDataType == nvinfer1::DataType::kFLOAT && !std::is_same<float, T>::value) {
                throw std::runtime_error("错误，模型期望的输出类型为 float。Engine 类模板参数必须进行调整。");
            } else if (tensorDataType == nvinfer1::DataType::kHALF && !std::is_same<__half, T>::value) {
                throw std::runtime_error("错误，模型期望的输出类型为 __half。Engine 类模板参数必须进行调整。");
            } else if (tensorDataType == nvinfer1::DataType::kINT8 && !std::is_same<int8_t, T>::value) {
                throw std::runtime_error("错误，模型期望的输出类型为 int8_t。Engine 类模板参数必须进行调整。");
            } else if (tensorDataType == nvinfer1::DataType::kINT32 && !std::is_same<int32_t, T>::value) {
                throw std::runtime_error("错误，模型期望的输出类型为 int32_t。Engine 类模板参数必须进行调整。");
            } else if (tensorDataType == nvinfer1::DataType::kBOOL && !std::is_same<bool, T>::value) {
                throw std::runtime_error("错误，模型期望的输出类型为 bool。Engine 类模板参数必须进行调整。");
            } else if (tensorDataType == nvinfer1::DataType::kUINT8 && !std::is_same<uint8_t, T>::value) {
                throw std::runtime_error("错误，模型期望的输出类型为 uint8_t。Engine 类模板参数必须进行调整。");
            } else if (tensorDataType == nvinfer1::DataType::kFP8) {
                throw std::runtime_error("错误，模型具有不受支持的输出类型");
            }

            // The binding is an output
            uint32_t outputLength = 1;
            m_outputDims.push_back(tensorShape);

            for (int j = 1; j < tensorShape.nbDims; ++j) {
                // Skip j = 0: that is the batch size, which is accounted for when sizing the buffer below
                outputLength *= tensorShape.d[j];
            }

            m_outputLengths.push_back(outputLength);
            // Size the output buffer for the largest possible batch size
            // (in practice less memory may actually be used)
            Util::checkCudaErrorCode(cudaMallocAsync(&m_buffers[i], outputLength * m_options.maxBatchSize * sizeof(T), stream));
        } else {
            throw std::runtime_error("错误，IO Tensor 既不是输入也不是输出！");
        }
    }

    // Synchronize and destroy the CUDA stream
    Util::checkCudaErrorCode(cudaStreamSynchronize(stream));
    Util::checkCudaErrorCode(cudaStreamDestroy(stream));

    return true;
}


// Compiles an ONNX model into a serialized TensorRT engine and writes it to
// disk under the name produced by serializeEngineOptions.
// Returns false when a TensorRT object fails to construct or the build fails;
// throws on invalid configuration or I/O errors.
template <typename T>
bool Engine<T>::build(std::string onnxModelPath, const std::array<float, 3> &subVals, const std::array<float, 3> &divVals, bool normalize) {
    // Create the engine builder
    auto builder = std::unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(m_logger));
    if (!builder) {
        return false;
    }

    // Create the network with an explicit batch size (implicit batch is deprecated)
    // See: https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#explicit-implicit-batch
    auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = std::unique_ptr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
    if (!network) {
        return false;
    }

    // Create the parser that reads the ONNX file
    auto parser = std::unique_ptr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, m_logger));
    if (!parser) {
        return false;
    }

    // Read the ONNX file into memory first, then hand the buffer to the parser.
    // If the ONNX model file were encrypted, this approach would let us decrypt
    // the buffer before parsing.
    std::ifstream file(onnxModelPath, std::ios::binary | std::ios::ate);
    if (!file) {
        // Bug fix: an unopenable file previously produced tellg() == -1 and a
        // confusing failure when constructing the buffer below.
        throw std::runtime_error("无法打开 ONNX 模型文件: " + onnxModelPath);
    }
    std::streamsize size = file.tellg();
    file.seekg(0, std::ios::beg);

    std::vector<char> buffer(size);
    if (!file.read(buffer.data(), size)) {
        // Bug fix: the message previously said "engine file" (copy-paste from
        // loadNetwork) although this reads the ONNX model.
        throw std::runtime_error("无法读取 ONNX 模型文件");
    }

    // Parse the in-memory buffer
    auto parsed = parser->parse(buffer.data(), buffer.size());
    if (!parsed) {
        return false;
    }

    // Ensure all inputs share the same batch size
    const auto numInputs = network->getNbInputs();
    if (numInputs < 1) {
        throw std::runtime_error("错误，模型至少需要一个输入！");
    }
    const auto input0Batch = network->getInput(0)->getDimensions().d[0];
    for (int32_t i = 1; i < numInputs; ++i) {
        if (network->getInput(i)->getDimensions().d[0] != input0Batch) {
            throw std::runtime_error("错误，模型有多个输入，每个输入的批量大小不同！");
        }
    }

    // Check whether the model supports a dynamic batch size (-1 means dynamic)
    bool doesSupportDynamicBatch = false;
    if (input0Batch == -1) {
        doesSupportDynamicBatch = true;
        std::cout << "模型支持动态批量大小" << std::endl;
    } else {
        std::cout << "模型仅支持固定批量大小 " << input0Batch << std::endl;
        // For a fixed batch size, optBatchSize and maxBatchSize must match it exactly
        if (m_options.optBatchSize != input0Batch || m_options.maxBatchSize != input0Batch) {
            // Bug fix: the message previously told the user to set the options
            // to 1 regardless of the model's actual fixed batch size.
            throw std::runtime_error("错误，模型仅支持固定批量大小 " + std::to_string(input0Batch) +
                                     "。必须将 Options.optBatchSize 和 Options.maxBatchSize 设置为 " + std::to_string(input0Batch));
        }
    }

    auto config = std::unique_ptr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
    if (!config) {
        return false;
    }

    // Register a single optimization profile
    nvinfer1::IOptimizationProfile *optProfile = builder->createOptimizationProfile();
    for (int32_t i = 0; i < numInputs; ++i) {
        // Dimensions must be specified for every input the model expects
        const auto input = network->getInput(i);
        const auto inputName = input->getName();
        const auto inputDims = input->getDimensions();
        int32_t inputC = inputDims.d[1];
        int32_t inputH = inputDims.d[2];
        int32_t inputW = inputDims.d[3];

        // Minimum batch is 1 for dynamic models, otherwise the fixed opt batch
        if (doesSupportDynamicBatch) {
            optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kMIN, nvinfer1::Dims4(1, inputC, inputH, inputW));
        } else {
            optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kMIN,
                                      nvinfer1::Dims4(m_options.optBatchSize, inputC, inputH, inputW));
        }
        optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kOPT,
                                  nvinfer1::Dims4(m_options.optBatchSize, inputC, inputH, inputW));
        optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kMAX,
                                  nvinfer1::Dims4(m_options.maxBatchSize, inputC, inputH, inputW));
    }
    config->addOptimizationProfile(optProfile);

    // Configure the precision level
    const auto engineName = serializeEngineOptions(m_options, onnxModelPath);
    if (m_options.precision == Precision::FP16) {
        // Ensure the GPU supports FP16 inference
        if (!builder->platformHasFastFp16()) {
            throw std::runtime_error("错误：GPU 不支持 FP16 精度");
        }
        config->setFlag(nvinfer1::BuilderFlag::kFP16);
    } else if (m_options.precision == Precision::INT8) {
        if (numInputs > 1) {
            throw std::runtime_error("错误，此实现目前仅支持单输入模型的 INT8 量化");
        }

        // Ensure the GPU supports INT8 quantization
        if (!builder->platformHasFastInt8()) {
            throw std::runtime_error("错误：GPU 不支持 INT8 精度");
        }

        // A calibration data directory is mandatory for INT8
        if (m_options.calibrationDataDirectoryPath.empty()) {
            throw std::runtime_error("错误：如果选择了 INT8 精度，必须提供校准数据目录路径到 Engine::build 方法");
        }

        config->setFlag((nvinfer1::BuilderFlag::kINT8));

        const auto input = network->getInput(0);
        const auto inputName = input->getName();
        const auto inputDims = input->getDimensions();
        const auto calibrationFileName = engineName + ".calibration";

        m_calibrator = std::make_unique<Int8EntropyCalibrator2>(m_options.calibrationBatchSize, inputDims.d[3], inputDims.d[2],
                                                                m_options.calibrationDataDirectoryPath, calibrationFileName, inputName,
                                                                subVals, divVals, normalize);
        config->setInt8Calibrator(m_calibrator.get());
    }

    // CUDA stream used for builder profiling
    cudaStream_t profileStream;
    Util::checkCudaErrorCode(cudaStreamCreate(&profileStream));
    config->setProfileStream(profileStream);

    // Build the serialized engine.
    // If this call fails, raise the logger verbosity to kVERBOSE and rebuild
    // to get more information about why.
    std::unique_ptr<nvinfer1::IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
    if (!plan) {
        // Bug fix: the profiling stream used to leak on this failure path
        Util::checkCudaErrorCode(cudaStreamDestroy(profileStream));
        return false;
    }

    // Write the engine to disk
    std::ofstream outfile(engineName, std::ofstream::binary);
    outfile.write(reinterpret_cast<const char *>(plan->data()), plan->size());
    if (!outfile) {
        // Bug fix: a failed write used to be reported as success
        Util::checkCudaErrorCode(cudaStreamDestroy(profileStream));
        throw std::runtime_error("无法将引擎文件写入磁盘: " + engineName);
    }

    std::cout << "成功，将引擎保存到 " << engineName << std::endl;

    Util::checkCudaErrorCode(cudaStreamDestroy(profileStream));
    return true;
}


// Runs inference on a batch of preprocessed GPU images.
// inputs:         [input][batch][cv::cuda::GpuMat]
// featureVectors: [batch][output][feature_vector] (cleared and filled here)
// Returns false (after logging) on any validation or enqueue failure.
template <typename T>
bool Engine<T>::runInference(const std::vector<std::vector<cv::cuda::GpuMat>> &inputs,
                             std::vector<std::vector<std::vector<T>>> &featureVectors) {
    // Error checking first
    if (inputs.empty() || inputs[0].empty()) {
        std::cout << "===== Error =====" << std::endl;
        std::cout << "输入向量为空!" << std::endl;
        return false;
    }

    const auto numInputs = m_inputDims.size();
    if (inputs.size() != numInputs) {
        std::cout << "===== Error =====" << std::endl;
        std::cout << "提供的输入数量不正确!" << std::endl;
        return false;
    }

    // The batch size must not exceed the configured maximum
    if (inputs[0].size() > static_cast<size_t>(m_options.maxBatchSize)) {
        std::cout << "===== Error =====" << std::endl;
        std::cout << "批量大小超过了模型的期望值!" << std::endl;
        std::cout << "模型最大批量大小: " << m_options.maxBatchSize << std::endl;
        std::cout << "调用 runInference 提供的批量大小: " << inputs[0].size() << std::endl;
        return false;
    }

    // For a fixed model batch size, the input length must match exactly
    if (m_inputBatchSize != -1 && inputs[0].size() != static_cast<size_t>(m_inputBatchSize)) {
        std::cout << "===== Error =====" << std::endl;
        std::cout << "批量大小与模型期望的不符!" << std::endl;
        std::cout << "模型批量大小: " << m_inputBatchSize << std::endl;
        std::cout << "调用 runInference 提供的批量大小: " << inputs[0].size() << std::endl;
        return false;
    }

    const auto batchSize = static_cast<int32_t>(inputs[0].size());
    // All inputs must share the same batch size
    for (size_t i = 1; i < inputs.size(); ++i) {
        if (inputs[i].size() != static_cast<size_t>(batchSize)) {
            std::cout << "===== Error =====" << std::endl;
            std::cout << "所有输入的批量大小必须一致!" << std::endl;
            return false;
        }
    }

    // Create the CUDA stream used for this inference call
    cudaStream_t inferenceCudaStream;
    Util::checkCudaErrorCode(cudaStreamCreate(&inferenceCudaStream));

    std::vector<cv::cuda::GpuMat> preprocessedInputs;

    // Preprocess every input
    for (size_t i = 0; i < numInputs; ++i) {
        const auto &batchInput = inputs[i];
        const auto &dims = m_inputDims[i];

        auto &input = batchInput[0];
        if (input.channels() != dims.d[0] || input.rows != dims.d[1] || input.cols != dims.d[2]) {
            std::cout << "===== Error =====" << std::endl;
            std::cout << "输入的尺寸不正确!" << std::endl;
            std::cout << "期望: (" << dims.d[0] << ", " << dims.d[1] << ", " << dims.d[2] << ")" << std::endl;
            std::cout << "得到: (" << input.channels() << ", " << input.rows << ", " << input.cols << ")" << std::endl;
            std::cout << "确保你将输入图像调整到正确的尺寸" << std::endl;
            // Bug fix: the stream used to leak on this early return
            Util::checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
            return false;
        }

        nvinfer1::Dims4 inputDims = {batchSize, dims.d[0], dims.d[1], dims.d[2]};
        m_context->setInputShape(m_IOTensorNames[i].c_str(), inputDims); // Define the batch size

        // OpenCV stores images in NHWC while TensorRT expects NCHW; convert here.
        // TensorRT may still use NHWC internally for optimized CUDA kernels.
        // See: https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#data-layout
        auto mfloat = blobFromGpuMats(batchInput, m_subVals, m_divVals, m_normalize);
        preprocessedInputs.push_back(mfloat);
        m_buffers[i] = mfloat.ptr<void>();
    }

    // All dynamic bindings must be fully specified by now
    if (!m_context->allInputDimensionsSpecified()) {
        // Bug fix: the stream used to leak when this threw
        Util::checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
        throw std::runtime_error("Error, not all required dimensions specified.");
    }

    // Register the input and output buffer addresses with the context
    for (size_t i = 0; i < m_buffers.size(); ++i) {
        bool status = m_context->setTensorAddress(m_IOTensorNames[i].c_str(), m_buffers[i]);
        if (!status) {
            // Bug fix: the stream used to leak on this early return
            Util::checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
            return false;
        }
    }

    // Enqueue the inference work on the stream
    bool status = m_context->enqueueV3(inferenceCudaStream);
    if (!status) {
        // Bug fix: the stream used to leak on this early return
        Util::checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
        return false;
    }

    // Copy the outputs back to the CPU
    featureVectors.clear();

    for (int batch = 0; batch < batchSize; ++batch) {
        // One entry per output tensor for this batch element
        std::vector<std::vector<T>> batchOutputs{};
        for (int32_t outputBinding = numInputs; outputBinding < m_engine->getNbIOTensors(); ++outputBinding) {
            // Output bindings start at index m_inputDims.size() within m_buffers
            std::vector<T> output;
            auto outputLength = m_outputLengths[outputBinding - numInputs];
            output.resize(outputLength);
            // Copy this batch element's slice of the output buffer
            Util::checkCudaErrorCode(cudaMemcpyAsync(output.data(),
                                                     static_cast<char *>(m_buffers[outputBinding]) + (batch * sizeof(T) * outputLength),
                                                     outputLength * sizeof(T), cudaMemcpyDeviceToHost, inferenceCudaStream));
            batchOutputs.emplace_back(std::move(output));
        }
        featureVectors.emplace_back(std::move(batchOutputs));
    }

    // Wait for all queued work to finish, then release the stream
    Util::checkCudaErrorCode(cudaStreamSynchronize(inferenceCudaStream));
    Util::checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
    return true;
}


// Converts a batch of NHWC GpuMats into a single contiguous NCHW float blob,
// applying optional [0,1] scaling, mean subtraction and per-channel division.
// NOTE(review): assumes every image in batchInput is CV_8UC3 with the same
// dimensions as batchInput[0], and that gpu_dst's single-row allocation is
// contiguous -- confirm with callers.
template <typename T>
cv::cuda::GpuMat Engine<T>::blobFromGpuMats(const std::vector<cv::cuda::GpuMat> &batchInput, const std::array<float, 3> &subVals,
                                            const std::array<float, 3> &divVals, bool normalize) {
    // One row holding all images back to back (3 interleaved channels per pixel)
    cv::cuda::GpuMat gpu_dst(1, batchInput[0].rows * batchInput[0].cols * batchInput.size(), CV_8UC3);

    // Pixels per image == size of one planar channel
    size_t width = batchInput[0].cols * batchInput[0].rows;
    for (size_t img = 0; img < batchInput.size(); img++) {
        // Three planar channel views aliasing gpu_dst's buffer for this image
        std::vector<cv::cuda::GpuMat> input_channels{
            cv::cuda::GpuMat(batchInput[0].rows, batchInput[0].cols, CV_8U, &(gpu_dst.ptr()[0 + width * 3 * img])),
            cv::cuda::GpuMat(batchInput[0].rows, batchInput[0].cols, CV_8U, &(gpu_dst.ptr()[width + width * 3 * img])),
            cv::cuda::GpuMat(batchInput[0].rows, batchInput[0].cols, CV_8U, &(gpu_dst.ptr()[width * 2 + width * 3 * img]))};
        cv::cuda::split(batchInput[img], input_channels); // HWC -> CHW
    }

    cv::cuda::GpuMat mfloat;
    if (normalize) {
        // [0, 255] -> [0, 1]
        gpu_dst.convertTo(mfloat, CV_32FC3, 1.f / 255.f);
    } else {
        gpu_dst.convertTo(mfloat, CV_32FC3);
    }

    // Per-channel mean subtraction, then per-channel division
    cv::cuda::subtract(mfloat, cv::Scalar(subVals[0], subVals[1], subVals[2]), mfloat, cv::noArray(), -1);
    cv::cuda::divide(mfloat, cv::Scalar(divVals[0], divVals[1], divVals[2]), mfloat, 1, -1);

    return mfloat;
}
// Serializes the engine options into a unique engine-file name so that a
// cached engine is only reused when it was built with the same GPU,
// precision, and batch-size options.
template <typename T> std::string Engine<T>::serializeEngineOptions(const Options &options, const std::string &onnxModelPath) {
    const auto filenamePos = onnxModelPath.find_last_of('/') + 1;
    std::string engineName = onnxModelPath.substr(filenamePos, onnxModelPath.find_last_of('.') - filenamePos) + ".engine";

    std::vector<std::string> deviceNames;
    getDeviceNames(deviceNames);

    if (static_cast<size_t>(options.deviceIndex) >= deviceNames.size()) {
        throw std::runtime_error("Error, provided device index is out of range!");
    }

    // Engines are device-specific: embed the (whitespace-stripped) GPU name
    auto deviceName = deviceNames[options.deviceIndex];
    deviceName.erase(std::remove_if(deviceName.begin(), deviceName.end(), ::isspace), deviceName.end());

    engineName += "." + deviceName;

    if (options.precision == Precision::FP16) {
        engineName += ".fp16";
    } else if (options.precision == Precision::FP32) {
        engineName += ".fp32";
    } else {
        engineName += ".int8";
    }

    engineName += "." + std::to_string(options.maxBatchSize);
    engineName += "." + std::to_string(options.optBatchSize);
    // Bug fix: a stray final assignment used to overwrite engineName with just
    // "<model>.engine", discarding every option-specific suffix built above and
    // defeating the purpose of this function (a cached engine would be reused
    // even when built with different options or on a different GPU).
    return engineName;
}

// Fills deviceNames with the name of every CUDA-capable device on the system.
template <typename T> void Engine<T>::getDeviceNames(std::vector<std::string> &deviceNames) {
    int numGPUs;
    // Check the return codes (consistent with the rest of this file) so a
    // broken CUDA setup fails loudly instead of yielding garbage properties.
    Util::checkCudaErrorCode(cudaGetDeviceCount(&numGPUs));

    for (int device = 0; device < numGPUs; device++) {
        cudaDeviceProp prop;
        Util::checkCudaErrorCode(cudaGetDeviceProperties(&prop, device));

        deviceNames.push_back(std::string(prop.name));
    }
}

// Resizes an image while preserving its aspect ratio, padding only the right
// and bottom with bgcolor so detections map back to the original frame via a
// single scale factor.
// NOTE(review): the destination is hard-coded to CV_8UC3; other input types
// would mismatch on copyTo -- confirm callers only pass 8-bit 3-channel images.
template <typename T>
cv::cuda::GpuMat Engine<T>::resizeKeepAspectRatioPadRightBottom(const cv::cuda::GpuMat &input, size_t height, size_t width,
                                                                const cv::Scalar &bgcolor) {
    // Scale factor that fits the image inside (width x height)
    float r = std::min(width / (input.cols * 1.0), height / (input.rows * 1.0));
    int unpad_w = r * input.cols;
    int unpad_h = r * input.rows;
    cv::cuda::GpuMat re(unpad_h, unpad_w, CV_8UC3);
    cv::cuda::resize(input, re, re.size());
    // Paste the resized image into the top-left corner of the padded canvas
    cv::cuda::GpuMat out(height, width, CV_8UC3, bgcolor);
    re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows)));
    return out;
}


// Collapses a [1][output][feature] result into [output][feature].
// Only valid when the outer (batch) dimension is exactly 1; throws otherwise.
template <typename T>
void Engine<T>::transformOutput(std::vector<std::vector<std::vector<T>>> &input, std::vector<std::vector<T>> &output) {
    const auto batchCount = input.size();
    if (batchCount != 1) {
        throw std::logic_error("The feature vector has incorrect dimensions!");
    }

    output = std::move(input.front());
}

// Collapses a [1][1][feature] result into a flat [feature] vector.
// Only valid when both the batch and output dimensions are exactly 1.
template <typename T> void Engine<T>::transformOutput(std::vector<std::vector<std::vector<T>>> &input, std::vector<T> &output) {
    const bool singleBatchSingleOutput = (input.size() == 1) && (input.front().size() == 1);
    if (!singleBatchSingleOutput) {
        throw std::logic_error("The feature vector has incorrect dimensions!");
    }

    output = std::move(input.front().front());
}

