// main.cpp  :  UTF-8-BOM  ,  TensorRT-10.8  +  CUDA12.4  +  VS2022
#include <NvInfer.h>
#include <NvOnnxParser.h>
#include <cuda_runtime.h>
#include <opencv2/opencv.hpp>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

using namespace nvinfer1;
using namespace cv;

// Forwards TensorRT log messages to stdout, suppressing anything less
// severe than a warning (lower Severity enum values are more severe).
class MyLogger : public ILogger
{
public:
    void log(Severity severity, const char* msg) noexcept override
    {
        // Drop INFO/VERBOSE noise; keep errors and warnings.
        if (severity > Severity::kWARNING)
            return;
        std::cout << msg << std::endl;
    }
} logger;

// Reads a serialized TensorRT engine file into memory.
//
// path: filesystem path to the .engine file.
// Returns the raw engine bytes.
// Throws std::runtime_error if the file cannot be opened or fully read.
std::vector<char> loadEngine(const std::string& path)
{
    // Open positioned at the end so tellg() yields the file size directly.
    std::ifstream file(path, std::ios::binary | std::ios::ate);
    if (!file.good()) throw std::runtime_error("Cannot open engine!");
    const std::streamsize size = file.tellg();
    file.seekg(0, std::ios::beg);
    std::vector<char> buffer(static_cast<size_t>(size));
    // Verify the read succeeded — a short read would otherwise hand a
    // partially-filled buffer to the deserializer.
    if (!file.read(buffer.data(), size))
        throw std::runtime_error("Cannot open engine!");
    return buffer;
}

int main(int argc, char** argv)
{
    if (argc < 3)
    {
        std::cerr << "Usage: yolo11_trt.exe model.engine image.jpg\n";
        return -1;
    }
    std::string enginePath = argv[1];
    std::string imgPath    = argv[2];

    // 1. 反序列化引擎（10.8 API）
    auto runtime = createInferRuntime(logger);
    auto engineBuf = loadEngine(enginePath);
    auto engine = runtime->deserializeCudaEngine(engineBuf.data(), engineBuf.size());
    auto context = engine->createExecutionContext();

    // 2. 分配 GPU/CPU 缓存
    const int INPUT_H = 640;
    const int INPUT_W = 640;
    const int INPUT_C = 3;
    const int INPUT_SIZE  = INPUT_C * INPUT_H * INPUT_W * sizeof(float);
    const int OUTPUT_SIZE = (5 + 80) * 8400 * sizeof(float);

    void *dIn, *dOut;
    cudaMalloc(&dIn, INPUT_SIZE);
    cudaMalloc(&dOut, OUTPUT_SIZE);
    std::vector<float> inputHost(INPUT_C * INPUT_H * INPUT_W);
    std::vector<float> outputHost((5 + 80) * 8400);

    // 3. 预处理
    Mat img0 = imread(imgPath);
    int h0 = img0.rows, w0 = img0.cols;
    float r = std::min(float(INPUT_H) / h0, float(INPUT_W) / w0);
    int h = int(h0 * r), w = int(w0 * r);
    Mat resized; resize(img0, resized, Size(w, h));
    int top = (INPUT_H - h) / 2, left = (INPUT_W - w) / 2;
    Mat blob(INPUT_H, INPUT_W, CV_8UC3, Scalar(114, 114, 114));
    resized.copyTo(blob(Rect(left, top, w, h)));
    blob.convertTo(blob, CV_32FC3, 1.0 / 255.0);
    // HWC -> CHW
    int idx = 0;
    for (int c = 0; c < INPUT_C; ++c)
        for (int row = 0; row < INPUT_H; ++row)
            for (int col = 0; col < INPUT_W; ++col)
                inputHost[idx++] = blob.at<Vec3f>(row, col)[c];

    // 4. 推理
    cudaMemcpy(dIn, inputHost.data(), INPUT_SIZE, cudaMemcpyHostToDevice);
    void* bindings[] = { dIn, dOut };
    context->enqueueV2(bindings, 0, nullptr);
    cudaMemcpy(outputHost.data(), dOut, OUTPUT_SIZE, cudaMemcpyDeviceToHost);

    // 5. 后处理（NMS + 画框）
    std::vector<Rect> boxes;
    std::vector<float> confs;
    auto* ptr = outputHost.data();
    const float conf_thres = 0.25f, nms_thres = 0.45f;
    for (int i = 0; i < 8400; ++i)
    {
        float x = ptr[i * 85 + 0];
        float y = ptr[i * 85 + 1];
        float w = ptr[i * 85 + 2];
        float h = ptr[i * 85 + 3];
        float conf = ptr[i * 85 + 4];
        if (conf < conf_thres) continue;
        int cls = std::max_element(ptr + i * 85 + 5, ptr + i * 85 + 85) - (ptr + i * 85 + 5);
        float score = conf * ptr[i * 85 + 5 + cls];
        if (score < conf_thres) continue;
        int x1 = int((x - w / 2 - left) / r);
        int y1 = int((y - h / 2 - top) / r);
        int x2 = int((x + w / 2 - left) / r);
        int y2 = int((y + h / 2 - top) / r);
        boxes.emplace_back(x1, y1, x2 - x1, y2 - y1);
        confs.push_back(score);
    }
    std::vector<int> indices;
    cv::dnn::NMSBoxes(boxes, confs, conf_thres, nms_thres, indices);
    for (int i : indices)
    {
        rectangle(img0, boxes[i], Scalar(0, 255, 0), 2);
        putText(img0, format("%.2f", confs[i]), Point(boxes[i].x, boxes[i].y - 5),
                FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 255, 0), 2);
    }
    imwrite("result.jpg", img0);
    std::cout << "Saved result.jpg  (" << indices.size() << " objects)\n";

    cudaFree(dIn);
    cudaFree(dOut);
    return 0;
}