#include "yolov8_tensorrt.h"
#include <fstream>
#include <algorithm>
#include <numeric>
#include <chrono>

// File-local TensorRT logger instance, passed to createInferRuntime() in loadEngine().
static Logger gLogger;

/// Builds the detector from a serialized TensorRT engine file.
/// @param enginePath path to the .engine/.plan file produced by trtexec or the builder
/// @throws std::runtime_error if the engine cannot be loaded, the execution
///         context cannot be created, or CUDA stream/buffer allocation fails
YOLOv8TensorRT::YOLOv8TensorRT(const std::string& enginePath) {
    // Deserialize the engine from disk.
    if (!loadEngine(enginePath)) {
        throw std::runtime_error("Failed to load TensorRT engine");
    }

    // Create the execution context (holds per-inference state/bindings).
    context.reset(engine->createExecutionContext());
    if (!context) {
        throw std::runtime_error("Failed to create execution context");
    }

    // CUDA stream used for the async H2D copy -> inference -> D2H copy pipeline.
    // BUG FIX: the original ignored the cudaStreamCreate return code.
    if (cudaStreamCreate(&stream) != cudaSuccess) {
        throw std::runtime_error("Failed to create CUDA stream");
    }

    // Device buffer sizes: input is a CHW float image, output is the raw
    // YOLOv8 head [1, NUM_CLASSES + 4, OUTPUT_SIZE].
    size_t inputSize  = 3 * INPUT_H * INPUT_W * sizeof(float);
    size_t outputSize = 1 * (NUM_CLASSES + 4) * OUTPUT_SIZE * sizeof(float);

    // BUG FIX: the original ignored cudaMalloc failures, which would leave
    // buffers[] null and crash later inside cudaMemcpyAsync/enqueueV3.
    // Release already-acquired resources before throwing, because the
    // destructor does NOT run when a constructor throws.
    if (cudaMalloc(&buffers[0], inputSize) != cudaSuccess) {
        cudaStreamDestroy(stream);
        throw std::runtime_error("cudaMalloc failed for input buffer");
    }
    if (cudaMalloc(&buffers[1], outputSize) != cudaSuccess) {
        cudaFree(buffers[0]);
        cudaStreamDestroy(stream);
        throw std::runtime_error("cudaMalloc failed for output buffer");
    }

    // Host-side staging buffers (released in the destructor).
    inputBuffer  = new float[3 * INPUT_H * INPUT_W];
    outputBuffer = new float[1 * (NUM_CLASSES + 4) * OUTPUT_SIZE];
}

/// Releases CUDA resources and host staging buffers.
/// `context` and `engine` are smart-pointer members and clean up after this
/// body runs, in reverse declaration order.
YOLOv8TensorRT::~YOLOv8TensorRT() {
    // Release stream, device buffers, then host buffers.
    cudaStreamDestroy(stream);
    cudaFree(buffers[0]);
    cudaFree(buffers[1]);
    delete[] inputBuffer;
    delete[] outputBuffer;
}

/// Reads a serialized TensorRT engine from disk and deserializes it into
/// the `engine` member.
/// @param enginePath path to the serialized engine file
/// @return true on success; false if the file is unreadable/truncated or
///         deserialization fails (an error is logged to stderr)
bool YOLOv8TensorRT::loadEngine(const std::string& enginePath) {
    std::ifstream file(enginePath, std::ios::binary);
    if (!file.good()) {
        std::cerr << "Error: Unable to open engine file: " << enginePath << std::endl;
        return false;
    }

    // Determine file size, then read the whole blob.
    file.seekg(0, file.end);
    size_t size = file.tellg();
    file.seekg(0, file.beg);

    std::vector<char> engineData(size);
    file.read(engineData.data(), size);
    // BUG FIX: the original never checked the read result; a truncated or
    // failed read would hand garbage to deserializeCudaEngine.
    if (!file) {
        std::cerr << "Error: Failed to read engine file: " << enginePath << std::endl;
        return false;
    }
    file.close();

    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(gLogger);
    // BUG FIX: createInferRuntime can return nullptr; the original would
    // have dereferenced it unconditionally.
    if (!runtime) {
        std::cerr << "Error: createInferRuntime failed" << std::endl;
        return false;
    }
    engine.reset(runtime->deserializeCudaEngine(engineData.data(), size));
    // NOTE(review): `runtime` is a raw pointer and is NOT managed by a smart
    // pointer here (the previous comment claiming otherwise was wrong — only
    // `engine` is). Since TensorRT 8 the IRuntime must outlive every engine
    // it deserialized, so it must not be destroyed in this function; it is
    // intentionally leaked for the process lifetime. Proper fix: store it as
    // a std::unique_ptr<nvinfer1::IRuntime> member declared BEFORE `engine`
    // in the header so destruction order is runtime-last.

    return engine != nullptr;
}

/// Converts a BGR image into the network's input layout:
/// resize to INPUT_W x INPUT_H, BGR -> RGB, scale to [0,1], and repack
/// from interleaved HWC into planar CHW.
/// @param image       source BGR frame (as delivered by cv::imread / VideoCapture)
/// @param inputBuffer destination host buffer of at least 3*INPUT_H*INPUT_W floats
void YOLOv8TensorRT::preprocess(const cv::Mat& image, float* inputBuffer) {
    // Resize to the fixed network resolution (plain stretch, no letterbox;
    // postprocess() rescales boxes with the matching per-axis factors).
    cv::Mat netInput;
    cv::resize(image, netInput, cv::Size(INPUT_W, INPUT_H));

    // OpenCV stores BGR; the model expects RGB.
    cv::cvtColor(netInput, netInput, cv::COLOR_BGR2RGB);

    // Normalize pixel values to [0,1] as 32-bit floats.
    netInput.convertTo(netInput, CV_32FC3, 1.0 / 255.0);

    // Split interleaved channels and copy each plane contiguously (CHW).
    std::vector<cv::Mat> planes;
    cv::split(netInput, planes);

    const size_t planeElems = static_cast<size_t>(INPUT_H) * INPUT_W;
    float* dst = inputBuffer;
    for (const cv::Mat& plane : planes) {
        memcpy(dst, plane.data, planeElems * sizeof(float));
        dst += planeElems;
    }
}

std::vector<Detection> YOLOv8TensorRT::postprocess(float* output, int imgWidth, int imgHeight) {
    std::vector<Detection> detections;

    // onnx输出为 [1, 84, 8400]，即 output[c * num_boxes + b]
    int num_boxes = OUTPUT_SIZE;      // 8400
    int channels = NUM_CLASSES + 4;   // 84

    float scaleX = (float)imgWidth / INPUT_W;
    float scaleY = (float)imgHeight / INPUT_H;

    for (int b = 0; b < num_boxes; ++b) {
        float cx = output[0 * num_boxes + b];
        float cy = output[1 * num_boxes + b];
        float w  = output[2 * num_boxes + b];
        float h  = output[3 * num_boxes + b];

        // 找最大类别分数
        float maxScore = 0.0f;
        int maxClassId = -1;
        for (int c = 0; c < NUM_CLASSES; ++c) {
            float score = output[(4 + c) * num_boxes + b];
            if (score > maxScore) {
                maxScore = score;
                maxClassId = c;
            }
        }

        if (maxScore > CONF_THRESHOLD) {
            Detection det;
            det.x1 = (cx - w / 2) * scaleX;
            det.y1 = (cy - h / 2) * scaleY;
            det.x2 = (cx + w / 2) * scaleX;
            det.y2 = (cy + h / 2) * scaleY;
            det.confidence = maxScore;
            det.classId = maxClassId;

            // 裁剪到图像边界
            det.x1 = std::max(0.0f, det.x1);
            det.y1 = std::max(0.0f, det.y1);
            det.x2 = std::min((float)imgWidth, det.x2);
            det.y2 = std::min((float)imgHeight, det.y2);

            detections.push_back(det);
        }
    }

    return detections;
}

/// Per-class greedy non-maximum suppression, in place.
/// Boxes are processed in descending confidence order; any same-class box
/// whose IoU with a kept box exceeds NMS_THRESHOLD is discarded.
/// @param detections candidate boxes; replaced with the surviving subset
void YOLOv8TensorRT::nms(std::vector<Detection>& detections) {
    // Highest confidence first, so each kept box suppresses weaker overlaps.
    std::sort(detections.begin(), detections.end(),
              [](const Detection& a, const Detection& b) {
                  return a.confidence > b.confidence;
              });

    std::vector<Detection> result;
    std::vector<bool> suppressed(detections.size(), false);

    for (size_t i = 0; i < detections.size(); ++i) {
        if (suppressed[i]) continue;

        result.push_back(detections[i]);

        // PERF: the area of box i is invariant over the inner loop; the
        // original recomputed it for every j.
        const float areaI = (detections[i].x2 - detections[i].x1) *
                            (detections[i].y2 - detections[i].y1);

        for (size_t j = i + 1; j < detections.size(); ++j) {
            if (suppressed[j]) continue;
            if (detections[i].classId != detections[j].classId) continue;

            // Intersection rectangle of boxes i and j.
            float x1 = std::max(detections[i].x1, detections[j].x1);
            float y1 = std::max(detections[i].y1, detections[j].y1);
            float x2 = std::min(detections[i].x2, detections[j].x2);
            float y2 = std::min(detections[i].y2, detections[j].y2);

            float intersection = std::max(0.0f, x2 - x1) * std::max(0.0f, y2 - y1);
            float areaJ = (detections[j].x2 - detections[j].x1) *
                          (detections[j].y2 - detections[j].y1);
            float unionArea = areaI + areaJ - intersection;

            // BUG FIX: guard against 0/0 -> NaN for degenerate (zero-area)
            // boxes; NaN compared false before too, but this makes the
            // intent explicit and avoids propagating NaN.
            float iou = unionArea > 0.0f ? intersection / unionArea : 0.0f;

            if (iou > NMS_THRESHOLD) {
                suppressed[j] = true;
            }
        }
    }

    detections = result;
}

/// Runs the full detection pipeline on one BGR frame:
/// preprocess -> H2D copy -> TensorRT inference -> D2H copy -> decode -> NMS.
/// Also logs end-to-end latency to stdout.
/// @param image input BGR frame
/// @return final detections in original-image coordinates; empty on
///         inference or CUDA failure (error logged to stderr)
std::vector<Detection> YOLOv8TensorRT::detect(const cv::Mat& image) {
    auto start = std::chrono::high_resolution_clock::now();

    // Fill the host staging buffer with the normalized CHW image.
    preprocess(image, inputBuffer);

    // Async copy of the input to the device on our stream.
    cudaMemcpyAsync(buffers[0], inputBuffer,
                    3 * INPUT_H * INPUT_W * sizeof(float),
                    cudaMemcpyHostToDevice, stream);

    // TensorRT >= 8.5 API: bind tensors by name, then enqueue on the stream
    // (replaces the deprecated enqueueV2(bindings, ...) call).
    context->setTensorAddress("images", buffers[0]);
    context->setTensorAddress("output0", buffers[1]);
    // BUG FIX: the original stored enqueueV3's status in an unused variable
    // and never checked it, so a failed enqueue would silently post-process
    // stale or uninitialized output.
    if (!context->enqueueV3(stream)) {
        std::cerr << "Error: TensorRT enqueueV3 failed" << std::endl;
        return {};
    }

    // Async copy of the full [1, NUM_CLASSES+4, OUTPUT_SIZE] output back to host.
    cudaMemcpyAsync(outputBuffer, buffers[1],
                    1 * (NUM_CLASSES + 4) * OUTPUT_SIZE * sizeof(float),
                    cudaMemcpyDeviceToHost, stream);

    // BUG FIX: surface async CUDA errors instead of ignoring them; the
    // synchronize reports any failure from the copies/kernels queued above.
    if (cudaStreamSynchronize(stream) != cudaSuccess) {
        std::cerr << "Error: cudaStreamSynchronize failed" << std::endl;
        return {};
    }

    // Decode raw output into boxes, then suppress duplicates.
    std::vector<Detection> detections = postprocess(outputBuffer,
                                                    image.cols,
                                                    image.rows);
    nms(detections);

    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
    std::cout << "Inference time: " << duration.count() << " ms" << std::endl;

    return detections;
}