#include "yolov8_tensorrt.h"
#include <fstream>
#include <algorithm>
#include <numeric>
#include <chrono>
#include <opencv2/opencv.hpp>

// File-local TensorRT logger instance, passed to createInferRuntime() in loadEngine().
static Logger gLogger;

/// Builds the inference pipeline: deserializes the engine, creates an
/// execution context and CUDA stream, and allocates device + pinned host
/// buffers for one CHW float input image and one float per class.
/// @param enginePath path to a serialized TensorRT engine file
/// @throws std::runtime_error on any engine-load or CUDA failure
/// NOTE(review): if a later step throws, earlier CUDA allocations leak
/// (the destructor does not run for a partially-constructed object);
/// RAII wrappers for the CUDA resources would fix this properly.
YOLOv8TensorRT::YOLOv8TensorRT(const std::string& enginePath) {
    // Deserialize the engine from disk.
    if (!loadEngine(enginePath)) {
        throw std::runtime_error("Failed to load TensorRT engine");
    }

    // Create the execution context used by classify().
    context.reset(engine->createExecutionContext());
    if (!context) {
        throw std::runtime_error("Failed to create execution context");
    }

    // Create the CUDA stream — previously the result was silently ignored.
    if (cudaStreamCreate(&stream) != cudaSuccess) {
        throw std::runtime_error("Failed to create CUDA stream");
    }

    // Buffer sizes: CHW float input, one float per class output.
    size_t inputSize = 3 * INPUT_H * INPUT_W * sizeof(float);
    size_t outputSize = NUM_CLASSES * sizeof(float);

    // Device buffers; a failed cudaMalloc would otherwise leave null
    // pointers that crash later inside classify().
    if (cudaMalloc(&buffers[0], inputSize) != cudaSuccess ||
        cudaMalloc(&buffers[1], outputSize) != cudaSuccess) {
        throw std::runtime_error("Failed to allocate device buffers");
    }

    // Pinned (page-locked) host memory for fast async H2D/D2H copies.
    if (cudaHostAlloc((void**)&inputBuffer, inputSize, cudaHostAllocDefault) != cudaSuccess ||
        cudaHostAlloc((void**)&outputBuffer, outputSize, cudaHostAllocDefault) != cudaSuccess) {
        throw std::runtime_error("Failed to allocate pinned host buffers");
    }
}

/// Releases all CUDA resources acquired in the constructor.
/// cudaFree / cudaFreeHost tolerate null pointers, so no guards are
/// needed for the device buffers.
YOLOv8TensorRT::~YOLOv8TensorRT() {
    // Device buffers first, then pinned host memory, then the stream.
    cudaFree(buffers[0]);
    cudaFree(buffers[1]);
    if (inputBuffer) cudaFreeHost(inputBuffer);
    if (outputBuffer) cudaFreeHost(outputBuffer);
    cudaStreamDestroy(stream);
}

/// Reads a serialized TensorRT engine from disk and deserializes it into
/// the `engine` member.
/// @param enginePath path to the engine file
/// @return true if the engine was successfully deserialized
bool YOLOv8TensorRT::loadEngine(const std::string& enginePath) {
    std::ifstream file(enginePath, std::ios::binary);
    if (!file.good()) {
        std::cerr << "Error: Unable to open engine file: " << enginePath << std::endl;
        return false;
    }

    // Determine file size; tellg() returns -1 on failure, which the old
    // code would have silently converted to a huge size_t.
    file.seekg(0, file.end);
    std::streampos fileSize = file.tellg();
    if (fileSize <= 0) {
        std::cerr << "Error: Engine file is empty or unreadable: " << enginePath << std::endl;
        return false;
    }
    file.seekg(0, file.beg);

    std::vector<char> engineData(static_cast<size_t>(fileSize));
    // BUG FIX: the read result was previously ignored — a truncated read
    // would hand garbage to the deserializer.
    if (!file.read(engineData.data(), fileSize)) {
        std::cerr << "Error: Failed to read engine file: " << enginePath << std::endl;
        return false;
    }

    // NOTE(review): the IRuntime is deliberately not destroyed here —
    // TensorRT requires the runtime to outlive engines deserialized from
    // it. Ideally it would be stored as a member alongside `engine`.
    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(gLogger);
    if (!runtime) {
        std::cerr << "Error: Failed to create TensorRT runtime" << std::endl;
        return false;
    }
    engine.reset(runtime->deserializeCudaEngine(engineData.data(), engineData.size()));

    return engine != nullptr;
}

/// Converts a BGR image into the network's input layout:
/// resize to INPUT_W x INPUT_H, BGR -> RGB, scale to [0,1],
/// and transpose HWC -> planar CHW into `inputBuffer`.
/// @param image       source BGR image (any size)
/// @param inputBuffer destination of 3 * INPUT_H * INPUT_W floats
void YOLOv8TensorRT::preprocess(const cv::Mat& image, float* inputBuffer) {
    // Resize to the network's input resolution.
    cv::Mat resized;
    cv::resize(image, resized, cv::Size(INPUT_W, INPUT_H));

    // Swap channel order to RGB.
    cv::Mat rgb;
    cv::cvtColor(resized, rgb, cv::COLOR_BGR2RGB);

    // Normalize byte values to floats in [0,1].
    rgb.convertTo(rgb, CV_32FC3, 1.0 / 255.0);

    // HWC -> CHW: split the interleaved image into three planar Mats
    // that alias the caller's buffer directly, so cv::split writes the
    // final layout with no intermediate copies.
    const int planeSize = INPUT_H * INPUT_W;
    std::vector<cv::Mat> planes;
    planes.reserve(3);
    for (int c = 0; c < 3; ++c) {
        planes.emplace_back(INPUT_H, INPUT_W, CV_32FC1, inputBuffer + c * planeSize);
    }
    cv::split(rgb, planes);
}

/// Runs one synchronous classification pass over `image`.
/// @param image BGR input image (any size; resized internally)
/// @return NUM_CLASSES floats from the model's "output0" tensor
///         (presumably class probabilities if the exported model ends in
///         softmax — TODO confirm against the export configuration)
/// @throws std::runtime_error on any CUDA or inference failure
std::vector<float> YOLOv8TensorRT::classify(const cv::Mat& image) {
    // Fill the pinned host buffer with the CHW-normalized image.
    preprocess(image, inputBuffer);

    // Async host -> device copy on our stream.
    const size_t inputBytes = 3 * INPUT_H * INPUT_W * sizeof(float);
    if (cudaMemcpyAsync(buffers[0], inputBuffer, inputBytes,
                        cudaMemcpyHostToDevice, stream) != cudaSuccess) {
        throw std::runtime_error("Failed to copy input to device");
    }

    // Bind I/O tensors by name and enqueue inference.
    context->setTensorAddress("images", buffers[0]);
    context->setTensorAddress("output0", buffers[1]);
    // BUG FIX: enqueueV3's bool result was previously ignored — a failed
    // enqueue would silently return stale output.
    if (!context->enqueueV3(stream)) {
        throw std::runtime_error("TensorRT inference enqueue failed");
    }

    // Async device -> host copy, then wait for all stream work to finish.
    if (cudaMemcpyAsync(outputBuffer, buffers[1], NUM_CLASSES * sizeof(float),
                        cudaMemcpyDeviceToHost, stream) != cudaSuccess) {
        throw std::runtime_error("Failed to copy output to host");
    }
    if (cudaStreamSynchronize(stream) != cudaSuccess) {
        throw std::runtime_error("CUDA stream synchronization failed");
    }

    // Copy the raw scores out of the pinned buffer into a regular vector.
    return std::vector<float>(outputBuffer, outputBuffer + NUM_CLASSES);
}