#include "yolo_detect_pkg/YOLOV10.hpp"

// C++ standard library (previously only pulled in transitively via opencv.hpp).
#include <algorithm>
#include <iomanip>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// OpenCV (CPU + CUDA modules).
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/opencv.hpp>

/// Formats a float with exactly two digits after the decimal point
/// (e.g. 0.5f -> "0.50"); used for the confidence text in detection labels.
std::string floatToStringWithTwoDecimals(float value) {
    std::ostringstream out;
    out << std::fixed << std::setprecision(2) << value;
    return out.str();
}

// Default constructor intentionally empty: configuration is supplied later via
// YOLOv10InitByConfigure() and the TensorRT engine is built in YOLOV10Inite().
YOLOV10::YOLOV10() {
    
}


std::vector<Detection> YOLOV10::filterDetections(const std::vector<float> &results){
    // 需要返回的检测框
    std::vector<Detection> temp_detections;
    const int num_detections = results.size() / 6;
    float ratiow = float(input_image_widht) / neww;
    float ratioh = float(input_image_height) / newh;
    std::vector<cv::Rect> rects;
    std::vector<float> scores;

    for (int i = 0; i < num_detections; ++i)
    {
        float left = results[i * 6 + 0];
        float top = results[i * 6 + 1];
        float right = results[i * 6 + 2];
        float bottom = results[i * 6 + 3];
        float confidence = results[i * 6 + 4];
        int class_id = results[i * 6 + 5];
        int x = static_cast<int>(left * ratiow);
        int y = static_cast<int>(top * ratioh);
        int width = static_cast<int>((right - left) * ratiow);
        int height = static_cast<int>((bottom - top) * ratioh);
          
        temp_detections.push_back({
                confidence,
                 cv::Rect(x, y, width, height),
                 class_id,
                 CLASSNAMES[class_id]});
        
        rects.push_back(cv::Rect(x, y, width, height));
        scores.push_back(confidence);
    }
    std::vector<int> indexes{};
    cv::dnn::NMSBoxes(rects,scores,CONFIDENCE_THRESHOLD,NMS_THRESHOLD,indexes);
    std::vector<Detection> result{};
    for ( size_t i = 0; i < indexes.size(); i++)
    {
        result.push_back(temp_detections[indexes[i]]);
    }
    return result;
}


/// Prepares a GPU image for inference: letterbox/resize to the network input
/// size, then convert BGR -> RGB, packaged as a [batch][input] GpuMat matrix.
///
/// @param gpuImg Input image in BGR channel order, already on the GPU.
/// @return Nested vector shaped for Engine::runInference (1 batch, 1 input).
std::vector<std::vector<cv::cuda::GpuMat>> YOLOV10::YOLOV10_preprocess(
                                            cv::cuda::GpuMat &gpuImg) {
    using namespace cv::cuda;
    // Resize/pad to the engine's input dimensions; this also records the
    // scaling state consumed later by filterDetections.
    cv::cuda::GpuMat resized = resize_image(gpuImg);
    // BGR -> RGB for the network.
    cv::cuda::GpuMat rgbMat;
    cv::cuda::cvtColor(resized, rgbMat, cv::COLOR_BGR2RGB);
    // BUG FIX: the BGR image was previously moved into the batch and the RGB
    // conversion result was discarded; feed the converted image instead.
    std::vector<cv::cuda::GpuMat> input{std::move(rgbMat)};
    std::vector<std::vector<cv::cuda::GpuMat>> inputs{std::move(input)};
    return inputs;
}

// 进行图像大小的处理
/// Resizes (and, with keep_ratio, letterboxes) the source image to the
/// engine's input size, recording original/resized dimensions and padding as
/// member state for later box rescaling in filterDetections.
///
/// @param srcimg Source image on the GPU.
/// @return Image sized exactly in_p_width x in_p_height.
cv::cuda::GpuMat YOLOV10::resize_image(cv::cuda::GpuMat &srcimg){
    input_image_height = srcimg.rows;
    input_image_widht = srcimg.cols;
    in_p_height = m_trtEngine->getInputDims()[0].d[1];
    in_p_width = m_trtEngine->getInputDims()[0].d[2];
    newh = in_p_height;
    neww = in_p_width;
    // Reset padding so stale values from a previous frame never leak through.
    paddtop = 0;
    paddleft = 0;
    cv::cuda::GpuMat dst_img;
    cv::cuda::GpuMat temp_img;
    if (this->keep_ratio && input_image_height != input_image_widht) {
        const float hw_ratio = static_cast<float>(input_image_height) / static_cast<float>(input_image_widht);
        if (hw_ratio > 1) {
            // Taller than wide: fit the height, shrink the width, pad the RIGHT edge.
            newh = this->in_p_height;
            neww = static_cast<int>(this->in_p_height / hw_ratio);
            // BUG FIX: the interpolation flag was previously passed in the fx
            // position (4th argument) and silently ignored (interpolation
            // defaulted to INTER_LINEAR); pass it in the correct slot.
            cv::cuda::resize(srcimg, dst_img, cv::Size(neww, newh), 0, 0, cv::INTER_AREA);
            paddleft = this->in_p_width - neww;
            // BUG FIX: a bare 114 becomes Scalar(114,0,0,0) and pads only the
            // blue channel; use the full gray fill value on all channels.
            cv::cuda::copyMakeBorder(dst_img, temp_img, 0, 0, 0, paddleft,
                                     cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));
        } else {
            // Wider than tall: fit the width, shrink the height, pad the BOTTOM edge.
            newh = static_cast<int>(this->in_p_height * hw_ratio);
            neww = this->in_p_width;
            cv::cuda::resize(srcimg, dst_img, cv::Size(neww, newh), 0, 0, cv::INTER_AREA);
            paddtop = this->in_p_height - newh;
            cv::cuda::copyMakeBorder(dst_img, temp_img, 0, paddtop, 0, 0,
                                     cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));
        }
    } else {
        // Square input or keep_ratio disabled: plain stretch to the input size.
        cv::cuda::resize(srcimg, temp_img, cv::Size(neww, newh), 0, 0, cv::INTER_AREA);
    }
    return temp_img;
}

/// Draws each detection on a copy of the image: a colored bounding box plus a
/// filled label strip showing "class_name: confidence".
///
/// @param image Source image (not modified).
/// @param detections Detections to render.
/// @return A new image with the annotations drawn.
cv::Mat YOLOV10::YOLOv10_Draw_Line(const cv::Mat &image, const std::vector<Detection> &detections)
{
    cv::Mat result = image.clone();

    for (const auto &detection : detections)
    {
        // One random color per detection, shared by box and label background.
        const cv::Scalar color = getRandomColor();
        cv::rectangle(result, detection.bbox, color, 2);

        // Label text: class name plus confidence with two decimals.
        const std::string label = detection.class_name + ": " + floatToStringWithTwoDecimals(detection.confidence);

        int baseLine = 0;
        const cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        // BUG FIX: for boxes touching the top edge the label used to be drawn
        // above the image and clipped away; clamp its baseline so the strip
        // stays visible.
        const int labelTop = std::max(detection.bbox.y, labelSize.height);

        // Filled background strip behind the text.
        cv::rectangle(
            result,
            cv::Point(detection.bbox.x, labelTop - labelSize.height),
            cv::Point(detection.bbox.x + labelSize.width, labelTop + baseLine),
            color,
            cv::FILLED);

        // Black label text on top of the strip.
        cv::putText(
            result,
            label,
            cv::Point(detection.bbox.x, labelTop),
            cv::FONT_HERSHEY_SIMPLEX,
            0.5,
            cv::Scalar(0, 0, 0),
            1);
    }

    return result;
}


/// CPU-image convenience overload: uploads the image to the GPU and delegates
/// to the GpuMat overload.
///
/// @param inputImageBGR Host image in BGR channel order (not modified).
/// @return Detections from the GPU pipeline.
std::vector<Detection>  YOLOV10::YoloDetectObjects(cv::Mat &inputImageBGR){
    // upload() already performs a deep host->device copy, so the previous
    // host-side clone() was redundant work; upload directly.
    cv::cuda::GpuMat gpuImg;
    gpuImg.upload(inputImageBGR);

    return YoloDetectObjects(gpuImg);
}

/// Runs the full GPU pipeline: preprocess, TensorRT inference, and detection
/// post-processing (confidence filter + NMS).
///
/// @param inputImageBGR GPU image in BGR channel order.
/// @return Final filtered detections mapped to the original image coordinates.
/// @throws std::runtime_error if inference fails.
std::vector<Detection>  YOLOV10::YoloDetectObjects(cv::cuda::GpuMat &inputImageBGR){
    // Preprocess (resize/pad + BGR->RGB) into the engine's batch layout.
    const auto input = YOLOV10_preprocess(inputImageBGR);

    std::vector<std::vector<std::vector<float>>> featureVectors;
    const bool succ = m_trtEngine->runInference(input, featureVectors);
    // BUG FIX: inference failure was previously ignored and empty/stale output
    // was post-processed anyway; fail loudly instead.
    if (!succ) {
        throw std::runtime_error("Error: Unable to run inference.");
    }

    // Flatten the [batch][output][values] structure into one value vector.
    std::vector<float> featureVector;
    Engine<float>::transformOutput(featureVectors, featureVector);

    return filterDetections(featureVector);
}

/// Returns a uniformly random BGR color; RNG state is shared and seeded once
/// per process.
cv::Scalar YOLOV10::getRandomColor()
{
    static std::random_device seed_source;
    static std::mt19937 engine{seed_source()};
    static std::uniform_int_distribution<int> channel{0, 255};

    const int red = channel(engine);
    const int green = channel(engine);
    const int blue = channel(engine);

    // cv::Scalar is BGR-ordered to match OpenCV drawing functions.
    return cv::Scalar(blue, green, red);
}


/// Builds/loads the TensorRT engine from the given ONNX model path.
///
/// @param OnnxModepath Path to the ONNX model file.
/// @return true on success (never returns false: failure throws).
/// @throws std::runtime_error if the engine is not initialized or the build fails.
bool YOLOV10::CreateModle(std::string OnnxModepath){
    // BUG FIX: m_trtEngine is only created in YOLOV10Inite(); calling this
    // first would dereference a null unique_ptr. Guard with a clear error.
    if (!m_trtEngine) {
        throw std::runtime_error("Error: CreateModle called before YOLOV10Inite(); engine not initialized.");
    }
    const bool succ = m_trtEngine->buildLoadNetwork(OnnxModepath, SUB_VALS, DIV_VALS, NORMALIZE);
    if (!succ) {
        const std::string errMsg = "Error: Unable to build or load the TensorRT engine. "
                                   "Try increasing TensorRT log severity to kVERBOSE (in /libs/tensorrt-cpp-api/engine.cpp).";
        throw std::runtime_error(errMsg);
    }
    return succ;
}


/// Copies user configuration into member state; the engine itself is built
/// later by YOLOV10Inite().
///
/// @param configure Configuration struct holding model path, thresholds,
///                  precision, calibration data directory, and class names.
void YOLOV10::YOLOv10InitByConfigure(const YOLOV10Config & configure) {
    // Model / engine settings.
    MODE_PATH = configure.Onnx_path;
    precision = configure.precision;
    calibrationDataDirectory = configure.calibrationDataDirectory;

    // Detection post-processing settings.
    CONFIDENCE_THRESHOLD = configure.confidence_threshold;
    NMS_THRESHOLD = configure.nms_threshold;
    topK = configure.topK;
    CLASSNAMES = configure.classNames;
}

/// Builds the TensorRT inference engine using the options stored previously
/// by YOLOv10InitByConfigure().
///
/// @throws std::runtime_error if INT8 is requested without calibration data,
///         or if the engine fails to build/load.
void YOLOV10::YOLOV10Inite() {
    // Assemble engine build options (single-image batches).
    Options options;
    options.optBatchSize = 1;
    options.maxBatchSize = 1;
    options.precision = precision;
    options.calibrationDataDirectoryPath = calibrationDataDirectory;

    // INT8 builds require a calibration dataset.
    if (options.precision == Precision::INT8 && options.calibrationDataDirectoryPath.empty()) {
        throw std::runtime_error("Error: Must supply calibration data path for INT8 calibration");
    }

    // Create our TensorRT inference engine and build/load the network.
    m_trtEngine = std::make_unique<Engine<float>>(options);

    if (!m_trtEngine->buildLoadNetwork(MODE_PATH, SUB_VALS, DIV_VALS, NORMALIZE)) {
        throw std::runtime_error(
            "Error: Unable to build or load the TensorRT engine. "
            "Try increasing TensorRT log severity to kVERBOSE (in /libs/tensorrt-cpp-api/engine.cpp).");
    }
}