﻿#include "InferObjDetect.h"

#include <algorithm>
#include <array>
#include <fstream>
#include <iostream>
#include <random>
#include <string>

#include <QFileInfo>
#include <QString>

#include "yaml-cpp/yaml.h"

// Default-construct the detector; model/session members are set up later by
// loadModelFile(). An out-of-line defaulted constructor is equivalent to the
// original empty body.
InferObjDetect::InferObjDetect() = default;

// Destructor: tears down the ONNX Runtime session, session options and
// environment via release() so the detector never leaks the native handles.
InferObjDetect::~InferObjDetect()
{
    release();
}

// Run one inference pass on `src` (BGR, CV_8UC3) and fill result_ with the
// detections that survive confidence filtering and NMS.
// Returns 0 on success, -1 if the ORT Run() call throws (lastErrorInfo_ is
// set to the exception message in that case).
int InferObjDetect::infer(cv::Mat& src)
{
    result_.getResults().clear();

    // Pad the frame onto a square canvas (top-left aligned) so the aspect
    // ratio survives the resize inside blobFromImage.
    int w = src.cols;
    int h = src.rows;
    int _max = std::max(h, w);
    cv::Mat image = cv::Mat::zeros(cv::Size(_max, _max), CV_8UC3);
    cv::Rect roi(0, 0, w, h);
    src.copyTo(image(roi));

    // fix bug, boxes consistence!
    // Scale factors mapping network-space coordinates back to the padded image.
    xFactor_ = image.cols / static_cast<float>(input_w);
    yFactor_ = image.rows / static_cast<float>(input_h);

    // HWC BGR uint8 -> NCHW RGB float32 in [0,1].
    cv::Mat blob = cv::dnn::blobFromImage(image,
        1 / 255.0,
        cv::Size(input_w, input_h),
        cv::Scalar(0, 0, 0),
        true, false);
    size_t tpixels = input_h * input_w * 3;
    std::array<int64_t, 4> input_shape_info{ 1, 3, input_h, input_w };

    // Wrap the blob in an ORT tensor. This does NOT copy: `blob` must stay
    // alive until Run() returns (it does — same scope).
    auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Ort::Value inputTensor = Ort::Value::CreateTensor<float>(allocator_info,
        blob.ptr<float>(),
        tpixels,
        input_shape_info.data(),
        input_shape_info.size());

    const std::array<const char*, 1> inputNames = { inputNodeNames_[0].c_str() };
    const std::array<const char*, 1> outNames = { outputNodeNames_[0].c_str() };
    std::vector<Ort::Value> ort_outputs;

    int64 start = cv::getTickCount();

    try {
        ort_outputs = session_->Run(Ort::RunOptions{ nullptr },
            inputNames.data(),
            &inputTensor,
            1,
            outNames.data(),
            outNames.size());
    }
    // Fix: catch by const reference. The original caught std::exception BY
    // VALUE, which slices Ort::Exception and copies the message needlessly.
    catch (const std::exception& e) {
        std::cout << e.what() << std::endl;
        lastErrorInfo_ = e.what();
        return -1;
    }

    int64 end = cv::getTickCount();
    double t = (end - start) * 1000 / cv::getTickFrequency();
    //qInfo() << "infer session->Run() time: " << t;


    // Output tensor is [output_h x output_w] (e.g. 84x8400); transpose so
    // each row is one candidate box (8400x84).
    const float* pdata = ort_outputs[0].GetTensorMutableData<float>();
    cv::Mat dout(output_h, output_w, CV_32F, const_cast<float*>(pdata));
    cv::Mat det_output = dout.t(); // 8400x84

    // Post-process: keep candidates whose best class score clears the threshold.
    std::vector<cv::Rect> boxes;
    std::vector<int> classIds;
    std::vector<float> confidences;

    for (int i = 0; i < det_output.rows; i++) {
        // Columns [4, cols) are per-class scores; pick the best one.
        cv::Mat classes_scores = det_output.row(i).colRange(4, det_output.cols);
        cv::Point classIdPoint;
        double score;
        minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);

        // Confidence is in [0, 1].
        if (score > confidenceThresh_)
        {
            // Box is (center-x, center-y, width, height) in network space;
            // convert to a top-left rectangle in padded-image space.
            float cx = det_output.at<float>(i, 0);
            float cy = det_output.at<float>(i, 1);
            float ow = det_output.at<float>(i, 2);
            float oh = det_output.at<float>(i, 3);
            int x = static_cast<int>((cx - 0.5 * ow) * xFactor_);
            int y = static_cast<int>((cy - 0.5 * oh) * yFactor_);
            int width = static_cast<int>(ow * xFactor_);
            int height = static_cast<int>(oh * yFactor_);
            cv::Rect box;
            box.x = x;
            box.y = y;
            box.width = width;
            box.height = height;

            boxes.push_back(box);
            classIds.push_back(classIdPoint.x);
            confidences.push_back(static_cast<float>(score));
        }
    }

    // Non-maximum suppression to drop overlapping detections.
    std::vector<int> indexes;
    cv::dnn::NMSBoxes(boxes, confidences, scoreThresh_, nmsThresh_, indexes);

    // Fix: one RNG for the whole frame. The original constructed a fresh
    // std::random_device + std::mt19937 per detection, which is expensive
    // (mt19937 seeding initializes 624 words) and gains nothing.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<int> dis(100, 255);

    for (size_t i = 0; i < indexes.size(); i++) {
        int index = indexes[i];
        int idx = classIds[index];
        float confidence = confidences[index];

        DetectResult result;
        result.box = boxes[index];
        // Fix: guard the label lookup — the original indexed labels_ blindly
        // and crashed when no (or a too-short) label file was loaded.
        result.className = (idx >= 0 && idx < static_cast<int>(labels_.size()))
            ? labels_[idx]
            : std::to_string(idx);
        result.confidence = confidence;
        result.class_id = idx;

        // Random bright color per detection for drawing.
        result.color = cv::Scalar(dis(gen), dis(gen), dis(gen));

        result_.getResults().push_back(result);
    }

    return 0;
}

bool InferObjDetect::loadModelFile(const char* modelPath)
{
    QString suffixStr = QFileInfo(modelPath).suffix().toLower();
    if (suffixStr != "onnx" && suffixStr != "pt")
    {
        qInfo() << "不支持当前格式，请检查文件是否正确";
        return false;
    }

    release();
    std::string strPath(modelPath);
    std::wstring modelPathLoad = std::wstring(strPath.begin(), strPath.end());

    session_options_ = new Ort::SessionOptions();
    env_ = new Ort::Env(Ort::Env(ORT_LOGGING_LEVEL_ERROR, "yolov11"));

    // cuda设备判断
    auto providers = Ort::GetAvailableProviders();
    for (auto provider : providers)
        std::cout << provider << std::endl;
    //看看有没有CUDA支持列表
    auto cudaAvailable = std::find(providers.begin(), providers.end(), "CUDAExecutionProvider");

    bool isGPU = true;  // 手动开关，默认使用gpu，如果判断没有cuda设备，调用cpu；置为false时，不管怎么样都使用cpu
    if (isGPU && (cudaAvailable != providers.end()))//找到cuda列表
    {
        session_options_->SetIntraOpNumThreads(1);
        session_options_->SetGraphOptimizationLevel(ORT_ENABLE_ALL);
        OrtStatusPtr ortPtr = OrtSessionOptionsAppendExecutionProvider_CUDA(*session_options_, 0);
        //OrtSessionOptionsAppendExecutionProvider_Tensorrt(*session_options_, 0);
        qInfo() << "Inference device: GPU";
    }
    else
    {
        // cpu版本测试ok
        qInfo() << "Inference device: CPU";
    }

    //session_options_->SetGraphOptimizationLevel(ORT_ENABLE_ALL);
    session_ = new Ort::Session(*env_, modelPathLoad.c_str(), *session_options_);

    inputNodeNames_.clear();
    outputNodeNames_.clear();

    size_t numInputNodes = session_->GetInputCount();
    size_t numOutputNodes = session_->GetOutputCount();
    Ort::AllocatorWithDefaultOptions allocator;
    inputNodeNames_.reserve(numInputNodes);

    // 获取输入信息
    for (int i = 0; i < numInputNodes; i++) {
        auto input_name = session_->GetInputNameAllocated(i, allocator);
        inputNodeNames_.push_back(input_name.get());
        Ort::TypeInfo input_type_info = session_->GetInputTypeInfo(i);
        auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
        auto input_dims = input_tensor_info.GetShape();
        input_w = input_dims[3];
        input_h = input_dims[2];
        //std::cout << "input format: NxCxHxW = " << input_dims[0] << "x" << input_dims[1] << "x" << input_dims[2] << "x" << input_dims[3] << std::endl;
    }

    // 获取输出信息
    Ort::TypeInfo output_type_info = session_->GetOutputTypeInfo(0);
    auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
    auto output_dims = output_tensor_info.GetShape();
    output_h = output_dims[1]; // 84
    output_w = output_dims[2]; // 8400
    std::cout << "output format : HxW = " << output_dims[1] << "x" << output_dims[2] << std::endl;
    for (int i = 0; i < numOutputNodes; i++) {
        auto out_name = session_->GetOutputNameAllocated(i, allocator);
        outputNodeNames_.push_back(out_name.get());
    }

    // 预先推理一下，避免第一帧推理时间过长
    cv::Mat test(480, 640, CV_8UC3);
    infer(test);

    return true;
}

// Load class labels from a .txt or .yaml/.yml file into labels_.
// Returns false when the path has no extension or an unsupported one
// (the original always returned true, hiding that nothing was parsed).
bool InferObjDetect::loadLabelFile(const char* filePath)
{
    std::string labelFilePath(filePath);
    // Fix: keep find_last_of's result as size_type and test npos explicitly —
    // the original stored it in an int and relied on implementation-defined
    // conversion for the "no dot" case.
    const std::string::size_type pos = labelFilePath.find_last_of('.');
    if (pos == std::string::npos)
    {
        return false;
    }
    const std::string ext = labelFilePath.substr(pos + 1);
    labels_.clear();
    if (ext == "txt")
    {
        parseTextFile(labelFilePath);
    }
    else if (ext == "yaml" || ext == "yml") // accept both common YAML suffixes
    {
        parseYamlFile(labelFilePath);
    }
    else
    {
        return false;
    }
    return true;
}

// Configure the thresholds used by infer():
//   confidence — minimum best-class score for a raw candidate to be kept
//   score      — score threshold handed to cv::dnn::NMSBoxes
//   nms        — IoU threshold for non-maximum suppression
void InferObjDetect::setParams(float confidence, float score, float nms)
{
    confidenceThresh_ = confidence;
    scoreThresh_ = score;
    nmsThresh_ = nms;
}

// Destroy the ORT session, session options and environment.
// Safe to call repeatedly (it runs from both the destructor and loadModelFile).
void InferObjDetect::release()
{
    // Fix: the original called Ort::X::release() before delete. In the ORT
    // C++ API, release() DETACHES the native handle from the wrapper, so the
    // subsequent delete freed only the wrapper and leaked the native object.
    // Plain delete lets the wrapper destructor free the handle correctly.
    // Fix: null the pointers afterwards — the original left them dangling,
    // so a second release() (e.g. loadModelFile then the destructor after a
    // failed reload) would double-delete. `delete nullptr` is a no-op.
    delete session_;
    session_ = nullptr;

    delete session_options_;
    session_options_ = nullptr;

    delete env_;
    env_ = nullptr;
}

// Parse a "key: label" text file, appending the text after each ':' to
// labels_. Lines without a colon or with an empty label part are skipped.
void InferObjDetect::parseTextFile(std::string filePath)
{
    std::ifstream inputFile(filePath);
    if (!inputFile.is_open())
    {
        return; // best-effort: missing file simply yields no labels
    }

    std::string classLine;
    while (std::getline(inputFile, classLine))
    {
        // Fix: compare against npos on the proper unsigned type — the
        // original stuffed find()'s size_t result into an int and tested
        // `p < 0`, which relies on implementation-defined conversion.
        const std::string::size_type p = classLine.find(':');
        if (p == std::string::npos)
        {
            continue;
        }
        // Everything after the colon is the label (substr clamps the count,
        // so this matches the original substr(p + 1, length - 1) exactly).
        classLine = classLine.substr(p + 1);
        if (classLine.empty())
        {
            continue;
        }
        labels_.push_back(classLine);
    }
    // No explicit close(): ifstream closes itself on scope exit (RAII).
}

void InferObjDetect::parseYamlFile(std::string filePath)
{
    // 读取文件，并解析
    YAML::Node classesData = YAML::LoadFile(filePath);

    // 检索yaml中的值
    const YAML::Node names = classesData["names"];
    for (auto& item : names)
    {
        if (item.IsDefined())   // 自定义格式
        {
            std::string s = QString(item.as<std::string>().c_str()).toLocal8Bit().constData();
            labels_.push_back(s);
        }
        else  // coco.yaml标准格式
        {
            std::string s = QString(item.second.as<std::string>().c_str()).toLocal8Bit().constData();
            labels_.push_back(s);
        }
    }
}
