#include "InferenceOnnx.h"

#include <QApplication>
#include <QString>
#include <QFileInfo>
#include <random>
#include <fstream>
#include "Performance.h"


// Destructor: frees the ONNX Runtime session, session options and
// environment via release().
InferenceOnnx::~InferenceOnnx()
{
    release();
}

// YOLOv5 inference entry point.
// Pads `src` to a square (top-left aligned) so the aspect ratio survives the
// resize to the fixed network input, runs the ONNX session, and fills
// detections_ via postProcess_v5. Returns the detections (empty on failure).
std::vector<Detection> InferenceOnnx::runInfer(cv::Mat& src)
{
    Performance timeTest;
    timeTest.Reset();

    detections_.clear();

    // Letterbox-style padding: copy the image into the top-left corner of a
    // max(h,w) x max(h,w) black canvas.
    int w = src.cols;
    int h = src.rows;
    int _max = max(h, w);
    cv::Mat image = cv::Mat::zeros(cv::Size(_max, _max), CV_8UC3);
    cv::Rect roi(0, 0, w, h);
    src.copyTo(image(roi));

    // fix bug, boxes consistence!
    xFactor_ = image.cols / static_cast<float>(input_w);
    yFactor_ = image.rows / static_cast<float>(input_h);
    double factor = max(xFactor_, yFactor_);

    // NCHW float blob, scaled to [0,1], with BGR->RGB swap and no crop.
    cv::Mat blob = cv::dnn::blobFromImage(image,
        1 / 255.0,
        cv::Size(input_w, input_h),
        cv::Scalar(0, 0, 0),
        true, false);
    // cast before multiplying so the product cannot overflow int
    size_t tpixels = static_cast<size_t>(input_h) * input_w * 3;
    std::array<int64_t, 4> input_shape_info{ 1, 3, input_h, input_w };

    // Wrap the blob's memory in an ORT tensor (no copy).
    auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Ort::Value inputTensor = Ort::Value::CreateTensor<float>(allocator_info,
        blob.ptr<float>(),
        tpixels,
        input_shape_info.data(),
        input_shape_info.size());

    const std::array<const char*, 1> inputNames = { inputNodeNames_[0].c_str() };
    const std::array<const char*, 1> outNames = { outputNodeNames_[0].c_str() };
    std::vector<Ort::Value> ort_outputs;

    double inferPre = timeTest.GetTimeMilli(true);

    try {
        ort_outputs = session_->Run(Ort::RunOptions{ nullptr },
            inputNames.data(),
            &inputTensor,
            1,
            outNames.data(),
            outNames.size());
    }
    catch (const std::exception& e) {   // catch by const ref — the old by-value catch sliced derived exceptions
        std::cout << e.what() << std::endl;
        // qFatal() aborts the process, which made the return below dead code;
        // qCritical() logs the error and lets the caller see an empty result.
        qCritical() << e.what();
        return detections_;
    }

    double inferRun = timeTest.GetTimeMilli(true);

    // Decode the raw output tensor into detections_.
    float* pdata = ort_outputs[0].GetTensorMutableData<float>();
    postProcess_v5(pdata, factor);

    double inferPost = timeTest.GetTimeMilli(true);

    processInfo_ = std::format("inferPre:{0:.2f}---inferRun:{1:.2f}---inferPost:{2:.2f}", inferPre, inferRun, inferPost);

    return detections_;
}


bool InferenceOnnx::loadModel(std::string modelPath)
{
    QString suffixStr = QFileInfo(modelPath.c_str()).suffix().toLower();
    if (suffixStr != "onnx" && suffixStr != "pt" && suffixStr != "op")
    {
        qInfo() << "不支持当前格式，请检查文件是否正确";
        return false;
    }

    release();
    std::wstring modelPathLoad = std::wstring(modelPath.begin(), modelPath.end());
    if (modelPath[1] != ':')
    {
        std::wstring appPath = QApplication::applicationDirPath().toStdWString();
        modelPathLoad = appPath + std::wstring(modelPath.begin(), modelPath.end());
    }    

    session_options_ = new Ort::SessionOptions();
    env_ = new Ort::Env(Ort::Env(ORT_LOGGING_LEVEL_ERROR, "yolov8"));

    // cuda设备判断
    auto providers = Ort::GetAvailableProviders();
    for (auto provider : providers)
        std::cout << provider << std::endl;
    //看看有没有CUDA支持列表
    auto cudaAvailable = std::find(providers.begin(), providers.end(), "CUDAExecutionProvider");

    bool isGPU = true;  // 手动开关，默认使用gpu，如果判断没有cuda设备，调用cpu；置为false时，不管怎么样都使用cpu
    if (isGPU && (cudaAvailable != providers.end()))//找到cuda列表
    {
        session_options_->SetIntraOpNumThreads(1);
        session_options_->SetGraphOptimizationLevel(ORT_ENABLE_ALL);

        OrtSessionOptionsAppendExecutionProvider_CUDA(*session_options_, 0);

        qInfo() << modelPath<<"load success, " << "inference GPU";
    }
    else
    {
        // cpu版本测试ok
        qInfo() << modelPath << "load success, " << "inference CPU";
    }

    //session_options_->SetGraphOptimizationLevel(ORT_ENABLE_ALL);
    session_ = new Ort::Session(*env_, modelPathLoad.c_str(), *session_options_);

    inputNodeNames_.clear();
    outputNodeNames_.clear();

    size_t numInputNodes = session_->GetInputCount();
    size_t numOutputNodes = session_->GetOutputCount();
    Ort::AllocatorWithDefaultOptions allocator;
    inputNodeNames_.reserve(numInputNodes);

    // 获取输入信息
    for (int i = 0; i < numInputNodes; i++) {
        auto input_name = session_->GetInputNameAllocated(i, allocator);
        inputNodeNames_.push_back(input_name.get());
        Ort::TypeInfo input_type_info = session_->GetInputTypeInfo(i);
        auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
        auto input_dims = input_tensor_info.GetShape();
        input_w = input_dims[3];
        input_h = input_dims[2];
        //std::cout << "input format: NxCxHxW = " << input_dims[0] << "x" << input_dims[1] << "x" << input_dims[2] << "x" << input_dims[3] << std::endl;
    }

    // 获取输出信息
    Ort::TypeInfo output_type_info = session_->GetOutputTypeInfo(0);
    auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
    auto output_dims = output_tensor_info.GetShape();
    output_h = output_dims[1]; // 84
    output_w = output_dims[2]; // 8400
    std::cout << "output format : HxW = " << output_dims[1] << "x" << output_dims[2] << std::endl;
    for (int i = 0; i < numOutputNodes; i++) {
        auto out_name = session_->GetOutputNameAllocated(i, allocator);
        outputNodeNames_.push_back(out_name.get());
    }

    return true;
}

// Loads class labels from a text file ("index: name" per line) into labels_.
// Relative paths are resolved against the application directory.
// Returns false only for an empty path; unsupported extensions are logged.
bool InferenceOnnx::loadLabels(std::string labelFilePath)
{
    if (labelFilePath.empty())
    {
        return false;   // nothing to load; also keeps the [1] probe below safe
    }

    // NOTE(review): byte-wise widening only works for ASCII paths — confirm.
    std::wstring labelPathLoad = std::wstring(labelFilePath.begin(), labelFilePath.end());
    // Guard the drive-letter probe: indexing [1] unconditionally (as before)
    // was out of bounds for single-character paths.
    if (labelFilePath.size() < 2 || labelFilePath[1] != ':')
    {
        std::wstring appPath = QApplication::applicationDirPath().toStdWString();
        labelPathLoad = appPath + std::wstring(labelFilePath.begin(), labelFilePath.end());
    }

    // Extract the extension npos-safely (the old int conversion relied on
    // npos narrowing to -1).
    const size_t pos = labelFilePath.find_last_of('.');
    const std::string ext = (pos == std::string::npos)
        ? std::string()
        : labelFilePath.substr(pos + 1);

    labels_.clear();
    if (ext == "txt")
    {
        parseTextFile(labelPathLoad);
    }
    else if (ext == "yaml")
    {
        qInfo() << "不支持该文件格式";
    }
    return true;
}

// YOLOv8 post-processing.
// Output layout per candidate: [cx, cy, w, h, class scores...] — unlike
// YOLOv5 there is NO objectness column. `factor` maps network coordinates
// back to the padded source image. Appends results to detections_.
void InferenceOnnx::postProcess_v8(float* data, float factor)
{
    // Raw output is output_h x output_w (e.g. 84 x 8400); transpose so each
    // row is one candidate.
    cv::Mat dout(output_h, output_w, CV_32F, (float*)data);
    cv::Mat det_output = dout.t(); // 8400x84

    std::vector<cv::Rect> boxes;
    std::vector<int> classIds;
    std::vector<float> confidences;

    // Class scores occupy the last labels_.size() columns.
    const int firstScoreCol = det_output.cols - static_cast<int>(labels_.size());

    for (int i = 0; i < det_output.rows; i++) {
        // The candidate's confidence is simply its best class score. (The old
        // pre-filter read column 4 — the FIRST class score for v8 — and could
        // wrongly discard rows where a different class scored high.)
        cv::Mat classes_scores = det_output.row(i).colRange(firstScoreCol, det_output.cols);
        cv::Point classID;
        double score;
        cv::minMaxLoc(classes_scores, NULL, &score, NULL, &classID);

        // confidence in (0, 1)
        if (score > modelConfidenceThreshold_)
        {
            float cx = det_output.at<float>(i, 0);
            float cy = det_output.at<float>(i, 1);
            float ow = det_output.at<float>(i, 2);
            float oh = det_output.at<float>(i, 3);
            // center/size -> top-left box, scaled back to image space
            cv::Rect box;
            box.x = static_cast<int>((cx - 0.5 * ow) * factor);
            box.y = static_cast<int>((cy - 0.5 * oh) * factor);
            box.width = static_cast<int>(ow * factor);
            box.height = static_cast<int>(oh * factor);

            boxes.push_back(box);
            classIds.push_back(classID.x);
            confidences.push_back(static_cast<float>(score));
        }
    }

    // NMS
    std::vector<int> indexes;
    cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold_, modelNMSThreshold_, indexes);

    // One RNG for the whole batch — the old code re-seeded a fresh engine per
    // detection, which is slow and needless.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<int> dis(100, 255);

    for (size_t i = 0; i < indexes.size(); i++) {
        int index = indexes[i];
        int idx = classIds[index];

        Detection result;
        result.box = boxes[index];
        result.className = labels_[idx];
        result.confidence = confidences[index];
        result.class_id = idx;
        result.color = cv::Scalar(dis(gen), dis(gen), dis(gen));

        detections_.push_back(result);
    }
}

// YOLOv5 post-processing.
// Output layout per candidate: [cx, cy, w, h, objectness, class scores...].
// `factor` maps network coordinates back to the padded source image.
// Appends results to detections_.
void InferenceOnnx::postProcess_v5(float* data, float factor)
{
    cv::Mat det_output(output_h, output_w, CV_32F, (float*)data);

    std::vector<cv::Rect> boxes;
    std::vector<int> classIds;
    std::vector<float> confidences;

    // Class scores occupy the LAST labels_.size() columns (cols = 5 + classes
    // in the standard v5 export). The previous range
    // (cols - size - 1, cols - 1) was shifted left by one: it included the
    // objectness column, dropped the last class, and made classID.x map to
    // the wrong label. The v8 routine already uses the unshifted range.
    const int firstScoreCol = det_output.cols - static_cast<int>(labels_.size());

    for (int i = 0; i < det_output.rows; i++) {
        float confidence = det_output.at<float>(i, 4);  // objectness
        if (confidence < modelConfidenceThreshold_)
        {
            continue;   // cheap reject before scanning class scores
        }
        cv::Mat classes_scores = det_output.row(i).colRange(firstScoreCol, det_output.cols);
        cv::Point classID;
        double score;
        cv::minMaxLoc(classes_scores, NULL, &score, NULL, &classID);

        // final confidence = objectness * best class score, in (0, 1)
        score *= confidence;
        if (score > modelConfidenceThreshold_)
        {
            float cx = det_output.at<float>(i, 0);
            float cy = det_output.at<float>(i, 1);
            float ow = det_output.at<float>(i, 2);
            float oh = det_output.at<float>(i, 3);
            // center/size -> top-left box, scaled back to image space
            cv::Rect box;
            box.x = static_cast<int>((cx - 0.5 * ow) * factor);
            box.y = static_cast<int>((cy - 0.5 * oh) * factor);
            box.width = static_cast<int>(ow * factor);
            box.height = static_cast<int>(oh * factor);

            boxes.push_back(box);
            classIds.push_back(classID.x);
            confidences.push_back(static_cast<float>(score));
        }
    }

    // NMS
    std::vector<int> indexes;
    cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold_, modelNMSThreshold_, indexes);
    for (size_t i = 0; i < indexes.size(); i++) {
        int index = indexes[i];
        int idx = classIds[index];

        Detection result;
        result.box = boxes[index];
        result.className = labels_[idx];
        result.confidence = confidences[index];
        result.class_id = idx;
        // Fixed color; the random-color variant was dead (#if 0) code and has
        // been removed.
        result.color = cv::Scalar(255,0,0);
        detections_.push_back(result);
    }
}

// Parses a label file with one "index: name" entry per line and appends the
// part after ':' to labels_. Lines without ':' or with an empty value are
// skipped. (wstring path: uses the MSVC wide-path fstream overload.)
void InferenceOnnx::parseTextFile(std::wstring filePath)
{
    std::fstream inputFile(filePath);

    if (inputFile.is_open())
    {
        std::string classLine;
        while (std::getline(inputFile, classLine))
        {
            // npos-safe separator check — the old `int p = find(...)` relied
            // on npos narrowing to a negative int.
            const size_t p = classLine.find(':');
            if (p == std::string::npos)
            {
                continue;
            }
            // Everything after the ':' (leading whitespace kept, as before).
            classLine = classLine.substr(p + 1);
            if (classLine.empty())
            {
                continue;
            }
            labels_.push_back(classLine);
        }
        inputFile.close();
    }
}


// Frees the ORT session, session options and environment.
// Safe to call repeatedly (it runs from both loadModel and the destructor):
// pointers are reset to nullptr after deletion, which the old version did
// not do — a second call deleted dangling pointers.
void InferenceOnnx::release()
{
    // Do NOT call ->release() before delete: in the ORT C++ API release()
    // relinquishes ownership of the native handle, so the subsequent delete
    // no longer freed it (leak). Plain delete runs the owning destructor.
    delete session_;          // delete on nullptr is a no-op
    session_ = nullptr;

    delete session_options_;
    session_options_ = nullptr;

    delete env_;
    env_ = nullptr;
}
