﻿#include "yolov8.h"

#include <memory.h>

#include <algorithm>
#include <codecvt>
#include <cstdio>
#include <cstring>
#include <fstream>

#include <QDir>
#include <QMessageBox>

using namespace std;
using namespace cv;


// Convert a UTF-8 encoded C string into a std::wstring.
// NOTE(review): std::codecvt_utf8 is deprecated since C++17 but still
// functional; kept here to preserve existing behavior.
static std::wstring charToWstring(const char* str)
{
    using Utf8Codec = std::codecvt_utf8<wchar_t>;
    std::wstring_convert<Utf8Codec, wchar_t> utf8ToWide;
    return utf8ToWide.from_bytes(str);
}


// Constrain val to the inclusive range [min, max].
// Values at or below min map to min; values at or above max map to max.
inline static float clamp(float val, float min, float max)
{
    if (val <= min)
        return min;
    if (val >= max)
        return max;
    return val;
}


// Return true when a file at `name` can be opened for reading.
inline static bool isFileExists(const char* name)
{
    std::ifstream probe(name);
    return probe.good();
}





// Default construction; all real setup happens in Initialization().
YOLOV8::YOLOV8() = default;

// Release all inference resources allocated in Initialization().
// `delete` on a null pointer is a no-op, so no guards are needed.
YOLOV8::~YOLOV8()
{
    delete[] blob;          // input staging buffer
    delete session;         // destroy the session before its environment
    delete sessionOptions;
    delete env;             // fixed: env was previously never freed (leak)
}

// Load the ONNX model at `onnxfile` and create the inference session.
// Returns true on success. On a missing file an error dialog is shown and
// false is returned; on any exception during setup the partially created
// objects are freed (so the method can safely be retried) and false is
// returned.
bool YOLOV8::Initialization(const char* onnxfile)
{
    // Pre-reserve the per-frame result containers used by detect().
    labels.reserve(20);
    scores.reserve(20);
    bboxes.reserve(20);
    mask_confs.reserve(20);
    indices.reserve(20);
    outputs.reserve(20);

    if (!isFileExists(onnxfile))
    {
        QMessageBox::critical(NULL,"Error",QStringLiteral("模型加载出错，请检查文件是否存在（文件路径中不要有中文）后重试！\n%1").arg(QDir::currentPath()),QMessageBox::Yes,QMessageBox::Yes);
        return false;
    }
    try {
        // The old null-checks after each `new` were dead code: `new`
        // reports failure by throwing, it never returns nullptr.
        env = new Ort::Env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, "YOLOV8");
        sessionOptions = new Ort::SessionOptions();
        sessionOptions->SetInterOpNumThreads(4);
        sessionOptions->SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
        // ONNX Runtime's Windows API takes a wide-character model path.
        std::wstring w_onnxfile = charToWstring(onnxfile);
        session = new Ort::Session(*env, w_onnxfile.c_str(), *sessionOptions);
        // Input staging buffer, reused across detect() calls.
        if (blob == nullptr)
        {
            blob = new float[inputTensorSize] { 0 };
        }
        return true;
    }
    catch (const std::exception&) { // fixed: was catch-by-value (slices Ort::Exception)
        // Roll back partial construction so a later retry starts clean.
        delete session;        session = nullptr;
        delete sessionOptions; sessionOptions = nullptr;
        delete env;            env = nullptr;
        return false;
    }
}




// Run YOLOv8 segmentation inference on `image` and fill the member
// container `outputs` with the detections (label, score, rect, boxMask).
// The letterbox affine transform M and the padding parameters `pparam`
// are cached and only recomputed when the input resolution changes.
void YOLOV8::detect(cv::Mat &image)
{
    int width = image.cols;
    int height = image.rows;
    cv::Mat outImage;
    if (preWidth == width && preHeight == height)
    {
        // Same resolution as the previous frame: reuse the cached transform.
        cv::warpAffine(image, outImage, M, dst_size, 1, 0, color);
    }else
    {
        preWidth = width;
        preHeight = height;
        // Letterbox: scale to fit INPUT_SEG_W x INPUT_SEG_H while keeping
        // the aspect ratio; the remainder is padded with `color`.
        float scale_xy = std::min((float)INPUT_SEG_W / (float)width, (float)INPUT_SEG_H / (float)height);
        float padw = (float)width * scale_xy;
        float padh = (float)height * scale_xy;
        this->pparam.ratio = 1 / scale_xy;
        this->pparam.dw = 0.5 * (INPUT_SEG_W - std::round(padw));
        this->pparam.dh = 0.5 * (INPUT_SEG_H - std::round(padh));
        this->pparam.height = height;
        this->pparam.width = width;
        M = (cv::Mat_<float>(2, 3) <<
            scale_xy, 0, -padw * 0.5 + (float)INPUT_SEG_W * 0.5,
            0, scale_xy, -padh * 0.5 + (float)INPUT_SEG_H * 0.5);
        cv::warpAffine(image, outImage, M, dst_size, 1, 0, color);
    }

    // HWC/BGR -> CHW/RGB conversion, scaled to [0,1] (0.00392... == 1/255).
    // NOTE(review): memset uses INPUT_SIZE while the buffer was allocated
    // with inputTensorSize — presumably equal; confirm in the header.
    std::memset(blob, 0, INPUT_SIZE * sizeof(float));
    for (int c = 0; c < 3; ++c) {
        for (int i = 0; i < INPUT_SEG_H; ++i) {
            cv::Vec3b* p1 = outImage.ptr<cv::Vec3b>(i);
            for (int j = 0; j < INPUT_SEG_W; ++j) {
                blob[c * INPUT_SEG_W * INPUT_SEG_H + i * INPUT_SEG_W + j] = p1[j][2 - c] * 0.00392156862745098;
            }
        }
    }

    std::vector<Ort::Value> inputTensors;
    Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(
        OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);

    // CreateTensor wraps the caller's buffer without copying, so `blob` is
    // used directly — the old code copied the whole input into a temporary
    // std::vector every frame for no benefit. `blob` outlives Run().
    inputTensors.push_back(Ort::Value::CreateTensor<float>(
                memoryInfo, blob, inputTensorSize,
                inputTensorShape.data(), inputTensorShape.size()
                ));

    std::vector<Ort::Value> outputTensors = this->session->Run(Ort::RunOptions{ nullptr },
        inputNames.data(),
        inputTensors.data(),
        1,
        outputNames.data(),
        2);

    // Reset per-frame results (clear() keeps capacity, no reallocation).
    labels.clear();
    scores.clear();
    bboxes.clear();
    mask_confs.clear();
    indices.clear();
    outputs.clear();

    const float* rawOutput0 = outputTensors[0].GetTensorData<float>();
    const float* rawOutput1 = outputTensors[1].GetTensorData<float>();

    // output0: detection head, one row per anchor:
    //   [cx, cy, w, h, class scores..., mask coefficients...]
    // output1: mask prototypes, seg_channels x (segWidth*segHeight).
    cv::Mat output0(num_anchors, fea_channels, CV_32FC1, const_cast<float*>(rawOutput0));
    cv::Mat output1(seg_channels, segWidth * segHeight, CV_32FC1, const_cast<float*>(rawOutput1));

    for (int i = 0; i < num_anchors; i++)
    {
        auto row_ptr = output0.ptr<float>(i);
        auto bboxes_ptr = row_ptr;
        auto scores_ptr = row_ptr + 4;
        auto mask_confs_ptr = row_ptr + 4 + num_classes;
        auto max_s_ptr = std::max_element(scores_ptr, scores_ptr + num_classes);
        float score = *max_s_ptr;
        if (score > _classThreshold)
        {
            // Undo the letterbox transform: subtract padding, scale back to
            // original-image coordinates, and clamp to the image bounds.
            float x = *bboxes_ptr++ - pparam.dw;
            float y = *bboxes_ptr++ - pparam.dh;
            float w = *bboxes_ptr++;
            float h = *bboxes_ptr;
            float x0 = clamp((x - 0.5f * w) * this->pparam.ratio, 0.f, this->pparam.width);
            float y0 = clamp((y - 0.5f * h) * this->pparam.ratio, 0.f, this->pparam.height);
            float x1 = clamp((x + 0.5f * w) * this->pparam.ratio, 0.f, this->pparam.width);
            float y1 = clamp((y + 0.5f * h) * this->pparam.ratio, 0.f, this->pparam.height);
            int label = max_s_ptr - scores_ptr;
            float with_ = x1 - x0;
            float height_ = y1 - y0;
            // Drop boxes smaller than 12 px in either dimension.
            if (with_ < 12 || height_ < 12) continue;
            cv::Rect_<float> bbox{ x0, y0, with_, height_ };
            std::vector<float> mask_conf(mask_confs_ptr, mask_confs_ptr + seg_channels);
            bboxes.push_back(bbox);
            labels.push_back(label);
            scores.push_back(score);
            mask_confs.push_back(mask_conf);
        }
    }

    cv::dnn::NMSBoxes(bboxes, scores, _classThreshold, _nmsThreshold, indices);

    // Keep at most `topk` detections after NMS; gather their mask
    // coefficients contiguously for one batched matrix multiply below.
    std::vector<float> masks;
    masks.reserve(seg_channels * indices.size());
    int cnt = 0;
    for (auto& i : indices)
    {
        if (cnt >= topk) break;
        seg::OutputSeg obj;
        obj.label = labels[i];
        obj.rect = bboxes[i];
        obj.prob = scores[i];
        masks.insert(masks.end(), mask_confs[i].begin(), mask_confs[i].end());
        outputs.emplace_back(obj);
        cnt += 1;
    }

    if (masks.empty()) return;

    // FIX: everything below must be sized by the number of kept detections
    // (outputs.size(), capped at topk) — the old code used indices.size(),
    // which mismatched the mask matrix and read past `outputs` whenever
    // NMS returned more than `topk` boxes.
    const int kept = static_cast<int>(outputs.size());

    // Coefficients (kept x seg_channels) times prototypes gives one
    // low-resolution mask per kept detection.
    cv::Mat masks_mat = cv::Mat(kept, seg_channels, CV_32FC1, masks.data());
    cv::Mat matmulRes = (masks_mat * output1).t();
    // NOTE(review): dims passed as { segWidth, segHeight }; this is only
    // correct when the prototype map is square — confirm segWidth == segHeight.
    cv::Mat maskMat = matmulRes.reshape(kept, { segWidth, segHeight });
    std::vector<cv::Mat> maskChannels;
    cv::split(maskMat, maskChannels);

    // Crop away the letterbox padding, expressed in prototype-map pixels.
    int scale_dw = pparam.dw / INPUT_SEG_W * segWidth;
    int scale_dh = pparam.dh / INPUT_SEG_H * segHeight;
    cv::Rect roi(
        scale_dw,
        scale_dh,
        segWidth - 2 * scale_dw,
        segHeight - 2 * scale_dh);

    cv::Mat dest, mask;
    for (int i = 0; i < kept; i++)
    {
        // Sigmoid the raw mask, upscale it to the original image size, then
        // binarize inside the detection's bounding box.
        cv::exp(-maskChannels[i], dest);
        dest = 1.0 / (1.0 + dest);
        cv::resize(dest(roi), mask, cv::Size((int)pparam.width, (int)pparam.height), cv::INTER_LINEAR);
        outputs[i].boxMask = mask(outputs[i].rect) > _maskThreshold;
    }
}


// Overlay the current detections onto `image`: bounding boxes, filled
// instance masks (blended at the end), and "class score%" labels.
void YOLOV8::draw_objects(cv::Mat& image)
{
    // Masks are painted on a copy and alpha-blended at the end so they
    // tint rather than overwrite the pixels.
    cv::Mat mask = image.clone();
    for (auto& obj : outputs)
    {
        int idx = obj.label;
        cv::Scalar color = cv::Scalar(COLORS[idx][0], COLORS[idx][1], COLORS[idx][2]);
        cv::Scalar mask_color = cv::Scalar(MASK_COLORS[idx % 20][0],MASK_COLORS[idx % 20][1],MASK_COLORS[idx % 20][2]);
        cv::rectangle(image, obj.rect, color, 2);

        // fixed: snprintf bounds the write — sprintf could overflow the
        // 256-byte buffer on a pathologically long class name.
        char text[256];
        snprintf(text, sizeof(text), "%s %.1f%%", CLASS_NAMES[idx].c_str(), obj.prob * 100);
        mask(obj.rect).setTo(mask_color, obj.boxMask);
        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine);
        int x = (int)obj.rect.x;
        int y = (int)obj.rect.y + 1;
        if (y > image.rows)
            y = image.rows;
        // Filled red background behind the label text.
        cv::rectangle(image, cv::Rect(x, y, label_size.width, label_size.height + baseLine), { 0, 0, 255 }, -1);
        cv::putText(image, text, cv::Point(x, y + label_size.height), cv::FONT_HERSHEY_SIMPLEX, 0.4, { 255, 255, 255 }, 1);
    }
    cv::addWeighted(image, 0.5, mask, 0.8, 1, image);
}
