﻿#include "AlgorithmOnnxRuntimeV8.h"

#include <algorithm>
#include <array>
#include <mutex>
#include <string>
#include <vector>

#include "Config.h"
#include "Utils/AlgUtils.h"
#include "Utils/Common.h"
#include "Utils/Log.h"

namespace AVSAnalyzer 
{
    // OnnxRuntimeEngine constructor: creates the ONNX Runtime environment,
    // loads the model from algorithmConfig.path, and caches input/output node
    // names and output tensor dimensions for later inference.
    // @param algorithmConfig: configuration (model path, score/NMS thresholds,
    //                         input size, preprocess mean/std/scale)
    // @throws: rethrows any std::exception (incl. Ort::Exception) after logging
    OnnxRuntimeEngine::OnnxRuntimeEngine(const AlgorithmConfig& algorithmConfig)
        : mAlgorithmConfig(algorithmConfig),
          mScoreThreshold(algorithmConfig.scoreThreshold),
          mNmsThreshold(algorithmConfig.nmsThreshold),
          mInputSize(algorithmConfig.algorithmParams.inputSize),
          mMean(algorithmConfig.algorithmParams.preprocess.mean),
          mStd(algorithmConfig.algorithmParams.preprocess.std),
          mPreprocessScale(algorithmConfig.algorithmParams.preprocess.scale)
    {
        try {
            // Model path comes straight from the configuration.
            const std::string& modelPath = algorithmConfig.path;
            LOGI("modelPath=%s", modelPath.data());

            // Initialize the ONNX Runtime environment and session options.
            mEnv = Ort::Env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, "YOLOV8");
            mSessionOptions = Ort::SessionOptions();
            // Basic graph optimization level.
            mSessionOptions.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);

            // Enumerate the execution providers available in this build.
            std::vector<std::string> providers = Ort::GetAvailableProviders();
            LOGI("supported onnxruntime providers");
            for (size_t i = 0; i < providers.size(); i++)
            {
                // Possible providers:
                // - CPUExecutionProvider: CPU backend (incl. integrated GPU);
                //   needs no explicit registration
                // - CUDAExecutionProvider: NVIDIA GPU backend
                // - AzureExecutionProvider: Azure backend
                // Fix: passing size_t through "..." against %lld is undefined
                // behavior when size_t != long long; cast explicitly.
                LOGI("%lld,%s", static_cast<long long>(i), providers[i].data());
            }

            // Check whether CUDA acceleration is available.
            auto f = std::find(providers.begin(), providers.end(), "CUDAExecutionProvider");
            if (f != providers.end())
            {
                // Uncomment the lines below to enable CUDA:
                //OrtCUDAProviderOptions cudaOption;
                //cudaOption.device_id = 0;
                //mSessionOptions.AppendExecutionProvider_CUDA(cudaOption);
            }

            // Load the model. The Windows ORT API takes a wide-char path.
            // NOTE(review): the char-by-char widening below is only correct for
            // ASCII paths — for non-ASCII paths use MultiByteToWideChar; confirm
            // whether model paths can contain non-ASCII characters.
    #ifdef WIN32
            std::wstring modelPath_ws = std::wstring(modelPath.begin(), modelPath.end());
            mSession = Ort::Session(mEnv, modelPath_ws.c_str(), mSessionOptions);
    #else
            mSession = Ort::Session(mEnv, modelPath.c_str(), mSessionOptions);
    #endif

            // Query model metadata through a default allocator.
            Ort::AllocatorWithDefaultOptions allocator;

            // Input node names (copied into std::string before the allocated
            // name pointer goes out of scope).
            size_t numInputNodes = mSession.GetInputCount();
            mInputNodeNames.reserve(numInputNodes);
            for (size_t i = 0; i < numInputNodes; i++) {
                auto input_name = mSession.GetInputNameAllocated(i, allocator);
                mInputNodeNames.push_back(input_name.get());
            }

            // Input spatial size comes from the configuration, not the model.
            mInputWidth = mInputSize[0];
            mInputHeight = mInputSize[1];

            // Output node names.
            size_t numOutputNodes = mSession.GetOutputCount();
            mOutputNodeNames.reserve(numOutputNodes);
            for (size_t i = 0; i < numOutputNodes; i++) {
                auto out_name = mSession.GetOutputNameAllocated(i, allocator);
                mOutputNodeNames.push_back(out_name.get());
            }

            // Output tensor shape of the first output: [batch, dim, anchors].
            Ort::TypeInfo output_type_info = mSession.GetOutputTypeInfo(0);
            auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
            auto output_dims = output_tensor_info.GetShape();
            mOutputDim = output_dims[1];  // 4 box coords + number of classes
            mOutputRow = output_dims[2];  // number of anchors

            LOGI("Model initialized successfully");
        }
        catch (const std::exception& e) {
            LOGE("Failed to initialize model: %s", e.what());
            throw;
        }
    }

    // Destructor. Ort::Env, Ort::Session and Ort::SessionOptions are RAII
    // wrappers whose own destructors free the underlying native handles.
    // Fix: the previous code called release() on each member, which DETACHES
    // the native handle from the wrapper without freeing it, so the session,
    // options and environment were leaked. Letting the member destructors run
    // (an empty body) is the correct cleanup.
    OnnxRuntimeEngine::~OnnxRuntimeEngine()
    {
        // Intentionally empty: member destructors release all ORT resources.
    }

    // Run one detection pass: letterbox -> blob -> ONNX inference -> decode
    // boxes -> NMS -> fill DetectObject results.
    // @param image: input image (OpenCV Mat)
    // @param detects: output vector; surviving detections are appended
    // @return: true on success, false on any error (logged)
    bool OnnxRuntimeEngine::runInference(cv::Mat& image, std::vector<DetectObject>& detects)
    {
        try {
            // Letterbox to a square canvas, then compute the factor mapping
            // letterboxed coordinates back to the original image.
            cv::Mat letterbox_img = AlgUtils::letterbox(image);
            // Fix: both operands were integers, so the division truncated
            // (e.g. 1080/640 -> 1) and boxes were restored at the wrong scale;
            // force floating-point division.
            float restore_scale =
                static_cast<float>(letterbox_img.size[0]) / static_cast<float>(mInputWidth);

            // Preprocess: scale pixels by 1/255, resize to model input, swap BGR->RGB.
            // NOTE(review): mMean/mStd/mPreprocessScale are configured but not
            // applied here — confirm the model expects plain 1/255 scaling.
            cv::Mat blob = cv::dnn::blobFromImage(letterbox_img, 1 / 255.0,
                cv::Size(mInputWidth, mInputHeight), cv::Scalar(0, 0, 0), true, false);

            // Total number of input floats (int64_t avoids intermediate overflow).
            int64_t tpixels = static_cast<int64_t>(mInputHeight) *
                              static_cast<int64_t>(mInputWidth) *
                              static_cast<int64_t>(3);

            // Fix: the old guard compared an int64_t against
            // numeric_limits<size_t>::max(), which can never trigger on 64-bit
            // targets (and mixes signedness); a non-positive product is the
            // actual invalid case.
            if (tpixels <= 0) {
                LOGE("Invalid input size: %dx%d", mInputHeight, mInputWidth);
                return false;
            }

            // NCHW input shape.
            std::array<int64_t, 4> input_shape_info{ 1, 3, mInputHeight, mInputWidth };

            // Wrap the blob in an ORT tensor (no copy; blob must outlive Run()).
            auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
            Ort::Value input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info,
                blob.ptr<float>(), tpixels, input_shape_info.data(), input_shape_info.size());

            // Single input / single output node.
            const std::array<const char*, 1> inputNames = { mInputNodeNames[0].c_str() };
            const std::array<const char*, 1> outNames = { mOutputNodeNames[0].c_str() };

            // Execute inference.
            std::vector<Ort::Value> ort_outputs = mSession.Run(Ort::RunOptions{ nullptr },
                inputNames.data(), &input_tensor_, 1, outNames.data(), outNames.size());

            // Fix: take the mutable pointer directly instead of a const pointer
            // immediately stripped by a C-style cast.
            float* pdata = ort_outputs[0].GetTensorMutableData<float>();
            cv::Mat dout(mOutputDim, mOutputRow, CV_32F, pdata);
            cv::Mat det_output = dout.t(); // transpose to anchors x (4 coords + classes)

            // Candidate boxes above the score threshold.
            std::vector<cv::Rect> boxes;
            std::vector<int> classIds;
            std::vector<float> confidences;

            for (int i = 0; i < det_output.rows; i++)
            {
                // Class scores start after the 4 box coordinates.
                cv::Mat classes_scores = det_output.row(i).colRange(4, mOutputDim);
                cv::Point classIdPoint;
                double score;
                minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);

                if (score > mScoreThreshold)
                {
                    // Convert center/size box to top-left/size and restore it
                    // to the original image scale.
                    float cx = det_output.at<float>(i, 0);
                    float cy = det_output.at<float>(i, 1);
                    float ow = det_output.at<float>(i, 2);
                    float oh = det_output.at<float>(i, 3);

                    cv::Rect box;
                    box.x = static_cast<int>((cx - 0.5 * ow) * restore_scale);
                    box.y = static_cast<int>((cy - 0.5 * oh) * restore_scale);
                    box.width = static_cast<int>(ow * restore_scale);
                    box.height = static_cast<int>(oh * restore_scale);

                    boxes.push_back(box);
                    classIds.push_back(classIdPoint.x);
                    // Explicit narrowing double -> float.
                    confidences.push_back(static_cast<float>(score));
                }
            }

            // Non-maximum suppression.
            std::vector<int> indexes;
            cv::dnn::NMSBoxes(boxes, confidences, mScoreThreshold, mNmsThreshold, indexes);

            // Assemble the surviving detections.
            for (size_t i = 0; i < indexes.size(); i++)
            {
                int index = indexes[i];
                int class_id = classIds[index];
                float class_score = confidences[index];
                cv::Rect box = boxes[index];

                DetectObject detect;
                detect.x1 = box.x;
                detect.y1 = box.y;
                detect.x2 = box.x + box.width;
                detect.y2 = box.y + box.height;
                detect.class_id = class_id;
                // Robustness: guard against a model emitting more classes than
                // the configuration names (would otherwise index out of range).
                detect.class_name = (class_id >= 0 &&
                        static_cast<size_t>(class_id) < mAlgorithmConfig.className.size())
                    ? mAlgorithmConfig.className[class_id]
                    : "unknown";
                detect.class_score = class_score;

                detects.push_back(detect);
            }

            return true;
        }
        catch (const std::exception& e) {
            LOGE("Inference error: %s", e.what());
            return false;
        }
    }

    // AlgorithmOnnxRuntimeV8: thin wrapper that owns an OnnxRuntimeEngine and
    // forwards detection calls to it.
    // @param algorithmConfig: configuration forwarded to the base class and
    //                         to the engine
    AlgorithmOnnxRuntimeV8::AlgorithmOnnxRuntimeV8(const AlgorithmConfig& algorithmConfig)
        : Algorithm(algorithmConfig),
          mEngine(new OnnxRuntimeEngine(algorithmConfig))
    {
    }

    // Destructor: tears down the owned inference engine.
    AlgorithmOnnxRuntimeV8::~AlgorithmOnnxRuntimeV8()
    {
        LOGI("");
        // Release the engine and clear the pointer so any stale access
        // fails fast instead of touching freed memory.
        if (mEngine != nullptr) {
            delete mEngine;
            mEngine = nullptr;
        }
    }

    // Object detection entry point; serializes concurrent callers on mMtx.
    // @param image: input image
    // @param detects: output vector of detections
    // @return: true if inference succeeded
    bool AlgorithmOnnxRuntimeV8::objectDetect(cv::Mat& image, std::vector<DetectObject>& detects)
    {
        // Fix: RAII lock guarantees the mutex is released even if runInference
        // throws — the manual lock()/unlock() pair would have left it held.
        std::lock_guard<decltype(mMtx)> lock(mMtx);
        return mEngine->runInference(image, detects);
    }
}