﻿#include "pch.h"
#include "DLKit.h"
#include "ObjectDetectionInfer.h"

#include <cstdio>
#include <iostream>
#include <ostream>

using namespace std;
using namespace ov;
using namespace cv;
using namespace ov::preprocess;

// Default construction: all members rely on their declaration-site defaults,
// so there is nothing to do here.
ObjectDetectionInfer::ObjectDetectionInfer() = default;

ObjectDetectionInfer::~ObjectDetectionInfer()
{
    // Tear down the OpenVINO objects in reverse order of creation in
    // LoadModel(): the infer request was created from the compiled model,
    // which was produced by the core — so release request -> model -> core.
    // RELEASE is a project macro (defined elsewhere); presumably it deletes
    // the heap object behind the handle and clears it — confirm in its
    // definition before changing this ordering.
    RELEASE(infer_request);
    RELEASE(compiled_model);
    RELEASE(core);
}

// Override the network input resolution used when preparing frames for
// inference. Normally LoadModel() derives this from the model itself.
void ObjectDetectionInfer::setInferSize(cv::Size sz)
{
    this->InferSize = sz;
}

// Current network input resolution (width x height).
cv::Size ObjectDetectionInfer::getInferSize()
{
    const cv::Size current = this->InferSize;
    return current;
}

void ObjectDetectionInfer::setThreshold(double confidence, double score, double NMSthreshold)
{
    modelConfidenseThreshold = confidence;
    modelScoreThreshold = score;
    modelNMSThreshold = NMSthreshold;
}

// Implementation-defined RTTI name of this class (static type, deliberately
// not typeid(*this), so derived classes report the same base name here).
const char *ObjectDetectionInfer::getRuntimeType()
{
    // typeid(...).name() yields a stable pointer, so caching it is safe.
    static const char *const kTypeName = typeid(ObjectDetectionInfer).name();
    return kTypeName;
}

std::vector<std::string> ObjectDetectionInfer::getClassesName() { return classes; }

int ObjectDetectionInfer::LoadState()
{
    return loadState;
}

/**
 * Load an OpenVINO detection model plus its class-label file and build an
 * infer request ready for InferMulti().
 *
 * @param fileName   Path to the model (e.g. .xml/.onnx) readable by ov::Core.
 * @param labelName  Path to a text file with one class name per line.
 * @param isGPU      Try the "GPU" device first, falling back to "CPU".
 * @param batch_size Requested maximum batch size (clamped to >= 1, and to the
 *                   model's own batch dimension for static models).
 * @return "OK" on success, otherwise the exception message. loadState flags
 *         are updated either way.
 */
std::string ObjectDetectionInfer::LoadModel(const char *fileName, const char *labelName, bool isGPU, int batch_size)
{
    std::string SuccessState = "OK";
    std::filesystem::path currentPath = std::filesystem::current_path();
    std::cout << "Current software path: " << currentPath << " model:" << fileName << " label:" << labelName << " ";
    std::cout << ov::get_openvino_version().description << ":" << ov::get_openvino_version().buildNumber << std::endl;

    // Read label file (one class name per line). Missing labels are not fatal:
    // inference fills in "Unknown label N" placeholders as needed.
    std::ifstream file(labelName);
    if (file.is_open())
    {
        std::string line;
        while (std::getline(file, line))
        {
            classes.push_back(line);
        }
        file.close();
        loadState &= ~LOAD_TEXT_FILE_NOT_FOUND;
    }
    else
    {
        loadState |= LOAD_TEXT_FILE_NOT_FOUND;
    }

    // Heap-allocated so the opaque HandleRT members can own them across calls;
    // released in the destructor (or below on failure).
    ov::Core *_core = new ov::Core();
    ov::CompiledModel *_compiled_model = new ov::CompiledModel();
    ov::InferRequest *_infer_request = new ov::InferRequest();

    // Optimized memory configuration
    ov::AnyMap cpu_config = {
        ov::inference_num_threads(static_cast<int>(std::thread::hardware_concurrency())),
        ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
        ov::hint::num_requests(2),
        ov::enable_profiling(false)};

    ov::AnyMap gpu_config = {
        ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
        ov::hint::num_requests(2),
        ov::enable_profiling(false),
        ov::cache_dir("cache")};

    try
    {
        if (batch_size < 1)
        {
            batch_size = 1;
        }
        std::shared_ptr<ov::Model> model = _core->read_model(fileName);
        auto original_shape = model->input().get_partial_shape();
        max_batch_size = batch_size;

        // Detect if model is dynamic (any of batch/height/width not fixed).
        isDynamic = false;
        if (original_shape.rank().is_static() && original_shape.rank().get_length() == 4)
        {
            // Check which dimensions are dynamic
            if (original_shape[0].is_dynamic() || // batch dimension
                original_shape[2].is_dynamic() || // height dimension
                original_shape[3].is_dynamic())
            { // width dimension
                isDynamic = true;
                std::cout << "Detected DYNAMIC model" << std::endl;
            }
            else
            {
                if (original_shape[0].is_static())
                {
                    size_t model_batch_size = original_shape[0].get_length();

                    // A static model cannot accept batches larger than it was
                    // exported with — clamp the user's request.
                    if (max_batch_size > model_batch_size)
                    {
                        std::cout << "Warning: Requested max_batch_size=" << max_batch_size
                                  << " exceeds static model batch size=" << model_batch_size
                                  << ", adjusting to " << model_batch_size << std::endl;
                        max_batch_size = model_batch_size;
                    }

                    std::cout << "Detected STATIC model with batch=" << model_batch_size << std::endl;
                }
                else
                {
                    std::cout << "Detected STATIC model with unknown batch dimension" << std::endl;
                }
            }
        }

        // Compute the shape the model will be reshaped to before compilation.
        ov::PartialShape dynamic_shape;
        if (original_shape.rank().is_static() && original_shape.rank().get_length() == 4)
        {
            if (isDynamic)
            {
                // Bound any open spatial dimensions to a sane range so the
                // plugin can pre-allocate; keep fixed dims as-is.
                ov::Dimension height_dim = original_shape[2].is_dynamic() ? ov::Dimension(320, 1280) : original_shape[2];
                ov::Dimension width_dim = original_shape[3].is_dynamic() ? ov::Dimension(320, 1280) : original_shape[3];

                dynamic_shape = {
                    ov::Dimension(1, max_batch_size), // batch range
                    original_shape[1],                // channels
                    height_dim,                       // height range
                    width_dim                         // width range
                };
                std::cout << "Setting dynamic shape for flexible inference: " << dynamic_shape << std::endl;
            }
            else
            {
                // Static model: fixed shape but can set batch size
                ov::Dimension height_dim = original_shape[2];
                ov::Dimension width_dim = original_shape[3];

                // Reshape if user specified batch size differs from model
                if (max_batch_size != original_shape[0].get_length())
                {
                    dynamic_shape = {
                        ov::Dimension(max_batch_size), // fixed batch size
                        original_shape[1],             // channels
                        height_dim,                    // height
                        width_dim                      // width
                    };
                    std::cout << "Reshaping static model to batch size: " << dynamic_shape << std::endl;
                }
                else
                {
                    // Keep original shape
                    dynamic_shape = original_shape;
                    std::cout << "Using original static shape with batch size: " << max_batch_size << std::endl;
                }
            }

            std::cout << "start setting model shape: " << dynamic_shape << std::endl;
            model->reshape({{model->input().get_any_name(), dynamic_shape}});
            std::cout << "Final model shape: " << dynamic_shape << std::endl;
        }
        else
        {
            // Handle non-4D models: only touch the batch dimension.
            isDynamic = original_shape.is_dynamic();
            dynamic_shape = original_shape;
            if (dynamic_shape.rank().is_static() && dynamic_shape.rank().get_length() >= 1)
            {
                if (isDynamic)
                {
                    dynamic_shape[0] = ov::Dimension(1, max_batch_size); // dynamic batch range
                }
                else
                {
                    dynamic_shape[0] = ov::Dimension(max_batch_size); // fixed batch size
                }
            }
            model->reshape({{model->input().get_any_name(), dynamic_shape}});
            std::cout << "Set optimized batch dimension: " << dynamic_shape << std::endl;
        }

        // Set different performance modes based on dynamic/static
        if (isDynamic)
        {
            // Dynamic models work better with LATENCY mode
            cpu_config[ov::hint::performance_mode.name()] = ov::hint::PerformanceMode::LATENCY;
            gpu_config[ov::hint::performance_mode.name()] = ov::hint::PerformanceMode::LATENCY;
            std::cout << "Using LATENCY mode for dynamic model" << std::endl;
        }
        else
        {
            // Static models can use THROUGHPUT for better performance
            cpu_config[ov::hint::performance_mode.name()] = ov::hint::PerformanceMode::THROUGHPUT;
            gpu_config[ov::hint::performance_mode.name()] = ov::hint::PerformanceMode::THROUGHPUT;
            std::cout << "Using THROUGHPUT mode for static model" << std::endl;
        }

        if (isGPU)
        {
            try
            {
                std::cout << "Compiling for GPU with optimized settings..." << std::endl;
                *_compiled_model = _core->compile_model(model, "GPU", gpu_config);
                std::cout << "GPU compilation successful" << std::endl;
            }
            catch (const ov::Exception &Error)
            {
                // GPU unavailable or unsupported op — degrade gracefully.
                std::cout << "GPU compilation failed: " << Error.what() << std::endl;
                std::cout << "Falling back to CPU with optimized settings..." << std::endl;
                *_compiled_model = _core->compile_model(model, "CPU", cpu_config);
            }
        }
        else
        {
            std::cout << "Compiling for CPU with optimized settings..." << std::endl;
            *_compiled_model = _core->compile_model(model, "CPU", cpu_config);
        }

        *_infer_request = _compiled_model->create_infer_request();

        // Hand ownership of the three objects to the opaque member handles.
        core = reinterpret_cast<HandleRT>(_core);
        compiled_model = reinterpret_cast<HandleRT>(_compiled_model);
        infer_request = reinterpret_cast<HandleRT>(_infer_request);
        loadState &= ~LOAD_INFER_FILE_NOT_FOUND;

        // Get input/output information
        auto input_info = _compiled_model->input();
        auto output_info = _compiled_model->output();

        auto input_partial_shape = input_info.get_partial_shape();
        auto output_partial_shape = output_info.get_partial_shape();

        // Inference size: first try from model partial shape, then from compiled model
        int h = 640, w = 640; // default fallback values
        try
        {
            // First try model's partial shape
            auto pshape = model->input().get_partial_shape();
            if (pshape.rank().is_static() && pshape.size() >= 4)
            {
                if (!pshape[2].is_dynamic())
                    h = static_cast<int>(pshape[2].get_length());
                if (!pshape[3].is_dynamic())
                    w = static_cast<int>(pshape[3].get_length());
            }
            else
            {
                // Then try compiled model's actual shape
                auto cs = _compiled_model->input().get_shape();
                if (cs.size() >= 4)
                {
                    h = static_cast<int>(cs[2]);
                    w = static_cast<int>(cs[3]);
                }
            }
        }
        catch (...)
        {
            // Keep default fallback values
        }
        InferSize = cv::Size(w, h);

        // Output model characteristics summary
        std::cout << "=== Model Characteristics ===" << std::endl;
        std::cout << "Model Type: " << (isDynamic ? "DYNAMIC" : "STATIC") << std::endl;
        std::cout << "Max Batch Size: " << max_batch_size << std::endl;
        std::cout << "Input partial shape: " << input_partial_shape << std::endl;
        std::cout << "Output partial shape: " << output_partial_shape << std::endl;
        std::cout << "Inference size: " << InferSize << std::endl;
        std::cout << "Performance Mode: " << (isDynamic ? "LATENCY" : "THROUGHPUT") << std::endl;

        // Memory usage estimation
        size_t single_image_memory = 1 * 3 * w * h * sizeof(float);     // single image memory
        size_t max_batch_memory = max_batch_size * single_image_memory; // max batch memory
        std::cout << "Estimated memory usage - Single: " << single_image_memory / (1024 * 1024)
                  << "MB, Max batch: " << max_batch_memory / (1024 * 1024) << "MB" << std::endl;
        std::cout << "=================================" << std::endl;

        return SuccessState;
    }
    catch (const ov::Exception &e)
    {
        RELEASE(_core);
        RELEASE(_compiled_model);
        RELEASE(_infer_request);
        loadState |= LOAD_INFER_FILE_NOT_FOUND;
        std::cout << e.what() << std::endl;
        return e.what();
    }
    catch (const std::exception &e)
    {
        // Fix: previously only ov::Exception was handled, so any other failure
        // (std::bad_alloc, OpenCV/STL errors thrown inside the try) leaked the
        // three heap objects above and escaped LoadModel. Mirror the cleanup.
        // Note: ov::Exception derives from std::runtime_error, so this catch
        // must come second.
        RELEASE(_core);
        RELEASE(_compiled_model);
        RELEASE(_infer_request);
        loadState |= LOAD_INFER_FILE_NOT_FOUND;
        std::cout << e.what() << std::endl;
        return e.what();
    }
}

// Single-image convenience wrapper around InferMulti().
// The ROI arguments (r1, c1, r2, c2) are accepted for interface
// compatibility but are not used by the batched implementation.
std::vector<Detection> ObjectDetectionInfer::Infer(const cv::Mat &input, double r1, double c1, double r2, double c2)
{
    std::vector<cv::Mat> batch{input};
    const auto per_image = InferMulti(batch);
    if (per_image.empty())
    {
        return {};
    }
    return per_image.front();
}

/**
 * Run YOLO-style detection on a list of images, splitting the work into
 * chunks of at most max_batch_size images.
 *
 * Per image: convert to 3-channel BGR if needed, letterbox to InferSize
 * (DLKit::formatToSquare, which also returns scale/offset "paddings"),
 * batch into an NCHW float blob, infer, then decode [batch, 4+classes, rows]
 * outputs, map boxes back to original coordinates and apply NMS.
 *
 * @param inputs images to process (not modified).
 * @return one Detection vector per input image, in input order; a failed
 *         batch contributes empty vectors for its images.
 */
std::vector<std::vector<Detection>> ObjectDetectionInfer::InferMulti(std::vector<cv::Mat> &inputs)
{
    if (inputs.empty())
        return {};

    std::vector<std::vector<Detection>> all_detections;

    if (inputs.size() > static_cast<size_t>(max_batch_size))
    {
        std::cout << "Warning: Input size " << inputs.size()
                  << " exceeds model max batch size " << max_batch_size
                  << ", will process in batches" << std::endl;
    }

    for (size_t start_idx = 0; start_idx < inputs.size(); start_idx += max_batch_size)
    {
        size_t end_idx = std::min(start_idx + max_batch_size, inputs.size());
        size_t current_batch_size = end_idx - start_idx;

        std::vector<cv::Mat> batch_inputs(inputs.begin() + start_idx, inputs.begin() + end_idx);

        try
        {
            std::vector<cv::Mat> processed_imgs;
            std::vector<std::vector<double>> paddings_list;
            std::vector<int> new_shape = {InferSize.width, InferSize.height};

            for (const auto &input : batch_inputs)
            {
                // Normalize channel count to 3 (model expects color input).
                cv::Mat processed_input = input;
                if (processed_input.channels() != 3)
                {
                    if (processed_input.channels() == 1)
                    {
                        cv::cvtColor(processed_input, processed_input, cv::COLOR_GRAY2BGR);
                    }
                    else if (processed_input.channels() == 4)
                    {
                        cv::cvtColor(processed_input, processed_input, cv::COLOR_BGRA2BGR);
                    }
                }

                // paddings = {scale, y-offset, x-offset} used later to map
                // boxes back to the original image (see decode below).
                std::vector<double> paddings(3);
                auto resized_img = DLKit::formatToSquare(processed_input, paddings, new_shape);
                processed_imgs.push_back(resized_img);
                paddings_list.push_back(paddings);
            }

            // NCHW float blob, scaled to [0,1], BGR->RGB (swapRB=true).
            cv::Mat batch_blob = cv::dnn::blobFromImages(processed_imgs, 1.0 / 255.0, InferSize,
                                                         cv::Scalar(0, 0, 0), true, false);

            ov::InferRequest *_infer_request = reinterpret_cast<ov::InferRequest *>(infer_request);
            ov::CompiledModel *_compiled_model = reinterpret_cast<ov::CompiledModel *>(compiled_model);

            auto input_port = _compiled_model->input();
            ov::Tensor input_tensor;

            // Keep-alive storage for the blob built in the static-model
            // padding path below. BUGFIX: ov::Tensor constructed from a host
            // pointer does NOT copy the data, so the backing cv::Mat must
            // outlive infer(); previously it was scoped inside the inner `if`
            // and was destroyed before infer() ran (use-after-free).
            cv::Mat padded_blob;

            if (isDynamic)
            {
                // Dynamic model: tensor shape follows the actual batch.
                ov::Shape input_shape = {
                    current_batch_size,
                    3,
                    static_cast<size_t>(InferSize.height),
                    static_cast<size_t>(InferSize.width)};

                input_tensor = ov::Tensor(input_port.get_element_type(), input_shape, batch_blob.ptr(0));
            }
            else
            {
                auto model_input_shape = input_port.get_shape();

                if (current_batch_size != model_input_shape[0])
                {
                    if (current_batch_size < model_input_shape[0])
                    {
                        // Static model expects a full batch: pad the tail with
                        // zero images; their outputs are ignored when decoding.
                        int full_batch_size = static_cast<int>(model_input_shape[0]);
                        int height = static_cast<int>(model_input_shape[2]);
                        int width = static_cast<int>(model_input_shape[3]);

                        std::vector<cv::Mat> padded_images(full_batch_size);

                        for (size_t i = 0; i < current_batch_size; ++i)
                        {
                            padded_images[i] = processed_imgs[i];
                        }

                        for (int i = static_cast<int>(current_batch_size); i < full_batch_size; ++i)
                        {
                            padded_images[i] = cv::Mat::zeros(height, width, CV_8UC3);
                        }

                        padded_blob = cv::dnn::blobFromImages(padded_images, 1.0 / 255.0,
                                                              cv::Size(width, height),
                                                              cv::Scalar(0, 0, 0), true, false);

                        input_tensor = ov::Tensor(input_port.get_element_type(), model_input_shape, padded_blob.ptr(0));
                    }
                    else
                    {
                        throw std::runtime_error("Current batch size " +
                                                 std::to_string(current_batch_size) +
                                                 " exceeds static model batch size " +
                                                 std::to_string(model_input_shape[0]));
                    }
                }
                else
                {
                    input_tensor = ov::Tensor(input_port.get_element_type(), model_input_shape, batch_blob.ptr(0));
                }
            }

            _infer_request->set_input_tensor(input_tensor);
            _infer_request->infer();

            auto output = _infer_request->get_output_tensor(0);
            auto output_shape = output.get_shape();

            if (output_shape.size() != 3 && output_shape.size() != 4)
            {
                throw std::runtime_error("Unexpected output shape dimension: " +
                                         std::to_string(output_shape.size()));
            }

            int output_batch_size, dimensions, rows;

            if (output_shape.size() == 3)
            {
                // standard yolo: [batch, 84, 8400]
                output_batch_size = static_cast<int>(output_shape[0]);
                dimensions = static_cast<int>(output_shape[1]);
                rows = static_cast<int>(output_shape[2]);
            }
            else
            {
                // special yolo: [batch, 84, rows, cols] - flatten to 3D
                output_batch_size = static_cast<int>(output_shape[0]);
                dimensions = static_cast<int>(output_shape[1]);
                rows = static_cast<int>(output_shape[2] * output_shape[3]);
            }

            // For static models the output batch may include padding slots;
            // the decode loop below also caps at current_batch_size.
            int actual_output_batch_size = isDynamic ? static_cast<int>(current_batch_size) : output_batch_size;

            if (!isDynamic && output_batch_size != static_cast<int>(current_batch_size))
            {
                std::cout << "Note: Static model output batch size (" << output_batch_size
                          << ") doesn't match current batch size (" << current_batch_size << ")" << std::endl;
            }

            float *data = output.data<float>();

            float score_threshold = modelScoreThreshold;
            float nms_threshold = modelNMSThreshold;

            for (int batch_idx = 0; batch_idx < actual_output_batch_size &&
                                    batch_idx < static_cast<int>(current_batch_size);
                 ++batch_idx)
            {
                int batch_offset = batch_idx * dimensions * rows;
                // View over this image's slice, transposed to [rows, 4+classes].
                cv::Mat output_buffer(dimensions, rows, CV_32F, data + batch_offset);
                cv::transpose(output_buffer, output_buffer);

                std::vector<Detection> detections;
                std::vector<int> class_ids;
                std::vector<float> class_scores;
                std::vector<cv::Rect> boxes;

                const std::vector<double> &paddings = paddings_list[batch_idx];

                // Make sure every class index decodes to some name.
                int NumClasses = dimensions - 4;
                while (classes.size() < static_cast<size_t>(NumClasses))
                {
                    std::string Name = "Unknown label " + std::to_string(classes.size());
                    classes.push_back(Name);
                }

                for (int i = 0; i < output_buffer.rows; i++)
                {
                    float *row = output_buffer.ptr<float>(i);

                    // Box is center-x, center-y, width, height in letterboxed coords.
                    float cx = row[0];
                    float cy = row[1];
                    float w = row[2];
                    float h = row[3];

                    cv::Mat classes_scores = output_buffer.row(i).colRange(4, dimensions);
                    cv::Point class_id;
                    double maxClassScore;
                    cv::minMaxLoc(classes_scores, 0, &maxClassScore, 0, &class_id);

                    if (maxClassScore > modelConfidenseThreshold)
                    {
                        // Undo letterbox: subtract offsets, divide by scale.
                        int left = int((cx - 0.5 * w - paddings[2]) / paddings[0]);
                        int top = int((cy - 0.5 * h - paddings[1]) / paddings[0]);
                        int width = int(w / paddings[0]);
                        int height = int(h / paddings[0]);

                        boxes.push_back(cv::Rect(left, top, width, height));
                        class_scores.push_back(static_cast<float>(maxClassScore));
                        class_ids.push_back(class_id.x);
                    }
                }

                std::vector<int> indices;
                cv::dnn::NMSBoxes(boxes, class_scores, score_threshold, nms_threshold, indices);

                for (size_t i = 0; i < indices.size(); i++)
                {
                    int idx = indices[i];
                    Detection det;
                    det.class_id = class_ids[idx];
                    det.confidence = class_scores[idx];
                    det.box = boxes[idx];
                    det.className = classes[det.class_id];
                    det.color = cv::Scalar(0, 255, 0);

                    detections.push_back(det);
                }

                all_detections.push_back(detections);
            }
        }
        catch (const std::exception &e)
        {
            // Keep output aligned with input: failed batch -> empty results.
            std::cerr << "Batch inference error at batch " << (start_idx / max_batch_size)
                      << ": " << e.what() << std::endl;
            for (size_t i = 0; i < batch_inputs.size(); i++)
            {
                all_detections.push_back({});
            }
        }
    }

    return all_detections;
}

/**
 * Save an image and its detections in YOLO training layout:
 *   <path>/images/<fileName>            - the image
 *   <path>/labels/<fileName stem>.txt   - "class cx cy w h" (normalized) lines
 *
 * @param input      image to save (must be non-empty; not modified).
 * @param detections boxes to serialize; if empty, only the image is written
 *                   (NOTE(review): negatives therefore get no label file —
 *                   confirm that is intended for the training pipeline).
 * @param fileName   image file name including extension.
 * @param path       dataset root; trailing slash/backslash tolerated.
 */
void ObjectDetectionInfer::saveLabel(const cv::Mat &input, std::vector<Detection> detections, std::string fileName, std::string path)
{
    if (input.empty())
    {
        return;
    }
    if (!path.empty() && (path.back() == '/' || path.back() == '\\'))
    {
        path.pop_back();
    }

    std::string imagePath = path + "/images";
    std::string labelPath = path + "/labels";
    std::string imageName = imagePath + "/" + fileName;

    // Label file = image name with its extension swapped for ".txt".
    size_t lastDot = fileName.find_last_of('.');
    std::string labelFileName = (lastDot != std::string::npos) ? fileName.substr(0, lastDot) + ".txt" : fileName + ".txt";
    std::string labelName = labelPath + "/" + labelFileName;

    try
    {
        std::filesystem::create_directories(imagePath);
        std::filesystem::create_directories(labelPath);
    }
    catch (const std::filesystem::filesystem_error &e)
    {
        std::cerr << "Error creating directories: " << e.what() << std::endl;
        return;
    }

    // imwrite does not modify its input, so no clone is needed; also check
    // the result so a failed write doesn't silently produce an orphan label.
    if (!cv::imwrite(imageName, input))
    {
        std::cerr << "Error writing image file: " << imageName << std::endl;
        return;
    }

    if (detections.size() < 1)
    {
        return;
    }
    std::ofstream labelFile(labelName);
    if (!labelFile.is_open())
    {
        std::cerr << "Error opening label file: " << labelName << std::endl;
        return;
    }

    for (auto &d : detections)
    {
        char buffer[256];
        int class_id = d.class_id;
        DLKit::xydata RectData, cxywh;
        RectData.x = d.box.x;
        RectData.y = d.box.y;
        RectData.w = d.box.width;
        RectData.h = d.box.height;
        // Convert pixel x/y/w/h to normalized center-x/center-y/w/h.
        DLKit::xywh2cxywh(RectData, cxywh, input.cols, input.rows);
        // std::snprintf instead of MSVC-only sprintf_s: same output, portable,
        // and explicitly bounded.
        std::snprintf(buffer, sizeof(buffer), "%d %.6f %.6f %.6f %.6f", class_id, cxywh.x, cxywh.y, cxywh.w, cxywh.h);

        labelFile << buffer << std::endl;
    }

    labelFile.close();
}















// NOTE: A large '#if 0' block of legacy LoadModel()/Infer() implementations
// (pre-batching, single-image pipeline) previously lived here. It was dead
// code — preprocessed out of every build — and duplicated the live
// implementations above, so it has been removed. Recover it from version
// control history if it is ever needed for reference.