//
// Created by yuan on 2025/8/1.
//

#include "detector_yolo.h"

#include <iostream>
#include <algorithm>
#include <cmath>
#include <string>
#include <numeric>
#include "otl.h"
#include "otl_ffmpeg.h"
#include "streamer.h"

#include "image_converto.h"
#include "utils.h"
#include <opencv2/dnn/dnn.hpp>

static constexpr int PRE_NMS_TOPK = 500; // number of highest scoring boxes to keep before NMS

//#define DEBUG_DISABLE_POSTPROCESS 1
//#define DEBUG_DISABLE_PREPROCESS 1

// Helper functions from yolov5_ref.cpp
// Clamp `val` into [low, high] in place.
// Assumes low <= high (not checked, matching the original contract).
void clamp(float &val, const float low, const float high) {
    val = std::max(low, std::min(val, high));
}

// Reorder the first `n` entries of `indices` so they reference values of `x`
// in descending order (highest score first).
//
// BUGFIX: `indices` was previously taken BY VALUE, so the sorted order was
// computed on a copy and discarded — callers (e.g. nonMaximumSuppression)
// never saw any effect. It is now an in/out reference. `x` is also taken by
// const reference to avoid copying the whole score vector.
void sort(int n, const std::vector<float> &x, std::vector<int> &indices) {
    std::stable_sort(indices.begin(), indices.begin() + n,
                     [&x](int a, int b) { return x[a] > x[b]; });
}

// Greedy non-maximum suppression.
//
// @param rects             candidate boxes (parallel to `score`).
// @param score             confidence score per box.
// @param overlap_threshold IoU above which the lower-scoring box is dropped.
// @param index_out         indices (into `rects`) of surviving boxes are
//                          APPENDED, in original input order (not cleared).
// @return always true.
//
// Fixes vs the original: both input vectors are now taken by const reference
// (they were copied on every call), and the score ordering is computed locally
// with std::stable_sort instead of the external sort() helper, which silently
// discarded its result (pass-by-value) and left the visit order unsorted.
bool nonMaximumSuppression(const std::vector<cv::Rect> &rects,
                          const std::vector<float> &score,
                          float overlap_threshold,
                          std::vector<int> &index_out) {
    const int num_boxes = static_cast<int>(rects.size());

    std::vector<float> box_area(num_boxes);
    std::vector<int> indices(num_boxes);
    std::vector<char> is_suppressed(num_boxes, 0);

    for (int i = 0; i < num_boxes; i++) {
        indices[i] = i;
        // +1 keeps this codebase's historical integer-pixel area convention.
        box_area[i] = static_cast<float>((rects[i].width + 1) * (rects[i].height + 1));
    }

    // Visit boxes from highest to lowest score.
    std::stable_sort(indices.begin(), indices.end(),
                     [&score](int a, int b) { return score[a] > score[b]; });

    for (int i = 0; i < num_boxes; i++) {
        if (is_suppressed[indices[i]]) continue;
        for (int j = i + 1; j < num_boxes; j++) {
            if (is_suppressed[indices[j]]) continue;
            const cv::Rect &a = rects[indices[i]];
            const cv::Rect &b = rects[indices[j]];
            // Intersection in the same +1 pixel convention as box_area.
            int x1max = std::max(a.x, b.x);
            int x2min = std::min(a.x + a.width, b.x + b.width);
            int y1max = std::max(a.y, b.y);
            int y2min = std::min(a.y + a.height, b.y + b.height);
            int overlap_w = x2min - x1max + 1;
            int overlap_h = y2min - y1max + 1;
            if (overlap_w > 0 && overlap_h > 0) {
                float inter = static_cast<float>(overlap_w) * overlap_h;
                float iou = inter /
                           (box_area[indices[j]] + box_area[indices[i]] - inter);
                if (iou > overlap_threshold) {
                    is_suppressed[indices[j]] = 1;
                }
            }
        }
    }

    for (int i = 0; i < num_boxes; i++) {
        if (!is_suppressed[i])
            index_out.push_back(i);
    }

    return true;
}


/**
 * Upload and preprocess decoded frames into the network input tensors.
 *
 * For each frame: allocates the host/device input and output buffers, then
 * converts the decoded BGR24 frame (BGR->RGB swap + 1/255 scaling) directly
 * into the device input tensor via topscv.
 *
 * @param frameInfos frames to preprocess; their net*Inputs/netHostOutputs
 *                   buffers are (re)allocated in place.
 * @return 0 on success, -1 on error (engine not initialized, missing frame,
 *         or conversion failure).
 *
 * Changes vs original: removed unused inputW/inputH locals, fixed the
 * signed/unsigned loop comparison, error message now goes to stderr like the
 * rest of this file, dead commented-out code dropped.
 */
int YoloDetector::preprocess(std::vector<FrameInfo>& frameInfos) {
    std::lock_guard<std::mutex> lock(m_mutex);
    if (!m_engine || m_inputShapes.empty()) {
        std::cerr << "Engine(dev=" << m_deviceId << ") not initialized" << std::endl;
        return -1;
    }

    for (auto& frameInfo : frameInfos)
    {
        const int batchSize = 1;
        // Allocate per-frame input (true) and output (false) buffers.
        allocHostMemory(frameInfo, true, m_inputShapes, 1, false);
        allocHostMemory(frameInfo, false, m_outputShapes, 1, false);

#ifndef DEBUG_DISABLE_PREPROCESS
        for (size_t shapeIdx = 0; shapeIdx < m_inputShapes.size(); ++shapeIdx)
        {
            for (int i = 0; i < batchSize; ++i) {
                if (!frameInfo.frame) {
                    std::cerr << "Invalid frame at index " << i << std::endl;
                    return -1;
                }

                // Write into the current batch slot of the device input buffer
                // (use the current buffer, not nextBuffer).
                auto *begin = static_cast<float *>((void *) (
                    (char *) frameInfo.netDeviceInputs[shapeIdx] + m_inputShapes[shapeIdx].mem_size * i));
                topscv::ConvertParam convertoParams;
                // The decoder is expected to hand us packed BGR24 frames.
                assert(frameInfo.frame->format == AV_PIX_FMT_BGR24);

                // Full-frame conversion: BGR->RGB swap plus 1/255 normalization.
                convertoParams.m_srcROI.x = 0;
                convertoParams.m_srcROI.y = 0;
                convertoParams.m_srcROI.width = frameInfo.frame->width;
                convertoParams.m_srcROI.height = frameInfo.frame->height;
                convertoParams.m_dstROI = convertoParams.m_srcROI;
                convertoParams.m_swapRGB = true;
                convertoParams.m_scaleEnabled = true;
                convertoParams.m_scaleAlpha = 1.0f / 255.0f;

                // Wrap the device pointer as a CHW tensor for topscv.
                topscv::TopsImageTensor tensor(m_deviceId, begin, {
                                                   m_inputShapes[shapeIdx].dims[1], m_inputShapes[shapeIdx].dims[2],
                                                   m_inputShapes[shapeIdx].dims[3]
                                               },
                                               m_inputShapes[shapeIdx].dtype);
                int ret;
                // m_buffer_type selects the L4 vs L3 convert path -- TODO
                // confirm exact semantics against the topscv documentation.
                if (frameInfo.streamer->m_config.m_buffer_type == 1) {
                    ret = topscv::image_convert_to_L4(frameInfo.frame, &convertoParams, &tensor);
                } else {
                    ret = topscv::image_convert_to_L3(frameInfo.frame, &convertoParams, &tensor);
                }
                if (ret != 0) {
                    std::cerr << "topscv::image_convert_to err=" << ret << std::endl;
                    return -1;
                }
            }
        }
#endif

    }
    return 0;
}


int YoloDetector::postprocess(std::vector<FrameInfo>& frameInfos) {

    for (auto& frameInfo : frameInfos)
    {
        syncOutputs(frameInfo);
#ifndef DEBUG_DISABLE_POSTPROCESS
        if (frameInfo.netHostOutputs.empty()) {
            std::cerr << "No output data available" << std::endl;
            return -1;
        }

        const int batchSize = 1;
        // Shape is [n, c, h, w]
        int inputW = m_inputShapes[0].dims[3];
        int inputH = m_inputShapes[0].dims[2];

        // 检查是否有足够的输出形状信息
        if (m_outputShapes.empty()) {
            std::cerr << "Output shapes not initialized" << std::endl;
            return -1;
        }

        // 检查输出索引是否有效
        int outputShapeIdx = 0; // yolov5-v6.2 has only one output shape
        if (outputShapeIdx >= m_outputShapes.size() || outputShapeIdx >= frameInfo.netHostOutputs.size()) {
            std::cerr << "Invalid output shape index" << std::endl;
            return -1;
        }

        for (int batchIdx = 0; batchIdx < batchSize; ++batchIdx) {
            // 边界检查，确保不越界
            if (batchIdx >= batchSize) {
                std::cerr << "Batch index " << batchIdx << " exceeds batch size " << batchSize << std::endl;
                continue;
            }

            // Clear previous detections
            frameInfo.detection.clear();

            // 计算单个样本输出所需的内存大小
            size_t singleOutputSize = m_outputShapes[outputShapeIdx].volume;

            // 检查内存越界
            size_t maxElements = m_outputShapes[outputShapeIdx].mem_size / sizeof(float);
            if (batchIdx * singleOutputSize + singleOutputSize > maxElements) {
                std::cerr << "Memory access out of bounds: batchIdx=" << batchIdx
                         << ", volume=" << singleOutputSize
                         << ", max=" << maxElements << std::endl;
                continue;
            }

            // 计算安全的内存偏移量，确保不会越界
            size_t safeOffset = std::min(batchIdx * singleOutputSize, maxElements - singleOutputSize);
            float* output1 = (float*)frameInfo.netHostOutputs[outputShapeIdx] + safeOffset;

            std::vector<cv::Rect> selected_boxes;
            std::vector<float> confidence;
            std::vector<int> class_id;

            // YOLOv5 output format: [1, 25200, 85] where 85 = 4(bbox) + 1(conf) + 80(classes)
            int num_detections = m_outputShapes[outputShapeIdx].dims[1]; // 25200
            int detection_size = m_outputShapes[outputShapeIdx].dims[2]; // 85
            int num_classes = detection_size - 5;

            for (int i = 0; i < num_detections; ++i) {
                float* detection = output1 + i * detection_size;

                // Extract bbox coordinates and confidence
                float center_x = detection[0];
                float center_y = detection[1];
                float width = detection[2];
                float height = detection[3];
                float objectness = detection[4];

                // Skip low confidence detections
                if (objectness < m_confThreshold) continue;

                // Find best class
                float max_class_score = 0.0f;
                int best_class_id = 0;
                for (int c = 0; c < num_classes; ++c) {
                    float class_score = detection[5 + c];
                    if (class_score > max_class_score) {
                        max_class_score = class_score;
                        best_class_id = c;
                    }
                }

                float final_score = objectness * max_class_score;
                if (final_score < m_confThreshold) continue;

                // Convert center format to corner format
                int x = static_cast<int>(center_x - width / 2);
                int y = static_cast<int>(center_y - height / 2);
                int w = static_cast<int>(width);
                int h = static_cast<int>(height);

                selected_boxes.push_back(cv::Rect(x, y, w, h));
                confidence.push_back(final_score);
                class_id.push_back(best_class_id);
            }

            // No object detected
            if (selected_boxes.size() == 0) {
                //std::cout << "no bbox over score threshold detected." << std::endl;
                continue;
            }

            // Pre-NMS TopK: keep only top scoring boxes to reduce NMS cost
            std::vector<int> order(selected_boxes.size());
            std::iota(order.begin(), order.end(), 0);
            std::stable_sort(order.begin(), order.end(), [&](int a, int b){ return confidence[a] > confidence[b]; });
            if (order.size() > static_cast<size_t>(PRE_NMS_TOPK)) {
                order.resize(PRE_NMS_TOPK);
            }

            std::vector<cv::Rect> boxes_topk; boxes_topk.reserve(order.size());
            std::vector<float> scores_topk; scores_topk.reserve(order.size());
            std::vector<int> class_topk; class_topk.reserve(order.size());
            for (int idx : order) {
                boxes_topk.push_back(selected_boxes[idx]);
                scores_topk.push_back(confidence[idx]);
                class_topk.push_back(class_id[idx]);
            }

            // Apply OpenCV NMS (class-agnostic); you can switch to per-class by grouping on class_topk
            std::vector<int> indexes;
            cv::dnn::NMSBoxes(boxes_topk, scores_topk, m_confThreshold, m_nmsThreshold, indexes /*, eta=1.0f, top_k=0*/);

            // Convert detections to Bbox format and scale coordinates
            for (int id : indexes) {
                auto result_box = boxes_topk[id];

                // Scale coordinates back to original image dimensions
                int originalWidth = frameInfo.width;
                int originalHeight = frameInfo.height;

                // Calculate scale factors (letterbox preprocessing)
                int maxLen = std::max(originalWidth, originalHeight);
                float scale = static_cast<float>(maxLen) / inputW;

                result_box.x = static_cast<int>(result_box.x * scale);
                result_box.y = static_cast<int>(result_box.y * scale);
                result_box.width = static_cast<int>(result_box.width * scale);
                result_box.height = static_cast<int>(result_box.height * scale);

                // Clamp to image boundaries
                result_box.x = std::max(0, std::min(result_box.x, originalWidth));
                result_box.y = std::max(0, std::min(result_box.y, originalHeight));
                result_box.width = std::max(0, std::min(result_box.width, originalWidth - result_box.x));
                result_box.height = std::max(0, std::min(result_box.height, originalHeight - result_box.y));

                enrigin::Bbox det_bbox;
                det_bbox.x1 = result_box.x;
                det_bbox.y1 = result_box.y;
                det_bbox.x2 = result_box.x + result_box.width;
                det_bbox.y2 = result_box.y + result_box.height;
                det_bbox.confidence = scores_topk[id];
                det_bbox.classId = class_topk[id];
                frameInfo.detection.push_back(det_bbox);

                //std::cout << "cls: " << det_bbox.classId << " conf: " << det_bbox.confidence
                //          << " (" << result_box.x << "," << result_box.y << ","
                //          << result_box.width << "," << result_box.height << ")" << std::endl;
            }

            //std::cout << "Frame " << batchIdx << ": Detected " << frameInfo.detection.size() << " objects" << std::endl;
             // if (frameInfo.detection.size() > 50) {
             //     tops_utils::memory_opencv_save(true, frameInfo.netDeviceInputs[0], frameInfo.netInputsSize[0],
             //                                    {3, 640, 640}, TopsInference::TIF_FP32,
             //                                    otl::fileNameAddTimeSuffix("preprocess_error.jpg"));
             // }
        }
#endif
    }

    // Continue with original postprocessing logic
    for (auto frameInfo : frameInfos) {
        if (m_pfnDetectFinish) {
            m_pfnDetectFinish(frameInfo);
        }

        if (m_nextInferPipe != nullptr) {
            m_nextInferPipe->push_frame(&frameInfo);
        } else {
            //Free input/output
            freeHostMemory(frameInfo, true);
            freeHostMemory(frameInfo, false);

            // Stop pipeline
            if (frameInfo.pkt) {
                av_packet_unref(frameInfo.pkt);
                av_packet_free(&frameInfo.pkt);
            }

            if (frameInfo.frame) {
                av_frame_unref(frameInfo.frame);
                av_frame_free(&frameInfo.frame);
            }
        }
    }

    return 0;
}
