//
// Created by yuan on 2025/7/31.
//

#include "detector_yolo.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <numeric>
#include <string>
#include "otl.h"
#include "otl_ffmpeg.h"
#include "streamer.h"
#include "image_converto.h"
#include <opencv2/dnn/dnn.hpp>

// Keep only this many of the highest-scoring boxes before running NMS.
static constexpr int PRE_NMS_TOPK = 500;

//#define DEBUG_DISABLE_POSTPROCESS 1
// Evaluate a TOPS runtime call exactly once and assert it returned topsSuccess.
// (void)ret silences the unused-variable warning in NDEBUG builds, where
// assert() expands to nothing.
#define TOPS_CHECK(func) {auto ret = func; assert(topsSuccess == ret); (void)ret;}

/// Prepare one batch of frames for inference: allocate host staging buffers
/// and convert each BGR24 AVFrame into the network's normalized CHW device
/// tensor via the topscv converter.
/// Returns 0 on success, -1 on a missing frame / conversion failure /
/// uninitialized engine.
int YoloDetector::preprocess(std::vector<FrameInfo>& frameInfos) {
    // Serialize buffer allocation and conversion per batch.
    std::lock_guard<std::mutex> lock(m_mutex);
    if (!m_engine || m_inputShapes.empty()) {
        std::cerr << "Engine(dev=" << m_deviceId << ") not initialized" << std::endl;
        return -1;
    }

    for (auto& frameInfo : frameInfos)
    {
        const int batchSize = 1; // pipeline currently feeds one frame per FrameInfo

        // Allocate host staging memory for network inputs and outputs.
        allocHostMemory(frameInfo, true, m_inputShapes, 1, false);
        allocHostMemory(frameInfo, false, m_outputShapes, 1, false);

        // Shape is [n, c, h, w]
        for (size_t shapeIdx = 0; shapeIdx < m_inputShapes.size(); ++shapeIdx)
        {
            for (int i = 0; i < batchSize; ++i)
            {
                if (!frameInfo.frame) {
                    std::cerr << "Invalid frame at index " << i << std::endl;
                    return -1;
                }
                // Destination: slot i of this input tensor in device memory.
                // Use the current buffer rather than nextBuffer.
                auto *begin = static_cast<float *>(static_cast<void *>(
                    static_cast<char *>(frameInfo.netDeviceInputs[shapeIdx]) +
                    m_inputShapes[shapeIdx].mem_size * i));

                topscv::ConvertParam convertoParams;
                assert(frameInfo.frame->format == AV_PIX_FMT_BGR24);

                // Full-frame ROI, BGR->RGB swap, and scale pixels to [0, 1].
                convertoParams.m_srcROI.x = 0;
                convertoParams.m_srcROI.y = 0;
                convertoParams.m_srcROI.width = frameInfo.frame->width;
                convertoParams.m_srcROI.height = frameInfo.frame->height;
                convertoParams.m_dstROI = convertoParams.m_srcROI;
                convertoParams.m_swapRGB = true;
                convertoParams.m_scaleEnabled = true;
                convertoParams.m_scaleAlpha = 1.0f / 255.0f;

                int device_id;
                topsGetDevice(&device_id);
                assert(device_id == m_deviceId);
                // Wrap the device slot as a CHW tensor view (dims[1..3] = c, h, w).
                topscv::TopsImageTensor tensor(m_deviceId, begin, {
                                                   m_inputShapes[shapeIdx].dims[1], m_inputShapes[shapeIdx].dims[2],
                                                   m_inputShapes[shapeIdx].dims[3]
                                               },
                                               m_inputShapes[shapeIdx].dtype);
                // Buffer type selects the conversion path (L4 vs L3 memory).
                int ret;
                if (frameInfo.streamer->m_config.m_buffer_type == 1) {
                    ret = topscv::image_convert_to_L4(frameInfo.frame, &convertoParams, &tensor);
                } else {
                    ret = topscv::image_convert_to_L3(frameInfo.frame, &convertoParams, &tensor);
                }
                if (ret != 0) {
                    std::cout << "topscv::image_convert_to err=" << ret << std::endl;
                    return -1;
                }
            }
        }

    }

    return 0;
}

/// Decode YOLOv8 raw output ([1, 4+numClasses, numAnchors], e.g. [1,84,8400])
/// into detections: per-anchor argmax over class scores, confidence filter,
/// top-K pre-selection, then OpenCV NMS. Finally forwards each frame to the
/// next pipeline stage or frees its resources.
/// Returns 0 on success, -1 if no host output data is available.
int YoloDetector::postprocess(std::vector<FrameInfo>& frameInfos) {

    for (auto& frameInfo : frameInfos)
    {
#ifndef DEBUG_DISABLE_POSTPROCESS
        // Copy inference results from device into host staging buffers.
        syncOutputs(frameInfo);
        if (frameInfo.netHostOutputs.empty()) {
            std::cerr << "No output data available" << std::endl;
            return -1;
        }

        // Input shape is [n, c, h, w]; the network input is square, so one
        // letterbox scale factor maps network coords back to the source frame.
        const int inputW = m_inputShapes[0].dims[3];

        for (int batchIdx = 0; batchIdx < 1; ++batchIdx)
        {
            // OutputData is column-major across batches:
            // [[batch1_output1, batch2_output1,...],[batch1_output2,...]...]
            const int outputShapeIdx = 0; // yolov8m has only one output shape
            float* output1 = (float*)frameInfo.netHostOutputs[outputShapeIdx]
                + m_outputShapes[outputShapeIdx].volume * batchIdx;

            const int output_h = m_outputShapes[outputShapeIdx].dims[1]; // 4 + numClasses
            const int output_w = m_outputShapes[outputShapeIdx].dims[2]; // anchor count
            const int maxLen = std::max(frameInfo.width, frameInfo.height);
            const float x_factor = maxLen / static_cast<float>(inputW);
            const float y_factor = x_factor;

            cv::Mat dout(output_h, output_w, CV_32F, output1);
            cv::Mat det_output = dout.t(); // anchors x (4+numClasses), e.g. 8400x84

            std::vector<cv::Rect> boxes;
            std::vector<int> classIds;
            std::vector<float> confidences;

            for (int i = 0; i < det_output.rows; i++)
            {
                // Class scores follow the 4 box coords. Use output_h rather
                // than a hard-coded 84 so other class counts work unchanged.
                cv::Mat classes_scores = det_output.row(i).colRange(4, output_h);
                cv::Point classIdPoint;
                double score;
                cv::minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);

                // Confidence is in [0, 1].
                if (score > m_confThreshold)
                {
                    // Box is (center_x, center_y, width, height) in network
                    // coords; convert to top-left corner in frame coords.
                    float cx = det_output.at<float>(i, 0);
                    float cy = det_output.at<float>(i, 1);
                    float ow = det_output.at<float>(i, 2);
                    float oh = det_output.at<float>(i, 3);
                    cv::Rect box;
                    box.x = static_cast<int>((cx - 0.5 * ow) * x_factor);
                    box.y = static_cast<int>((cy - 0.5 * oh) * y_factor);
                    box.width = static_cast<int>(ow * x_factor);
                    box.height = static_cast<int>(oh * y_factor);

                    boxes.push_back(box);
                    classIds.push_back(classIdPoint.x);
                    confidences.push_back(static_cast<float>(score));
                }
            }

            // Keep only the PRE_NMS_TOPK highest-scoring candidates to bound
            // the cost of NMS.
            std::vector<int> order(boxes.size());
            std::iota(order.begin(), order.end(), 0);
            std::stable_sort(order.begin(), order.end(),
                             [&](int a, int b) { return confidences[a] > confidences[b]; });
            if (order.size() > static_cast<size_t>(PRE_NMS_TOPK)) {
                order.resize(PRE_NMS_TOPK);
            }

            std::vector<cv::Rect> boxes_topk; boxes_topk.reserve(order.size());
            std::vector<float> scores_topk; scores_topk.reserve(order.size());
            std::vector<int> classIds_topk; classIds_topk.reserve(order.size());
            for (int idx : order) {
                boxes_topk.push_back(boxes[idx]);
                scores_topk.push_back(confidences[idx]);
                classIds_topk.push_back(classIds[idx]);
            }

            // NMS (score_threshold, nms_threshold)
            std::vector<int> indexes;
            cv::dnn::NMSBoxes(boxes_topk, scores_topk, m_confThreshold, m_nmsThreshold, indexes);

            for (size_t i = 0; i < indexes.size(); i++)
            {
                const int index = indexes[i];
                enrigin::Bbox bbox;
                bbox.x1 = boxes_topk[index].x;
                bbox.y1 = boxes_topk[index].y;
                bbox.x2 = bbox.x1 + boxes_topk[index].width;
                bbox.y2 = bbox.y1 + boxes_topk[index].height;
                bbox.classId = classIds_topk[index];
                bbox.confidence = scores_topk[index];
                frameInfo.detection.push_back(bbox);
            }
        }
#endif
    }

    // Hand each frame to the next stage, or tear it down if this is the last
    // stage. Iterating BY REFERENCE is essential here: copying FrameInfo would
    // make push_frame() receive the address of a loop-local copy (dangling
    // after the iteration), and av_packet_free/av_frame_free would null the
    // copy's pointers while the originals in frameInfos keep dangling ones.
    for (auto& frameInfo : frameInfos) {
        if (m_pfnDetectFinish) {
            m_pfnDetectFinish(frameInfo);
        }

        if (m_nextInferPipe != nullptr) {
            m_nextInferPipe->push_frame(&frameInfo);
        } else {
            // Free input/output staging buffers.
            freeHostMemory(frameInfo, true);
            freeHostMemory(frameInfo, false);

            // End of pipeline: release packet and frame.
            av_packet_unref(frameInfo.pkt);
            av_packet_free(&frameInfo.pkt);

            av_frame_unref(frameInfo.frame);
            av_frame_free(&frameInfo.frame);
        }
    }

    return 0;
}
