// YOLO5_RDK.hpp
#ifndef YOLOS_EDGEPLATFORM_YOLO5_RDK_HPP
#define YOLOS_EDGEPLATFORM_YOLO5_RDK_HPP

/**
 * @file YOLO5_RDK.hpp
 * @brief YOLOv5 Detector for D-Robotics RDKx5 Platform (BPU Accelerated)
 *
 * This header provides a header-only implementation of YOLOv5 object detection
 * optimized for the D-Robotics RDKx5 BPU (Brain Processing Unit).
 *
 * Key Features:
 * - Single header file design for easy integration
 * - BPU hardware acceleration via libDNN API
 * - Anchor-based detection with 3 detection heads
 * - Supports NV12 (YUV420SP) input format
 * - Configurable preprocessing (Resize/LetterBox)
 *
 * Model Requirements:
 * - Input: NV12, (1, 3, H, W), YUV420SP format
 * - Output: 3 tensors for 3 detection heads (small/medium/large objects)
 *   - Each tensor: (1, H/stride, W/stride, 3 × (5 + CLASSES)), float32, NONE quant
 *   - Format: [x, y, w, h, objectness, class_scores...]
 *
 * Usage Example:
 * @code
 * YOLO5Detector detector("yolov5s.bin", "coco.names");
 * cv::Mat image = cv::imread("test.jpg");
 * auto results = detector.detect(image, 0.25f, 0.45f);
 * @endcode
 *
 * @author FANKYT
 * @date 2025
 */

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>

// RDK BPU libDNN API
#include "dnn/hb_dnn.h"
#include "dnn/hb_dnn_ext.h"
#include "dnn/hb_sys.h"

#include "det/BaseDetector.hpp"
#include "tools/ScopedTimer.hpp"
#include "tools/Common.hpp"

namespace yolos_edgeplatform {

/**
 * @brief YOLOv5 detector implementation for the RDK X5 platform.
 *
 * Wraps the libDNN BPU API: owns the packed model handle (released in the
 * destructor), adapts the input pipeline to the model's declared tensor type
 * (RGB/NCHW int8 or NV12), and decodes the three anchor-based detection heads
 * with per-class NMS.
 */
class YOLO5Detector : public BaseDetector {
public:
    /**
     * @brief Constructor - loads the BPU model and initializes the detector.
     *
     * @param modelPath  Path to the compiled .bin model file.
     * @param labelsPath Path to the class names file (one class per line).
     * @param numClasses Number of detection classes (default: 80 for COCO).
     *
     * @throws std::runtime_error if model loading fails or the model's
     *         input/output layout does not match YOLOv5 expectations.
     */
    YOLO5Detector(const std::string &modelPath,
                  const std::string &labelsPath,
                  int numClasses = 80)
        : numClasses_(numClasses), preprocessType_(PreprocessType::LETTERBOX) {

        ScopedTimer timer("Model Loading");

        // Load class names
        classNames_ = loadClassNames(labelsPath);

        // Default COCO anchors:
        // [10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326]
        anchors_ = {
            {{10.0f, 13.0f}, {16.0f, 30.0f}, {33.0f, 23.0f}},     // Small objects (stride 8)
            {{30.0f, 61.0f}, {62.0f, 45.0f}, {59.0f, 119.0f}},    // Medium objects (stride 16)
            {{116.0f, 90.0f}, {156.0f, 198.0f}, {373.0f, 326.0f}} // Large objects (stride 32)
        };

        // Initialize BPU model
        const char *modelFile = modelPath.c_str();
        int ret = hbDNNInitializeFromFiles(&packedDNNHandle_, &modelFile, 1);
        if (ret != 0) {
            throw std::runtime_error("[ERROR] hbDNNInitializeFromFiles failed with code: " +
                                     std::to_string(ret));
        }

        // From here on this object owns packedDNNHandle_. If a later init step
        // throws, the destructor will NOT run (the object is only partially
        // constructed), so release the handle explicitly to avoid a leak.
        try {
            initializeModel();
        } catch (...) {
            hbDNNRelease(packedDNNHandle_);
            packedDNNHandle_ = nullptr;
            throw;
        }

        std::cout << "[INFO] YOLOv5 Detector initialized successfully" << std::endl;
        std::cout << "[INFO] Model: " << modelPath << std::endl;
        std::cout << "[INFO] Classes: " << numClasses_ << std::endl;
    }

    /**
     * @brief Destructor - releases BPU resources.
     */
    ~YOLO5Detector() override {
        if (packedDNNHandle_) {
            hbDNNRelease(packedDNNHandle_);
            packedDNNHandle_ = nullptr;
        }
    }

    /**
     * @brief Performs object detection on a BGR image.
     *
     * @param image         Input image (BGR; code indexes channel 2 as R and
     *                      converts with COLOR_BGR2YUV_I420).
     * @param confThreshold Minimum objectness * class confidence to keep.
     * @param nmsThreshold  IoU threshold for per-class NMS.
     * @return Detections mapped back to original-image coordinates.
     * @throws std::runtime_error if the BPU inference task fails.
     */
    std::vector<Detection> detect(const cv::Mat &image,
                                  float confThreshold = 0.25f,
                                  float nmsThreshold = 0.45f) override {
        ScopedTimer timer("Overall Detection");

        // Preprocessing: resize/letterbox + format conversion.
        cv::Mat processedImage;
        int xShift = 0, yShift = 0;
        float scale = 1.0f;
        preprocess(image, processedImage, xShift, yShift, scale);

        // Inference on the BPU; caller owns the returned output buffers.
        std::vector<hbDNNTensor> outputTensors = inference(processedImage);

        // Postprocessing: decode heads + NMS. Free the output buffers on every
        // path so an exception in postprocess() cannot leak BPU memory.
        std::vector<Detection> detections;
        try {
            detections = postprocess(outputTensors, confThreshold,
                                     nmsThreshold, xShift, yShift, scale);
        } catch (...) {
            for (auto &tensor : outputTensors) {
                hbSysFreeMem(&tensor.sysMem[0]);
            }
            throw;
        }

        for (auto &tensor : outputTensors) {
            hbSysFreeMem(&tensor.sysMem[0]);
        }

        return detections;
    }

    /// @return Model input size (width x height) read from the model metadata.
    cv::Size getInputSize() const override {
        return cv::Size(inputW_, inputH_);
    }

    /// @return Number of detection classes.
    int getNumClasses() const override {
        return numClasses_;
    }

    /// @return Class names loaded from the labels file.
    const std::vector<std::string>& getClassNames() const override {
        return classNames_;
    }

    /// Selects LETTERBOX (aspect-preserving pad) or plain RESIZE preprocessing.
    void setPreprocessType(PreprocessType type) override {
        preprocessType_ = type;
    }

    /**
     * @brief Set custom anchors (optional).
     *
     * Expects exactly 3 scales with 3 (width, height) pairs each; anything
     * else is rejected with a warning. Previously only anchors[0] was checked,
     * so a malformed anchors[1]/anchors[2] could cause out-of-bounds indexing
     * during postprocessing.
     */
    void setAnchors(const std::vector<std::vector<std::pair<float, float>>> &anchors) {
        bool valid = (anchors.size() == 3);
        for (size_t i = 0; valid && i < anchors.size(); i++) {
            valid = (anchors[i].size() == 3);
        }
        if (valid) {
            anchors_ = anchors;
        } else {
            std::cout << "[WARN] setAnchors ignored: expected 3 scales x 3 anchors" << std::endl;
        }
    }

private:
    // ---- BPU handles ----
    hbPackedDNNHandle_t packedDNNHandle_ = nullptr; ///< Owning handle to the loaded .bin model.
    hbDNNHandle_t dnnHandle_ = nullptr;             ///< Non-owning handle to the first model in the bin.
    hbDNNTensorProperties inputProperties_{};       ///< Cached input tensor properties (value-init; was uninitialized).

    // ---- Model parameters (overwritten from model metadata at init) ----
    int inputH_ = 640;
    int inputW_ = 640;
    int numClasses_ = 80;

    // Maps detection head index (0 = stride 8, 1 = stride 16, 2 = stride 32)
    // to the model's actual output tensor index.
    int outputOrder_[3] = {0, 1, 2};

    // Anchors: [3 scales][3 anchors per scale] as (width, height) pairs.
    std::vector<std::vector<std::pair<float, float>>> anchors_;

    // ---- Configuration ----
    PreprocessType preprocessType_;
    InputImageType inputImageType_;  ///< Input format declared by the model (set during init).
    std::vector<std::string> classNames_;

    /**
     * @brief Performs all post-load initialization steps.
     *
     * Factored out of the constructor so every failure funnels through a
     * single cleanup path that releases the packed model handle.
     *
     * @throws std::runtime_error on any API failure or layout mismatch.
     */
    void initializeModel() {
        // Resolve the model handle. A packed .bin may contain several models;
        // validate the count before dereferencing modelNameList[0].
        const char **modelNameList = nullptr;
        int modelCount = 0;
        int ret = hbDNNGetModelNameList(&modelNameList, &modelCount, packedDNNHandle_);
        if (ret != 0 || modelCount < 1) {
            throw std::runtime_error("[ERROR] hbDNNGetModelNameList failed or bin contains no model");
        }
        if (modelCount > 1) {
            std::cout << "[WARN] Multiple models in bin file, using first one" << std::endl;
        }

        const char *modelName = modelNameList[0];
        ret = hbDNNGetModelHandle(&dnnHandle_, packedDNNHandle_, modelName);
        if (ret != 0) {
            throw std::runtime_error("[ERROR] hbDNNGetModelHandle failed");
        }

        // Validate input layout: exactly one input tensor.
        int32_t inputCount = 0;
        hbDNNGetInputCount(&inputCount, dnnHandle_);
        if (inputCount != 1) {
            throw std::runtime_error("[ERROR] Model should have exactly 1 input");
        }

        ret = hbDNNGetInputTensorProperties(&inputProperties_, dnnHandle_, 0);
        if (ret != 0) {
            throw std::runtime_error("[ERROR] hbDNNGetInputTensorProperties failed");
        }

        // Auto-detect the input tensor type and adapt the preprocessing path.
        if (inputProperties_.tensorType == HB_DNN_IMG_TYPE_RGB) {
            inputImageType_ = InputImageType::RGB;
            std::cout << "[INFO] Input type: RGB (NCHW, int8 quantized)" << std::endl;
        } else if (inputProperties_.tensorType == HB_DNN_IMG_TYPE_NV12) {
            inputImageType_ = InputImageType::NV12;
            std::cout << "[INFO] Input type: NV12 (YUV420SP)" << std::endl;
        } else {
            throw std::runtime_error("[ERROR] Unsupported input tensor type");
        }

        // Input dimensions: NCHW layout, so dims[2] = H and dims[3] = W.
        if (inputProperties_.validShape.numDimensions == 4) {
            inputH_ = inputProperties_.validShape.dimensionSize[2];
            inputW_ = inputProperties_.validShape.dimensionSize[3];
            std::cout << "[INFO] Input size: " << inputW_ << "x" << inputH_ << std::endl;
        } else {
            throw std::runtime_error("[ERROR] Invalid input tensor shape");
        }

        // Validate output layout: YOLOv5 exports exactly 3 detection heads.
        int32_t outputCount = 0;
        hbDNNGetOutputCount(&outputCount, dnnHandle_);
        if (outputCount != 3) {
            throw std::runtime_error("[ERROR] YOLOv5 model should have 3 outputs");
        }

        determineOutputOrder();
    }

    /**
     * @brief Determines the mapping from detection head to output tensor index.
     *
     * YOLOv5 heads (NHWC, float32):
     * - head 0: (1, H/8,  W/8,  3 x (5 + CLASSES)) - small objects
     * - head 1: (1, H/16, W/16, 3 x (5 + CLASSES)) - medium objects
     * - head 2: (1, H/32, W/32, 3 x (5 + CLASSES)) - large objects
     *
     * The exporter may emit the tensors in any order, so each expected shape
     * is matched against the model's actual output shapes.
     */
    void determineOutputOrder() {
        const int channels = 3 * (5 + numClasses_);
        const int expectedShapes[3][3] = {
            {inputH_ / 8,  inputW_ / 8,  channels},
            {inputH_ / 16, inputW_ / 16, channels},
            {inputH_ / 32, inputW_ / 32, channels}
        };

        // Query each output's properties once; they are loop-invariant
        // (previously re-queried on every (i, j) pair).
        hbDNNTensorProperties outProps[3];
        for (int j = 0; j < 3; j++) {
            hbDNNGetOutputTensorProperties(&outProps[j], dnnHandle_, j);
        }

        for (int i = 0; i < 3; i++) {
            bool matched = false;
            for (int j = 0; j < 3; j++) {
                int h = outProps[j].validShape.dimensionSize[1];
                int w = outProps[j].validShape.dimensionSize[2];
                int c = outProps[j].validShape.dimensionSize[3];

                if (h == expectedShapes[i][0] && w == expectedShapes[i][1] && c == expectedShapes[i][2]) {
                    outputOrder_[i] = j;
                    matched = true;
                    break;
                }
            }
            // A silent mismatch previously left the default index in place and
            // decoding would read the wrong tensor; warn loudly instead.
            if (!matched) {
                std::cout << "[WARN] No output tensor matches head " << i
                          << ", keeping default index " << outputOrder_[i] << std::endl;
            }
        }

        std::cout << "[INFO] Output order: {" << outputOrder_[0] << ", "
                  << outputOrder_[1] << ", " << outputOrder_[2] << "}" << std::endl;
    }

    /**
     * @brief Preprocesses the input image for BPU inference.
     *
     * The path depends on the model's declared input type:
     * - RGB:  letterBox/resize only; BGR->RGB, HWC->CHW and quantization are
     *         applied later in inference().
     * - NV12: letterBox/resize -> COLOR_BGR2YUV_I420 -> repack I420 as NV12.
     *
     * @param src         Input BGR image.
     * @param[out] dst    Network-ready image buffer.
     * @param[out] xShift Horizontal letterbox padding in pixels (0 for RESIZE).
     * @param[out] yShift Vertical letterbox padding in pixels (0 for RESIZE).
     * @param[out] scale  Scale factor used to map boxes back to src coords.
     */
    void preprocess(const cv::Mat &src, cv::Mat &dst,
                    int &xShift, int &yShift, float &scale) {
        ScopedTimer timer("Preprocessing");

        cv::Mat resizedImage;
        if (preprocessType_ == PreprocessType::LETTERBOX) {
            letterBox(src, resizedImage, cv::Size(inputW_, inputH_), xShift, yShift, scale);
        } else {
            resizeImage(src, resizedImage, cv::Size(inputW_, inputH_));
            xShift = 0;
            yShift = 0;
            // NOTE(review): a plain resize may scale x and y differently, but
            // postprocess() divides both axes by this single factor — confirm
            // resizeImage() preserves aspect ratio, otherwise boxes are skewed.
            scale = std::min(static_cast<float>(inputH_) / src.rows,
                            static_cast<float>(inputW_) / src.cols);
        }

        if (inputImageType_ == InputImageType::NV12) {
            // NV12 mode: BGR -> YUV I420 -> NV12.
            cv::Mat yuvMat;
            cv::cvtColor(resizedImage, yuvMat, cv::COLOR_BGR2YUV_I420);
            const uint8_t *yuv = yuvMat.ptr<uint8_t>();

            // NV12 layout: Y plane (H x W) followed by interleaved UV plane
            // (H/2 x W), hence a H*3/2 x W single-channel buffer.
            dst = cv::Mat(inputH_ * 3 / 2, inputW_, CV_8UC1);
            uint8_t *nv12Data = dst.ptr<uint8_t>();

            int ySize = inputH_ * inputW_;
            int uvHeight = inputH_ / 2;
            int uvWidth = inputW_ / 2;

            // Copy the Y plane verbatim.
            std::memcpy(nv12Data, yuv, ySize);

            // Interleave U and V (I420: YYYY... UU.. VV.. -> NV12: YYYY... UVUV...).
            uint8_t *nv12_uv = nv12Data + ySize;
            const uint8_t *i420_u = yuv + ySize;
            const uint8_t *i420_v = i420_u + uvHeight * uvWidth;

            for (int i = 0; i < uvHeight * uvWidth; i++) {
                nv12_uv[2 * i] = i420_u[i];       // U
                nv12_uv[2 * i + 1] = i420_v[i];   // V
            }
        } else {
            // RGB mode: keep BGR here; conversion happens in inference().
            dst = resizedImage;
        }
    }

    /**
     * @brief Runs BPU inference on the preprocessed image.
     *
     * Input handling depends on the model's declared input type:
     * - RGB:  BGR->RGB swap + HWC->CHW transpose + (u8 - 128) quantization.
     * - NV12: the NV12 buffer produced by preprocess() is copied verbatim.
     *
     * @return The 3 raw output tensors; the caller must free their sysMem.
     * @throws std::runtime_error if the BPU inference task fails (return codes
     *         were previously ignored, letting garbage outputs be decoded).
     */
    std::vector<hbDNNTensor> inference(const cv::Mat &image) {
        ScopedTimer timer("BPU Inference");

        // Allocate and fill the input tensor.
        hbDNNTensor inputTensor;
        inputTensor.properties = inputProperties_;

        if (inputImageType_ == InputImageType::RGB) {
            // RGB mode: 3 planes of H*W int8 values (NCHW).
            hbSysAllocCachedMem(&inputTensor.sysMem[0], 3 * inputH_ * inputW_);

            // ptr<>() on a const Mat returns a const pointer (the original
            // assigned it to a mutable uint8_t*).
            const uint8_t *srcData = image.ptr<uint8_t>();
            int8_t *dstData = reinterpret_cast<int8_t*>(inputTensor.sysMem[0].virAddr);
            const int plane = inputH_ * inputW_;

            for (int h = 0; h < inputH_; h++) {
                for (int w = 0; w < inputW_; w++) {
                    int srcIdx = (h * inputW_ + w) * 3; // interleaved BGR pixel
                    int dstIdx = h * inputW_ + w;       // offset within one plane

                    dstData[0 * plane + dstIdx] = static_cast<int8_t>(srcData[srcIdx + 2] - 128); // R
                    dstData[1 * plane + dstIdx] = static_cast<int8_t>(srcData[srcIdx + 1] - 128); // G
                    dstData[2 * plane + dstIdx] = static_cast<int8_t>(srcData[srcIdx + 0] - 128); // B
                }
            }
        } else {
            // NV12 mode: Y plane (H*W) + interleaved UV plane -> 1.5 * H * W bytes.
            int inputSize = inputH_ * inputW_ * 3 / 2;
            hbSysAllocCachedMem(&inputTensor.sysMem[0], inputSize);
            std::memcpy(inputTensor.sysMem[0].virAddr, image.ptr<uint8_t>(), inputSize);
        }

        // Flush CPU writes so the BPU sees the input data.
        hbSysFlushMem(&inputTensor.sysMem[0], HB_SYS_MEM_CACHE_CLEAN);

        // Allocate output tensors sized by the model's aligned byte size.
        std::vector<hbDNNTensor> outputTensors(3);
        for (int i = 0; i < 3; i++) {
            hbDNNGetOutputTensorProperties(&outputTensors[i].properties, dnnHandle_, i);
            hbSysAllocCachedMem(&outputTensors[i].sysMem[0],
                                outputTensors[i].properties.alignedByteSize);
        }

        // Run inference synchronously.
        hbDNNTaskHandle_t taskHandle = nullptr;
        hbDNNInferCtrlParam inferCtrlParam;
        HB_DNN_INITIALIZE_INFER_CTRL_PARAM(&inferCtrlParam);
        int ret = hbDNNInfer(&taskHandle, outputTensors.data(), &inputTensor, dnnHandle_, &inferCtrlParam);
        if (ret == 0) {
            ret = hbDNNWaitTaskDone(taskHandle, 0); // 0 = wait indefinitely
        }
        if (taskHandle != nullptr) {
            hbDNNReleaseTask(taskHandle);
        }

        // The input buffer is no longer needed regardless of outcome.
        hbSysFreeMem(&inputTensor.sysMem[0]);

        if (ret != 0) {
            // Free output buffers before failing so they do not leak.
            for (auto &tensor : outputTensors) {
                hbSysFreeMem(&tensor.sysMem[0]);
            }
            throw std::runtime_error("[ERROR] BPU inference failed with code: " +
                                     std::to_string(ret));
        }

        return outputTensors;
    }

    /**
     * @brief Decodes raw BPU outputs into detections (anchor-based).
     *
     * Per-head layout: NHWC with C = 3 anchors x [x, y, w, h, obj, classes...],
     * all values raw logits. Since sigmoid is monotonic, cells are first
     * rejected by comparing the raw objectness logit against
     * logit(confThreshold), avoiding exp() for the vast majority of cells.
     *
     * @param outputs       Raw output tensors from inference(). Taken by
     *                      non-const reference: hbSysFlushMem requires a
     *                      mutable hbSysMem* (the original const ref could not
     *                      legally provide one).
     * @param confThreshold Final threshold on objectness * class score.
     * @param nmsThreshold  IoU threshold for per-class NMS.
     * @param xShift        LetterBox horizontal padding to undo.
     * @param yShift        LetterBox vertical padding to undo.
     * @param scale         LetterBox scale to undo.
     * @return Detections in original-image coordinates.
     */
    std::vector<Detection> postprocess(std::vector<hbDNNTensor> &outputs,
                                       float confThreshold, float nmsThreshold,
                                       int xShift, int yShift, float scale) {
        ScopedTimer timer("Postprocessing");

        // Inverse sigmoid: sigmoid(x) >= t  <=>  x >= -log(1/t - 1).
        float confThreshRaw = -std::log(1.0f / confThreshold - 1.0f);

        auto sigmoid = [](float v) { return 1.0f / (1.0f + std::exp(-v)); };

        // Candidate boxes/scores bucketed per class for per-class NMS.
        std::vector<std::vector<cv::Rect2d>> bboxes(numClasses_);
        std::vector<std::vector<float>> scores(numClasses_);

        // The 3 detection heads and their grid geometry.
        int strides[3] = {8, 16, 32};
        int heights[3] = {inputH_ / 8, inputH_ / 16, inputH_ / 32};
        int widths[3] = {inputW_ / 8, inputW_ / 16, inputW_ / 32};

        for (int headIdx = 0; headIdx < 3; headIdx++) {
            int outputIdx = outputOrder_[headIdx];

            // Invalidate the CPU cache so BPU-written results are visible.
            hbSysFlushMem(&outputs[outputIdx].sysMem[0], HB_SYS_MEM_CACHE_INVALIDATE);

            // NOTE(review): this pointer walk assumes the tensor is densely
            // packed (valid shape == aligned shape for these float32 outputs)
            // — confirm for models with padded output layouts.
            auto *rawData = reinterpret_cast<float*>(outputs[outputIdx].sysMem[0].virAddr);

            int H = heights[headIdx];
            int W = widths[headIdx];
            int stride = strides[headIdx];
            const auto &headAnchors = anchors_[headIdx];

            for (int h = 0; h < H; h++) {
                for (int w = 0; w < W; w++) {
                    for (size_t anchorIdx = 0; anchorIdx < 3; anchorIdx++) {
                        float *curData = rawData;
                        rawData += (5 + numClasses_);

                        // Cheap rejection on the raw objectness logit.
                        if (curData[4] < confThreshRaw) {
                            continue;
                        }

                        // Best class = argmax over raw logits (argmax commutes
                        // with the monotonic sigmoid).
                        int bestClsId = 0;
                        for (int c = 1; c < numClasses_; c++) {
                            if (curData[5 + c] > curData[5 + bestClsId]) {
                                bestClsId = c;
                            }
                        }

                        // Combined confidence = sigmoid(obj) * sigmoid(cls).
                        float objConf = sigmoid(curData[4]);
                        float clsConf = sigmoid(curData[5 + bestClsId]);
                        float conf = objConf * clsConf;

                        if (conf < confThreshold) {
                            continue;
                        }

                        // YOLOv5 box decode:
                        //   cx = (2*sig(tx) - 0.5 + grid_x) * stride
                        //   w  = (2*sig(tw))^2 * anchor_w
                        float anchorW = headAnchors[anchorIdx].first;
                        float anchorH = headAnchors[anchorIdx].second;

                        float centerX = (sigmoid(curData[0]) * 2.0f - 0.5f + w) * stride;
                        float centerY = (sigmoid(curData[1]) * 2.0f - 0.5f + h) * stride;
                        float tw = sigmoid(curData[2]) * 2.0f;
                        float th = sigmoid(curData[3]) * 2.0f;
                        // x*x instead of std::pow(x, 2) in this hot loop.
                        float bboxW = tw * tw * anchorW;
                        float bboxH = th * th * anchorH;

                        float x1 = centerX - bboxW / 2.0f;
                        float y1 = centerY - bboxH / 2.0f;

                        bboxes[bestClsId].push_back(cv::Rect2d(x1, y1, bboxW, bboxH));
                        scores[bestClsId].push_back(conf);
                    }
                }
            }
        }

        // Per-class NMS, then map surviving boxes back to source coordinates.
        std::vector<Detection> finalDetections;
        for (int c = 0; c < numClasses_; c++) {
            if (bboxes[c].empty()) continue;

            std::vector<int> indices;
            cv::dnn::NMSBoxes(bboxes[c], scores[c], confThreshold, nmsThreshold, indices);

            for (int idx : indices) {
                Detection det;
                // Undo letterbox padding and scaling.
                det.box.x = (bboxes[c][idx].x - xShift) / scale;
                det.box.y = (bboxes[c][idx].y - yShift) / scale;
                det.box.width = bboxes[c][idx].width / scale;
                det.box.height = bboxes[c][idx].height / scale;
                det.conf = scores[c][idx];
                det.classId = c;
                finalDetections.push_back(det);
            }
        }

        return finalDetections;
    }
};

} // namespace yolos_edgeplatform

#endif // YOLOS_EDGEPLATFORM_YOLO5_RDK_HPP
