// YOLO5_RKNN.hpp
#ifndef YOLOS_EDGEPLATFORM_YOLO5_RKNN_HPP
#define YOLOS_EDGEPLATFORM_YOLO5_RKNN_HPP

/**
 * @file YOLO5_RKNN.hpp
 * @brief YOLOv5 检测器（Rockchip RK3588 平台，NPU 硬件加速）
 *
 * 本头文件提供了针对 Rockchip RK3588 NPU（Neural Processing Unit）优化的
 * YOLOv5 目标检测的 header-only 实现。
 *
 * 核心特性：
 * - 单头文件设计，易于集成
 * - NPU 硬件加速（通过 RKNN API）
 * - 支持 uint8/int8/fp32 量化输出
 * - Anchor-based 后处理
 * - LetterBox 预处理
 *
 * 模型要求：
 * - 输入: NHWC-RGB, (1, H, W, 3), uint8
 * - 输出: 3 个张量，对应 3 个检测头（stride 8/16/32）
 *   - 每个张量: (1, 3*(5+CLASSES), grid_h, grid_w)，即 NCHW 布局
 *     （后处理按 dims[2]=grid_h、dims[3]=grid_w、通道优先偏移索引）
 *   - 每个 anchor 的通道顺序: [x, y, w, h, conf, cls0, cls1, ..., clsN]
 *
 * Anchor 配置（默认 COCO）：
 *   stride=8:  [[10,13], [16,30], [33,23]]
 *   stride=16: [[30,61], [62,45], [59,119]]
 *   stride=32: [[116,90], [156,198], [373,326]]
 *
 * 使用示例：
 * @code
 * YOLO5RKNNDetector detector("yolov5s.rknn", "coco.names");
 * cv::Mat image = cv::imread("test.jpg");
 * auto results = detector.detect(image, 0.25f, 0.45f);
 * @endcode
 *
 * 作者: FANKYT
 * 日期: 2025
 * 参考: Rockchip RKNN Model Zoo YOLOv5 示例
 */

#include <iostream>
#include <vector>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <cstring>
#include <set>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>

// RKNN API
#include "rknn_api.h"

#include "det/BaseDetector.hpp"
#include "tools/ScopedTimer.hpp"
#include "tools/Common.hpp"

namespace yolos_edgeplatform {

/**
 * @brief YOLOv5 detector implementation for Rockchip RK3588 platform
 */
/**
 * @brief YOLOv5 detector implementation for the Rockchip RK3588 platform.
 *
 * Runs inference on the RK3588 NPU through the RKNN C API. The instance owns
 * a single rknn_context for its whole lifetime: created in the constructor,
 * destroyed in the destructor. The context is a raw C handle, so the class
 * must not be copied; a single instance is not safe for concurrent use from
 * multiple threads (rknn_inputs_set/rknn_run/rknn_outputs_get share state).
 */
class YOLO5RKNNDetector : public BaseDetector {
public:
    /**
     * @brief Constructor - loads the RKNN model and initializes the detector.
     *
     * @param modelPath  Path to the RKNN model file (.rknn)
     * @param labelsPath Path to the class-name file (one class per line)
     * @param numClasses Number of detection classes (default: 80, COCO)
     * @throws std::runtime_error if the model file cannot be read, the model
     *         does not have exactly 1 input / 3 outputs, or any RKNN API
     *         call fails.
     */
    YOLO5RKNNDetector(const std::string &modelPath,
                      const std::string &labelsPath,
                      int numClasses = 80)
        : numClasses_(numClasses), preprocessType_(PreprocessType::LETTERBOX), rknnCtx_(0) {

        ScopedTimer timer("Model Loading");

        // Load class names (helper provided by BaseDetector).
        classNames_ = loadClassNames(labelsPath);

        // Read the whole .rknn model file into memory.
        std::ifstream modelFile(modelPath, std::ios::binary | std::ios::ate);
        if (!modelFile.is_open()) {
            throw std::runtime_error("[ERROR] Cannot open model file: " + modelPath);
        }

        // FIX: tellg() returns -1 on failure; the original code assigned it
        // to size_t unchecked, which would request an enormous allocation.
        std::streamsize modelSize = modelFile.tellg();
        if (modelSize <= 0) {
            throw std::runtime_error("[ERROR] Model file is empty or unreadable: " + modelPath);
        }
        modelFile.seekg(0, std::ios::beg);
        std::vector<char> modelData(static_cast<size_t>(modelSize));
        // FIX: verify the full file was read (the original never checked,
        // so a truncated read would hand garbage to rknn_init).
        if (!modelFile.read(modelData.data(), modelSize)) {
            throw std::runtime_error("[ERROR] Failed to read model file: " + modelPath);
        }
        modelFile.close();

        // Initialize the RKNN runtime from the in-memory model blob.
        int ret = rknn_init(&rknnCtx_, modelData.data(),
                            static_cast<uint32_t>(modelSize), 0, nullptr);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_init failed with code: " + std::to_string(ret));
        }

        // Query the number of input/output tensors and validate the model shape.
        rknn_input_output_num ioNum;
        ret = rknn_query(rknnCtx_, RKNN_QUERY_IN_OUT_NUM, &ioNum, sizeof(ioNum));
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_query RKNN_QUERY_IN_OUT_NUM failed");
        }

        if (ioNum.n_input != 1) {
            throw std::runtime_error("[ERROR] Model should have exactly 1 input");
        }
        if (ioNum.n_output != 3) {
            throw std::runtime_error("[ERROR] YOLOv5 model should have 3 outputs");
        }

        numOutputs_ = ioNum.n_output;

        // Query input tensor attributes (dims, layout, quantization).
        inputAttr_.index = 0;
        ret = rknn_query(rknnCtx_, RKNN_QUERY_INPUT_ATTR, &inputAttr_, sizeof(inputAttr_));
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_query RKNN_QUERY_INPUT_ATTR failed");
        }

        // Extract H/W/C from the input dims according to the reported layout.
        if (inputAttr_.fmt == RKNN_TENSOR_NCHW) {
            inputH_ = inputAttr_.dims[2];
            inputW_ = inputAttr_.dims[3];
            inputC_ = inputAttr_.dims[1];
        } else if (inputAttr_.fmt == RKNN_TENSOR_NHWC) {
            inputH_ = inputAttr_.dims[1];
            inputW_ = inputAttr_.dims[2];
            inputC_ = inputAttr_.dims[3];
        } else {
            throw std::runtime_error("[ERROR] Unsupported input tensor format");
        }

        std::cout << "[INFO] Input size: " << inputW_ << "x" << inputH_ << "x" << inputC_ << std::endl;
        std::cout << "[INFO] Input format: " << (inputAttr_.fmt == RKNN_TENSOR_NCHW ? "NCHW" : "NHWC") << std::endl;

        // Query the attributes of all three detection-head outputs.
        outputAttrs_.resize(numOutputs_);
        for (int i = 0; i < numOutputs_; i++) {
            outputAttrs_[i].index = i;
            ret = rknn_query(rknnCtx_, RKNN_QUERY_OUTPUT_ATTR, &outputAttrs_[i], sizeof(rknn_tensor_attr));
            if (ret != RKNN_SUCC) {
                throw std::runtime_error("[ERROR] rknn_query RKNN_QUERY_OUTPUT_ATTR failed");
            }
        }

        // A model is treated as quantized when its outputs use affine
        // asymmetric quantization and are not fp16 (RKNN Model Zoo convention).
        isQuant_ = (outputAttrs_[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC &&
                    outputAttrs_[0].type != RKNN_TENSOR_FLOAT16);

        std::cout << "[INFO] Model quantization: " << (isQuant_ ? "Yes" : "No") << std::endl;
        std::cout << "[INFO] YOLO5 RKNN Detector initialized successfully" << std::endl;
        std::cout << "[INFO] Model: " << modelPath << std::endl;
        std::cout << "[INFO] Classes: " << numClasses_ << std::endl;
    }

    /**
     * @brief Destructor - releases the RKNN context.
     */
    ~YOLO5RKNNDetector() override {
        if (rknnCtx_ != 0) {
            rknn_destroy(rknnCtx_);
        }
    }

    /**
     * @brief Run object detection on an image.
     *
     * Pipeline: LetterBox preprocessing -> NPU inference -> per-head decode,
     * per-class NMS, and coordinate restoration to the original image space.
     *
     * @param image         Input BGR image (cv::Mat, 8-bit 3-channel)
     * @param confThreshold Minimum (objectness * class) score to keep a box
     * @param nmsThreshold  IoU threshold for non-maximum suppression
     * @return Detections with boxes in original-image coordinates.
     */
    std::vector<Detection> detect(const cv::Mat &image,
                                  float confThreshold = 0.25f,
                                  float nmsThreshold = 0.45f) override {
        ScopedTimer timer("Overall Detection");

        // Preprocess (LetterBox + BGR->RGB).
        cv::Mat processedImage;
        LetterBoxInfo lbInfo;
        preprocess(image, processedImage, lbInfo);

        // NPU inference.
        std::vector<rknn_output> outputs = inference(processedImage);

        // FIX: always release the NPU output buffers, even when postprocess()
        // throws — the original skipped rknn_outputs_release on exception and
        // leaked the driver-owned buffers. The RKNN API is plain C, so an
        // explicit try/rethrow stands in for RAII here.
        std::vector<Detection> detections;
        try {
            detections = postprocess(outputs, confThreshold, nmsThreshold, lbInfo);
        } catch (...) {
            rknn_outputs_release(rknnCtx_, numOutputs_, outputs.data());
            throw;
        }
        rknn_outputs_release(rknnCtx_, numOutputs_, outputs.data());

        return detections;
    }

    /// @return Model input size (width x height).
    cv::Size getInputSize() const override {
        return cv::Size(inputW_, inputH_);
    }

    /// @return Number of detection classes.
    int getNumClasses() const override {
        return numClasses_;
    }

    /// @return Class names loaded from the labels file.
    const std::vector<std::string>& getClassNames() const override {
        return classNames_;
    }

    /// Select the preprocessing strategy (only LETTERBOX is implemented here).
    void setPreprocessType(PreprocessType type) override {
        preprocessType_ = type;
    }

private:
    // RKNN context and tensor attributes (owned C handles/state).
    rknn_context rknnCtx_;
    rknn_tensor_attr inputAttr_;
    std::vector<rknn_tensor_attr> outputAttrs_;

    // Model parameters (overwritten from the queried input attributes).
    int inputH_ = 640;
    int inputW_ = 640;
    int inputC_ = 3;
    int numClasses_ = 80;
    int numOutputs_ = 3;
    bool isQuant_ = false;

    // Configuration.
    PreprocessType preprocessType_;
    std::vector<std::string> classNames_;

    // YOLOv5 anchors (COCO defaults), one row per head: {w0,h0, w1,h1, w2,h2}.
    const int anchors_[3][6] = {
        {10, 13, 16, 30, 33, 23},      // stride 8
        {30, 61, 62, 45, 59, 119},     // stride 16
        {116, 90, 156, 198, 373, 326}  // stride 32
    };

    /**
     * @brief LetterBox bookkeeping needed to map boxes back to the original image.
     */
    struct LetterBoxInfo {
        float scale;   // uniform resize factor applied to the source image
        int xPad;      // horizontal padding (left side), in model-input pixels
        int yPad;      // vertical padding (top side), in model-input pixels
    };

    /**
     * @brief Preprocess: aspect-preserving resize, gray padding, BGR->RGB.
     *
     * @param src    Source BGR image
     * @param dst    Output inputW_ x inputH_ RGB image (uint8, contiguous)
     * @param lbInfo Filled with the scale/padding used (for postprocess)
     */
    void preprocess(const cv::Mat &src, cv::Mat &dst, LetterBoxInfo &lbInfo) {
        ScopedTimer timer("Preprocessing");

        int srcW = src.cols;
        int srcH = src.rows;

        // Uniform scale so the image fits inside the model input.
        float scale = std::min(static_cast<float>(inputW_) / srcW,
                              static_cast<float>(inputH_) / srcH);
        lbInfo.scale = scale;

        int newW = static_cast<int>(srcW * scale);
        int newH = static_cast<int>(srcH * scale);

        // Resize (default bilinear interpolation).
        cv::Mat resized;
        cv::resize(src, resized, cv::Size(newW, newH));

        // Center the resized image on a gray (114,114,114) canvas —
        // the same padding value YOLOv5 uses in training.
        lbInfo.xPad = (inputW_ - newW) / 2;
        lbInfo.yPad = (inputH_ - newH) / 2;

        dst = cv::Mat(inputH_, inputW_, CV_8UC3, cv::Scalar(114, 114, 114));
        resized.copyTo(dst(cv::Rect(lbInfo.xPad, lbInfo.yPad, newW, newH)));

        // BGR -> RGB (the model expects RGB input).
        cv::cvtColor(dst, dst, cv::COLOR_BGR2RGB);
    }

    /**
     * @brief Run one NPU inference pass.
     *
     * @param image Preprocessed RGB image; must be inputW_ x inputH_ x inputC_
     *              and contiguous (the Mat produced by preprocess() is).
     * @return Output tensors from rknn_outputs_get. The caller owns them and
     *         must release them with rknn_outputs_release.
     */
    std::vector<rknn_output> inference(const cv::Mat &image) {
        ScopedTimer timer("NPU Inference");

        // Bind the image buffer as the single uint8 NHWC input.
        rknn_input input;
        memset(&input, 0, sizeof(input));
        input.index = 0;
        input.type = RKNN_TENSOR_UINT8;
        input.fmt = RKNN_TENSOR_NHWC;
        input.size = inputW_ * inputH_ * inputC_;
        input.buf = image.data;

        int ret = rknn_inputs_set(rknnCtx_, 1, &input);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_inputs_set failed");
        }

        // Execute the model on the NPU.
        ret = rknn_run(rknnCtx_, nullptr);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_run failed");
        }

        // Fetch the outputs. For quantized models keep the raw int8 buffers
        // (decoded with zp/scale in postprocess); otherwise ask for float.
        std::vector<rknn_output> outputs(numOutputs_);
        memset(outputs.data(), 0, numOutputs_ * sizeof(rknn_output));
        for (int i = 0; i < numOutputs_; i++) {
            outputs[i].index = i;
            outputs[i].want_float = (!isQuant_);
        }

        ret = rknn_outputs_get(rknnCtx_, numOutputs_, outputs.data(), nullptr);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_outputs_get failed");
        }

        return outputs;
    }

    /**
     * @brief Dequantize an affine-quantized int8 value to float32.
     */
    inline float deqnt_affine_to_f32(int8_t qnt, int32_t zp, float scale) {
        return (static_cast<float>(qnt) - static_cast<float>(zp)) * scale;
    }

    /**
     * @brief Dequantize an affine-quantized uint8 value to float32.
     * @note Currently unused; retained for models with uint8-quantized outputs.
     */
    inline float deqnt_affine_u8_to_f32(uint8_t qnt, int32_t zp, float scale) {
        return (static_cast<float>(qnt) - static_cast<float>(zp)) * scale;
    }

    /**
     * @brief Quantize a float32 value to affine int8 (clamped to [-128, 127]).
     *
     * Used to quantize the confidence threshold once so the hot loop can
     * compare raw int8 values without dequantizing every cell.
     */
    inline int8_t qnt_f32_to_affine(float f32, int32_t zp, float scale) {
        float dst_val = (f32 / scale) + zp;
        return static_cast<int8_t>(std::max(-128.0f, std::min(127.0f, dst_val)));
    }

    /**
     * @brief Decode one detection head (int8-quantized output, NCHW layout).
     *
     * Tensor layout assumed: (1, 3*(5+numClasses_), gridH, gridW); per-anchor
     * channel order [x, y, w, h, conf, cls0..clsN]. Boxes are appended in
     * model-input (letterboxed) pixel coordinates as top-left + size.
     *
     * @return Number of candidate boxes appended.
     */
    int processHeadInt8(int8_t *output, const rknn_tensor_attr &attr,
                        int headIdx, float confThreshold,
                        std::vector<cv::Rect2f> &boxes,
                        std::vector<float> &scores,
                        std::vector<int> &classIds) {

        int validCount = 0;

        // Grid size from NCHW dims: dims[2]=H, dims[3]=W.
        int gridH = attr.dims[2];
        int gridW = attr.dims[3];
        int gridLen = gridH * gridW;
        int stride = inputH_ / gridH;

        const int *anchor = anchors_[headIdx];
        int32_t zp = attr.zp;
        float scale = attr.scale;

        // Quantize the threshold once; compare raw int8 objectness directly.
        int8_t thresInt8 = qnt_f32_to_affine(confThreshold, zp, scale);

        // Iterate 3 anchors x grid cells.
        for (int a = 0; a < 3; a++) {
            for (int h = 0; h < gridH; h++) {
                for (int w = 0; w < gridW; w++) {
                    // Objectness lives in channel 4 of this anchor's slice.
                    int confIdx = ((a * (5 + numClasses_) + 4) * gridLen) + h * gridW + w;
                    int8_t boxConf = output[confIdx];

                    if (boxConf < thresInt8) {
                        continue;
                    }

                    // Decode the box (channels 0..3: x, y, w, h) using the
                    // YOLOv5 v5+ formulas: xy = sigmoid*2-0.5, wh = (sigmoid*2)^2.
                    int baseIdx = (a * (5 + numClasses_)) * gridLen + h * gridW + w;

                    float boxX = deqnt_affine_to_f32(output[baseIdx], zp, scale) * 2.0f - 0.5f;
                    float boxY = deqnt_affine_to_f32(output[baseIdx + gridLen], zp, scale) * 2.0f - 0.5f;
                    float boxW = deqnt_affine_to_f32(output[baseIdx + 2 * gridLen], zp, scale) * 2.0f;
                    float boxH = deqnt_affine_to_f32(output[baseIdx + 3 * gridLen], zp, scale) * 2.0f;

                    boxX = (boxX + w) * stride;
                    boxY = (boxY + h) * stride;
                    boxW = boxW * boxW * anchor[a * 2];
                    boxH = boxH * boxH * anchor[a * 2 + 1];

                    // Center -> top-left corner.
                    boxX -= boxW / 2.0f;
                    boxY -= boxH / 2.0f;

                    // Argmax over class channels (still in int8; monotonic
                    // under affine quantization, so comparing raw values is safe).
                    int maxClassId = 0;
                    int8_t maxClassProb = output[baseIdx + 5 * gridLen];

                    for (int c = 1; c < numClasses_; c++) {
                        int8_t prob = output[baseIdx + (5 + c) * gridLen];
                        if (prob > maxClassProb) {
                            maxClassProb = prob;
                            maxClassId = c;
                        }
                    }

                    // Final score = objectness * best class probability.
                    float confF32 = deqnt_affine_to_f32(boxConf, zp, scale);
                    float classF32 = deqnt_affine_to_f32(maxClassProb, zp, scale);
                    float finalScore = confF32 * classF32;

                    if (finalScore >= confThreshold) {
                        boxes.push_back(cv::Rect2f(boxX, boxY, boxW, boxH));
                        scores.push_back(finalScore);
                        classIds.push_back(maxClassId);
                        validCount++;
                    }
                }
            }
        }

        return validCount;
    }

    /**
     * @brief Decode one detection head (float32 output, NCHW layout).
     *
     * Same decode as processHeadInt8 but without dequantization.
     *
     * @return Number of candidate boxes appended.
     */
    int processHeadFloat32(float *output, const rknn_tensor_attr &attr,
                           int headIdx, float confThreshold,
                           std::vector<cv::Rect2f> &boxes,
                           std::vector<float> &scores,
                           std::vector<int> &classIds) {

        int validCount = 0;

        int gridH = attr.dims[2];
        int gridW = attr.dims[3];
        int gridLen = gridH * gridW;
        int stride = inputH_ / gridH;

        const int *anchor = anchors_[headIdx];

        for (int a = 0; a < 3; a++) {
            for (int h = 0; h < gridH; h++) {
                for (int w = 0; w < gridW; w++) {
                    // Objectness (channel 4).
                    int confIdx = ((a * (5 + numClasses_) + 4) * gridLen) + h * gridW + w;
                    float boxConf = output[confIdx];

                    if (boxConf < confThreshold) {
                        continue;
                    }

                    int baseIdx = (a * (5 + numClasses_)) * gridLen + h * gridW + w;

                    float boxX = output[baseIdx] * 2.0f - 0.5f;
                    float boxY = output[baseIdx + gridLen] * 2.0f - 0.5f;
                    float boxW = output[baseIdx + 2 * gridLen] * 2.0f;
                    float boxH = output[baseIdx + 3 * gridLen] * 2.0f;

                    boxX = (boxX + w) * stride;
                    boxY = (boxY + h) * stride;
                    boxW = boxW * boxW * anchor[a * 2];
                    boxH = boxH * boxH * anchor[a * 2 + 1];

                    boxX -= boxW / 2.0f;
                    boxY -= boxH / 2.0f;

                    int maxClassId = 0;
                    float maxClassProb = output[baseIdx + 5 * gridLen];

                    for (int c = 1; c < numClasses_; c++) {
                        float prob = output[baseIdx + (5 + c) * gridLen];
                        if (prob > maxClassProb) {
                            maxClassProb = prob;
                            maxClassId = c;
                        }
                    }

                    float finalScore = boxConf * maxClassProb;

                    if (finalScore >= confThreshold) {
                        boxes.push_back(cv::Rect2f(boxX, boxY, boxW, boxH));
                        scores.push_back(finalScore);
                        classIds.push_back(maxClassId);
                        validCount++;
                    }
                }
            }
        }

        return validCount;
    }

    /**
     * @brief Postprocess the RKNN outputs into final detections.
     *
     * Decodes all three heads, applies per-class NMS, then maps boxes from
     * letterboxed model-input coordinates back to original-image coordinates.
     *
     * @param outputs       Raw output tensors from inference()
     * @param confThreshold Score threshold (also passed to NMSBoxes)
     * @param nmsThreshold  IoU threshold for NMS
     * @param lbInfo        LetterBox scale/padding used during preprocess
     */
    std::vector<Detection> postprocess(const std::vector<rknn_output> &outputs,
                                       float confThreshold, float nmsThreshold,
                                       const LetterBoxInfo &lbInfo) {
        ScopedTimer timer("Postprocessing");

        std::vector<cv::Rect2f> allBoxes;
        std::vector<float> allScores;
        std::vector<int> allClassIds;

        // Decode the 3 detection heads (stride 8, 16, 32).
        for (int i = 0; i < 3; i++) {
            if (isQuant_) {
                processHeadInt8(static_cast<int8_t*>(outputs[i].buf),
                               outputAttrs_[i], i, confThreshold,
                               allBoxes, allScores, allClassIds);
            } else {
                processHeadFloat32(static_cast<float*>(outputs[i].buf),
                                  outputAttrs_[i], i, confThreshold,
                                  allBoxes, allScores, allClassIds);
            }
        }

        if (allBoxes.empty()) {
            return {};
        }

        // Per-class NMS: suppress overlaps only among boxes of the same class.
        std::vector<Detection> finalDetections;
        std::set<int> uniqueClasses(allClassIds.begin(), allClassIds.end());

        for (int cls : uniqueClasses) {
            std::vector<cv::Rect2f> classBoxes;
            std::vector<float> classScores;

            for (size_t i = 0; i < allClassIds.size(); i++) {
                if (allClassIds[i] == cls) {
                    classBoxes.push_back(allBoxes[i]);
                    classScores.push_back(allScores[i]);
                }
            }

            // OpenCV NMS on this class's candidates.
            std::vector<int> nmsIndices;
            cv::dnn::NMSBoxes(classBoxes, classScores, confThreshold, nmsThreshold, nmsIndices);

            // Undo the LetterBox transform: remove padding, then unscale.
            for (int idx : nmsIndices) {
                Detection det;
                det.box.x = (classBoxes[idx].x - lbInfo.xPad) / lbInfo.scale;
                det.box.y = (classBoxes[idx].y - lbInfo.yPad) / lbInfo.scale;
                det.box.width = classBoxes[idx].width / lbInfo.scale;
                det.box.height = classBoxes[idx].height / lbInfo.scale;
                det.conf = classScores[idx];
                det.classId = cls;
                finalDetections.push_back(det);
            }
        }

        return finalDetections;
    }
};

} // namespace yolos_edgeplatform

#endif // YOLOS_EDGEPLATFORM_YOLO5_RKNN_HPP
