// YOLO8_RKNN.hpp
#ifndef YOLOS_EDGEPLATFORM_YOLO8_RKNN_HPP
#define YOLOS_EDGEPLATFORM_YOLO8_RKNN_HPP

/**
 * @file YOLO8_RKNN.hpp
 * @brief YOLOv8 检测器（Rockchip RK3588 平台，NPU 硬件加速）
 *
 * 本头文件提供了针对 Rockchip RK3588 NPU（Neural Processing Unit）优化的
 * YOLOv8 目标检测的 header-only 实现。
 *
 * 核心特性：
 * - 单头文件设计，易于集成
 * - NPU 硬件加速（通过 RKNN API）
 * - 支持 uint8/int8/fp32 量化输出
 * - DFL (Distribution Focal Loss) 后处理
 * - Anchor-free 检测
 * - LetterBox 预处理
 *
 * 模型要求：
 * - 输入: NHWC-RGB, (1, H, W, 3), uint8
 * - 输出: 6-9 个张量，对应 3 个检测头（stride 8/16/32）
 *   - 每个检测头包含 2-3 个张量：
 *     - bbox 张量: (1, dfl_len*4, grid_h, grid_w) - 用于 DFL 解码
 *     - score 张量: (1, CLASSES, grid_h, grid_w) - 类别分数
 *     - score_sum 张量（可选）: (1, 1, grid_h, grid_w) - 快速过滤
 *
 * DFL 解码：
 * - 使用 softmax 将距离分布转换为边界框坐标
 * - 默认 dfl_len = 16（可配置）
 * - 边界框格式：xyxy（左上角+右下角）
 *
 * 使用示例：
 * @code
 * YOLO8RKNNDetector detector("yolov8s.rknn", "coco.names");
 * cv::Mat image = cv::imread("test.jpg");
 * auto results = detector.detect(image, 0.25f, 0.45f);
 * @endcode
 *
 * 作者: FANKYT
 * 日期: 2025
 * 参考: Rockchip RKNN Model Zoo YOLOv8 示例
 */

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <iostream>
#include <set>
#include <stdexcept>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>

// RKNN API
#include "rknn_api.h"

#include "det/BaseDetector.hpp"
#include "tools/ScopedTimer.hpp"
#include "tools/Common.hpp"

namespace yolos_edgeplatform {

/**
 * @brief YOLOv8 detector implementation for Rockchip RK3588 platform
 */
/**
 * @brief YOLOv8 detector implementation for the Rockchip RK3588 platform.
 *
 * Wraps the RKNN C API: loads a .rknn model, runs NPU inference and decodes
 * the anchor-free DFL outputs of the three detection heads (stride 8/16/32)
 * into class-wise NMS-filtered detections.
 *
 * Owns a rknn_context, therefore the class is non-copyable.
 */
class YOLO8RKNNDetector : public BaseDetector {
public:
    /**
     * @brief Load an RKNN model and initialize the detector.
     *
     * @param modelPath  Path to the .rknn model file.
     * @param labelsPath Path to the class-name file (one class per line).
     * @param numClasses Number of detection classes (default: 80, COCO).
     * @param dflLen     DFL regression distribution length (default: 16).
     *                   May be overridden by the value inferred from the
     *                   model's bbox output channels.
     * @throws std::runtime_error on file or RKNN API errors. If a later
     *         initialization step fails, the already-created RKNN context
     *         is released before the exception propagates (the destructor
     *         does not run when a constructor throws).
     */
    YOLO8RKNNDetector(const std::string &modelPath,
                      const std::string &labelsPath,
                      int numClasses = 80,
                      int dflLen = 16)
        : numClasses_(numClasses), dflLen_(dflLen),
          preprocessType_(PreprocessType::LETTERBOX), rknnCtx_(0) {

        ScopedTimer timer("Model Loading");

        // Load class names.
        classNames_ = loadClassNames(labelsPath);

        // Read the whole model file into memory.
        std::ifstream modelFile(modelPath, std::ios::binary | std::ios::ate);
        if (!modelFile.is_open()) {
            throw std::runtime_error("[ERROR] Cannot open model file: " + modelPath);
        }

        const size_t modelSize = static_cast<size_t>(modelFile.tellg());
        modelFile.seekg(0, std::ios::beg);
        std::vector<char> modelData(modelSize);
        // A short read would hand truncated garbage to rknn_init; fail loudly.
        if (!modelFile.read(modelData.data(), static_cast<std::streamsize>(modelSize))) {
            throw std::runtime_error("[ERROR] Cannot open model file: " + modelPath);
        }
        modelFile.close();

        // Initialize the RKNN context from the in-memory model blob.
        int ret = rknn_init(&rknnCtx_, modelData.data(), modelSize, 0, nullptr);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_init failed with code: " + std::to_string(ret));
        }

        // From here the context is live; release it if any later query step
        // throws, otherwise the NPU context would leak.
        try {
            queryModelInfo();
        } catch (...) {
            rknn_destroy(rknnCtx_);
            rknnCtx_ = 0;
            throw;
        }

        std::cout << "[INFO] DFL length: " << dflLen_ << std::endl;
        std::cout << "[INFO] YOLO8 RKNN Detector initialized successfully" << std::endl;
        std::cout << "[INFO] Model: " << modelPath << std::endl;
        std::cout << "[INFO] Classes: " << numClasses_ << std::endl;
    }

    // Owns a rknn_context: copying would lead to a double rknn_destroy.
    YOLO8RKNNDetector(const YOLO8RKNNDetector &) = delete;
    YOLO8RKNNDetector &operator=(const YOLO8RKNNDetector &) = delete;

    /**
     * @brief Destructor - releases the RKNN context.
     */
    ~YOLO8RKNNDetector() override {
        if (rknnCtx_ != 0) {
            rknn_destroy(rknnCtx_);
        }
    }

    /**
     * @brief Run object detection on an image.
     *
     * @param image         Input BGR image (any size; letterboxed internally).
     * @param confThreshold Confidence threshold (default 0.25).
     * @param nmsThreshold  NMS IoU threshold (default 0.45).
     * @return Detections with boxes mapped back to the original image space.
     */
    std::vector<Detection> detect(const cv::Mat &image,
                                  float confThreshold = 0.25f,
                                  float nmsThreshold = 0.45f) override {
        ScopedTimer timer("Overall Detection");

        // Preprocess (letterbox + BGR->RGB).
        cv::Mat processedImage;
        LetterBoxInfo lbInfo;
        preprocess(image, processedImage, lbInfo);

        // NPU inference.
        std::vector<rknn_output> outputs = inference(processedImage);

        // Postprocess; make sure the output tensors are released even if
        // decoding throws, otherwise the runtime buffers leak.
        std::vector<Detection> detections;
        try {
            detections = postprocess(outputs, confThreshold, nmsThreshold, lbInfo);
        } catch (...) {
            rknn_outputs_release(rknnCtx_, numOutputs_, outputs.data());
            throw;
        }
        rknn_outputs_release(rknnCtx_, numOutputs_, outputs.data());

        return detections;
    }

    /// @return Model input size (width x height).
    cv::Size getInputSize() const override {
        return cv::Size(inputW_, inputH_);
    }

    /// @return Number of detection classes.
    int getNumClasses() const override {
        return numClasses_;
    }

    /// @return Class-name list loaded from the labels file.
    const std::vector<std::string>& getClassNames() const override {
        return classNames_;
    }

    /// Set the preprocessing mode (only LETTERBOX is implemented here).
    void setPreprocessType(PreprocessType type) override {
        preprocessType_ = type;
    }

private:
    // RKNN context and tensor metadata.
    rknn_context rknnCtx_;
    rknn_tensor_attr inputAttr_;
    std::vector<rknn_tensor_attr> outputAttrs_;

    // Model parameters (overwritten from the model attributes at init).
    int inputH_ = 640;
    int inputW_ = 640;
    int inputC_ = 3;
    int numClasses_ = 80;
    int numOutputs_ = 6;
    int outputsPerBranch_ = 2;  // Tensors per detection head (2 or 3).
    int dflLen_ = 16;           // DFL distribution length.
    bool isQuant_ = false;

    // Configuration.
    PreprocessType preprocessType_;
    std::vector<std::string> classNames_;

    /**
     * @brief Letterbox geometry needed to map boxes back to the source image.
     */
    struct LetterBoxInfo {
        float scale;   // Resize scale applied to the source image.
        int xPad;      // Horizontal padding (left side).
        int yPad;      // Vertical padding (top side).
    };

    /**
     * @brief Query input/output tensor attributes and derived parameters.
     *
     * Validates the 1-input / 6-or-9-output layout, reads the input
     * geometry, detects quantization, and infers the output ordering.
     * Called once from the constructor.
     *
     * @throws std::runtime_error on any rknn_query failure or on an
     *         unsupported model layout.
     */
    void queryModelInfo() {
        // Number of input/output tensors.
        rknn_input_output_num ioNum;
        int ret = rknn_query(rknnCtx_, RKNN_QUERY_IN_OUT_NUM, &ioNum, sizeof(ioNum));
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_query RKNN_QUERY_IN_OUT_NUM failed");
        }

        if (ioNum.n_input != 1) {
            throw std::runtime_error("[ERROR] Model should have exactly 1 input");
        }
        if (ioNum.n_output != 6 && ioNum.n_output != 9) {
            throw std::runtime_error("[ERROR] YOLOv8 model should have 6 or 9 outputs, got " +
                                   std::to_string(ioNum.n_output));
        }

        numOutputs_ = ioNum.n_output;
        outputsPerBranch_ = numOutputs_ / 3;  // Tensors per stride branch (2 or 3).

        // Input tensor attributes.
        inputAttr_.index = 0;
        ret = rknn_query(rknnCtx_, RKNN_QUERY_INPUT_ATTR, &inputAttr_, sizeof(inputAttr_));
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_query RKNN_QUERY_INPUT_ATTR failed");
        }

        // Input geometry depends on the tensor layout.
        if (inputAttr_.fmt == RKNN_TENSOR_NCHW) {
            inputH_ = inputAttr_.dims[2];
            inputW_ = inputAttr_.dims[3];
            inputC_ = inputAttr_.dims[1];
        } else if (inputAttr_.fmt == RKNN_TENSOR_NHWC) {
            inputH_ = inputAttr_.dims[1];
            inputW_ = inputAttr_.dims[2];
            inputC_ = inputAttr_.dims[3];
        } else {
            throw std::runtime_error("[ERROR] Unsupported input tensor format");
        }

        std::cout << "[INFO] Input size: " << inputW_ << "x" << inputH_ << "x" << inputC_ << std::endl;
        std::cout << "[INFO] Input format: " << (inputAttr_.fmt == RKNN_TENSOR_NCHW ? "NCHW" : "NHWC") << std::endl;

        // Output tensor attributes.
        outputAttrs_.resize(numOutputs_);
        for (int i = 0; i < numOutputs_; i++) {
            outputAttrs_[i].index = i;
            ret = rknn_query(rknnCtx_, RKNN_QUERY_OUTPUT_ATTR, &outputAttrs_[i], sizeof(rknn_tensor_attr));
            if (ret != RKNN_SUCC) {
                throw std::runtime_error("[ERROR] rknn_query RKNN_QUERY_OUTPUT_ATTR failed");
            }
        }

        // Infer dfl_len and report the output ordering.
        determineOutputOrder();

        // A model counts as quantized when the first output carries affine
        // quantization params and is not fp16.
        isQuant_ = (outputAttrs_[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC &&
                    outputAttrs_[0].type != RKNN_TENSOR_FLOAT16);

        std::cout << "[INFO] Model quantization: " << (isQuant_ ? "Yes" : "No") << std::endl;
        std::cout << "[INFO] Outputs per branch: " << outputsPerBranch_
                  << (outputsPerBranch_ == 3 ? " (with score_sum)" : " (without score_sum)") << std::endl;
    }

    /**
     * @brief Infer dfl_len from the first output and log the tensor order.
     *
     * Expected orderings:
     * - 6 outputs (no score_sum):  [bbox_8, score_8, bbox_16, score_16, bbox_32, score_32]
     * - 9 outputs (with score_sum): [bbox_8, score_8, sum_8, bbox_16, score_16, sum_16, bbox_32, score_32, sum_32]
     */
    void determineOutputOrder() {
        // Infer dfl_len from the first output's channel count: a bbox tensor
        // has dfl_len*4 channels. The "<= 20" cap guards against mistaking a
        // score tensor for a bbox tensor — assumes the first output is the
        // bbox head, as exported by the RKNN model zoo.
        if (outputAttrs_[0].fmt == RKNN_TENSOR_NCHW) {
            int channels = outputAttrs_[0].dims[1];
            if (channels % 4 == 0 && channels / 4 <= 20) {
                dflLen_ = channels / 4;
            }
        }

        std::cout << "[INFO] Detected output order: ";
        for (int i = 0; i < numOutputs_; i++) {
            int h = (outputAttrs_[i].fmt == RKNN_TENSOR_NCHW) ?
                    outputAttrs_[i].dims[2] : outputAttrs_[i].dims[1];
            int w = (outputAttrs_[i].fmt == RKNN_TENSOR_NCHW) ?
                    outputAttrs_[i].dims[3] : outputAttrs_[i].dims[2];
            int c = (outputAttrs_[i].fmt == RKNN_TENSOR_NCHW) ?
                    outputAttrs_[i].dims[1] : outputAttrs_[i].dims[3];

            std::string type = (c == dflLen_ * 4) ? "bbox" :
                              (c == numClasses_) ? "score" : "score_sum";
            std::cout << type << "(" << h << "x" << w << ") ";
        }
        std::cout << std::endl;
    }

    /**
     * @brief Preprocess: letterbox to the model input size and convert BGR->RGB.
     *
     * @param src    Source BGR image.
     * @param dst    Output RGB image of size inputW_ x inputH_ (uint8).
     * @param lbInfo Filled with the scale/padding used, for box un-mapping.
     */
    void preprocess(const cv::Mat &src, cv::Mat &dst, LetterBoxInfo &lbInfo) {
        ScopedTimer timer("Preprocessing");

        int srcW = src.cols;
        int srcH = src.rows;

        // Uniform scale that fits the image inside the model input.
        float scale = std::min(static_cast<float>(inputW_) / srcW,
                              static_cast<float>(inputH_) / srcH);
        lbInfo.scale = scale;

        int newW = static_cast<int>(srcW * scale);
        int newH = static_cast<int>(srcH * scale);

        cv::Mat resized;
        cv::resize(src, resized, cv::Size(newW, newH));

        // Center the resized image on a gray (114) canvas.
        lbInfo.xPad = (inputW_ - newW) / 2;
        lbInfo.yPad = (inputH_ - newH) / 2;

        dst = cv::Mat(inputH_, inputW_, CV_8UC3, cv::Scalar(114, 114, 114));
        resized.copyTo(dst(cv::Rect(lbInfo.xPad, lbInfo.yPad, newW, newH)));

        // The model expects RGB input.
        cv::cvtColor(dst, dst, cv::COLOR_BGR2RGB);
    }

    /**
     * @brief Run one RKNN inference pass.
     *
     * @param image Preprocessed RGB uint8 image (freshly allocated by
     *              preprocess(), hence contiguous).
     * @return Output tensors; caller must pass them to rknn_outputs_release.
     * @throws std::runtime_error on RKNN API failure.
     */
    std::vector<rknn_output> inference(const cv::Mat &image) {
        ScopedTimer timer("NPU Inference");

        // Bind the input buffer (zero-copy; RKNN reads it during rknn_run).
        rknn_input input;
        memset(&input, 0, sizeof(input));
        input.index = 0;
        input.type = RKNN_TENSOR_UINT8;
        input.fmt = RKNN_TENSOR_NHWC;
        input.size = inputW_ * inputH_ * inputC_;
        input.buf = image.data;

        int ret = rknn_inputs_set(rknnCtx_, 1, &input);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_inputs_set failed");
        }

        ret = rknn_run(rknnCtx_, nullptr);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_run failed");
        }

        // Fetch outputs: quantized models keep raw int8/uint8 (decoded with
        // zp/scale in postprocess); float models request fp32 conversion.
        std::vector<rknn_output> outputs(numOutputs_);
        memset(outputs.data(), 0, numOutputs_ * sizeof(rknn_output));
        for (int i = 0; i < numOutputs_; i++) {
            outputs[i].index = i;
            outputs[i].want_float = (!isQuant_);
        }

        ret = rknn_outputs_get(rknnCtx_, numOutputs_, outputs.data(), nullptr);
        if (ret != RKNN_SUCC) {
            throw std::runtime_error("[ERROR] rknn_outputs_get failed");
        }

        return outputs;
    }

    /// Dequantize an affine int8 value to float32.
    inline float deqnt_affine_to_f32(int8_t qnt, int32_t zp, float scale) {
        return (static_cast<float>(qnt) - static_cast<float>(zp)) * scale;
    }

    /// Dequantize an affine uint8 value to float32.
    inline float deqnt_affine_u8_to_f32(uint8_t qnt, int32_t zp, float scale) {
        return (static_cast<float>(qnt) - static_cast<float>(zp)) * scale;
    }

    /// Quantize a float32 value to affine int8 (saturating).
    inline int8_t qnt_f32_to_affine(float f32, int32_t zp, float scale) {
        float dst_val = (f32 / scale) + zp;
        return static_cast<int8_t>(std::max(-128.0f, std::min(127.0f, dst_val)));
    }

    /// Quantize a float32 value to affine uint8 (saturating).
    inline uint8_t qnt_f32_to_affine_u8(float f32, int32_t zp, float scale) {
        float dst_val = (f32 / scale) + zp;
        return static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, dst_val)));
    }

    /**
     * @brief DFL (Distribution Focal Loss) decode.
     *
     * Converts four per-side distance distributions into expected distances
     * via softmax expectation: box[b] = sum_i softmax(dist)[i] * i.
     * Implemented with a max-subtracted single accumulation pass instead of
     * a variable-length buffer (VLAs are not standard C++); subtracting the
     * max is the usual numerically-stable softmax and cancels in the ratio.
     *
     * @param tensor  DFL distribution, dfl_len*4 floats (4 sides contiguous).
     * @param dfl_len Distribution length per side.
     * @param box     Output distances [left, top, right, bottom].
     */
    void computeDFL(const float* tensor, int dfl_len, float* box) {
        for (int b = 0; b < 4; b++) {
            const float* dist = tensor + b * dfl_len;

            float maxVal = dist[0];
            for (int i = 1; i < dfl_len; i++) {
                maxVal = std::max(maxVal, dist[i]);
            }

            float expSum = 0.0f;
            float weighted = 0.0f;
            for (int i = 0; i < dfl_len; i++) {
                float e = std::exp(dist[i] - maxVal);
                expSum += e;
                weighted += e * static_cast<float>(i);
            }
            box[b] = weighted / expSum;
        }
    }

    /**
     * @brief Decode one detection head (int8 quantized outputs).
     *
     * @param bbox_output      DFL bbox tensor, (dfl_len*4, gridH, gridW).
     * @param score_output     Class-score tensor, (numClasses, gridH, gridW).
     * @param score_sum_output Optional score-sum tensor for fast filtering (may be null).
     * @param stride           Head stride (8/16/32).
     * @param confThreshold    Confidence threshold (float domain; quantized internally).
     * @param boxes/scores/classIds Appended with decoded candidates (model-input coords, xywh).
     * @return Number of candidates appended.
     */
    int processHeadInt8(int8_t *bbox_output, int8_t *score_output, int8_t *score_sum_output,
                        const rknn_tensor_attr &bbox_attr, const rknn_tensor_attr &score_attr,
                        const rknn_tensor_attr &score_sum_attr,
                        int stride, float confThreshold,
                        std::vector<cv::Rect2f> &boxes,
                        std::vector<float> &scores,
                        std::vector<int> &classIds) {

        int validCount = 0;

        int gridH = (bbox_attr.fmt == RKNN_TENSOR_NCHW) ? bbox_attr.dims[2] : bbox_attr.dims[1];
        int gridW = (bbox_attr.fmt == RKNN_TENSOR_NCHW) ? bbox_attr.dims[3] : bbox_attr.dims[2];
        int gridLen = gridH * gridW;

        int32_t bbox_zp = bbox_attr.zp;
        float bbox_scale = bbox_attr.scale;
        int32_t score_zp = score_attr.zp;
        float score_scale = score_attr.scale;

        // Compare thresholds in the quantized domain to avoid dequantizing
        // every cell.
        int8_t thresInt8 = qnt_f32_to_affine(confThreshold, score_zp, score_scale);
        int8_t score_sum_thres = 0;
        if (score_sum_output != nullptr) {
            score_sum_thres = qnt_f32_to_affine(confThreshold, score_sum_attr.zp, score_sum_attr.scale);
        }

        // Scratch buffer reused across grid cells (no per-cell allocation,
        // no VLA).
        std::vector<float> before_dfl(static_cast<size_t>(dflLen_) * 4);

        for (int i = 0; i < gridH; i++) {
            for (int j = 0; j < gridW; j++) {
                int offset = i * gridW + j;

                // Fast reject via the summed-score tensor when available.
                if (score_sum_output != nullptr && score_sum_output[offset] < score_sum_thres) {
                    continue;
                }

                // Argmax over class scores (channel-major layout).
                int maxClassId = 0;
                int8_t maxScore = score_output[offset];

                for (int c = 1; c < numClasses_; c++) {
                    int8_t score = score_output[offset + c * gridLen];
                    if (score > maxScore) {
                        maxScore = score;
                        maxClassId = c;
                    }
                }

                if (maxScore < thresInt8) {
                    continue;
                }

                // Dequantize the DFL distribution and decode distances.
                float box[4];
                for (int k = 0; k < dflLen_ * 4; k++) {
                    before_dfl[k] = deqnt_affine_to_f32(bbox_output[offset + k * gridLen],
                                                        bbox_zp, bbox_scale);
                }
                computeDFL(before_dfl.data(), dflLen_, box);

                // Distances from the cell center -> xyxy in model-input pixels.
                float x1 = (-box[0] + j + 0.5f) * stride;
                float y1 = (-box[1] + i + 0.5f) * stride;
                float x2 = (box[2] + j + 0.5f) * stride;
                float y2 = (box[3] + i + 0.5f) * stride;

                // Drop degenerate boxes.
                if (x2 <= x1 || y2 <= y1) {
                    continue;
                }

                float finalScore = deqnt_affine_to_f32(maxScore, score_zp, score_scale);

                boxes.push_back(cv::Rect2f(x1, y1, x2 - x1, y2 - y1));
                scores.push_back(finalScore);
                classIds.push_back(maxClassId);
                validCount++;
            }
        }

        return validCount;
    }

    /**
     * @brief Decode one detection head (uint8 quantized outputs).
     *
     * Same algorithm as processHeadInt8 with uint8 (de)quantization.
     */
    int processHeadUInt8(uint8_t *bbox_output, uint8_t *score_output, uint8_t *score_sum_output,
                         const rknn_tensor_attr &bbox_attr, const rknn_tensor_attr &score_attr,
                         const rknn_tensor_attr &score_sum_attr,
                         int stride, float confThreshold,
                         std::vector<cv::Rect2f> &boxes,
                         std::vector<float> &scores,
                         std::vector<int> &classIds) {

        int validCount = 0;

        int gridH = (bbox_attr.fmt == RKNN_TENSOR_NCHW) ? bbox_attr.dims[2] : bbox_attr.dims[1];
        int gridW = (bbox_attr.fmt == RKNN_TENSOR_NCHW) ? bbox_attr.dims[3] : bbox_attr.dims[2];
        int gridLen = gridH * gridW;

        int32_t bbox_zp = bbox_attr.zp;
        float bbox_scale = bbox_attr.scale;
        int32_t score_zp = score_attr.zp;
        float score_scale = score_attr.scale;

        // Thresholds compared in the quantized domain.
        uint8_t thresU8 = qnt_f32_to_affine_u8(confThreshold, score_zp, score_scale);
        uint8_t score_sum_thres = 0;
        if (score_sum_output != nullptr) {
            score_sum_thres = qnt_f32_to_affine_u8(confThreshold, score_sum_attr.zp, score_sum_attr.scale);
        }

        // Reused scratch buffer (replaces a non-standard VLA).
        std::vector<float> before_dfl(static_cast<size_t>(dflLen_) * 4);

        for (int i = 0; i < gridH; i++) {
            for (int j = 0; j < gridW; j++) {
                int offset = i * gridW + j;

                if (score_sum_output != nullptr && score_sum_output[offset] < score_sum_thres) {
                    continue;
                }

                int maxClassId = 0;
                uint8_t maxScore = score_output[offset];

                for (int c = 1; c < numClasses_; c++) {
                    uint8_t score = score_output[offset + c * gridLen];
                    if (score > maxScore) {
                        maxScore = score;
                        maxClassId = c;
                    }
                }

                if (maxScore < thresU8) {
                    continue;
                }

                float box[4];
                for (int k = 0; k < dflLen_ * 4; k++) {
                    before_dfl[k] = deqnt_affine_u8_to_f32(bbox_output[offset + k * gridLen],
                                                           bbox_zp, bbox_scale);
                }
                computeDFL(before_dfl.data(), dflLen_, box);

                float x1 = (-box[0] + j + 0.5f) * stride;
                float y1 = (-box[1] + i + 0.5f) * stride;
                float x2 = (box[2] + j + 0.5f) * stride;
                float y2 = (box[3] + i + 0.5f) * stride;

                if (x2 <= x1 || y2 <= y1) {
                    continue;
                }

                float finalScore = deqnt_affine_u8_to_f32(maxScore, score_zp, score_scale);

                boxes.push_back(cv::Rect2f(x1, y1, x2 - x1, y2 - y1));
                scores.push_back(finalScore);
                classIds.push_back(maxClassId);
                validCount++;
            }
        }

        return validCount;
    }

    /**
     * @brief Decode one detection head (float32 outputs).
     *
     * Same algorithm as the quantized variants, without (de)quantization.
     */
    int processHeadFloat32(float *bbox_output, float *score_output, float *score_sum_output,
                           const rknn_tensor_attr &bbox_attr,
                           int stride, float confThreshold,
                           std::vector<cv::Rect2f> &boxes,
                           std::vector<float> &scores,
                           std::vector<int> &classIds) {

        int validCount = 0;

        int gridH = (bbox_attr.fmt == RKNN_TENSOR_NCHW) ? bbox_attr.dims[2] : bbox_attr.dims[1];
        int gridW = (bbox_attr.fmt == RKNN_TENSOR_NCHW) ? bbox_attr.dims[3] : bbox_attr.dims[2];
        int gridLen = gridH * gridW;

        // Reused scratch buffer (replaces a non-standard VLA).
        std::vector<float> before_dfl(static_cast<size_t>(dflLen_) * 4);

        for (int i = 0; i < gridH; i++) {
            for (int j = 0; j < gridW; j++) {
                int offset = i * gridW + j;

                if (score_sum_output != nullptr && score_sum_output[offset] < confThreshold) {
                    continue;
                }

                int maxClassId = 0;
                float maxScore = score_output[offset];

                for (int c = 1; c < numClasses_; c++) {
                    float score = score_output[offset + c * gridLen];
                    if (score > maxScore) {
                        maxScore = score;
                        maxClassId = c;
                    }
                }

                if (maxScore < confThreshold) {
                    continue;
                }

                float box[4];
                for (int k = 0; k < dflLen_ * 4; k++) {
                    before_dfl[k] = bbox_output[offset + k * gridLen];
                }
                computeDFL(before_dfl.data(), dflLen_, box);

                float x1 = (-box[0] + j + 0.5f) * stride;
                float y1 = (-box[1] + i + 0.5f) * stride;
                float x2 = (box[2] + j + 0.5f) * stride;
                float y2 = (box[3] + i + 0.5f) * stride;

                if (x2 <= x1 || y2 <= y1) {
                    continue;
                }

                boxes.push_back(cv::Rect2f(x1, y1, x2 - x1, y2 - y1));
                scores.push_back(maxScore);
                classIds.push_back(maxClassId);
                validCount++;
            }
        }

        return validCount;
    }

    /**
     * @brief Decode all heads, run per-class NMS and map boxes back to the
     *        original image via the letterbox geometry.
     */
    std::vector<Detection> postprocess(const std::vector<rknn_output> &outputs,
                                       float confThreshold, float nmsThreshold,
                                       const LetterBoxInfo &lbInfo) {
        ScopedTimer timer("Postprocessing");

        std::vector<cv::Rect2f> allBoxes;
        std::vector<float> allScores;
        std::vector<int> allClassIds;

        // Three detection heads: stride 8, 16, 32.
        int strides[3] = {8, 16, 32};

        for (int i = 0; i < 3; i++) {
            int bbox_idx = i * outputsPerBranch_;
            int score_idx = i * outputsPerBranch_ + 1;
            int score_sum_idx = (outputsPerBranch_ == 3) ? (i * outputsPerBranch_ + 2) : -1;

            void* bbox_buf = outputs[bbox_idx].buf;
            void* score_buf = outputs[score_idx].buf;
            void* score_sum_buf = (score_sum_idx >= 0) ? outputs[score_sum_idx].buf : nullptr;

            // Value-initialized so a reference to it is well-defined even
            // when there is no score_sum tensor (it is then never read).
            rknn_tensor_attr score_sum_attr{};
            if (score_sum_idx >= 0) {
                score_sum_attr = outputAttrs_[score_sum_idx];
            }

            if (isQuant_) {
                if (outputAttrs_[bbox_idx].type == RKNN_TENSOR_UINT8) {
                    processHeadUInt8(static_cast<uint8_t*>(bbox_buf),
                                    static_cast<uint8_t*>(score_buf),
                                    static_cast<uint8_t*>(score_sum_buf),
                                    outputAttrs_[bbox_idx],
                                    outputAttrs_[score_idx],
                                    score_sum_attr,
                                    strides[i], confThreshold,
                                    allBoxes, allScores, allClassIds);
                } else {
                    processHeadInt8(static_cast<int8_t*>(bbox_buf),
                                   static_cast<int8_t*>(score_buf),
                                   static_cast<int8_t*>(score_sum_buf),
                                   outputAttrs_[bbox_idx],
                                   outputAttrs_[score_idx],
                                   score_sum_attr,
                                   strides[i], confThreshold,
                                   allBoxes, allScores, allClassIds);
                }
            } else {
                processHeadFloat32(static_cast<float*>(bbox_buf),
                                  static_cast<float*>(score_buf),
                                  static_cast<float*>(score_sum_buf),
                                  outputAttrs_[bbox_idx],
                                  strides[i], confThreshold,
                                  allBoxes, allScores, allClassIds);
            }
        }

        if (allBoxes.empty()) {
            return {};
        }

        // Per-class NMS.
        std::vector<Detection> finalDetections;
        std::set<int> uniqueClasses(allClassIds.begin(), allClassIds.end());

        for (int cls : uniqueClasses) {
            std::vector<cv::Rect2f> classBoxes;
            std::vector<float> classScores;

            for (size_t i = 0; i < allClassIds.size(); i++) {
                if (allClassIds[i] == cls) {
                    classBoxes.push_back(allBoxes[i]);
                    classScores.push_back(allScores[i]);
                }
            }

            std::vector<int> nmsIndices;
            cv::dnn::NMSBoxes(classBoxes, classScores, confThreshold, nmsThreshold, nmsIndices);

            // Undo letterboxing: remove padding, then divide by the scale.
            for (int idx : nmsIndices) {
                Detection det;
                det.box.x = (classBoxes[idx].x - lbInfo.xPad) / lbInfo.scale;
                det.box.y = (classBoxes[idx].y - lbInfo.yPad) / lbInfo.scale;
                det.box.width = classBoxes[idx].width / lbInfo.scale;
                det.box.height = classBoxes[idx].height / lbInfo.scale;
                det.conf = classScores[idx];
                det.classId = cls;
                finalDetections.push_back(det);
            }
        }

        return finalDetections;
    }
};

} // namespace yolos_edgeplatform

#endif // YOLOS_EDGEPLATFORM_YOLO8_RKNN_HPP
