// YOLO11_POSE_RDK.hpp
#ifndef YOLOS_EDGEPLATFORM_YOLO11_POSE_RDK_HPP
#define YOLOS_EDGEPLATFORM_YOLO11_POSE_RDK_HPP

/**
 * @file YOLO11_POSE_RDK.hpp
 * @brief YOLO11 姿态检测器（D-Robotics RDK 平台，BPU 硬件加速）
 *
 * 本头文件提供了针对 D-Robotics RDK X5 BPU（Brain Processing Unit）优化的
 * YOLO11 姿态估计的 header-only 实现。
 *
 * 核心特性：
 * - 单头文件设计，易于集成
 * - BPU 硬件加速（通过 libDNN API）
 * - 支持 NV12 输入格式 (YUV420SP)
 * - 17个 COCO 关键点检测
 * - DFL (Distribution Focal Loss) 后处理用于 bbox 回归
 * - 可配置的预处理方式（Resize/LetterBox）
 * - RAII 资源管理（自动释放 BPU 资源）
 *
 * 模型要求：
 * - 输入: NV12 (YUV420SP), (1, 3, H, W)
 * - 输出: 9 个张量，对应 3 个检测头（小/中/大目标）
 *   每个检测头包含:
 *   - bbox_reg 张量: (1, H/stride, W/stride, 64), int32, SCALE 量化
 *   - cls 张量: (1, H/stride, W/stride, 1), float32, NONE 量化（person 类）
 *   - kpts 张量: (1, H/stride, W/stride, 51), float32, NONE 量化（17个关键点 × 3维）
 *
 * 输出顺序（需自动确定）：
 *   stride=8:  bbox_8, cls_8, kpts_8   (小目标)
 *   stride=16: bbox_16, cls_16, kpts_16 (中目标)
 *   stride=32: bbox_32, cls_32, kpts_32 (大目标)
 *
 * 关键点格式 (COCO 17点):
 *   0: 鼻子, 1-2: 眼睛, 3-4: 耳朵, 5-6: 肩膀
 *   7-8: 肘部, 9-10: 手腕, 11-12: 髋部, 13-14: 膝盖, 15-16: 脚踝
 *   每个关键点: (x, y, visibility/confidence)
 *
 * 使用示例：
 * @code
 * YOLO11PoseDetector detector("yolo11n_pose.bin", "person.names");
 * cv::Mat image = cv::imread("test.jpg");
 * auto results = detector.detect(image, 0.25f, 0.45f, 0.5f);
 * @endcode
 *
 * 作者: FANKYT
 * 日期: 2025
 */

#include <iostream>
#include <vector>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <cstring>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>

// RDK BPU libDNN API
#include "dnn/hb_dnn.h"
#include "dnn/hb_dnn_ext.h"
#include "dnn/hb_sys.h"

#include "pose/BasePoseDetector.hpp"
#include "tools/ScopedTimer.hpp"
#include "tools/Common.hpp"

namespace yolos_edgeplatform {

/**
 * @brief YOLO11 Pose 检测器实现（RDK X5 平台）
 */
class YOLO11PoseDetector : public BasePoseDetector {
public:
    /**
     * @brief Constructor - loads the BPU model and initializes the detector.
     *
     * @param modelPath    Path to the .bin model file.
     * @param labelsPath   Path to the class-name file (one name per line, usually only "person").
     * @param numClasses   Number of detection classes (default 1, person only).
     * @param numKeypoints Number of keypoints (default 17, COCO layout).
     * @param kptEncode    Values per keypoint (default 3: x, y, visibility).
     * @param reg          Number of DFL regression bins (default 16).
     *
     * @throws std::runtime_error if the model cannot be loaded or does not match
     *         the expected YOLO11-Pose input/output layout. The BPU handle is
     *         released before the exception propagates (no leak).
     */
    YOLO11PoseDetector(const std::string &modelPath,
                       const std::string &labelsPath,
                       int numClasses = 1,
                       int numKeypoints = 17,
                       int kptEncode = 3,
                       int reg = 16)
        : numClasses_(numClasses),
          numKeypoints_(numKeypoints),
          kptEncode_(kptEncode),
          reg_(reg),
          preprocessType_(PreprocessType::LETTERBOX) {

        ScopedTimer timer("Model Loading");

        // Load class names.
        classNames_ = loadClassNames(labelsPath);
        std::cout << "[INFO] Loaded " << classNames_.size() << " class names" << std::endl;

        // Initialize the BPU model.
        const char *modelFile = modelPath.c_str();
        int ret = hbDNNInitializeFromFiles(&packedDNNHandle_, &modelFile, 1);
        if (ret != 0) {
            throw std::runtime_error("[ERROR] hbDNNInitializeFromFiles failed with code: " +
                                     std::to_string(ret));
        }

        // From here on the packed handle is owned. If any later step throws,
        // the destructor of this half-constructed object will NOT run, so the
        // handle must be released explicitly before re-throwing.
        try {
            // Query the model list contained in the .bin file.
            const char **modelNameList = nullptr;
            int modelCount = 0;
            ret = hbDNNGetModelNameList(&modelNameList, &modelCount, packedDNNHandle_);
            if (ret != 0 || modelCount < 1) {
                throw std::runtime_error("[ERROR] hbDNNGetModelNameList failed");
            }
            if (modelCount > 1) {
                std::cout << "[WARN] Multiple models in bin file, using first one" << std::endl;
            }

            const char *modelName = modelNameList[0];
            std::cout << "[INFO] Model name: " << modelName << std::endl;

            ret = hbDNNGetModelHandle(&dnnHandle_, packedDNNHandle_, modelName);
            if (ret != 0) {
                throw std::runtime_error("[ERROR] hbDNNGetModelHandle failed");
            }

            // The model must have exactly one input tensor.
            int32_t inputCount = 0;
            hbDNNGetInputCount(&inputCount, dnnHandle_);
            if (inputCount != 1) {
                throw std::runtime_error("[ERROR] Model should have exactly 1 input");
            }

            hbDNNGetInputTensorProperties(&inputProperties_, dnnHandle_, 0);

            // Auto-detect the input image type and adapt the pipeline to it.
            if (inputProperties_.tensorType == HB_DNN_IMG_TYPE_RGB) {
                inputImageType_ = InputImageType::RGB;
                std::cout << "[INFO] Input type: RGB (NCHW, int8 quantized)" << std::endl;
            } else if (inputProperties_.tensorType == HB_DNN_IMG_TYPE_NV12) {
                inputImageType_ = InputImageType::NV12;
                std::cout << "[INFO] Input type: NV12 (YUV420SP)" << std::endl;
            } else {
                throw std::runtime_error("[ERROR] Unsupported input tensor type");
            }

            // The input must be a 4D tensor in NCHW layout.
            if (inputProperties_.validShape.numDimensions != 4) {
                throw std::runtime_error("[ERROR] Input tensor should be 4D (NCHW layout)");
            }
            if (inputProperties_.tensorLayout != HB_DNN_LAYOUT_NCHW) {
                throw std::runtime_error("[ERROR] Input tensor layout should be NCHW");
            }

            // Spatial input size (NCHW -> dims[2] = H, dims[3] = W).
            inputH_ = inputProperties_.validShape.dimensionSize[2];
            inputW_ = inputProperties_.validShape.dimensionSize[3];
            std::cout << "[INFO] Input size: " << inputW_ << "x" << inputH_ << std::endl;

            // Outputs: 3 heads x (bbox_reg, cls, kpts) = 9 tensors.
            int32_t outputCount = 0;
            hbDNNGetOutputCount(&outputCount, dnnHandle_);
            if (outputCount != 9) {
                throw std::runtime_error("[ERROR] YOLO11 Pose model should have 9 outputs, got " +
                                       std::to_string(outputCount));
            }

            // Work out which raw output index corresponds to which head/branch.
            determineOutputOrder();
        } catch (...) {
            hbDNNRelease(packedDNNHandle_);
            packedDNNHandle_ = nullptr;
            throw;
        }

        std::cout << "[INFO] YOLO11 Pose Detector initialized successfully" << std::endl;
        std::cout << "[INFO] Model: " << modelPath << std::endl;
        std::cout << "[INFO] Classes: " << numClasses_ << ", Keypoints: " << numKeypoints_ << std::endl;
    }

    /**
     * @brief Destructor - releases the BPU model resources.
     */
    ~YOLO11PoseDetector() override {
        if (packedDNNHandle_) {
            hbDNNRelease(packedDNNHandle_);
            std::cout << "[INFO] BPU model released" << std::endl;
        }
    }

    // Non-copyable / non-movable: the class owns raw BPU handles, and a copy
    // would lead to a double hbDNNRelease() in the destructor (Rule of Five).
    YOLO11PoseDetector(const YOLO11PoseDetector &) = delete;
    YOLO11PoseDetector &operator=(const YOLO11PoseDetector &) = delete;
    YOLO11PoseDetector(YOLO11PoseDetector &&) = delete;
    YOLO11PoseDetector &operator=(YOLO11PoseDetector &&) = delete;

    /**
     * @brief Run pose detection on an input image.
     *
     * @param image         Input image (BGR format).
     * @param confThreshold Confidence threshold (default 0.25).
     * @param nmsThreshold  NMS IoU threshold (default 0.45).
     * @param kptThreshold  Keypoint confidence threshold (default 0.5).
     * @return std::vector<PoseDetection> Pose detection results in original
     *         image coordinates.
     */
    std::vector<PoseDetection> detect(const cv::Mat &image,
                                      float confThreshold = 0.25f,
                                      float nmsThreshold = 0.45f,
                                      float kptThreshold = 0.5f) override {
        ScopedTimer timer("Overall Detection");

        // Preprocess: BGR -> model input format, with Resize/LetterBox.
        cv::Mat processedImage;
        int xShift = 0, yShift = 0;
        float scaleX = 1.0f, scaleY = 1.0f;
        preprocess(image, processedImage, xShift, yShift, scaleX, scaleY);

        // Inference: BPU forward pass.
        std::vector<hbDNNTensor> outputTensors = inference(processedImage);

        // Postprocess: parse outputs, DFL decode, keypoint decode, NMS.
        // The BPU output buffers must be freed even if postprocessing throws.
        std::vector<PoseDetection> detections;
        try {
            detections = postprocess(
                outputTensors, confThreshold, nmsThreshold, kptThreshold,
                xShift, yShift, scaleX, scaleY);
        } catch (...) {
            for (auto &tensor : outputTensors) {
                hbSysFreeMem(&tensor.sysMem[0]);
            }
            throw;
        }

        // Release the output tensor memory.
        for (auto &tensor : outputTensors) {
            hbSysFreeMem(&tensor.sysMem[0]);
        }

        return detections;
    }

    /**
     * @brief Get the input size expected by the model.
     */
    cv::Size getInputSize() const override {
        return cv::Size(inputW_, inputH_);
    }

    /**
     * @brief Get the number of detection classes.
     */
    int getNumClasses() const override {
        return numClasses_;
    }

    /**
     * @brief Get the list of class names.
     */
    const std::vector<std::string>& getClassNames() const override {
        return classNames_;
    }

    /**
     * @brief Set the preprocessing method.
     *
     * @param type Preprocessing type (RESIZE or LETTERBOX).
     */
    void setPreprocessType(PreprocessType type) override {
        preprocessType_ = type;
    }

private:
    // BPU handles
    hbPackedDNNHandle_t packedDNNHandle_ = nullptr; ///< Packed DNN handle (owned)
    hbDNNHandle_t dnnHandle_ = nullptr;             ///< DNN model handle
    hbDNNTensorProperties inputProperties_{};       ///< Input tensor properties (zero-initialized)

    // Model parameters
    int inputH_ = 640;          ///< Input height
    int inputW_ = 640;          ///< Input width
    int numClasses_ = 1;        ///< Number of classes (usually only person)
    int numKeypoints_ = 17;     ///< Number of keypoints (COCO standard)
    int kptEncode_ = 3;         ///< Values per keypoint (x, y, vis)
    int reg_ = 16;              ///< DFL regression bins

    // Output order mapping (arranged by stride 8/16/32).
    // Each stride owns 3 outputs: bbox_reg, cls, kpts.
    int outputOrder_[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};

    // Configuration
    PreprocessType preprocessType_;       ///< Preprocessing method
    InputImageType inputImageType_;       ///< Model input format (set in ctor)
    std::vector<std::string> classNames_; ///< Class name list

    /**
     * @brief Automatically determine the output tensor order mapping.
     *
     * The 9 YOLO11-Pose outputs must be processed in a fixed order:
     * - 3 scales (stride 8, 16, 32)
     * - 3 outputs per scale: bbox_reg (64 ch), cls (numClasses_ ch),
     *   kpts (numKeypoints_ * kptEncode_ ch)
     *
     * Each expected (H, W, C) shape is matched against the raw outputs; a
     * raw output may be claimed at most once, and the mapping is accepted
     * only if it is a complete one-to-one assignment. On failure the
     * identity order is used as a fallback.
     *
     * Resulting mapping:
     *   outputOrder_[0,1,2] = bbox_8,  cls_8,  kpts_8   (stride=8)
     *   outputOrder_[3,4,5] = bbox_16, cls_16, kpts_16  (stride=16)
     *   outputOrder_[6,7,8] = bbox_32, cls_32, kpts_32  (stride=32)
     */
    void determineOutputOrder() {
        // Feature-map sizes for each stride.
        int32_t H_8 = inputH_ / 8;
        int32_t H_16 = inputH_ / 16;
        int32_t H_32 = inputH_ / 32;
        int32_t W_8 = inputW_ / 8;
        int32_t W_16 = inputW_ / 16;
        int32_t W_32 = inputW_ / 32;

        // Expected output shapes, as [H, W, C].
        // bbox_reg has 64 channels, cls has numClasses_ channels,
        // kpts has (numKeypoints_ * kptEncode_) channels.
        int32_t expectedShapes[9][3] = {
            {H_8, W_8, 64},                           // bbox_8  (SCALE quantized)
            {H_8, W_8, numClasses_},                  // cls_8   (NONE quantized)
            {H_8, W_8, numKeypoints_ * kptEncode_},   // kpts_8  (NONE quantized)
            {H_16, W_16, 64},                         // bbox_16
            {H_16, W_16, numClasses_},                // cls_16
            {H_16, W_16, numKeypoints_ * kptEncode_}, // kpts_16
            {H_32, W_32, 64},                         // bbox_32
            {H_32, W_32, numClasses_},                // cls_32
            {H_32, W_32, numKeypoints_ * kptEncode_}, // kpts_32
        };

        // Query the raw output properties once (not inside the matching loop).
        hbDNNTensorProperties rawProps[9];
        for (int j = 0; j < 9; j++) {
            hbDNNGetOutputTensorProperties(&rawProps[j], dnnHandle_, j);
        }

        // One-to-one matching: each raw output may be claimed at most once.
        bool used[9] = {false, false, false, false, false, false, false, false, false};
        bool complete = true;
        for (int i = 0; i < 9 && complete; i++) {
            int match = -1;
            for (int j = 0; j < 9; j++) {
                if (used[j]) {
                    continue;
                }
                if (rawProps[j].validShape.dimensionSize[1] == expectedShapes[i][0] &&
                    rawProps[j].validShape.dimensionSize[2] == expectedShapes[i][1] &&
                    rawProps[j].validShape.dimensionSize[3] == expectedShapes[i][2]) {
                    match = j;
                    break;
                }
            }
            if (match < 0) {
                complete = false;
            } else {
                used[match] = true;
                outputOrder_[i] = match;
            }
        }

        if (!complete) {
            // Fall back to the identity order if the mapping is not a
            // complete permutation of 0..8.
            std::cout << "[WARN] Output order auto-detection may have failed, using default order" << std::endl;
            for (int i = 0; i < 9; i++) {
                outputOrder_[i] = i;
            }
        } else {
            std::cout << "[INFO] Output order detected: {";
            for (int i = 0; i < 9; i++) {
                std::cout << outputOrder_[i] << (i < 8 ? ", " : "");
            }
            std::cout << "}" << std::endl;
        }

        // Print the quantization type of each output (debug aid).
        std::cout << "[INFO] Output quantization types:" << std::endl;
        for (int i = 0; i < 9; i++) {
            const hbDNNTensorProperties &outputProps = rawProps[outputOrder_[i]];

            std::string quantType = "UNKNOWN";
            if (outputProps.quantiType == SHIFT) quantType = "SHIFT";
            else if (outputProps.quantiType == SCALE) quantType = "SCALE";
            else if (outputProps.quantiType == NONE) quantType = "NONE";

            std::cout << "  output[" << i << "] (raw_idx=" << outputOrder_[i] << "): "
                      << "(" << outputProps.validShape.dimensionSize[1] << ", "
                      << outputProps.validShape.dimensionSize[2] << ", "
                      << outputProps.validShape.dimensionSize[3] << "), "
                      << quantType << std::endl;
        }
    }

    /**
     * @brief Preprocess: convert a BGR image to the model input format and
     *        apply Resize/LetterBox.
     *
     * The conversion depends on the model input type:
     * - RGB:  letterBox/resize -> BGR kept as-is (converted in inference())
     * - NV12: letterBox/resize -> BGR2YUV_I420 -> I420-to-NV12 repack
     *
     * @param src    Input BGR image.
     * @param dst    Output image (RGB: BGR HWC; NV12: height=H*3/2, width=W, CV_8UC1).
     * @param xShift LetterBox horizontal offset (0 for RESIZE).
     * @param yShift LetterBox vertical offset (0 for RESIZE).
     * @param scaleX Horizontal scale factor applied to src.
     * @param scaleY Vertical scale factor applied to src.
     *
     * LETTERBOX keeps the aspect ratio, so scaleX == scaleY. RESIZE stretches
     * each axis independently, so the two scales differ whenever the source
     * aspect ratio differs from the model input's; postprocess() uses both to
     * map coordinates back correctly.
     */
    void preprocess(const cv::Mat &src, cv::Mat &dst,
                    int &xShift, int &yShift, float &scaleX, float &scaleY) {
        ScopedTimer timer("Preprocessing");

        // Step 1: Resize or LetterBox.
        cv::Mat resizedBGR;
        if (preprocessType_ == PreprocessType::LETTERBOX) {
            // LetterBox: keep the aspect ratio, pad the remainder with gray.
            float scale = std::min(static_cast<float>(inputH_) / src.rows,
                           static_cast<float>(inputW_) / src.cols);
            scaleX = scale;
            scaleY = scale;

            int newW = static_cast<int>(src.cols * scale);
            int newH = static_cast<int>(src.rows * scale);

            xShift = (inputW_ - newW) / 2;
            yShift = (inputH_ - newH) / 2;
            int xOther = inputW_ - newW - xShift;
            int yOther = inputH_ - newH - yShift;

            cv::resize(src, resizedBGR, cv::Size(newW, newH));
            cv::copyMakeBorder(resizedBGR, resizedBGR, yShift, yOther, xShift, xOther,
                             cv::BORDER_CONSTANT, cv::Scalar(127, 127, 127));
        } else {
            // Resize: stretch directly to the model input size. Track the
            // per-axis scales so coordinates can be mapped back exactly.
            cv::resize(src, resizedBGR, cv::Size(inputW_, inputH_));
            scaleX = static_cast<float>(inputW_) / src.cols;
            scaleY = static_cast<float>(inputH_) / src.rows;
            xShift = 0;
            yShift = 0;
        }

        if (inputImageType_ == InputImageType::NV12) {
            // NV12 mode: BGR -> YUV I420 -> NV12.
            cv::Mat yuvMat;
            cv::cvtColor(resizedBGR, yuvMat, cv::COLOR_BGR2YUV_I420);
            uint8_t *yuv = yuvMat.ptr<uint8_t>();

            // I420 -> NV12 (YUV420SP)
            // NV12 layout: Y plane (H x W) + interleaved UV plane (H/2 x W).
            dst = cv::Mat(inputH_ * 3 / 2, inputW_, CV_8UC1);
            uint8_t *nv12Data = dst.ptr<uint8_t>();

            int ySize = inputH_ * inputW_;
            int uvHeight = inputH_ / 2;
            int uvWidth = inputW_ / 2;

            // Copy the Y plane.
            // NOTE(review): assumes yuvMat is densely packed (no row padding),
            // which holds for cvtColor-produced continuous Mats.
            std::memcpy(nv12Data, yuv, ySize);

            // Interleave the UV planes (I420: YYYY..UU..VV -> NV12: YYYY..UVUV..).
            uint8_t *nv12_uv = nv12Data + ySize;
            uint8_t *i420_u = yuv + ySize;
            uint8_t *i420_v = i420_u + uvHeight * uvWidth;

            for (int i = 0; i < uvHeight * uvWidth; i++) {
                nv12_uv[2 * i] = i420_u[i];       // U
                nv12_uv[2 * i + 1] = i420_v[i];   // V
            }
        } else {
            // RGB mode: keep BGR here; channel swap + quantization happen in
            // inference().
            dst = resizedBGR;
        }
    }

    /**
     * @brief Inference: run the model on the BPU.
     *
     * Input data handling depends on the model input format:
     * - RGB:  BGR->RGB + HWC->NCHW + quantization (u8 - 128 -> int8)
     * - NV12: copy the NV12 buffer directly
     *
     * @param image Preprocessed image (RGB: BGR HWC; NV12: height=H*3/2, width=W).
     * @return std::vector<hbDNNTensor> Output tensors; the caller owns and
     *         must free each tensor's sysMem.
     * @throws std::runtime_error if the BPU inference task fails.
     */
    std::vector<hbDNNTensor> inference(const cv::Mat &image) {
        ScopedTimer timer("BPU Inference");

        // Allocate the input tensor memory.
        hbDNNTensor inputTensor;
        inputTensor.properties = inputProperties_;

        if (inputImageType_ == InputImageType::RGB) {
            // RGB mode: allocate 3*H*W bytes.
            // NOTE(review): assumes the input tensor is densely packed
            // (validShape == alignedShape) — confirm against the model.
            hbSysAllocCachedMem(&inputTensor.sysMem[0], 3 * inputH_ * inputW_);

            // Convert BGR to RGB and apply quantization (u8 - 128 -> int8).
            uint8_t *srcData = image.ptr<uint8_t>();
            int8_t *dstData = reinterpret_cast<int8_t*>(inputTensor.sysMem[0].virAddr);

            for (int h = 0; h < inputH_; h++) {
                for (int w = 0; w < inputW_; w++) {
                    int srcIdx = h * inputW_ * 3 + w * 3;
                    int rIdx = (0 * inputH_ * inputW_) + h * inputW_ + w;
                    int gIdx = (1 * inputH_ * inputW_) + h * inputW_ + w;
                    int bIdx = (2 * inputH_ * inputW_) + h * inputW_ + w;

                    dstData[rIdx] = static_cast<int8_t>(srcData[srcIdx + 2] - 128); // R
                    dstData[gIdx] = static_cast<int8_t>(srcData[srcIdx + 1] - 128); // G
                    dstData[bIdx] = static_cast<int8_t>(srcData[srcIdx + 0] - 128); // B
                }
            }
        } else {
            // NV12 mode: allocate 1.5*H*W bytes and copy the buffer.
            int inputSize = inputH_ * inputW_ * 3 / 2; // NV12 size
            hbSysAllocCachedMem(&inputTensor.sysMem[0], inputSize);
            std::memcpy(inputTensor.sysMem[0].virAddr, image.ptr<uint8_t>(), inputSize);
        }

        hbSysFlushMem(&inputTensor.sysMem[0], HB_SYS_MEM_CACHE_CLEAN);

        // Allocate the output tensor memory (aligned sizes, raw output order).
        std::vector<hbDNNTensor> outputTensors(9);
        for (int i = 0; i < 9; i++) {
            hbDNNGetOutputTensorProperties(&outputTensors[i].properties, dnnHandle_, i);
            int outAlignedSize = outputTensors[i].properties.alignedByteSize;
            hbSysAllocCachedMem(&outputTensors[i].sysMem[0], outAlignedSize);
        }

        // Run inference and wait for completion.
        hbDNNTaskHandle_t taskHandle = nullptr;
        hbDNNInferCtrlParam inferCtrlParam;
        HB_DNN_INITIALIZE_INFER_CTRL_PARAM(&inferCtrlParam);

        int ret = hbDNNInfer(&taskHandle, outputTensors.data(), &inputTensor, dnnHandle_, &inferCtrlParam);
        if (ret == 0) {
            // Timeout 0 means "wait until the task finishes".
            ret = hbDNNWaitTaskDone(taskHandle, 0);
            hbDNNReleaseTask(taskHandle);
        }

        // Release the input tensor (no longer needed regardless of outcome).
        hbSysFreeMem(&inputTensor.sysMem[0]);

        if (ret != 0) {
            // Free the output buffers before reporting the failure so the
            // caller does not leak or read garbage data.
            for (auto &tensor : outputTensors) {
                hbSysFreeMem(&tensor.sysMem[0]);
            }
            throw std::runtime_error("[ERROR] BPU inference failed with code: " +
                                     std::to_string(ret));
        }

        return outputTensors;
    }

    /**
     * @brief Postprocess: parse the BPU outputs — DFL decode, keypoint decode
     *        and NMS.
     *
     * @param outputs       BPU output tensors (9 of them, raw order).
     * @param confThreshold Confidence threshold.
     * @param nmsThreshold  NMS IoU threshold.
     * @param kptThreshold  Keypoint confidence threshold.
     * @param xShift        LetterBox horizontal offset.
     * @param yShift        LetterBox vertical offset.
     * @param scaleX        Horizontal scale factor used in preprocessing.
     * @param scaleY        Vertical scale factor used in preprocessing.
     * @return std::vector<PoseDetection> Pose detections in original image
     *         coordinates. Keypoint confidences are stored as raw (pre-sigmoid)
     *         scores, filtered against the raw equivalent of kptThreshold.
     */
    std::vector<PoseDetection> postprocess(
        const std::vector<hbDNNTensor> &outputs,
        float confThreshold, float nmsThreshold, float kptThreshold,
        int xShift, int yShift, float scaleX, float scaleY) {

        ScopedTimer timer("Postprocessing");

        // Pre-sigmoid (raw) thresholds: sigmoid is monotonic, so comparing
        // raw logits against the inverse-sigmoid of the threshold lets us
        // skip most cells without computing exp(). Clamp to (0, 1) so that
        // out-of-range thresholds do not produce NaN/inf.
        auto inverseSigmoid = [](float p) {
            p = std::min(std::max(p, 1e-6f), 1.0f - 1e-6f);
            return -std::log(1.0f / p - 1.0f);
        };
        float confThreshRaw = inverseSigmoid(confThreshold);
        float kptThreshRaw = inverseSigmoid(kptThreshold);

        // Candidate detections before NMS.
        std::vector<cv::Rect2d> bboxes;
        std::vector<float> scores;
        std::vector<std::vector<cv::Point2f>> allKptsXY;
        std::vector<std::vector<float>> allKptsScores;

        // Process the 3 detection heads (stride: 8, 16, 32).
        int strides[3] = {8, 16, 32};
        int heights[3] = {inputH_ / 8, inputH_ / 16, inputH_ / 32};
        int widths[3] = {inputW_ / 8, inputW_ / 16, inputW_ / 32};

        for (int headIdx = 0; headIdx < 3; headIdx++) {
            // Raw output indices for this head.
            int bboxIdx = outputOrder_[headIdx * 3 + 0];  // bbox_reg
            int clsIdx = outputOrder_[headIdx * 3 + 1];   // cls
            int kptsIdx = outputOrder_[headIdx * 3 + 2];  // kpts

            // Verify the expected quantization types; skip a head that does
            // not match rather than dereferencing data with the wrong layout.
            if (outputs[bboxIdx].properties.quantiType != SCALE) {
                std::cerr << "[ERROR] bbox output[" << headIdx << "] should be SCALE quantized" << std::endl;
                continue;
            }
            if (outputs[clsIdx].properties.quantiType != NONE) {
                std::cerr << "[ERROR] cls output[" << headIdx << "] should be NONE quantized" << std::endl;
                continue;
            }
            if (outputs[kptsIdx].properties.quantiType != NONE) {
                std::cerr << "[ERROR] kpts output[" << headIdx << "] should be NONE quantized" << std::endl;
                continue;
            }

            // Invalidate the CPU cache before reading BPU-written memory.
            hbSysFlushMem(&outputs[bboxIdx].sysMem[0], HB_SYS_MEM_CACHE_INVALIDATE);
            hbSysFlushMem(&outputs[clsIdx].sysMem[0], HB_SYS_MEM_CACHE_INVALIDATE);
            hbSysFlushMem(&outputs[kptsIdx].sysMem[0], HB_SYS_MEM_CACHE_INVALIDATE);

            // Raw data pointers.
            // NOTE(review): assumes the outputs are densely packed NHWC
            // (validShape == alignedShape) — confirm against the model.
            auto *bboxRaw = reinterpret_cast<int32_t*>(outputs[bboxIdx].sysMem[0].virAddr);
            auto *clsRaw = reinterpret_cast<float*>(outputs[clsIdx].sysMem[0].virAddr);
            auto *kptsRaw = reinterpret_cast<float*>(outputs[kptsIdx].sysMem[0].virAddr);
            auto *bboxScale = reinterpret_cast<float*>(outputs[bboxIdx].properties.scale.scaleData);

            int H = heights[headIdx];
            int W = widths[headIdx];
            int stride = strides[headIdx];

            // Walk the feature map.
            for (int h = 0; h < H; h++) {
                for (int w = 0; w < W; w++) {
                    // Current cell pointers for cls, bbox, kpts.
                    float *curCls = clsRaw;
                    int32_t *curBbox = bboxRaw;
                    float *curKpts = kptsRaw;

                    // Advance to the next cell.
                    clsRaw += numClasses_;
                    bboxRaw += reg_ * 4;
                    kptsRaw += numKeypoints_ * kptEncode_;

                    // Pick the highest-scoring class (usually only person).
                    int bestClsId = 0;
                    for (int c = 1; c < numClasses_; c++) {
                        if (curCls[c] > curCls[bestClsId]) {
                            bestClsId = c;
                        }
                    }

                    // Early rejection on the raw logit.
                    if (curCls[bestClsId] < confThreshRaw) {
                        continue;
                    }

                    // Sigmoid for the final confidence.
                    float conf = 1.0f / (1.0f + std::exp(-curCls[bestClsId]));

                    // ========== DFL bbox decode ==========
                    // Softmax-weighted expectation over reg_ bins per side,
                    // turning 4 x reg_ distributions into ltrb distances.
                    float ltrb[4] = {0.0f, 0.0f, 0.0f, 0.0f};
                    for (int i = 0; i < 4; i++) {
                        float sum = 0.0f;
                        for (int j = 0; j < reg_; j++) {
                            int idx = reg_ * i + j;
                            // Dequantize int32 -> float with the SCALE params.
                            float dfl = std::exp(static_cast<float>(curBbox[idx]) * bboxScale[j]);
                            ltrb[i] += dfl * j;
                            sum += dfl;
                        }
                        ltrb[i] /= sum;
                    }

                    // Discard degenerate boxes (non-positive width/height).
                    if (ltrb[2] + ltrb[0] <= 0 || ltrb[3] + ltrb[1] <= 0) {
                        continue;
                    }

                    // dist2bbox: ltrb distances -> xyxy in input-image pixels.
                    float x1 = (w + 0.5f - ltrb[0]) * stride;
                    float y1 = (h + 0.5f - ltrb[1]) * stride;
                    float x2 = (w + 0.5f + ltrb[2]) * stride;
                    float y2 = (h + 0.5f + ltrb[3]) * stride;

                    // ========== Keypoint decode ==========
                    // Layout: numKeypoints_ x kptEncode_ (x, y, vis).
                    // Decode: x = (kpt_x * 2.0 + grid_w) * stride
                    std::vector<cv::Point2f> kptXY(numKeypoints_);
                    std::vector<float> kptScore(numKeypoints_);

                    for (int k = 0; k < numKeypoints_; k++) {
                        float kptX = (curKpts[kptEncode_ * k + 0] * 2.0f + w) * stride;
                        float kptY = (curKpts[kptEncode_ * k + 1] * 2.0f + h) * stride;
                        float kptVis = curKpts[kptEncode_ * k + 2]; // raw visibility logit

                        kptXY[k] = cv::Point2f(kptX, kptY);
                        kptScore[k] = kptVis;
                    }

                    // Queue the candidate for NMS.
                    bboxes.push_back(cv::Rect2d(x1, y1, x2 - x1, y2 - y1));
                    scores.push_back(conf);
                    allKptsXY.push_back(kptXY);
                    allKptsScores.push_back(kptScore);
                }
            }
        }

        // ========== NMS ==========
        std::vector<int> indices;
        cv::dnn::NMSBoxes(bboxes, scores, confThreshold, nmsThreshold, indices, 1.0f, 300);

        // ========== Build the final results ==========
        std::vector<PoseDetection> finalDetections;
        finalDetections.reserve(indices.size());

        for (int idx : indices) {
            PoseDetection pose;

            // Map the box back to original-image coordinates. The per-axis
            // scales differ in RESIZE mode, hence the separate divisors.
            pose.box.x = (bboxes[idx].x - xShift) / scaleX;
            pose.box.y = (bboxes[idx].y - yShift) / scaleY;
            pose.box.width = bboxes[idx].width / scaleX;
            pose.box.height = bboxes[idx].height / scaleY;
            pose.conf = scores[idx];
            pose.classId = 0; // person

            // Map the keypoints back and filter by confidence.
            pose.keypoints.reserve(numKeypoints_);
            for (int k = 0; k < numKeypoints_; k++) {
                KeyPoint kpt;
                kpt.x = (allKptsXY[idx][k].x - xShift) / scaleX;
                kpt.y = (allKptsXY[idx][k].y - yShift) / scaleY;
                kpt.confidence = allKptsScores[idx][k];

                // Zero out low-confidence keypoints (raw-logit comparison,
                // consistent with the raw score stored above).
                if (kpt.confidence < kptThreshRaw) {
                    kpt.confidence = 0.0f;
                }

                pose.keypoints.push_back(kpt);
            }

            finalDetections.push_back(pose);
        }

        std::cout << "[INFO] Detected " << finalDetections.size() << " poses" << std::endl;
        return finalDetections;
    }
};

} // namespace yolos_edgeplatform

#endif // YOLOS_EDGEPLATFORM_YOLO11_POSE_RDK_HPP
