#include <algorithm>
#include <chrono>
#include <exception>
#include <iostream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
#include <openvino/openvino.hpp>

// Candidate detection used during non-maximum suppression (NMS).
struct Box {
    cv::Rect rect;     // bounding box in network-input pixel coordinates
    float confidence;  // object confidence score for this candidate
    int index;         // original anchor/column index in the raw model output
};

// Final detection result after NMS, rescaled to original-frame coordinates.
struct Detection {
    cv::Rect box;                       // bounding box in original-frame pixels
    std::vector<cv::Point3f> keypoints; // (x, y, confidence) per keypoint
};

// Greedy non-maximum suppression.
//
// Sorts `boxes` in place by descending confidence, then walks the sorted
// list keeping each box and suppressing every later box whose IoU with it
// exceeds `nms_threshold`.
//
// @param boxes          candidate boxes; NOTE: reordered in place by this call.
// @param nms_threshold  IoU above which a lower-confidence box is suppressed.
// @return the original `Box::index` values of the surviving boxes, in
//         descending-confidence order.
std::vector<int> customNMS(std::vector<Box>& boxes, float nms_threshold) {
    if (boxes.empty()) {
        return {};
    }

    // Sort by confidence, highest first, so the greedy pass keeps the best box.
    std::sort(boxes.begin(), boxes.end(), [](const Box& a, const Box& b) {
        return a.confidence > b.confidence;
    });

    std::vector<int> keep_indices;
    keep_indices.reserve(boxes.size());
    std::vector<bool> suppressed(boxes.size(), false);

    for (size_t i = 0; i < boxes.size(); ++i) {
        if (suppressed[i]) {
            continue;
        }
        keep_indices.push_back(boxes[i].index);

        for (size_t j = i + 1; j < boxes.size(); ++j) {
            if (suppressed[j]) {
                continue;
            }

            // IoU (Intersection over Union) between box i and box j.
            cv::Rect intersection = boxes[i].rect & boxes[j].rect;
            float union_area = static_cast<float>(boxes[i].rect.area() + boxes[j].rect.area() - intersection.area());
            // Guard against 0/0 (NaN) when both boxes have zero area; treat
            // degenerate pairs as non-overlapping.
            float iou = union_area > 0.0f
                            ? static_cast<float>(intersection.area()) / union_area
                            : 0.0f;

            // Suppress boxes that overlap the kept box too much.
            if (iou > nms_threshold) {
                suppressed[j] = true;
            }
        }
    }
    return keep_indices;
}

int main() {
    const std::string model_path = "yolo11n-pose_int8_openvino_model/yolo11n-pose.xml";
    const int camera_index = 1;
    const float conf_threshold = 0.5f;
    const float kpt_threshold = 0.5f;
    const float nms_threshold = 0.5f;

    // 初始化 OpenVINO 
    ov::Core core;
    auto compiled_model = core.compile_model(model_path, "CPU");
    auto infer_request = compiled_model.create_infer_request();
    auto input_port = compiled_model.input();
    auto input_shape = input_port.get_shape();
    size_t input_height = input_shape[2];
    size_t input_width = input_shape[3];

    cv::VideoCapture cap(camera_index);
    if (!cap.isOpened()) {
        std::cerr << "错误: 无法打开摄像头。" << std::endl;
        return -1;
    }
    
    cv::Mat frame;
    auto prev_time = std::chrono::high_resolution_clock::now();

    while (true) {
        cap.read(frame);
        if (frame.empty()) break;

        float original_h = frame.rows;
        float original_w = frame.cols;

        // 缩放图像
        cv::Mat resized_image;
        cv::resize(frame, resized_image, cv::Size(input_width, input_height));
        
        // 创建输入Tensor
        ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape());
        float* input_data = input_tensor.data<float>();

        // c. HWC -> NCHW 转换 和 归一化
        // 遍历每个像素
        for (size_t h = 0; h < input_height; ++h) {
            for (size_t w = 0; w < input_width; ++w) {
                for (size_t c = 0; c < 3; ++c) {
                    // 假设模型输入已经是BGR（YOLOv11通常是RGB，但为简化我们先假设BGR）
                    // 经过测试差别不大，可以忽略通道影响
                    // NCHW 布局: N=1, C=3, H=input_height, W=input_width
                    input_data[c * (input_height * input_width) + h * input_width + w] =
                        static_cast<float>(resized_image.at<cv::Vec3b>(h, w)[c]) / 255.0f;
                }
            }
        }
        infer_request.set_input_tensor(input_tensor);

        infer_request.infer();

        // ==================== NMS ====================
        auto output_tensor = infer_request.get_output_tensor();
        const float* output_data = output_tensor.data<const float>();
        
        // 收集候选框
        std::vector<Box> candidate_boxes;
        std::vector<cv::Mat> all_keypoints;
        
        for (int i = 0; i < output_tensor.get_shape()[2]; ++i) { // 遍历8400个输出
            // OpenVINO 输出是 NCHW (1, 56, 8400)，直接访问
            float confidence = output_data[4 * output_tensor.get_shape()[2] + i];
            if (confidence > conf_threshold) {
                float cx = output_data[0 * output_tensor.get_shape()[2] + i];
                float cy = output_data[1 * output_tensor.get_shape()[2] + i];
                float w = output_data[2 * output_tensor.get_shape()[2] + i];
                float h = output_data[3 * output_tensor.get_shape()[2] + i];
                
                Box box;
                box.rect = cv::Rect(static_cast<int>(cx - w / 2),
                                    static_cast<int>(cy - h / 2),
                                    static_cast<int>(w),
                                    static_cast<int>(h));
                box.confidence = confidence;
                box.index = i; // 保存原始索引
                candidate_boxes.push_back(box);
            }
        }

        std::vector<int> keep_indices = customNMS(candidate_boxes, nms_threshold);

        // 最终结果
        std::vector<Detection> detections;
        float scale_w = original_w / input_width;
        float scale_h = original_h / input_height;
        
        for (int idx : keep_indices) {
            // 通过原始索引找到原始的完整输出
            const float* single_output = output_data + idx;
            
            Detection det;
            float cx = single_output[0 * output_tensor.get_shape()[2]];
            float cy = single_output[1 * output_tensor.get_shape()[2]];
            float w = single_output[2 * output_tensor.get_shape()[2]];
            float h = single_output[3 * output_tensor.get_shape()[2]];

            det.box.x = static_cast<int>((cx - w / 2) * scale_w);
            det.box.y = static_cast<int>((cy - h / 2) * scale_h);
            det.box.width = static_cast<int>(w * scale_w);
            det.box.height = static_cast<int>(h * scale_h);

            for (int k = 0; k < 17; ++k) {
                float kpt_x = single_output[(5 + k * 3) * output_tensor.get_shape()[2]] * scale_w;
                float kpt_y = single_output[(5 + k * 3 + 1) * output_tensor.get_shape()[2]] * scale_h;
                float kpt_conf = single_output[(5 + k * 3 + 2) * output_tensor.get_shape()[2]];
                det.keypoints.push_back(cv::Point3f(kpt_x, kpt_y, kpt_conf));
            }
            detections.push_back(det);
        }
        
        // 可视化
        for (const auto& det : detections) {
            cv::rectangle(frame, det.box, cv::Scalar(0, 255, 0), 2);
            for (const auto& kpt : det.keypoints) {
                if (kpt.z > kpt_threshold) {
                    cv::circle(frame, cv::Point(kpt.x, kpt.y), 5, cv::Scalar(0, 0, 255), -1);
                }
            }
        }

        auto curr_time = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> diff = curr_time - prev_time;
        prev_time = curr_time;
        double fps = 1.0 / diff.count();
        cv::putText(frame, "FPS: " + std::to_string(static_cast<int>(fps)), cv::Point(10, 30), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
        cv::imshow("Pose Estimation C++ (Custom NMS)", frame);
        if (cv::waitKey(1) == 'q') break;
    }

    cap.release();
    cv::destroyAllWindows();
    return 0;
}
