#pragma once
#include "rclcpp/rclcpp.hpp"
#include <sensor_msgs/msg/image.hpp>
#include <cv_bridge/cv_bridge.h>
#include <geometry_msgs/msg/point_stamped.hpp>
#include <vision_msgs/msg/detection2_d_array.hpp>
// TF2 相关
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>  
#include "detection.hpp"
#include "yolov8.h"
#include <queue>
#include <mutex>
#include <condition_variable>
#include <thread>
#include "detection.hpp"
#include "yolov8.h"
#include <chrono>
#include "rclcpp/rate.hpp"
// Action 相关
#include "nav2_msgs/action/navigate_to_pose.hpp"
#include "rclcpp_action/rclcpp_action.hpp"

// Maximum usable depth reading, in millimetres (16UC1 depth frames).
// Fix: the original macro ended with a semicolon, which expands into the
// middle of any expression using MAX_DEPTH and breaks compilation there.
#define MAX_DEPTH 5000.0f

// Carries one RGB frame from the subscriber callback to the inference thread.
struct ImageData {
    cv::Mat image;                // BGR frame as converted by cv_bridge
    std_msgs::msg::Header header; // original message header (stamp / frame_id), kept for downstream publishing
};

/// ROS 2 node that runs RKNN/YOLOv8 detection on an RGB-D stream and sends
/// the closest filtered detection to Nav2 as a navigation goal.
///
/// Threading model:
///  - ROS executor thread: image_callback / depth_callback / action callbacks.
///  - inference_thread_:   pops frames from input_queue_, runs TargetFind,
///                         pushes DetectionResult into output_queue_.
///  - publish_thread_:     pops results, publishes debug topics, and (when the
///                         navigation state is IDLE) dispatches a Nav2 goal.
class ImageSubscriberNode : public rclcpp::Node {
public:
    // Navigation action type aliases.
    using NavigateToPose = nav2_msgs::action::NavigateToPose;
    using GoalHandleNavigate = rclcpp_action::ClientGoalHandle<NavigateToPose>;

    /// @param ctx Non-owning pointer to an initialized RKNN model context;
    ///            must outlive this node.
    explicit ImageSubscriberNode(rknn_app_context_t* ctx)
        : Node("image_subscriber_node"),
          ctx_(ctx),
          // Mem-initializers follow member *declaration* order to avoid
          // -Wreorder and to make the actual init order explicit.
          detection_array_publisher_(nullptr),
          tf_buffer_(std::make_shared<tf2_ros::Buffer>(this->get_clock())),
          tf_listener_(std::make_shared<tf2_ros::TransformListener>(*tf_buffer_))
    {
        // RGB stream feeding the detector.
        rgb_subscription_ = this->create_subscription<sensor_msgs::msg::Image>(
            "/camera/color/image_raw", 10,
            std::bind(&ImageSubscriberNode::image_callback, this, std::placeholders::_1));

        // Depth stream used to lift 2-D detections to 3-D positions.
        depth_subscription_ = this->create_subscription<sensor_msgs::msg::Image>(
            "/camera/depth/image_raw", 10,
            std::bind(&ImageSubscriberNode::depth_callback, this, std::placeholders::_1));

        // Annotated result image, for visualization / debugging.
        detection_image_publisher_ = this->create_publisher<sensor_msgs::msg::Image>(
            "/detection_result_image", 10);

        // Nav2 action client used to drive the robot toward detections.
        navigation_action_client_ =
            rclcpp_action::create_client<NavigateToPose>(this, "navigate_to_pose");

        detection_array_publisher_ = this->create_publisher<vision_msgs::msg::Detection2DArray>(
            "/detections", 10);

        // Camera intrinsics (fx 0 cx / 0 fy cy / 0 0 1).
        // NOTE(review): hard-coded — should come from the camera's CameraInfo
        // topic or a calibration file; confirm against the actual camera.
        camera_matrix_ = (cv::Mat_<double>(3, 3) <<
            504.0, 0.0, 313.7,
            0.0, 504.7, 224.5,
            0.0, 0.0, 1.0);

        // Only these class labels become navigation targets.
        target_filter_ = {"PLASTIC"};

        // Start the worker threads last, once every member they touch exists.
        inference_thread_ = std::thread(&ImageSubscriberNode::inference_loop, this);
        publish_thread_ = std::thread(&ImageSubscriberNode::publish_loop, this);
    }

    ~ImageSubscriberNode() override {
        // Signal both workers, then wake EVERY condition variable they can
        // block on.  Fix: navigation_cond_var_ was previously not notified, so
        // publish_loop() could sit forever waiting for IDLE and the destructor
        // would deadlock in join().
        stop_threads_ = true;
        input_queue_cond_var_.notify_all();
        output_queue_cond_var_.notify_all();
        navigation_cond_var_.notify_all();

        if (inference_thread_.joinable()) {
            inference_thread_.join();
        }
        if (publish_thread_.joinable()) {
            publish_thread_.join();
        }
    }

private:
    /// RGB callback: throttles the stream to ~5 fps and enqueues the newest
    /// frame for inference (queue depth 1 — stale frames are dropped).
    void image_callback(const sensor_msgs::msg::Image::SharedPtr msg)
    {
        // steady_clock so wall-clock jumps cannot break the throttle.
        static std::chrono::steady_clock::time_point last_cb_time = std::chrono::steady_clock::now();
        const auto now = std::chrono::steady_clock::now();
        const double interval_ms = std::chrono::duration<double, std::milli>(now - last_cb_time).count();

        if (interval_ms < 200.0) { // 5 fps = 200 ms between accepted frames
            return;                // drop this frame
        }
        last_cb_time = now;

        try {
            cv::Mat image_bgr = cv_bridge::toCvCopy(msg, "bgr8")->image;

            // Skip frames until a depth image has arrived.  The depth lock is
            // scoped so it is released before the queue lock is taken — the
            // original held both at once (needless nested locking).
            {
                std::lock_guard<std::mutex> depth_lock(depth_mutex_);
                if (latest_depth_image_.empty()) {
                    RCLCPP_WARN(this->get_logger(), "没有深度图像可用，跳过处理");
                    return;
                }
            }

            ImageData input_data;
            input_data.image = image_bgr;
            input_data.header = msg->header;

            {
                std::lock_guard<std::mutex> lock(input_queue_mutex_);
                // Keep at most one pending frame; older ones are stale.
                while (input_queue_.size() > 1) {
                    input_queue_.pop();
                }
                input_queue_.push(std::move(input_data));
            }
            input_queue_cond_var_.notify_one();
        } catch (cv_bridge::Exception& e) {
            RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
        }
    }

    /// Depth callback: caches the most recent 16-bit depth frame.
    void depth_callback(const sensor_msgs::msg::Image::SharedPtr msg)
    {
        try {
            cv::Mat depth_image = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_16UC1)->image;
            std::lock_guard<std::mutex> lock(depth_mutex_);
            latest_depth_image_ = depth_image;
        } catch (cv_bridge::Exception& e) {
            RCLCPP_ERROR(this->get_logger(), "Depth cv_bridge exception: %s", e.what());
        }
    }

    /// Worker: pops frames, runs detection + 3-D localization, pushes results.
    void inference_loop()
    {
        rclcpp::Rate rate(5.0); // cap inference at 5 Hz
        while (!stop_threads_) {
            ImageData input_data;
            {
                std::unique_lock<std::mutex> lock(input_queue_mutex_);
                input_queue_cond_var_.wait(lock, [this] { return !input_queue_.empty() || stop_threads_; });
                if (stop_threads_) {
                    return;
                }
                input_data = input_queue_.front();
                input_queue_.pop();
            }

            const auto infer_start = std::chrono::steady_clock::now();

            // Snapshot the depth frame; clone so the callback can keep
            // overwriting latest_depth_image_ while we work.
            cv::Mat depth_image;
            {
                std::lock_guard<std::mutex> depth_lock(depth_mutex_);
                depth_image = latest_depth_image_.clone();
            }

            // Run detection + 3-D localization (TargetFind from detection.hpp).
            DetectionResult output_data = TargetFind(
                ctx_,
                input_data.image,
                depth_image,
                camera_matrix_,
                target_filter_,
                input_data.header,
                *tf_buffer_,
                this->get_logger()
            );
            output_data.header = input_data.header;

            const auto infer_end = std::chrono::steady_clock::now();
            const double infer_time_ms =
                std::chrono::duration<double, std::milli>(infer_end - infer_start).count();
            (void)infer_time_ms; // kept for the (disabled) timing log below
            // RCLCPP_INFO(this->get_logger(), "推理耗时: %.2f ms, 检测到 %zu 个目标",
            //             infer_time_ms, output_data.detected_objects.size());

            {
                std::lock_guard<std::mutex> lock(output_queue_mutex_);
                output_queue_.push(output_data);
            }
            output_queue_cond_var_.notify_one();

            rate.sleep();
        }
    }

    /// High-level mission state, shared between publish_loop and the Nav2
    /// action callbacks (guarded by navigation_mutex_).
    enum NavigationState {
        IDLE,        // ready to send a new goal
        NAVIGATING,  // a Nav2 goal is in flight
        GRASPING,    // grasp sequence running
        FINISHED     // mission complete (fix: was misspelled FINSHED)
    };

    NavigationState navigation_state_ = IDLE;
    std::mutex navigation_mutex_;
    std::condition_variable navigation_cond_var_;

    /// Worker: publishes detection results and, when IDLE, dispatches the
    /// closest detection as a Nav2 goal.
    void publish_loop() {
        while (!stop_threads_) {
            DetectionResult output_data;
            {
                std::unique_lock<std::mutex> lock(output_queue_mutex_);
                output_queue_cond_var_.wait(lock, [this] {
                    return !output_queue_.empty() || stop_threads_;
                });
                if (stop_threads_) return;
                output_data = output_queue_.front();
                output_queue_.pop();
            }

            // Publish the annotated image regardless of navigation state.
            sensor_msgs::msg::Image output_msg;
            try {
                cv_bridge::CvImage cv_image(output_data.header, "bgr8", output_data.processed_image);
                cv_image.toImageMsg(output_msg);
                detection_image_publisher_->publish(output_msg);
            } catch (cv_bridge::Exception& e) {
                RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
            }

            // Block until the previous goal finishes (or shutdown).
            {
                std::unique_lock<std::mutex> nav_lock(navigation_mutex_);
                navigation_cond_var_.wait(nav_lock, [this] {
                    return navigation_state_ == IDLE || stop_threads_;
                });
                if (stop_threads_) return;
            }

            if (!output_data.detected_objects.empty()) {
                // Pick the detection closest to the camera in the XY plane.
                const auto closest_obj = *std::min_element(
                    output_data.detected_objects.begin(),
                    output_data.detected_objects.end(),
                    [](const DetectedObject& a, const DetectedObject& b) {
                        return std::hypot(a.position_3d.point.x, a.position_3d.point.y) <
                               std::hypot(b.position_3d.point.x, b.position_3d.point.y);
                    });

                // Goal in the map frame at the object's 3-D position.
                geometry_msgs::msg::PoseStamped goal_pose;
                goal_pose.header.frame_id = "map";
                goal_pose.header.stamp = this->now();
                goal_pose.pose.position = closest_obj.position_3d.point;

                // Orient the goal to face back toward the robot.  Fix: the
                // original had this try's catch commented out (including its
                // closing brace), which did not compile; the fallback below is
                // restored from that commented code.
                geometry_msgs::msg::PoseStamped robot_pose;
                try {
                    robot_pose.header.frame_id = "base_link";            // robot body frame
                    robot_pose.header.stamp = goal_pose.header.stamp;    // same stamp as the goal

                    // Robot pose expressed in the map frame (500 ms timeout).
                    auto robot_pose_map = tf_buffer_->transform(robot_pose, "map",
                        tf2::durationFromSec(0.5));

                    RCLCPP_INFO(this->get_logger(),
                        "机器人位置: [%.2f, %.2f, %.2f]",
                        robot_pose_map.pose.position.x,
                        robot_pose_map.pose.position.y,
                        robot_pose_map.pose.position.z
                    );

                    goal_pose.pose.orientation = calculate_orientation(
                        goal_pose.pose.position,
                        robot_pose_map.pose.position
                    );
                    RCLCPP_DEBUG(this->get_logger(), "已设置目标朝向机器人方向");
                } catch (const tf2::TransformException& ex) {
                    // TF unavailable: fall back to identity (facing forward).
                    goal_pose.pose.orientation.w = 1.0;
                    goal_pose.pose.orientation.x = 0.0;
                    goal_pose.pose.orientation.y = 0.0;
                    goal_pose.pose.orientation.z = 0.0;
                    RCLCPP_WARN(this->get_logger(), "无法获取机器人位置: %s, 使用默认方向", ex.what());
                }

                RCLCPP_INFO(this->get_logger(),
                    "发布目标: [%.2f, %.2f, %.2f] 朝向: [%.2f, %.2f, %.2f, %.2f]",
                    goal_pose.pose.position.x,
                    goal_pose.pose.position.y,
                    goal_pose.pose.position.z,
                    goal_pose.pose.orientation.x,
                    goal_pose.pose.orientation.y,
                    goal_pose.pose.orientation.z,
                    goal_pose.pose.orientation.w
                );

                // Mark the goal in flight before sending, so this loop blocks
                // on the next iteration until the action completes.
                {
                    std::lock_guard<std::mutex> nav_lock(navigation_mutex_);
                    navigation_state_ = NAVIGATING;
                }
                send_navigation_goal(goal_pose);

                // 2-D detection array (debug / downstream consumers).
                vision_msgs::msg::Detection2DArray detections_msg;
                detections_msg.header = output_data.header;

                for (const auto& obj : output_data.detected_objects) {
                    vision_msgs::msg::Detection2D detection;
                    detection.header = output_data.header;
                    detection.bbox.center.position.x = obj.bbox.x + obj.bbox.width / 2.0;
                    detection.bbox.center.position.y = obj.bbox.y + obj.bbox.height / 2.0;
                    detection.bbox.center.theta = 0.0;
                    detection.bbox.size_x = obj.bbox.width;
                    detection.bbox.size_y = obj.bbox.height;

                    vision_msgs::msg::ObjectHypothesisWithPose hyp;
                    hyp.hypothesis.class_id = obj.label;
                    hyp.hypothesis.score = obj.confidence;
                    hyp.pose.pose.position = obj.position_3d.point;
                    hyp.pose.pose.orientation.w = 1.0; // valid (identity) quaternion

                    detection.results.push_back(hyp);
                    detections_msg.detections.push_back(detection);
                }

                detection_array_publisher_->publish(detections_msg);
            }
        }
    }

    /// Sends a NavigateToPose goal and wires up the three action callbacks.
    void send_navigation_goal(const geometry_msgs::msg::PoseStamped& goal_pose) {
        if (!navigation_action_client_->wait_for_action_server(std::chrono::seconds(5))) {
            RCLCPP_ERROR(this->get_logger(), "导航Action服务器未响应");
            return;
        }

        auto goal_msg = NavigateToPose::Goal();
        goal_msg.pose = goal_pose;

        auto send_goal_options = rclcpp_action::Client<NavigateToPose>::SendGoalOptions();
        send_goal_options.goal_response_callback =
            std::bind(&ImageSubscriberNode::goal_response_callback, this, std::placeholders::_1);
        send_goal_options.feedback_callback =
            std::bind(&ImageSubscriberNode::feedback_callback, this, std::placeholders::_1, std::placeholders::_2);
        send_goal_options.result_callback =
            std::bind(&ImageSubscriberNode::result_callback, this, std::placeholders::_1);

        RCLCPP_INFO(this->get_logger(), "发送导航目标");
        navigation_action_client_->async_send_goal(goal_msg, send_goal_options);
    }

    /// Goal accepted/rejected; a rejection frees publish_loop to retry.
    void goal_response_callback(const GoalHandleNavigate::SharedPtr& goal_handle) {
        if (!goal_handle) {
            RCLCPP_ERROR(this->get_logger(), "目标被拒绝");
            set_navigation_state(IDLE);
        } else {
            RCLCPP_INFO(this->get_logger(), "目标接受，开始导航");
        }
    }

    /// Periodic Nav2 progress feedback.  Fix: the original computed the
    /// distance between feedback->current_pose and itself (always 0) — that
    /// dead computation has been removed; distance_remaining already carries
    /// the value the log below would need.
    void feedback_callback(
        GoalHandleNavigate::SharedPtr,
        const std::shared_ptr<const NavigateToPose::Feedback> feedback)
    {
        (void)feedback; // only consumed by the disabled debug log below
        // RCLCPP_INFO(this->get_logger(),
        //     "剩余距离: %.2f米 | 当前位置: [%.2f, %.2f]",
        //     feedback->distance_remaining,
        //     feedback->current_pose.pose.position.x,
        //     feedback->current_pose.pose.position.y);
    }

    /// Terminal action result; every non-success path returns to IDLE so
    /// publish_loop can send the next goal.
    void result_callback(const GoalHandleNavigate::WrappedResult& result) {
        switch (result.code) {
            case rclcpp_action::ResultCode::SUCCEEDED:
                RCLCPP_INFO(this->get_logger(), "导航成功!");
                // Next step (grasping) intentionally disabled for now:
                // set_navigation_state(GRASPING);
                // execute_grasp_sequence();
                break;
            case rclcpp_action::ResultCode::ABORTED:
                RCLCPP_ERROR(this->get_logger(), "导航被中止");
                set_navigation_state(IDLE);
                break;
            case rclcpp_action::ResultCode::CANCELED:
                RCLCPP_WARN(this->get_logger(), "导航取消");
                set_navigation_state(IDLE);
                break;
            default:
                RCLCPP_ERROR(this->get_logger(), "未知结果");
                set_navigation_state(IDLE);
                break;
        }
    }

    /// Updates the navigation state under lock and wakes publish_loop.
    void set_navigation_state(NavigationState state) {
        std::lock_guard<std::mutex> lock(navigation_mutex_);
        navigation_state_ = state;
        navigation_cond_var_.notify_all();
    }

    rknn_app_context_t* ctx_; // non-owning; lifetime managed by the caller
    rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr rgb_subscription_;
    rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr depth_subscription_;
    rclcpp::Publisher<sensor_msgs::msg::Image>::SharedPtr detection_image_publisher_;
    rclcpp_action::Client<NavigateToPose>::SharedPtr navigation_action_client_;
    rclcpp::Publisher<vision_msgs::msg::Detection2DArray>::SharedPtr detection_array_publisher_;

    // TF2: listener keeps tf_buffer_ filled; declared after the buffer so the
    // buffer is constructed first and destroyed last.
    std::shared_ptr<tf2_ros::Buffer> tf_buffer_;
    std::shared_ptr<tf2_ros::TransformListener> tf_listener_;

    // Camera intrinsics (3x3, double).
    cv::Mat camera_matrix_;

    // Class labels accepted as navigation targets.
    std::vector<std::string> target_filter_;

    // Most recent depth frame (guarded by depth_mutex_).
    cv::Mat latest_depth_image_;
    std::mutex depth_mutex_;

    // RGB frames pending inference (depth kept at <= 1 by image_callback).
    std::queue<ImageData> input_queue_;
    std::mutex input_queue_mutex_;
    std::condition_variable input_queue_cond_var_;

    // Detection results pending publication.
    std::queue<DetectionResult> output_queue_;
    std::mutex output_queue_mutex_;
    std::condition_variable output_queue_cond_var_;

    std::thread inference_thread_;
    std::thread publish_thread_;
    std::atomic<bool> stop_threads_ {false};
};