// C++ standard library
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

// Third-party libraries
#include <Eigen/Dense>
#include <opencv2/opencv.hpp>

// ROS
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <message_filters/subscriber.h>
#include <sensor_msgs/Image.h>
#include <tf/transform_datatypes.h>
#include <tf2/LinearMath/Quaternion.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>

// 定义相机内参矩阵
// Camera intrinsic matrix (pinhole model):
//   [ fx   0  cx ]
//   [  0  fy  cy ]
//   [  0   0   1 ]
// Declared const: it is only ever read (passed as InputArray to
// findEssentialMat / recoverPose), so mutable global state is avoided.
// NOTE(review): fx=fy=525, cx=319.5, cy=239.5 look like generic 640x480
// defaults — confirm against the actual camera calibration.
const cv::Mat CameraMatrix = (cv::Mat_<double>(3, 3) <<
    525.0, 0, 319.5,   // fx,  0, cx
    0, 525.0, 239.5,   //  0, fy, cy
    0, 0, 1);

// 摄像头数据处理函数
void processCameraData(const cv::Mat& img1, const cv::Mat& img2, Eigen::VectorXd& pose_change) {
    // 提取特征点
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
    detector->detect(img1, keypoints1);
    detector->detect(img2, keypoints2);

    // 特征点描述符
    cv::Mat descriptors1, descriptors2;
    cv::Ptr<cv::DescriptorExtractor> extractor = cv::ORB::create();
    extractor->compute(img1, keypoints1, descriptors1);
    extractor->compute(img2, keypoints2, descriptors2);

    // 特征点匹配
    std::vector<cv::DMatch> matches;
    cv::BFMatcher matcher;
    matcher.match(descriptors1, descriptors2, matches);

    // 过滤匹配结果
    std::vector<cv::Point2f> points1, points2;
    for (const auto& match : matches) {
        points1.push_back(keypoints1[match.queryIdx].pt);
        points2.push_back(keypoints2[match.trainIdx].pt);
    }

    // 计算相机运动
    cv::Mat E, R, t, rvec;
    E = cv::findEssentialMat(points1, points2, CameraMatrix, cv::RANSAC, 0.999, 1.0);
    cv::recoverPose(E, points1, points2, CameraMatrix, R, t);

    // 将旋转矩阵转换为旋转矢量
    cv::Rodrigues(R, rvec);

    // 将相机运动转换为位置和姿态
    pose_change = Eigen::VectorXd::Zero(6);
    pose_change << t.at<double>(0), t.at<double>(1), t.at<double>(2),
                    rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2);
}

// Image callback: converts the incoming frame to mono8, estimates the
// motion relative to the previous frame, and publishes it as a
// PoseWithCovarianceStamped on pose_pub.
//
// @param msg      incoming image message
// @param prev_img previous frame, updated in place each call (empty on the
//                 very first frame, which is only stored, not processed)
// @param pose_pub publisher for the estimated inter-frame pose change
void imageCallback(const sensor_msgs::ImageConstPtr& msg, cv::Mat& prev_img, ros::Publisher& pose_pub) {
    cv_bridge::CvImagePtr cv_ptr;
    try {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::MONO8);
    } catch (cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    cv::Mat curr_img = cv_ptr->image;

    if (!prev_img.empty()) {
        Eigen::VectorXd pose_change;
        processCameraData(prev_img, curr_img, pose_change);

        geometry_msgs::PoseWithCovarianceStamped pose_msg;
        pose_msg.header = msg->header;
        pose_msg.pose.pose.position.x = pose_change(0);
        pose_msg.pose.pose.position.y = pose_change(1);
        pose_msg.pose.pose.position.z = pose_change(2);

        // pose_change(3..5) is a Rodrigues (axis-angle) rotation vector,
        // NOT roll/pitch/yaw — the previous code fed it to setRPY, which
        // produced a wrong orientation for any non-trivial rotation.
        // Convert axis-angle -> quaternion directly: |rvec| is the angle,
        // rvec/|rvec| is the axis.
        const double rx = pose_change(3);
        const double ry = pose_change(4);
        const double rz = pose_change(5);
        const double angle = std::sqrt(rx * rx + ry * ry + rz * rz);

        tf2::Quaternion q;
        if (angle > 1e-12) {
            q.setRotation(tf2::Vector3(rx / angle, ry / angle, rz / angle), angle);
        } else {
            q = tf2::Quaternion::getIdentity();  // no measurable rotation
        }

        pose_msg.pose.pose.orientation = tf2::toMsg(q);

        // Publish the inter-frame pose change.
        pose_pub.publish(pose_msg);
    }

    // Keep the current frame for the next callback. Shallow assignment is
    // safe here because toCvCopy allocates a fresh buffer every call.
    prev_img = curr_img;
}

// Node entry point: subscribes to the raw camera image stream and publishes
// frame-to-frame pose changes on /camera_pose.
int main(int argc, char** argv) {
    ros::init(argc, argv, "camera_odometry");
    ros::NodeHandle nh;

    // State shared with the callback: the previous frame. Lifetime is safe
    // because ros::spin() blocks until shutdown.
    cv::Mat prev_img;

    // Publisher for the estimated inter-frame pose change.
    ros::Publisher pose_pub =
        nh.advertise<geometry_msgs::PoseWithCovarianceStamped>("/camera_pose", 10);

    // A plain subscriber is sufficient for a single topic;
    // message_filters::Subscriber is only needed when synchronizing
    // multiple topics with a time synchronizer.
    ros::Subscriber image_sub = nh.subscribe<sensor_msgs::Image>(
        "/camera/image_raw", 1,
        boost::bind(imageCallback, _1, boost::ref(prev_img), boost::ref(pose_pub)));

    ros::spin();

    return 0;
}
