#include <rclcpp/qos.hpp>  // QoS settings for the subscription
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/laser_scan.hpp>

#include <algorithm>
#include <cmath>
#include <execution>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include <sophus/se2.hpp>

// Short aliases for the Sophus / Eigen types used throughout this node.
using SE2 = Sophus::SE2d;
using Vec2d = Eigen::Vector2d;
using Vec2i = Eigen::Vector2i;
using Vec2f = Eigen::Vector2f;
// Must be unsigned: pixel intensities go up to 255, and with a signed `char`
// the literal 255 converts to an implementation-defined negative value.
using Vec3b = Eigen::Matrix<unsigned char, 3, 1>;
using Vec3d = Eigen::Vector3d;
using Mat3d = Eigen::Matrix3d;
using SO2 = Sophus::SO2d;


class Map2DNode : public rclcpp::Node {
public:
    Map2DNode() : Node("2D mapping") {
        rclcpp::QoS qos(rclcpp::KeepLast(10)); 
        qos.reliability(RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT); // 显式设置可靠传输的QoS策略
        scan_sub_ = this->create_subscription<sensor_msgs::msg::LaserScan>(
            "/scan", qos, std::bind(&Map2DNode::laserScanCallback,  this, std::placeholders::_1)); // 订阅激光雷达的 /scan 话题
        this->declare_parameter<std::string>("scan_topic", "/scan");
        scan_topic_ = this->get_parameter("scan_topic").as_string();
        buildmodel();
        Init();
    }

    // One template point of the likelihood field: an integer pixel offset
    // (dx_, dy_) from the template centre plus a pre-computed value for that
    // offset. residual_ is presumably the metric distance of the offset from
    // the centre — TODO confirm against the code that fills model_.
    struct ModelPoint {
        ModelPoint(int dx, int dy, float res) : dx_(dx), dy_(dy), residual_(res) {}
        int dx_ = 0;           // pixel offset in x
        int dy_ = 0;           // pixel offset in y
        float residual_ = 0;   // pre-computed value for this offset
    };

    /// A single laser frame: the raw scan plus its estimated poses.
    struct Frame {
        Frame() = default;
        // explicit: a shared_ptr<LaserScan> should not silently convert to a
        // Frame; the pointer is a sink parameter and is moved, not copied.
        explicit Frame(sensor_msgs::msg::LaserScan::SharedPtr scan) : scan_(std::move(scan)) {}

        size_t id_ = 0;               // scan id
        size_t keyframe_id_ = 0;      // keyframe id (meaningful only once the frame becomes a keyframe)
        double timestamp_ = 0;        // timestamp, usually unused
        sensor_msgs::msg::LaserScan::SharedPtr scan_ = nullptr;  // raw laser scan
        SE2 pose_;                    // pose, world to scan, T_w_c
        SE2 pose_submap_;             // pose, submap to scan, T_s_c
    };

    /// A submap: a local occupancy grid and likelihood field together with
    /// the keyframes inserted into it. pose_ is T_w_s (world from submap).
    struct Submap {
        explicit Submap(const SE2& pose) : pose_(pose) {
            occu_map_.SetPose(pose_);
            field_.SetPose(pose_);
        }

        std::vector<std::shared_ptr<Frame>>& GetFrames() { return frames_; }
        size_t NumFrames() const { return frames_.size(); }

        void SetId(size_t id) { id_ = id; }
        size_t GetId() const { return id_; }

        void SetPose(const SE2& pose);
        SE2 GetPose() const { return pose_; }

        // The grid lives inside occu_map_; the original returned a member
        // `occupancy_grid_` that does not exist on Submap.
        cv::Mat GetOccupancyGrid() const { return occu_map_.GetOccupancyGrid(); }

        SE2 pose_;  // submap pose, T_w_s
        size_t id_ = 0;

        std::vector<std::shared_ptr<Frame>> frames_;  // keyframes belonging to this submap
        LikelihoodField field_;                       // used for scan matching
        OccupancyMap occu_map_;                       // used to build the occupancy grid
    };


private:
    SE2 pose_;  // T_w_s: current pose (world from submap)
    // Laser scan buffers
    sensor_msgs::msg::LaserScan::SharedPtr source_ = nullptr;       // scan used as the matching source
    sensor_msgs::msg::LaserScan::SharedPtr last_scan = nullptr;
    sensor_msgs::msg::LaserScan::SharedPtr current_scan = nullptr;
    std::vector<ModelPoint> model_;  // pre-computed 2D model template
    cv::Mat field_;                  // likelihood field image
    bool has_outside_pts_ = false;   // whether some scan points fell outside the field
    // NOTE(review): the duplicate members `float resolution_ = 60` and
    // `int image_size = 1000` were removed here — they redeclared the
    // constexpr members below (ill-formed) and were never used consistently.

    // Bookkeeping
    size_t frame_id_ = 0;
    size_t keyframe_id_ = 0;
    size_t submap_id_ = 0;
    bool with_loop_closing = false;
    bool first_scan_ = true;
    std::shared_ptr<Frame> current_frame_ = nullptr;
    std::shared_ptr<Frame> last_frame_ = nullptr;
    std::shared_ptr<Frame> last_keyframe_ = nullptr;
    std::shared_ptr<Submap> current_submap_ = nullptr;
    SE2 motion_guess_;        // constant-velocity motion prediction, T_{last->current}
    cv::Mat occupancy_grid_;  // 8-bit occupancy grid image

    std::vector<std::shared_ptr<Submap>> all_submaps_;
    std::shared_ptr<LoopClosing> loop_closing_ = nullptr;  // loop closure detection

    // Parameters
    inline static constexpr double keyframe_pos_th_ = 0.3;              // keyframe translation threshold [m]
    inline static constexpr double keyframe_ang_th_ = 15 * M_PI / 180;  // keyframe rotation threshold [rad]
    inline static constexpr double closest_th_ = 0.2;         // near-range threshold [m]
    inline static constexpr double endpoint_close_th_ = 0.1;  // endpoint near-obstacle threshold [m]
    inline static constexpr double resolution_ = 20.0;        // pixels per metre
    inline static constexpr float inv_resolution_ = 0.05;     // metres per pixel (grid resolution)
    inline static constexpr int image_size_ = 1000;           // image size in pixels
    inline static constexpr int model_size_ = 400;            // model template half-size in pixels


    /// Pre-compute the model template: every integer pixel offset within the
    /// model window together with its metric distance from the centre.
    /// The original iterated an undeclared `Model2DPoint` with fields the
    /// declared `ModelPoint` does not have; it now fills ModelPoint directly,
    /// storing the metric range of the offset in residual_.
    void buildmodel() {
        model_.reserve(size_t(2 * model_size_ + 1) * size_t(2 * model_size_ + 1));
        for (int x = -model_size_; x <= model_size_; x++) {
            for (int y = -model_size_; y <= model_size_; y++) {
                // residual_: distance of the offset from the centre, pixels -> metres
                model_.emplace_back(x, y, float(std::sqrt(double(x) * x + double(y) * y) * inv_resolution_));
            }
        }
    }

    bool Init(bool with_loop_closing) {
        keyframe_id_ = 0;
        current_submap_ = std::make_shared<Submap>(SE2());
        all_submaps_.emplace_back(current_submap_);

        if (with_loop_closing) {
            loop_closing_ = std::make_shared<LoopClosing>();
            loop_closing_->AddNewSubmap(current_submap_);
        }

        return true;
    }

    bool ProcessScan(sensor_msgs::msg::LaserScan::SharedPtr scan) {
        current_frame_ = std::make_shared<Frame>(scan);
        current_frame_->id_ = frame_id_++;

        if (last_frame_) {
            // set pose from last frame
            // current_frame_->pose_ = last_frame_->pose_;
            current_frame_->pose_ = last_frame_->pose_ * motion_guess_;
            current_frame_->pose_submap_ = last_frame_->pose_submap_;
        }

        // 利用scan matching来匹配地图
        if (!first_scan_) {
            // 第一帧无法匹配，直接加入到occupancy map
            current_submap_->MatchScan(current_frame_);
        }

        // current_submap_->AddScanInOccupancyMap(current_frame_);
        first_scan_ = false;
        bool is_kf = IsKeyFrame();

        if (is_kf) {
            AddKeyFrame();
            AddScanInOccupancyMap(current_frame_);

            // 处理回环检测
            if (loop_closing_) {
                loop_closing_->AddNewFrame(current_frame_);
            }

            if (has_outside_pts_ || (current_submap_->NumFrames()) > 50) {
                /// 走出了submap或者单个submap中的关键帧较多
                ExpandSubmap();
            }
        }

        /// 可视化输出
        auto occu_image = GetOccupancyGridBlackWhite();
        Visualize2DScan(current_frame_->scan_, current_frame_->pose_, occu_image, Vec3b(0, 0, 255), 1000, 20.0,
                        current_submap_->GetPose());
        cv::putText(occu_image, "submap " + std::to_string(current_submap_->GetId()), cv::Point2f(20, 20),
                    cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(0, 255, 0));
        cv::putText(occu_image, "keyframes " + std::to_string(current_submap_->NumFrames()), cv::Point2f(20, 50),
                    cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(0, 255, 0));
        cv::imshow("occupancy map", occu_image);

        auto field_image = GetFieldImage();
        Visualize2DScan(current_frame_->scan_, current_frame_->pose_, field_image, Vec3b(0, 0, 255), 1000, 20.0,
                        current_submap_->GetPose());
        cv::imshow("likelihood", field_image);

        /// global map
        if (is_kf) {
            cv::imshow("global map", ShowGlobalMap());
        }

        cv::waitKey(10);

        if (last_frame_) {
            motion_guess_ = last_frame_->pose_.inverse() * current_frame_->pose_;
        }

        last_frame_ = current_frame_;

        return true;
    }

    // Match a frame against this submap's likelihood field and update its
    // world pose from the optimised submap pose.
    // NOTE(review): defining `Submap::MatchScan` with a qualified name inside
    // Map2DNode is ill-formed C++ — this definition must be moved out of the
    // enclosing class (or into struct Submap itself). It also mixes scopes:
    // SetSourceScan/AlignG2O are Map2DNode members while pose_ here is meant
    // to be the submap's pose. Left byte-identical pending that refactor.
    bool Submap::MatchScan(std::shared_ptr<Frame> frame) {
        SetSourceScan(frame->scan_);
        AlignG2O(frame->pose_submap_);
        frame->pose_ = pose_ * frame->pose_submap_;  // T_w_c = T_w_s * T_s_c

        return true;
    }

    // Optimise a 2D scan pose against the likelihood field with g2o
    // (Levenberg-Marquardt over a single SE2 vertex, one unary edge per beam).
    // @param init_pose  in: initial guess; out: optimised pose
    // @return always true
    // NOTE(review): the g2o headers and the VertexSE2 / EdgeSE2LikelihoodFiled
    // types are neither included nor declared in this file — confirm where
    // they are supposed to come from.
    bool AlignG2O(SE2& init_pose) {
        using BlockSolverType = g2o::BlockSolver<g2o::BlockSolverTraits<3, 1>>;
        using LinearSolverType = g2o::LinearSolverCholmod<BlockSolverType::PoseMatrixType>;
        auto* solver = new g2o::OptimizationAlgorithmLevenberg(
            g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
        g2o::SparseOptimizer optimizer;
        optimizer.setAlgorithm(solver);

        auto* v = new VertexSE2();
        v->setId(0);
        v->setEstimate(init_pose);
        optimizer.addVertex(v);

        const double range_th = 15.0;  // ignore very distant returns: unreliable
        const double rk_delta = 0.8;   // Huber robust-kernel threshold

        has_outside_pts_ = false;
        // Build one unary edge per valid beam of the source scan.
        for (size_t i = 0; i < source_->ranges.size(); ++i) {
            float r = source_->ranges[i];
            if (r < source_->range_min || r > source_->range_max) {
                continue;
            }

            if (r > range_th) {
                continue;
            }

            float angle = source_->angle_min + i * source_->angle_increment;
            // Skip 30 degrees at each edge of the field of view.
            if (angle < source_->angle_min + 30 * M_PI / 180.0 || angle > source_->angle_max - 30 * M_PI / 180.0) {
                continue;
            }

            auto e = new EdgeSE2LikelihoodFiled(field_, r, angle, resolution_);
            e->setVertex(0, v);

            // Beams landing outside the field cannot constrain the pose;
            // remember that some did so the caller can expand the submap.
            if (e->IsOutSide()) {
                has_outside_pts_ = true;
                delete e;
                continue;
            }

            e->setInformation(Eigen::Matrix<double, 1, 1>::Identity());
            auto rk = new g2o::RobustKernelHuber;
            rk->setDelta(rk_delta);
            e->setRobustKernel(rk);
            optimizer.addEdge(e);
        }

        optimizer.setVerbose(false);
        optimizer.initializeOptimization();
        optimizer.optimize(10);

        init_pose = v->estimate();
        return true;
    }

    // NOTE(review): `frames_` is a member of Submap, not of Map2DNode — this
    // looks like a stray copy of Submap::NumFrames() and cannot compile here;
    // confirm and remove.
    size_t NumFrames() const { return frames_.size(); }

    /// Set the scan used as the matching source (sink parameter: taken by
    /// value and moved, instead of the original's extra shared_ptr copy).
    void SetSourceScan(sensor_msgs::msg::LaserScan::SharedPtr scan) { source_ = std::move(scan); }

    bool IsKeyFrame() {
        if (last_keyframe_ == nullptr) {
            return true;
        }

        SE2 delta_pose = last_keyframe_->pose_.inverse() * current_frame_->pose_;

        // 检查当前帧相对于上一关键帧的位移和旋转是否超过阈值
        // delta_pose.translation().norm() 计算当前帧相对于上一关键帧的平移向量的模长
        // keyframe_pos_th_ 是关键帧位移量的阈值
        // fabs(delta_pose.so2().log()) 计算当前帧相对于上一关键帧的旋转角度的绝对值
        // keyframe_ang_th_ 是关键帧角度量的阈值
        if (delta_pose.translation().norm() > keyframe_pos_th_ || fabs(delta_pose.so2().log()) > keyframe_ang_th_) {
            return true;
        }

        return false;
    }

    /// Promote the current frame to a keyframe and insert it into the
    /// current submap.
    void AddKeyFrame() {
        // keyframe_id_ is size_t: "%zu" is the matching format specifier —
        // the original "%d" is undefined behaviour on 64-bit builds.
        RCLCPP_INFO(this->get_logger(), "Add keyframe %zu", keyframe_id_);
        current_frame_->keyframe_id_ = keyframe_id_++;
        // NOTE(review): Submap::AddKeyFrame is not declared in the visible
        // Submap struct — confirm it exists in the real Submap definition.
        current_submap_->AddKeyFrame(current_frame_);
        last_keyframe_ = current_frame_;
    }

    void ExpandSubmap() {
        // 当前submap作为历史地图放入loop closing
        if (loop_closing_) {
            loop_closing_->AddFinishedSubmap(current_submap_);
        }

        // 将当前submap替换成新的
        auto last_submap = current_submap_;

        // debug
        // cv::imwrite("./data/ch6/submap_" + std::to_string(last_submap->GetId()) + ".png",
        //             last_submap->GetOccuMap().GetOccupancyGridBlackWhite());

        current_submap_ = std::make_shared<Submap>(current_frame_->pose_);
        current_frame_->pose_submap_ = SE2();  // 这个归零

        current_submap_->SetId(++submap_id_);
        AddKeyFrame(current_frame_);
        SetOccuFromOtherSubmap(last_submap);  // 把上一帧的数据也放进来，不让一个submap显得太空

        current_submap_->AddScanInOccupancyMap(current_frame_);
        all_submaps_.emplace_back(current_submap_);

        if (loop_closing_) {
            loop_closing_->AddNewSubmap(current_submap_);
        }
        
                << " with pose: " << current_submap_->GetPose().translation().transpose() << ", "
                << current_submap_->GetPose().so2().log();
    }


    /// Seed the occupancy map with the most recent frames of another submap,
    /// so a freshly created submap does not start out empty.
    void SetOccuFromOtherSubmap(std::shared_ptr<Submap> other) {
        // Bind by reference: the original `auto` copied the whole vector.
        auto& frames_in_other = other->GetFrames();
        // Take (up to) the last 10 frames. The original computed
        // `size() - 10`, which underflows size_t whenever fewer than 10
        // frames exist; the `if (i > 0)` guard could not repair that.
        const size_t total = frames_in_other.size();
        const size_t start = total > 10 ? total - 10 : 0;
        for (size_t i = start; i < total; ++i) {
            AddLidarFrame(frames_in_other[i]);
        }
        // NOTE(review): occu_map_ belongs to Submap, not this node — confirm
        // whether this should go through current_submap_.
        SetFieldImageFromOccuMap(occu_map_.GetOccupancyGrid());
    }

    // Insert one frame into the occupancy grid and refresh the likelihood
    // field image from the updated grid.
    // NOTE(review): occu_map_ is a member of Submap, not of Map2DNode — this
    // probably should go through current_submap_; confirm during refactor.
    void AddScanInOccupancyMap(std::shared_ptr<Frame> frame) {
        AddLidarFrame(frame, OccupancyMap::GridMethod::MODEL_POINTS);  // update the grid cells
        SetFieldImageFromOccuMap(occu_map_.GetOccupancyGrid());           // rebuild the field image
    }

    /// Rebuild the likelihood-field image from an occupancy grid: every
    /// occupied cell (value < 127) stamps the pre-computed model template
    /// around itself, keeping the minimum residual per pixel.
    /// The original qualified this definition as `LikelihoodField::…`, which
    /// is ill-formed inside this class — it is a plain member function here.
    void SetFieldImageFromOccuMap(const cv::Mat& occu_map) {
        const int border = 25;  // margin so the template never leaves the image ("boarder" typo fixed)
        // Use image_size_ instead of the hard-coded 1000; 30.0 = "far from any obstacle".
        field_ = cv::Mat(image_size_, image_size_, CV_32F, 30.0);

        for (int x = border; x < occu_map.cols - border; ++x) {
            for (int y = border; y < occu_map.rows - border; ++y) {
                if (occu_map.at<uchar>(y, x) < 127) {
                    // Occupied cell: stamp the model template, keeping minima.
                    for (const auto& model_pt : model_) {
                        int xx = x + model_pt.dx_;
                        int yy = y + model_pt.dy_;
                        if (xx >= 0 && xx < field_.cols && yy >= 0 && yy < field_.rows &&
                            field_.at<float>(yy, xx) > model_pt.residual_) {
                            field_.at<float>(yy, xx) = model_pt.residual_;
                        }
                    }
                }
            }
        }
    }

    void AddLidarFrame(std::shared_ptr<Frame> frame, GridMethod method) {
        auto& scan = frame->scan_;
        
        // 此处不能直接使用frame->pose_submap_，因为frame可能来自上一个地图
        // 此时frame->pose_submap_还未更新，依旧是frame在上一个地图中的pose
        SE2 pose_in_submap = pose_.inverse() * frame->pose_;
        float theta = pose_in_submap.so2().log();
        has_outside_pts_ = false;

        // 先计算末端点所在的网格
        std::set<Vec2i, less_vec<2>> endpoints;

        for (size_t i = 0; i < scan->ranges.size(); ++i) {
            if (scan->ranges[i] < scan->range_min || scan->ranges[i] > scan->range_max) {
                continue;
            }

            double real_angle = scan->angle_min + i * scan->angle_increment;
            double x = scan->ranges[i] * std::cos(real_angle);
            double y = scan->ranges[i] * std::sin(real_angle);

            endpoints.emplace(World2Image(frame->pose_ * Vec2d(x, y)));
        }

        if (method == GridMethod::MODEL_POINTS) {
            // 遍历模板，生成白色点
            std::for_each(std::execution::par_unseq, model_.begin(), model_.end(), [&](const Model2DPoint& pt) {
                Vec2i pos_in_image = World2Image(frame->pose_.translation());
                Vec2i pw = pos_in_image + Vec2i(pt.dx_, pt.dy_);  // submap下

                if (pt.range_ < closest_th_) {
                    // 小距离内认为无物体
                    SetPoint(pw, false);
                    return;
                }

                double angle = pt.angle_ - theta;  // 激光系下角度
                double range = FindRangeInAngle(angle, scan);

                if (range < scan->range_min || range > scan->range_max) {
                    /// 某方向无测量值时，认为无效
                    /// 但离机器比较近时，涂白
                    if (pt.range_ < endpoint_close_th_) {
                        SetPoint(pw, false);
                    }
                    return;
                }

                if (range > pt.range_ && endpoints.find(pw) == endpoints.end()) {
                    /// 末端点与车体连线上的点，涂白
                    SetPoint(pw, false);
                }
            });
        } else {
            Vec2i start = World2Image(frame->pose_.translation());
            std::for_each(std::execution::par_unseq, endpoints.begin(), endpoints.end(),
                        [this, &start](const auto& pt) { BresenhamFilling(start, pt); });
        }

        /// 末端点涂黑
        std::for_each(endpoints.begin(), endpoints.end(), [this](const auto& pt) { SetPoint(pt, true); });
    }

    /// Convert the 8-bit occupancy grid to a BGR image:
    /// <127 (occupied) -> black, ==127 (unknown) -> grey, >127 (free) -> white.
    cv::Mat GetOccupancyGridBlackWhite() const {
        // Initialise to grey so every pixel is defined — the original left the
        // image uninitialised wherever occupancy_grid_ was smaller than
        // image_size_ (or empty), producing garbage pixels.
        cv::Mat image(image_size_, image_size_, CV_8UC3, cv::Scalar(127, 127, 127));
        for (int x = 0; x < occupancy_grid_.cols; ++x) {
            for (int y = 0; y < occupancy_grid_.rows; ++y) {
                const uchar v = occupancy_grid_.at<uchar>(y, x);
                if (v < 127) {
                    image.at<cv::Vec3b>(y, x) = cv::Vec3b(0, 0, 0);
                } else if (v > 127) {
                    image.at<cv::Vec3b>(y, x) = cv::Vec3b(255, 255, 255);
                }  // v == 127 keeps the grey fill
            }
        }

        return image;
    }


    /// Draw a laser scan into `image` (creating a white canvas if empty) in
    /// the coordinate frame of `pose_submap`, plus a circle at the sensor.
    /// @param scan        scan to draw
    /// @param pose        T_w_c of the scan
    /// @param image       target BGR image, created when empty
    /// @param color       BGR colour of the scan points
    /// @param image_size  image side length in pixels
    /// @param resolution  pixels per metre
    /// @param pose_submap T_w_s of the submap the image represents
    void Visualize2DScan(const sensor_msgs::msg::LaserScan::SharedPtr scan, const SE2& pose, cv::Mat& image, const Vec3b& color, int image_size = 1000,
                            float resolution = 60.0, const SE2& pose_submap = SE2()) {
        if (image.data == nullptr) {
            image = cv::Mat(image_size, image_size, CV_8UC3, cv::Vec3b(255, 255, 255));
        }

        for (size_t i = 0; i < scan->ranges.size(); ++i) {
            if (scan->ranges[i] < scan->range_min || scan->ranges[i] > scan->range_max) {
                continue;
            }

            double real_angle = scan->angle_min + i * scan->angle_increment;

            // Skip the 30-degree border of the field of view — checked BEFORE
            // the trigonometry (the original computed cos/sin first and threw
            // the result away for filtered beams).
            if (real_angle < scan->angle_min + 30 * M_PI / 180.0 || real_angle > scan->angle_max - 30 * M_PI / 180.0) {
                continue;
            }

            double x = scan->ranges[i] * std::cos(real_angle);
            double y = scan->ranges[i] * std::sin(real_angle);

            // World point expressed in the submap frame.
            Vec2d psubmap = pose_submap.inverse() * (pose * Vec2d(x, y));

            int image_x = int(psubmap[0] * resolution + image_size / 2);
            int image_y = int(psubmap[1] * resolution + image_size / 2);
            if (image_x >= 0 && image_x < image.cols && image_y >= 0 && image_y < image.rows) {
                image.at<Vec3b>(image_y, image_x) = Vec3b(color[0], color[1], color[2]);
            }
        }

        // Also mark the sensor position itself.
        Vec2d pose_in_image =
            pose_submap.inverse() * (pose.translation()) * double(resolution) + Vec2d(image_size / 2, image_size / 2);
        cv::circle(image, cv::Point2f(pose_in_image[0], pose_in_image[1]), 5, cv::Scalar(color[0], color[1], color[2]), 2);
    }


    /// Render the likelihood field as a grey-scale BGR image: field values
    /// in [0, 30] map linearly to intensities [0, 255].
    cv::Mat GetFieldImage() {
        cv::Mat image(field_.rows, field_.cols, CV_8UC3);
        for (int row = 0; row < field_.rows; ++row) {
            for (int col = 0; col < field_.cols; ++col) {
                const float scaled = field_.at<float>(row, col) * 255.0 / 30.0;
                const uchar grey = uchar(scaled);
                image.at<cv::Vec3b>(row, col) = cv::Vec3b(grey, grey, grey);
            }
        }

        return image;
    }

    /// Render all submaps into one global map image, plus submap axes,
    /// keyframe trajectories and loop-closure links.
    /// @param max_size  side-length limit of the output image in pixels;
    ///                  default added because ProcessScan calls it without
    ///                  arguments.
    /// @return the rendered image, or an empty cv::Mat when there is nothing
    ///         to draw yet.
    cv::Mat ShowGlobalMap(int max_size = 500) {
        //// TODO global map with fixed size and dynamic resolution
        Vec2f top_left = Vec2f(999999, 999999);
        Vec2f bottom_right = Vec2f(-999999, -999999);

        const float submap_resolution = 20.0;  // submap resolution (pixels per metre)
        const float submap_size = 50.0;        // physical size of a single submap [m]

        /// Physical bounds of the global map. Iterate by const reference —
        /// the original `auto m` copied a shared_ptr (atomic refcount) per
        /// iteration.
        for (const auto& m : all_submaps_) {
            Vec2d c = m->GetPose().translation();
            if (top_left[0] > c[0] - submap_size / 2) {
                top_left[0] = c[0] - submap_size / 2;
            }
            if (top_left[1] > c[1] - submap_size / 2) {
                top_left[1] = c[1] - submap_size / 2;
            }

            if (bottom_right[0] < c[0] + submap_size / 2) {
                bottom_right[0] = c[0] + submap_size / 2;
            }
            if (bottom_right[1] < c[1] + submap_size / 2) {
                bottom_right[1] = c[1] + submap_size / 2;
            }
        }

        if (top_left[0] > bottom_right[0] || top_left[1] > bottom_right[1]) {
            return cv::Mat();
        }

        /// Physical centre of the global map.
        Vec2f global_center = Vec2f((top_left[0] + bottom_right[0]) / 2.0, (top_left[1] + bottom_right[1]) / 2.0);
        float phy_width = bottom_right[0] - top_left[0];   // physical width [m]
        float phy_height = bottom_right[1] - top_left[1];  // physical height [m]
        float global_map_resolution = 0;

        if (phy_width > phy_height) {
            global_map_resolution = max_size / phy_width;
        } else {
            global_map_resolution = max_size / phy_height;
        }

        Vec2f c = global_center;
        // Snap the centre to whole pixels.
        int c_x = global_center[0] * global_map_resolution;
        int c_y = global_center[1] * global_map_resolution;
        global_center = Vec2f(c_x / global_map_resolution, c_y / global_map_resolution);  // image centre of the global map

        int width = int((bottom_right[0] - top_left[0]) * global_map_resolution + 0.5);
        int height = int((bottom_right[1] - top_left[1]) * global_map_resolution + 0.5);

        Vec2f center_image = Vec2f(width / 2, height / 2);
        cv::Mat output_image(height, width, CV_8UC3, cv::Scalar(127, 127, 127));

        std::vector<Vec2i> render_data;
        render_data.reserve(width * height);
        for (int x = 0; x < width; ++x) {
            for (int y = 0; y < height; ++y) {
                render_data.emplace_back(Vec2i(x, y));
            }
        }

        // For every output pixel, find the first submap that covers it.
        std::for_each(std::execution::par_unseq, render_data.begin(), render_data.end(), [&](const Vec2i& xy) {
            int x = xy[0], y = xy[1];
            Vec2f pw = (Vec2f(x, y) - center_image) / global_map_resolution + c;  // world coordinates

            for (auto& m : all_submaps_) {
                Vec2f ps = m->GetPose().inverse().cast<float>() * pw;  // in submap
                Vec2i pt = (ps * submap_resolution + Vec2f(500, 500)).cast<int>();

                if (pt[0] < 0 || pt[0] >= 1000 || pt[1] < 0 || pt[1] >= 1000) {
                    continue;
                }

                uchar value = m->GetOccupancyGrid().at<uchar>(pt[1], pt[0]);
                if (value > 127) {
                    // Free: tinted for the current submap, white otherwise.
                    if (m == current_submap_) {
                        output_image.at<cv::Vec3b>(y, x) = cv::Vec3b(235, 250, 230);
                    } else {
                        output_image.at<cv::Vec3b>(y, x) = cv::Vec3b(255, 255, 255);
                    }
                    break;
                } else if (value < 127) {
                    // Occupied: tinted for the current submap, black otherwise.
                    if (m == current_submap_) {
                        output_image.at<cv::Vec3b>(y, x) = cv::Vec3b(230, 20, 30);
                    } else {
                        output_image.at<cv::Vec3b>(y, x) = cv::Vec3b(0, 0, 0);
                    }
                    break;
                }
            }
        });

        for (const auto& m : all_submaps_) {
            /// Projection of the submap pose into the global map.
            // `SE2f` was never aliased in this file — deduce the
            // Sophus::SE2<float> type from the cast instead.
            const auto submap_pose = m->GetPose().cast<float>();
            Vec2f submap_center = submap_pose.translation();
            Vec2f submap_xw = submap_pose * Vec2f(1.0, 0);
            Vec2f submap_yw = submap_pose * Vec2f(0, 1.0);

            Vec2f center_map = (submap_center - global_center) * global_map_resolution + center_image;
            Vec2f x_map = (submap_xw - global_center) * global_map_resolution + center_image;
            Vec2f y_map = (submap_yw - global_center) * global_map_resolution + center_image;

            // x and y axes of the submap frame
            cv::line(output_image, cv::Point2f(center_map.x(), center_map.y()), cv::Point2f(x_map.x(), x_map.y()),
                    cv::Scalar(0, 0, 255), 2);
            cv::line(output_image, cv::Point2f(center_map.x(), center_map.y()), cv::Point2f(y_map.x(), y_map.y()),
                    cv::Scalar(0, 255, 0), 2);
            cv::putText(output_image, std::to_string(m->GetId()), cv::Point2f(center_map.x() + 10, center_map.y() - 10),
                        cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 0, 0));

            // keyframe trajectory
            for (const auto& frame : m->GetFrames()) {
                Vec2f p_map =
                    (frame->pose_.translation().cast<float>() - global_center) * global_map_resolution + center_image;
                cv::circle(output_image, cv::Point2f(p_map.x(), p_map.y()), 1, cv::Scalar(0, 0, 255), 1);
            }
        }

        if (loop_closing_) {
            /// Pose-graph links found by loop closing.
            auto loops = loop_closing_->GetLoops();
            for (const auto& lc : loops) {
                auto first_id = lc.first.first;
                auto second_id = lc.first.second;

                Vec2f c1 = all_submaps_[first_id]->GetPose().translation().cast<float>();
                Vec2f c2 = all_submaps_[second_id]->GetPose().translation().cast<float>();

                Vec2f c1_map = (c1 - global_center) * global_map_resolution + center_image;
                Vec2f c2_map = (c2 - global_center) * global_map_resolution + center_image;

                cv::line(output_image, cv::Point2f(c1_map.x(), c1_map.y()), cv::Point2f(c2_map.x(), c2_map.y()),
                        cv::Scalar(255, 0, 0), 2);
            }
        }

        return output_image;
    }



    std::string scan_topic_;  // laser scan topic name (parameter "scan_topic")
    rclcpp::Subscription<sensor_msgs::msg::LaserScan>::SharedPtr scan_sub_;  // laser scan subscription
};

/// Entry point: spin the 2D mapping node until shutdown is requested.
int main(int argc, char **argv) {
    rclcpp::init(argc, argv);
    auto node = std::make_shared<Map2DNode>();
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}
