/**
 * Copyright (c) 2017, California Institute of Technology.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the California Institute of
 * Technology.
 */

#include "apriltag_ros/common_functions.h"
#include "image_geometry/pinhole_camera_model.h"

#include "common/homography.h"
#include "tag36h11.h"
#include "tag25h9.h"
#include "tag16h5.h"
#include <Eigen/Dense>
#include <tf/transform_broadcaster.h>
#include <tf/transform_listener.h>
#include <tf/transform_datatypes.h>

namespace apriltag_ros
{

/**
 * Construct the tag detector from the private node handle's parameters.
 *
 * Reads all detector tuning parameters (family, decimation, blur, quad
 * threshold params, ...), parses the standalone-tag and tag-bundle
 * descriptions from the parameter server, creates the AprilTag detector,
 * and advertises every debug/visualization topic.
 *
 * @param pnh private ROS node handle holding the configuration.
 */
TagDetector::TagDetector(ros::NodeHandle pnh) :
    family_(getAprilTagOption<std::string>(pnh, "tag_family", "tag36h11")),
    threads_(getAprilTagOption<int>(pnh, "tag_threads", 4)),
    decimate_(getAprilTagOption<double>(pnh, "tag_decimate", 1.0)),
    blur_(getAprilTagOption<double>(pnh, "tag_blur", 0.0)),
    refine_edges_(getAprilTagOption<int>(pnh, "tag_refine_edges", 1)),
    debug_(getAprilTagOption<int>(pnh, "tag_debug", 0)),
    tag_test_flag_(getAprilTagOption<bool>(pnh, "tag_test_flag", false)),
    tag_force_v_(getAprilTagOption<bool>(pnh, "tag_force_v", true)),
    tag_v_thresh_(getAprilTagOption<double>(pnh, "tag_v_thresh", 20.0)),
    tag_rot_deg_(getAprilTagOption<int>(pnh, "tag_rot_deg", 1)),
    decode_sharpening_(getAprilTagOption<double>(pnh, "decode_sharpening", 0.25)),
    max_change_distance_(getAprilTagOption<double>(pnh, "max_change_distance", 5.0)),
    max_rotation_distance_(getAprilTagOption<double>(pnh, "max_rotation_distance", 1.0)),
    close_pose_lost_(getAprilTagOption<bool>(pnh, "close_pose_lost", false)),
    publish_tf_(getAprilTagOption<bool>(pnh, "publish_tf", false)),
    detect_more_family_tag_(getAprilTagOption<bool>(pnh, "detect_more_family_tag", false)),
    max_hamming_distance_(getAprilTagOption<int>(pnh, "max_hamming_distance", 2)),
    qtp_max_nmaxima(getAprilTagOption<int>(pnh, "qtp_max_nmaxima", 10)),
    qtp_min_cluster_pixels(getAprilTagOption<int>(pnh, "qtp_min_cluster_pixels", 5)),
    qtp_max_line_fit_mse(getAprilTagOption<double>(pnh, "qtp_max_line_fit_mse", 10.0)),
    qtp_cos_critical_rad(getAprilTagOption<double>(pnh, "qtp_cos_critical_rad", 10.0)),
    qtp_deglitch(getAprilTagOption<bool>(pnh, "qtp_deglitch", false)),
    qtp_min_white_black_diff(getAprilTagOption<int>(pnh, "qtp_min_white_black_diff", 5)),
    windowSize_(getAprilTagOption<int>(pnh, "windowSize", 5)),
    polyOrder_(getAprilTagOption<int>(pnh, "polyOrder", 2)),
    windowIndex_(getAprilTagOption<int>(pnh, "windowIndex", 4)),
    deltaXT(getAprilTagOption<double>(pnh, "deltaXT", 0.2)),
    deltaYT(getAprilTagOption<double>(pnh, "deltaYT", 0.2)),
    deltaZT(getAprilTagOption<double>(pnh, "deltaZT", 0.2)),
    pnp_angle_thresh_(getAprilTagOption<double>(pnh, "pnp_angle_thresh", 15.0)),
    use_tag_frame_id_(getAprilTagOption<int>(pnh, "use_tag_frame_id", 0)),
    use_camera2tag_(getAprilTagOption<int>(pnh, "use_camera2tag", 1)),
    all_tag_into_together_(getAprilTagOption<int>(pnh, "all_tag_into_together", 1)),
    inverse_method_(getAprilTagOption<int>(pnh, "inverse_method", 1)),
    pcl_tag_cloudptr_(new pcl::PointCloud<pcl::PointXYZRGB>()),
    pcl_corner_cloudptr_(new pcl::PointCloud<pcl::PointXYZRGB>()),
    pcl_path_cloudptr_(new pcl::PointCloud<pcl::PointXYZRGB>()),
    pcl_abnormal_path_cloudptr_(new pcl::PointCloud<pcl::PointXYZRGB>()),
    pcl_best_path_cloudptr0_(new pcl::PointCloud<pcl::PointXYZRGB>()),
    pcl_best_path_cloudptr_(new pcl::PointCloud<pcl::PointXYZRGB>())
{
  init_flag_ = false;
  pose_lost_flag_ = true;
  pcl_best_path_cloudptr_->resize(1);
  // Savitzky-Golay smoothing coefficients for the camera path filter.
  SGCoeffs_ = computeSGCoeffs(windowSize_, polyOrder_);

  // Parse standalone tag descriptions specified by user (stored on ROS
  // parameter server)
  XmlRpc::XmlRpcValue standalone_tag_descriptions;
  if(!pnh.getParam("standalone_tags", standalone_tag_descriptions))
  {
    ROS_WARN("No april tags specified");
  }
  else
  {
    try
    {
      standalone_tag_descriptions_ =
          parseStandaloneTags(standalone_tag_descriptions);
    }
    // Catch by const reference (catching by value copies/slices the
    // exception object).
    catch(const XmlRpc::XmlRpcException& e)
    {
      // in case any of the asserts in parseStandaloneTags() fail
      ROS_ERROR_STREAM("Error loading standalone tag descriptions: " <<
                       e.getMessage().c_str());
    }
  }

  // parse tag bundle descriptions specified by user (stored on ROS parameter
  // server)
  XmlRpc::XmlRpcValue tag_bundle_descriptions;
  if(!pnh.getParam("tag_bundles", tag_bundle_descriptions))
  {
    ROS_WARN("No tag bundles specified");
  }
  else
  {
    try
    {
      tag_bundle_descriptions_ = parseTagBundles(tag_bundle_descriptions);
    }
    catch(const XmlRpc::XmlRpcException& e)
    {
      // In case any of the asserts in parseTagBundles() fail
      ROS_ERROR_STREAM("Error loading tag bundle descriptions: " <<
                       e.getMessage().c_str());
    }
  }

  // Optionally remove duplicate detections in scene. Defaults to removing
  if(!pnh.getParam("remove_duplicates", remove_duplicates_))
  {
    ROS_WARN("remove_duplicates parameter not provided. Defaulting to true");
    remove_duplicates_ = true;
  }

  // Define the tag family whose tags should be searched for in the camera
  // images
  if (family_ == "tag36h11")
  {
    tf_ = tag36h11_create();
  }
  else if (family_ == "tag25h9")
  {
    tf_ = tag25h9_create();
  }
  else if (family_ == "tag16h5")
  {
    tf_ = tag16h5_create();
  }
  else
  {
    ROS_WARN("Invalid tag family specified! Aborting");
    exit(1);
  }

  // Create the AprilTag 2 detector
  td_ = apriltag_detector_create();
  apriltag_detector_add_family_bits(td_, tf_, max_hamming_distance_);
  if (detect_more_family_tag_) {
    // Additionally detect tag25h9 tags alongside the primary family.
    tf_tag25h9_ = tag25h9_create();
    apriltag_detector_add_family_bits(td_, tf_tag25h9_, max_hamming_distance_);
  }
  td_->quad_decimate = (float)decimate_;
  td_->quad_sigma = (float)blur_;
  td_->nthreads = threads_;
  td_->debug = debug_;
  td_->tag_test_flag = tag_test_flag_;
  td_->tag_force_v = tag_force_v_;
  td_->tag_v_thresh = tag_v_thresh_;
  td_->tag_rot_deg = tag_rot_deg_;
  td_->decode_sharpening = decode_sharpening_;

  td_->refine_edges = refine_edges_;

  // apriltag_quad_thresh_params
  td_->qtp.max_nmaxima = qtp_max_nmaxima;
  td_->qtp.min_cluster_pixels = qtp_min_cluster_pixels;

  td_->qtp.max_line_fit_mse = qtp_max_line_fit_mse;
  // Parameter is given in degrees; the detector expects the cosine.
  td_->qtp.cos_critical_rad = cos(qtp_cos_critical_rad * M_PI / 180);
  td_->qtp.deglitch = qtp_deglitch;
  td_->qtp.min_white_black_diff = qtp_min_white_black_diff;

  detections_ = NULL;

  // Get tf frame name to use for the camera
  if (!pnh.getParam("camera_frame", camera_tf_frame_))
  {
    ROS_WARN_STREAM("Camera frame not specified, using 'camera'");
    camera_tf_frame_ = "camera";
  }
  std::cout << "camera_tf_frame: " << camera_tf_frame_ << std::endl;

  // Per-tag-ID display colors used by the debug point clouds.
  std::vector<int> color;
  color = {255, 0, 0};  // red
  colors.emplace_back(color);
  color = {0, 255, 0};  // green
  colors.emplace_back(color);
  color = {255, 128, 0};  // orange
  colors.emplace_back(color);
  color = {0, 255, 255};  // cyan
  colors.emplace_back(color);
  color = {255, 255, 0};  // yellow
  colors.emplace_back(color);
  color = {0, 0, 255};   // blue
  colors.emplace_back(color);
  color = {255, 0, 255};   // purple
  colors.emplace_back(color);

  // Debug/visualization path topics (one per tag ID plus aggregates).
  tag_path_pubilsher_ = nh_.advertise<nav_msgs::Path>("/zhz/tag_path", 1, true);
  path_pubilsher_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path", 1, true);
  path_ipubilsher_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_ipath", 1, true);
  smooth_path_pubilsher_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_smooth_path", 1, true);
  path_pubilsher0_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path0", 1, true);
  path_pubilsher1_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path1", 1, true);
  path_pubilsher2_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path2", 1, true);
  path_pubilsher3_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path3", 1, true);
  path_pubilsher4_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path4", 1, true);
  path_pubilsher5_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path5", 1, true);
  path_pubilsher6_ = nh_.advertise<nav_msgs::Path>("/zhz/camera_path6", 1, true);

  // Debug/visualization point-cloud topics.
  pub_rviz_tag_ = nh_.advertise<sensor_msgs::PointCloud2>(
                        "/zhz/driver/cloud_tag", 1, true);
  pub_rviz_path_ = nh_.advertise<sensor_msgs::PointCloud2>(
                        "/zhz/driver/cloud_path", 1, true);
  pub_rviz_abnormal_path_ = nh_.advertise<sensor_msgs::PointCloud2>(
                        "/zhz/driver/cloud_abnormal_path", 1, true);
  pub_rviz_best_path_ = nh_.advertise<sensor_msgs::PointCloud2>(
                        "/zhz/driver/cloud_best_path", 1, true);
  pub_rviz_best_path0_ = nh_.advertise<sensor_msgs::PointCloud2>(
                        "/zhz/driver/cloud_best_path0", 1, true);
  pub_rviz_corner_ = nh_.advertise<sensor_msgs::PointCloud2>(
                        "/zhz/driver/cloud_corner", 1, true);

  // Pose outputs consumed by downstream nodes.
  apriltag_pose_pub_ = nh_.advertise<apriltag_ros::AprilTagPose>(
                    "/zhz/apriltag_pose", 1, true);
  geometry_msgs_poseCovariance_pub_ = nh_.advertise<geometry_msgs::PoseWithCovarianceStamped>(
                    "/zhz/camera_pose", 1, true);
  odomtry_publisher_ = nh_.advertise<nav_msgs::Odometry>("/zhz/camera_odomtry", 1, true);
}

// destructor
/**
 * Destructor: releases the AprilTag detector, any outstanding detection
 * array, and the tag family objects created in the constructor.
 */
TagDetector::~TagDetector() {
  // free memory associated with tag detector
  apriltag_detector_destroy(td_);

  // Free memory associated with the array of tag detections.
  // detections_ starts out NULL and stays NULL until detectTags() runs,
  // so guard against destroying a NULL array.
  if (detections_)
  {
    apriltag_detections_destroy(detections_);
  }

  // free memory associated with tag family
  if (family_ == "tag36h11")
  {
    tag36h11_destroy(tf_);
  }
  else if (family_ == "tag25h9")
  {
    tag25h9_destroy(tf_);
  }
  else if (family_ == "tag16h5")
  {
    tag16h5_destroy(tf_);
  }
  if (detect_more_family_tag_) {
    // BUG FIX: tf_tag25h9_ is created with tag25h9_create() in the
    // constructor, so it must be released with the matching
    // tag25h9_destroy() (previously tag16h5_destroy() was called).
    tag25h9_destroy(tf_tag25h9_);
  }
}

/**
 * Convert a PCL cloud to a PointCloud2 message and publish it.
 *
 * Very large clouds (> 1e6 points) are voxel-grid downsampled in place
 * before conversion to keep the message size bounded.
 *
 * @param pub    publisher the converted message is sent on.
 * @param cloud  cloud to publish; may be downsampled in place.
 * @param header header (stamp + frame_id) stamped onto the message.
 */
void TagDetector::PublishCloudInfo(ros::Publisher& pub, 
            pcl::PointCloud<pcl::PointXYZRGB>::Ptr& cloud, std_msgs::Header& header) {
  if (cloud->size() > 1000000) {
    // BUG FIX: the 2 cm leaf size was declared but never applied, leaving
    // the VoxelGrid with its default (zero) leaf size so the filter could
    // not downsample correctly. Apply it before filtering.
    pcl::VoxelGrid<pcl::PointXYZRGB> sor;
    const float vs = 0.02f;  // voxel leaf size [m]
    sor.setLeafSize(vs, vs, vs);
    sor.setInputCloud(cloud);
    sor.filter(*cloud);
  }

  sensor_msgs::PointCloud2 rviz_path_msg_;
  pcl::toROSMsg(*cloud, rviz_path_msg_);
  rviz_path_msg_.header = header;
  pub.publish(rviz_path_msg_);
}

/**
 * Publish all RViz debug output: the accumulated point clouds and every
 * camera path message, all stamped with the given header in the camera
 * frame (the inverse path is expressed in the "tag_6" frame instead).
 *
 * @param header header (by value; its frame_id is overwritten) applied to
 *               every outgoing message.
 */
void TagDetector::PublishRvizInfo(std_msgs::Header header) {
  header.frame_id = camera_tf_frame_;

  // Debug point clouds.
  PublishCloudInfo(pub_rviz_tag_, pcl_tag_cloudptr_, header);
  PublishCloudInfo(pub_rviz_corner_, pcl_corner_cloudptr_, header);
  PublishCloudInfo(pub_rviz_path_, pcl_path_cloudptr_, header);
  PublishCloudInfo(pub_rviz_abnormal_path_, pcl_abnormal_path_cloudptr_, header);
  PublishCloudInfo(pub_rviz_best_path_, pcl_best_path_cloudptr_, header);
  PublishCloudInfo(pub_rviz_best_path0_, pcl_best_path_cloudptr0_, header);

  // Aggregate paths; the inverse path lives in the tag_6 frame.
  camera_path_.header = header;
  camera_ipath_.header = header;
  camera_ipath_.header.frame_id = "tag_6";
  camera_smooth_path_.header = header;

  // Per-tag-ID paths, handled table-style to avoid seven copies of the
  // same stamp-then-publish sequence.
  nav_msgs::Path* tag_paths[] = {
      &camera_path0_, &camera_path1_, &camera_path2_, &camera_path3_,
      &camera_path4_, &camera_path5_, &camera_path6_};
  ros::Publisher* tag_path_pubs[] = {
      &path_pubilsher0_, &path_pubilsher1_, &path_pubilsher2_,
      &path_pubilsher3_, &path_pubilsher4_, &path_pubilsher5_,
      &path_pubilsher6_};
  for (nav_msgs::Path* path : tag_paths) {
    path->header = header;
  }

  path_pubilsher_.publish(camera_path_);
  path_ipubilsher_.publish(camera_ipath_);
  smooth_path_pubilsher_.publish(camera_smooth_path_);
  for (size_t i = 0; i < sizeof(tag_paths) / sizeof(tag_paths[0]); ++i) {
    tag_path_pubs[i]->publish(*tag_paths[i]);
  }
}

std::pair<AprilTagDetectionArray, Apriltags> TagDetector::detectTags (
    const cv_bridge::CvImagePtr& image,
    const sensor_msgs::CameraInfoConstPtr& camera_info) {
  // Convert image to AprilTag code's format
  cv::Mat gray_image;
  cv::cvtColor(image->image, gray_image, CV_BGR2GRAY);
  image_u8_t apriltag_image = { .width = gray_image.cols,
                                  .height = gray_image.rows,
                                  .stride = gray_image.cols,
                                  .buf = gray_image.data
  };

  image_geometry::PinholeCameraModel camera_model;
  camera_model.fromCameraInfo(camera_info);

  // Get camera intrinsic properties for rectified image.
  double fx = camera_model.fx(); // focal length in camera x-direction [px]
  double fy = camera_model.fy(); // focal length in camera y-direction [px]
  double cx = camera_model.cx(); // optical center x-coordinate [px]
  double cy = camera_model.cy(); // optical center y-coordinate [px]
  auto distCoeffs = camera_model.distortionCoeffs();
  // auto intrinsicMatrix = camera_model.intrinsicMatrix();

  // Run AprilTag 2 algorithm on the image
  if (detections_)
  {
    apriltag_detections_destroy(detections_);
    detections_ = NULL;
  }
  detections_ = apriltag_detector_detect(td_, &apriltag_image);

  // If remove_dulpicates_ is set to true, then duplicate tags are not allowed.
  // Thus any duplicate tag IDs visible in the scene must include at least 1
  // erroneous detection. Remove any tags with duplicate IDs to ensure removal
  // of these erroneous detections 
  if (remove_duplicates_)
  {
    removeDuplicates();
  }

  // Compute the estimated translation and rotation individually for each
  // detected tag
  AprilTagDetectionArray tag_detection_array;
  apriltag_ros::Apriltags aprilslam_tag_detections;
  std::vector<std::string > detection_names;
  std::vector<Eigen::Vector3d> detection_translates;
  std::map<std::string, std::vector<cv::Point3d > > bundleObjectPoints;
  std::map<std::string, std::vector<cv::Point2d > > bundleImagePoints;
  int detections_size = zarray_size(detections_);
  pcl_corner_cloud_.resize(detections_size * 13);
  for (int i=0; i < zarray_size(detections_); i++)
  {
    // Get the i-th detected tag
    apriltag_detection_t *detection;
    zarray_get(detections_, i, &detection);

    // Bootstrap this for loop to find this tag's description amongst
    // the tag bundles. If found, add its points to the bundle's set of
    // object-image corresponding points (tag corners) for cv::solvePnP.
    // Don't yet run cv::solvePnP on the bundles, though, since we're still in
    // the process of collecting all the object-image corresponding points
    int tagID = detection->id;
    bool is_part_of_bundle = false;
    for (unsigned int j=0; j<tag_bundle_descriptions_.size(); j++)
    {
      // Iterate over the registered bundles
      TagBundleDescription bundle = tag_bundle_descriptions_[j];

      if (bundle.id2idx_.find(tagID) != bundle.id2idx_.end())
      {
        // This detected tag belongs to the j-th tag bundle (its ID was found in
        // the bundle description)
        is_part_of_bundle = true;
        std::string bundleName = bundle.name();

        //===== Corner points in the world frame coordinates
        double s = bundle.memberSize(tagID)/2;
        addObjectPoints(s, bundle.memberT_oi(tagID),
                        bundleObjectPoints[bundleName]);

        //===== Corner points in the image frame coordinates
        addImagePoints(detection, bundleImagePoints[bundleName]);
      }
    }

    // Find this tag's description amongst the standalone tags
    // Print warning when a tag was found that is neither part of a
    // bundle nor standalone (thus it is a tag in the environment
    // which the user specified no description for, or Apriltags
    // misdetected a tag (bad ID or a false positive)).
    StandaloneTagDescription* standaloneDescription;
    if (!findStandaloneTagDescription(tagID, standaloneDescription,
                                      !is_part_of_bundle))
    {
      continue; 
    }

    std::string  family_name = detection->family->name;


    // std::cout << " family_name : " << family_name << std::endl;
    // std::cout << " tagID : " << tagID << std::endl;
    // std::cout << " hamming : " << detection->hamming << std::endl;
    // std::cout << " decision_margin : " << detection->decision_margin << std::endl;
    // std::cout << " c0 : " << detection->c[0] << std::endl;
    // std::cout << " c1 : " << detection->c[1] << std::endl;

    if (tagID > 6) {
      continue;
    } else if (tagID == 6) {
      if (family_name != "tag25h9") {
        continue;
      }
    } else {
      if (family_name != "tag36h11") {
        continue;
      }
    }

    //=================================================================
    // The remainder of this for loop is concerned with standalone tag
    // poses!
    double tag_size = standaloneDescription->size();

    // Get estimated tag pose in the camera frame.
    //
    // Note on frames:
    // The raw AprilTag 2 uses the following frames:
    //   - camera frame: looking from behind the camera (like a
    //     photographer), x is right, y is up and z is towards you
    //     (i.e. the back of camera)
    //   - tag frame: looking straight at the tag (oriented correctly),
    //     x is right, y is down and z is away from you (into the tag).
    // But we want:
    //   - camera frame: looking from behind the camera (like a
    //     photographer), x is right, y is down and z is straight
    //     ahead
    //   - tag frame: looking straight at the tag (oriented correctly),
    //     x is right, y is up and z is towards you (out of the tag).
    // Using these frames together with cv::solvePnP directly avoids
    // AprilTag 2's frames altogether.
    // TODO solvePnP[Ransac] better?
    std::vector<cv::Point3d > standaloneTagObjectPoints;
    std::vector<cv::Point2d > standaloneTagImagePoints;
    addObjectPoints(tag_size/2, cv::Matx44d::eye(), standaloneTagObjectPoints);
    addImagePoints(detection, standaloneTagImagePoints);
    Eigen::Matrix4d transform = getRelativeTransform(standaloneTagObjectPoints,
                                                     standaloneTagImagePoints,
                                                     fx, fy, cx, cy, distCoeffs);
    Eigen::Matrix3d rot = transform.block(0, 0, 3, 3);
    Eigen::Quaternion<double> rot_quaternion(rot);
    
    geometry_msgs::PoseWithCovarianceStamped tag_pose =
        makeTagPose(transform, rot_quaternion, image->header,
        standaloneDescription->translate2center());

    // Add the detection to the back of the tag detection array
    AprilTagDetection tag_detection;
    // tag_pose.header.frame_id = standaloneDescription->frame_name();
    tag_detection.pose = tag_pose;
    tag_detection.id.push_back(detection->id);
    tag_detection.size.push_back(tag_size);
    tag_detection.hamming.push_back(detection->hamming);
    tag_detection_array.detections.push_back(tag_detection);
    detection_names.push_back(standaloneDescription->frame_name());
    detection_translates.push_back(standaloneDescription->translate2center());

    Apriltag aprilslam_tag;
    aprilslam_tag.id = detection->id;
    aprilslam_tag.family = detection->id == 6 ? "tag25h9" : "tag36h11";
    aprilslam_tag.hamming_distance = 0;
    aprilslam_tag.size = tag_size;

    geometry_msgs::Point center;
    for (int i_corner = 0; i_corner < standaloneTagImagePoints.size(); i_corner++) {
      geometry_msgs::Point ros_point;
      ros_point.x = standaloneTagImagePoints[i_corner].x;
      ros_point.y = standaloneTagImagePoints[i_corner].y;
      ros_point.z = 0;
      aprilslam_tag.corners.push_back(ros_point);
      // std::cout<<ros_point;
      center.x += ros_point.x;
      center.y += ros_point.y ;
    }
    center.x = center.x / 4;
    center.y = center.y / 4;
    center.z = 0;
    aprilslam_tag.center = center;
    aprilslam_tag.pose = tag_pose.pose.pose;
    aprilslam_tag_detections.apriltags.push_back(aprilslam_tag);  

    if (tag_test_flag_) {
      for (int k = 0; k < 4; k++) {
        pcl::PointXYZRGB p;
        p.x = detection->p[k][0];
        p.y = detection->p[k][1];
        p.z = 0;
        // int index = k;
        int index = tagID;
        p.r = colors[index][0];
        p.g = colors[index][1];
        p.b = colors[index][2];
        pcl_corner_cloudptr_->points.push_back(p);
        pcl_corner_cloud_.points[i*12 + k] = p;
      }

      int ind = 0;
      for (auto s : standaloneTagObjectPoints) {
        pcl::PointXYZRGB p;
        p.x = s.x;
        p.y = s.y;
        p.z = s.z;
        // int index = ind % 4;
        int index = tagID;
        p.r = colors[index][0];
        p.g = colors[index][1];
        p.b = colors[index][2];
        pcl_tag_cloudptr_->points.push_back(p);
        ind++;
        pcl_corner_cloud_.points[i*12 + ind + 4] = p;
      }
      ind = 0;
      for (auto s : standaloneTagImagePoints) {
        pcl::PointXYZRGB p;
        p.x = s.x;
        p.y = s.y;
        p.z = 0;
        // int index = ind % 4;
        int index = tagID;
        p.r = colors[index][0];
        p.g = colors[index][1];
        p.b = colors[index][2];
        pcl_tag_cloudptr_->points.push_back(p);
        ind++;
        pcl_corner_cloud_.points[i*12 + ind + 8] = p;
      }
    }
  }

  //=================================================================
  // Estimate bundle origin pose for each bundle in which at least one
  // member tag was detected

  for (unsigned int j=0; j<tag_bundle_descriptions_.size(); j++)
  {
    // Get bundle name
    std::string bundleName = tag_bundle_descriptions_[j].name();

    std::map<std::string,
             std::vector<cv::Point3d> >::iterator it =
        bundleObjectPoints.find(bundleName);
    if (it != bundleObjectPoints.end())
    {
      // Some member tags of this bundle were detected, get the bundle's
      // position!
      TagBundleDescription& bundle = tag_bundle_descriptions_[j];

      Eigen::Matrix4d transform =
          getRelativeTransform(bundleObjectPoints[bundleName],
                               bundleImagePoints[bundleName], fx, fy, cx, cy, distCoeffs);
      Eigen::Matrix3d rot = transform.block(0, 0, 3, 3);
      Eigen::Quaternion<double> rot_quaternion(rot);

      geometry_msgs::PoseWithCovarianceStamped bundle_pose =
          makeTagPose(transform, rot_quaternion, image->header);

      // Add the detection to the back of the tag detection array
      AprilTagDetection tag_detection;
      tag_detection.pose = bundle_pose;
      tag_detection.id = bundle.bundleIds();
      tag_detection.size = bundle.bundleSizes();
      tag_detection_array.detections.push_back(tag_detection);
      detection_names.push_back(bundle.name());
    }
  }

  /*
  if(_config.frame_pose){
    _ros_frame_pose_pub = nh.advertise<geometry_msgs::PoseStamped>(_config.frame_pose_topic, 10);
    _pub_latest_odometry = nh.advertise<nav_msgs::Odometry>(_config.frame_odometry_topic, 1000);
    _ros_all_path_pub = nh.advertise<nav_msgs::Path>(_config.path_topic + "_all", 10);
    _ros_path_all.header.frame_id = "map";

    std::function<void(const FramePoseMessageConstPtr&)> publish_frame_pose_function = 
        [&](const FramePoseMessageConstPtr& frame_pose_message){
      geometry_msgs::PoseStamped pose_stamped;
      pose_stamped.header.stamp = ros::Time().fromSec(frame_pose_message->time);
      pose_stamped.header.frame_id = "map";
      pose_stamped.pose.position.x = frame_pose_message->pose(0, 3);
      pose_stamped.pose.position.y = frame_pose_message->pose(1, 3);
      pose_stamped.pose.position.z = frame_pose_message->pose(2, 3);
      Eigen::Quaterniond q(frame_pose_message->pose.block<3, 3>(0, 0));
      pose_stamped.pose.orientation.x = q.x();
      pose_stamped.pose.orientation.y = q.y();
      pose_stamped.pose.orientation.z = q.z();
      pose_stamped.pose.orientation.w = q.w();
      _ros_frame_pose_pub.publish(pose_stamped);

      nav_msgs::Odometry odometry;
      odometry.header.stamp = ros::Time().fromSec(frame_pose_message->time);
      odometry.header.frame_id = "map";
      odometry.pose.pose.position.x = frame_pose_message->pose(0, 3);
      odometry.pose.pose.position.y = frame_pose_message->pose(1, 3);
      odometry.pose.pose.position.z = frame_pose_message->pose(2, 3);
      odometry.pose.pose.orientation.x = q.x();
      odometry.pose.pose.orientation.y = q.y();
      odometry.pose.pose.orientation.z = q.z();
      odometry.pose.pose.orientation.w = q.w();
      _pub_latest_odometry.publish(odometry);

      _ros_path_all.header.stamp = ros::Time().fromSec(frame_pose_message->time);
      _ros_path_all.poses.push_back(pose_stamped);
      // if (_ros_path_all.poses.size() % 2 == 0) {
      // }
      _ros_all_path_pub.publish(_ros_path_all);

      static tf::TransformBroadcaster br;
      tf::Transform transform;
      tf::Quaternion tf_q;
      transform.setOrigin(tf::Vector3(frame_pose_message->pose(0, 3),
                                      frame_pose_message->pose(1, 3),
                                      frame_pose_message->pose(2, 3)));
      tf_q.setW(q.w());
      tf_q.setX(q.x());
      tf_q.setY(q.y());
      tf_q.setZ(q.z());
      transform.setRotation(tf_q);
      br.sendTransform(tf::StampedTransform(transform, pose_stamped.header.stamp, "map", "camera"));
    };
    _frame_pose_publisher.Register(publish_frame_pose_function);
    _frame_pose_publisher.Start();
  }
  
  */
  
  // If set, publish the transform /tf topic
  if (publish_tf_) {
    apriltag_ros::AprilTagDetection det;
    geometry_msgs::PoseStamped pose_stamped;
    // pose_stamped.header.frame_id = "world";
    pose_stamped.header =image->header;
    if (use_tag_frame_id_) {
      pose_stamped.header.frame_id = "tag";
    } else {
      pose_stamped.header.frame_id = camera_tf_frame_;
    }
    // pose_stamped.header.stamp = ros::Time::now();

    int det_size = tag_detection_array.detections.size();
    int best_id = -1;
    int best_id_hamming = max_hamming_distance_ + 1;
    bool find_best = false;
    int frame_num = 200000;

    for (unsigned int i=0; i < tag_detection_array.detections.size(); i++) {
      geometry_msgs::PoseStamped pose;
      pose.pose = tag_detection_array.detections[i].pose.pose.pose;
      pose.header = tag_detection_array.detections[i].pose.header;
      tf::Stamped<tf::Transform> tag_transform;
      tf::poseStampedMsgToTF(pose, tag_transform);
      tf_pub_.sendTransform(tf::StampedTransform(tag_transform,
                                                tag_transform.stamp_,
                                                camera_tf_frame_,
                                                detection_names[i]));

      // std::cout << i << camera_tf_frame_ << " \n";
      // std::cout << i << " detection_names: " <<detection_names[i] << " \n";

      if (tag_test_flag_){
        auto header = tag_detection_array.detections[i].pose.header;
        // pose_stamped.header = header;
        pose_stamped.pose = tag_detection_array.detections[i].pose.pose.pose;
        // auto inverse_pose = inversePose(tag_detection_array.detections[i].pose);
        // pose_stamped.pose = inverse_pose.pose.pose;

        int id = tag_detection_array.detections[i].id[0];

        auto s = pose_stamped.pose.position;
        pcl::PointXYZRGB p;
        p.x = s.x;
        p.y = s.y;
        p.z = s.z;
        int index = id % colors.size();
        p.r = colors[index][0];
        p.g = colors[index][1];
        p.b = colors[index][2];
        pcl_path_cloudptr_->points.push_back(p);

        switch (id) {
          case 0:
            if(camera_path0_.poses.size() > frame_num) camera_path0_.poses.clear();
            camera_path0_.poses.push_back(pose_stamped);
            break;
          case 1:
            if(camera_path1_.poses.size() > frame_num) camera_path1_.poses.clear();
            camera_path1_.poses.push_back(pose_stamped);
            break;
          case 2:
            if(camera_path2_.poses.size() > frame_num) camera_path2_.poses.clear();
            camera_path2_.poses.push_back(pose_stamped);
            break;
          case 3:  
            if(camera_path3_.poses.size() > frame_num) camera_path3_.poses.clear();
            camera_path3_.poses.push_back(pose_stamped);
            break;
          case 4:
            if(camera_path4_.poses.size() > frame_num) camera_path4_.poses.clear();
            camera_path4_.poses.push_back(pose_stamped);
            break;
          case 5:
            if(camera_path5_.poses.size() > frame_num) camera_path5_.poses.clear();
            camera_path5_.poses.push_back(pose_stamped);
            break;
          case 6:
            if(camera_path6_.poses.size() > frame_num) camera_path6_.poses.clear();
            camera_path6_.poses.push_back(pose_stamped);
            break;
          default:
            break;
        }
      }
      
      // 这里的pose使用tag到cam的，避免抖动,找到最佳pose
      int cur_id = tag_detection_array.detections[i].id[0];
      auto position = pose.pose.position;
      Eigen::Vector3d cur(position.x, position.y,position.z);
      if (init_flag_ && (pre_point_ - cur).norm() > max_change_distance_) {
        if (!pose_lost_flag_) {
          continue; 
        } else {
        // td_->tag_test_flag = true;
        std::cout << " cur_id Warning!!!: " << cur_id << std::endl;
        std::cout << " pre_point_: \n" << pre_point_.transpose() << std::endl;
        std::cout << " cur_point: \n" << cur.transpose() << std::endl;
        std::cout << std::endl;

        }
      }

      if (!find_best) {
        if (det_size == 1 || cur_id == 6) {
          int temp_hamming = tag_detection_array.detections[i].hamming[0];
          if (temp_hamming < best_id_hamming) {
            det = tag_detection_array.detections[i];
            best_id = cur_id;
            best_id_hamming = temp_hamming;
            find_best = true;
          }
        }
      }

    }

    if (!find_best && det_size > 1) {
      // 中型二维码优先选择
      for (unsigned int i=0; i < det_size; i++) {
        int cur_id = tag_detection_array.detections[i].id[0];
        if (cur_id == 2 || cur_id == 4 || cur_id == 5) {
          auto position = tag_detection_array.detections[i].pose.pose.pose.position;
          Eigen::Vector3d cur(position.x, position.y, position.z);
          int temp_hamming = tag_detection_array.detections[i].hamming[0];
          if (!init_flag_ || (pre_point_ - cur).norm() <= max_change_distance_) {
            if (temp_hamming < best_id_hamming) {
              find_best = true;
              det = tag_detection_array.detections[i];
              best_id = tag_detection_array.detections[i].id[0];
              best_id_hamming = temp_hamming;
            }
          }
        }
      }
      // 小二维码优先级最低
      if (!find_best) {
        for (unsigned int i=0; i < det_size; i++) {
          int temp_hamming = tag_detection_array.detections[i].hamming[0];
          if (temp_hamming < best_id_hamming) {
            find_best = true;
            det = tag_detection_array.detections[i];
            best_id = tag_detection_array.detections[i].id[0];
            best_id_hamming = temp_hamming;
          }
        } 
      }
    }

    // std::cout << " det_size : " << det_size << std::endl;
    // std::cout << " find_best : " << find_best << std::endl;
    // std::cout << " best_id: " << best_id << std::endl;


    bool pose_abnormal_flag = false;
    // std::cout << find_best << " debug 6: " << best_id << std::endl;
    // std::cout << "pose_lost_flag_ : " << pose_lost_flag_ << " close_pose_lost_ : " << close_pose_lost_ << std::endl;

    // 当间隔比较长时，若果找到best，使用当前姿态；初始无姿态，作为post lost
    if (init_flag_) {
      // 这里pre_pose_stamp_ 隐含了最佳姿态
      if ((pose_stamped.header.stamp - pre_pose_stamp_).toSec() <= 0.08) {
        pose_lost_flag_ = false;
      } else {
        if (find_best) {
          pose_lost_flag_ = false;
          pose_abnormal_flag = true;
        } else {
          pose_lost_flag_ = true;
        }
      }
    } else {
      pose_lost_flag_ = false;
    }

    geometry_msgs::PoseWithCovarianceStamped geometry_msgs_poseCovariance;
    apriltag_ros::AprilTagPose apriltagPose;
    apriltagPose.header = pose_stamped.header;
    geometry_msgs_poseCovariance.header = pose_stamped.header;
    // geometry_msgs_poseCovariance.pose = det.pose.pose;
    auto inverse_pose = inversePose(det.pose);
    geometry_msgs_poseCovariance.pose = inverse_pose.pose;
    geometry_msgs_poseCovariance.pose.covariance.fill(0);

    apriltagPose.max_change_distance = max_change_distance_;
    apriltagPose.det_size = det_size;
    apriltagPose.hamming = best_id_hamming;
    apriltagPose.pose_abnormal_flag = pose_abnormal_flag;
    apriltagPose.pose = geometry_msgs_poseCovariance;
    apriltagPose.id = best_id;
    apriltagPose.pose_lost_flag = pose_lost_flag_;

    nav_msgs::Odometry odometry;
    odometry.header = pose_stamped.header;
    // odometry.header.frame_id = "map";
    odometry.pose = geometry_msgs_poseCovariance.pose;

    if (!use_tag_frame_id_) {
      odometry.header.frame_id = camera_tf_frame_;
      odometry.child_frame_id = "tag";
    } else {
      odometry.header.frame_id = "tag";
      odometry.child_frame_id = camera_tf_frame_;
    }

    if (!smooth_odometry_.empty()) {
      int mid = smooth_odometry_.size() / 2;
      double dt = (odometry.header.stamp - smooth_odometry_[mid].header.stamp).toSec();
      if (dt > 1e-5) {
        double deltaX = odometry.pose.pose.position.x - prevOdometry_.pose.pose.position.x;
        double deltaY = odometry.pose.pose.position.y - prevOdometry_.pose.pose.position.y;
        double deltaZ = odometry.pose.pose.position.z - prevOdometry_.pose.pose.position.z;
        double linearVelX = deltaX / dt;
        double linearVelY = deltaY / dt;
        double linearVelZ = deltaZ / dt;

        if (deltaX < deltaXT && deltaY < deltaYT && deltaZ < deltaZT) {
          odometry.pose = smooth_odometry_[mid].pose;
        }

        // Create a Twist message to publish linear velocity
        geometry_msgs::Twist velMsg;
        velMsg.linear.x = linearVelX;
        velMsg.linear.y = linearVelY;
        velMsg.linear.z = linearVelZ;
        auto speed = Eigen::Vector3d(linearVelX, linearVelY, linearVelZ);
        velMsg.angular.x = speed.norm();
        velMsg.angular.y = speed.squaredNorm();
        odometry.twist.twist = velMsg;
      }
    }

    if (find_best && (!pose_lost_flag_ || close_pose_lost_)) {

      // std::cout << "id: " << best_id << "========== " << std::endl;
      // std::cout << " roll: " <<det.pose.pose.covariance[0]  << 
      //             " pitch: " <<  det.pose.pose.covariance[1] << 
      //             " yaw: " << det.pose.pose.covariance[2] << std::endl;

      StandaloneTagDescription* standaloneDescription;
      if (findStandaloneTagDescription(best_id, standaloneDescription, false)) {
        apriltagPose.size = standaloneDescription->size();
      }

      auto position = det.pose.pose.pose.position;
      // auto inverse_pose = inversePose(det.pose);
      // auto position = inverse_pose.pose.pose.position;
      // 这里相机路径使用det的
      Eigen::Vector3d cur(position.x, position.y,position.z);
      if (init_flag_) {
        auto dis = (pre_point_ - cur).norm();
        apriltagPose.change_distance = dis;
        apriltagPose.flag = 100 * (1 - dis / max_change_distance_);
        // confidence, valid: 1.0f; invalid: 0.0
        if (pose_lost_flag_ || apriltagPose.flag < 80) {
          odometry.pose.covariance[0] = 0.0;
        } else {
          odometry.pose.covariance[0] = 1.0;
        }
      }

      if (tag_test_flag_) {
        static std::deque<Eigen::Vector3d> positions;
        positions.push_back(cur);
        if (!init_flag_ || (pre_point_ - cur).norm() >= 0.01) {
          if (1) {
            pose_stamped.pose = det.pose.pose.pose;
            auto inverse_pose = inversePose(det.pose);
          } else {
            static Eigen::Vector3d sum;
            if (positions.size() > 10) {
              sum -= positions.front();
              positions.pop_front();
            }
            int mid = positions.size()/2;
            auto p = positions[mid];
            geometry_msgs::Point gp;
            gp.x = p.x(); gp.y = p.y(); gp.z = p.z();
            pose_stamped.pose.position = gp;
          }

          if(camera_path_.poses.size() > frame_num) camera_path_.poses.clear();
          camera_path_.poses.push_back(pose_stamped);
          pose_stamped.pose = inverse_pose.pose.pose;
          camera_ipath_.poses.push_back(pose_stamped);
        }

        auto& s = position;
        pcl::PointXYZRGB p;
        p.x = s.x;
        p.y = s.y;
        p.z = s.z;
        int index = best_id;
        p.r = colors[index][0];
        p.g = colors[index][1];
        p.b = colors[index][2];

        if (pose_abnormal_flag) {
          // smooth_poses_.clear();
          pcl_abnormal_path_cloudptr_->points.push_back(p);
        }

        pcl_best_path_cloudptr_->points[0] = p;
        pcl_best_path_cloudptr0_->points.push_back(p);
      }

      if (!init_flag_) {
        init_flag_ = true;
      }
      pre_point_ = cur;
      pre_pose_stamp_ = apriltagPose.header.stamp;
    }

    // 目前只在找到最优时，才发送
    if (find_best && (!pose_lost_flag_ || close_pose_lost_)) {
      geometry_msgs_poseCovariance_pub_.publish(geometry_msgs_poseCovariance);
      apriltag_pose_pub_.publish(apriltagPose);
      odomtry_publisher_.publish(odometry);
      prevOdometry_ = odometry;

      // pose_stamped.pose = det.pose.pose.pose;
      pose_stamped.pose = odometry.pose.pose;
      auto& pos = pose_stamped.pose.position;
      auto& ori = pose_stamped.pose.orientation;
      Eigen::VectorXd cur_pose(7);
      cur_pose << pos.x, pos.y, pos.z, ori.w, ori.x, ori.y, ori.z;
      smooth_poses_.push_back(cur_pose);
      if (smooth_poses_.size() > windowSize_) {
        smooth_poses_.pop_front();
      }
      smooth_odometry_.push_back(odometry);
      if (smooth_odometry_.size() > windowSize_) {
        smooth_odometry_.pop_front();
      }
      
      // 使用Savitzky-Golay滤波器平滑多维数据
      Eigen::VectorXd smoothedData = sgolayfilt(smooth_poses_);
      pos.x = smoothedData[0];
      pos.y = smoothedData[1];
      pos.z = smoothedData[2];
      ori.w = smoothedData[3];
      ori.x = smoothedData[4];
      ori.y = smoothedData[5];
      ori.z = smoothedData[6];
      if(camera_smooth_path_.poses.size() > frame_num) {
        camera_smooth_path_.poses.clear();
      }
      prevOdometry_.pose.pose = pose_stamped.pose;
      camera_smooth_path_.poses.push_back(pose_stamped);
      // 输出平滑后的数据
      // std::cout << "Original: " << cur_pose << ", Smoothed: " << smoothedData << std::endl;
    } else {
      odomtry_publisher_.publish(prevOdometry_);
    }

    if (tag_test_flag_) {
      PublishRvizInfo(pose_stamped.header);
    }
  }

  static auto pre_tag_detection_array = tag_detection_array;
  static auto pre_aprilslam_tag_detections = aprilslam_tag_detections;
  if (tag_detection_array.detections.size() > 0) {
    pre_tag_detection_array = tag_detection_array;
    pre_aprilslam_tag_detections = aprilslam_tag_detections;
  } else {
    tag_detection_array = pre_tag_detection_array;
    aprilslam_tag_detections = pre_aprilslam_tag_detections;
  }
  tag_detection_array.header = image->header;
  aprilslam_tag_detections.header = image->header;
  // aprilslam_tag_detections.header.stamp = ros::Time::now();
  // aprilslam_tag_detections.header.frame_id = "tag";
  return {tag_detection_array, aprilslam_tag_detections};
}

/**
 * qsort-style comparator ordering detections by ascending tag ID.
 *
 * NOTE(review): the arguments are cast directly to apriltag_detection_t*;
 * if zarray_sort hands the comparator pointers *to* the stored
 * apriltag_detection_t* elements, an extra level of indirection would be
 * needed — confirm against the zarray_sort contract.
 */
int TagDetector::idComparison (const void* first, const void* second)
{
  const int lhs = ((apriltag_detection_t*) first)->id;
  const int rhs = ((apriltag_detection_t*) second)->id;
  if (lhs < rhs)
  {
    return -1;
  }
  return (lhs == rhs) ? 0 : 1;
}

// Remove all detections whose tag ID occurs more than once in the image.
//
// The array is first sorted by ID so duplicates become adjacent; then every
// member of a duplicated-ID run is pruned (all copies, not just the extras),
// since a repeated ID makes the tag-to-pose association ambiguous.
void TagDetector::removeDuplicates ()
{
  zarray_sort(detections_, &idComparison);
  int count = 0;
  // True while we are inside a run of detections sharing the same ID.
  bool duplicate_detected = false;
  while (true)
  {
    if (count > zarray_size(detections_)-1)
    {
      // The entire detection set was parsed
      return;
    }
    apriltag_detection_t *detection;
    zarray_get(detections_, count, &detection);
    int id_current = detection->id;
    // Default id_next value of -1 ensures that if the last detection
    // is a duplicated tag ID, it will get removed
    int id_next = -1;
    if (count < zarray_size(detections_)-1)
    {
      zarray_get(detections_, count+1, &detection);
      id_next = detection->id;
    }
    // Remove the current entry either because the next entry shares its ID,
    // or because it is the last member of a duplicated run
    // (duplicate_detected is still set from the previous iteration).
    if (id_current == id_next || (id_current != id_next && duplicate_detected))
    {
      duplicate_detected = true;
      // Remove the current tag detection from detections array
      int shuffle = 0;
      zarray_remove_index(detections_, count, shuffle);
      if (id_current != id_next)
      {
        ROS_WARN_STREAM("Pruning tag ID " << id_current << " because it "
                        "appears more than once in the image.");
        duplicate_detected = false; // Reset
      }
      // Do not advance count: after removal the next element has shifted
      // into the current slot.
      continue;
    }
    else
    {
      count++;
    }
  }
}

void TagDetector::addObjectPoints (
    double s, cv::Matx44d T_oi, std::vector<cv::Point3d >& objectPoints) const
{
  // Add to object point vector the tag corner coordinates in the bundle frame
  // Going counterclockwise starting from the bottom left corner
  objectPoints.push_back(T_oi.get_minor<3, 4>(0, 0)*cv::Vec4d(-s,-s, 0, 1));
  objectPoints.push_back(T_oi.get_minor<3, 4>(0, 0)*cv::Vec4d( s,-s, 0, 1));
  objectPoints.push_back(T_oi.get_minor<3, 4>(0, 0)*cv::Vec4d( s, s, 0, 1));
  objectPoints.push_back(T_oi.get_minor<3, 4>(0, 0)*cv::Vec4d(-s, s, 0, 1));
}

/**
 * Append the four pixel coordinates of a detected tag's corners.
 *
 * Projects the tag-local corners through the detection homography into the
 * image, counterclockwise starting from the bottom-left corner. The y
 * coordinates are negated because the AprilTag local frame has its y-axis
 * pointing down, while we use a tag-local frame with the y-axis pointing up.
 */
void TagDetector::addImagePoints (
    apriltag_detection_t *detection,
    std::vector<cv::Point2d >& imagePoints) const
{
  // Tag-local (x, y) corner coordinates, y already negated (see above).
  static const double corners[4][2] = {{-1, 1}, {1, 1}, {1, -1}, {-1, -1}};
  for (const auto& corner : corners)
  {
    // Homography projection taking tag local frame coordinates to image pixels
    double im_x, im_y;
    homography_project(detection->H, corner[0], corner[1], &im_x, &im_y);
    imagePoints.emplace_back(im_x, im_y);
  }
}

/**
 * Compare two rotation vectors (Rodrigues axis-angle) and suppress a sudden
 * near-180-degree flip of the PnP solution.
 *
 * If the two unit axes are nearly opposite (|u1 + u2| below
 * pnp_angle_thresh_), v2 is treated as an ambiguous/flipped solution:
 * it is overwritten with v1 and true is returned. Otherwise v2 is left
 * untouched and false is returned.
 *
 * @param v1 previous rotation vector (never modified)
 * @param v2 current rotation vector (replaced by v1 when a flip is detected)
 * @return true when a flip was detected and v2 was replaced
 */
bool TagDetector::calculateAngle(cv::Mat& v1, cv::Mat& v2) {
  // Vector magnitudes (rotation angles, radians).
  double norm1 = cv::norm(v1);
  double norm2 = cv::norm(v2);

  // Guard against (near) zero-length vectors: the axis is undefined.
  double thr = 1e-6;
  if (norm1 < thr || norm2 < thr) {
    return false;
  }
  // Unit axis vectors.
  cv::Mat unitV1 = v1 / norm1;
  cv::Mat unitV2 = v2 / norm2;
  // Clamp the dot product to [-1, 1] to keep acos well-defined.
  double dotProduct = unitV1.dot(unitV2);
  dotProduct = std::max(-1.0, std::min(1.0, dotProduct));

  double angle = acos(dotProduct) * 180 / M_PI;       // angle between axes [deg]
  double delta_angle = (norm2 - norm1) * 180 / M_PI;  // magnitude change [deg]
  // |u1 + u2| approaches 0 when the axes point in opposite directions,
  // i.e. when the PnP solution flipped by ~180 degrees between frames.
  double change = cv::norm(unitV1 + unitV2);
  ROS_DEBUG_STREAM("v1: " << v1 << " v2: " << v2);
  ROS_DEBUG_STREAM("change: " << change << " delta_angle: " << delta_angle
                   << " angle: " << angle);
  if (change < pnp_angle_thresh_) {
    ROS_DEBUG_STREAM("rotation flip detected (angle " << angle
                     << " deg, magnitude change " << delta_angle
                     << " deg, |v1| " << norm1 << ", |v2| " << norm2
                     << "); reusing previous rotation vector");
    v2 = v1;
    return true;
  }
  return false;
}

/**
 * Reject implausibly large frame-to-frame jumps in the estimated transform.
 *
 * Compares T against the transform seen on the previous call (kept in
 * function-local statics, so this state is shared by all TagDetector
 * instances and is not thread-safe — TODO confirm single-threaded use).
 * When the previous sample is recent (< 0.08 s old) and either the rotation
 * difference (Frobenius norm) exceeds max_rotation_distance_ or the
 * translation difference exceeds max_change_distance_, T is replaced in
 * place by the previous transform.
 *
 * @param[in,out] T homogeneous transform; may be overwritten with the
 *                  previously accepted one.
 */
void TagDetector::TransDifference(Eigen::Matrix4d &T) {
  double currentTimestamp = ros::Time::now().toSec();
  static double lastTimestamp = currentTimestamp;  // time of previous sample
  static Eigen::Matrix4d lastT = T;                // previous transform

  // Only compare against the previous sample when it is recent enough.
  if (currentTimestamp - lastTimestamp < 0.08) {
    Eigen::Matrix3d currentRotation = T.topLeftCorner(3, 3);
    Eigen::Matrix3d lastRotation = lastT.topLeftCorner(3, 3);

    // Rotation difference measured with the Frobenius norm.
    double rotationDifference = (currentRotation - lastRotation).norm();

    // Translation difference (Euclidean distance).
    Eigen::Vector3d currentTranslation = T.col(3).head(3);
    Eigen::Vector3d lastTranslation = lastT.col(3).head(3);
    double translationDifference = (currentTranslation - lastTranslation).norm();

    // Jump too large for one frame: keep the previous pose instead.
    if (rotationDifference > max_rotation_distance_ ||
        translationDifference > max_change_distance_) {
      T.topLeftCorner(3, 3) = lastRotation;
      T.col(3).head(3) = lastTranslation;
      ROS_WARN_STREAM("rotationDifference: " << rotationDifference
                      << " translationDifference: " << translationDifference
                      << ", reusing last transform:\n" << lastT);
    }
  }
  // Record this sample for the next call.
  lastT = T;
  lastTimestamp = currentTimestamp;
}

/**
 * Estimate the tag pose relative to the camera with PnP.
 *
 * Solves the Perspective-n-Point problem from the 3D tag-corner coordinates
 * and their 2D image projections, converts the resulting Rodrigues rotation
 * into a 4x4 homogeneous transform, and passes it through TransDifference()
 * to suppress implausible frame-to-frame jumps.
 *
 * @param objectPoints tag corner coordinates in the tag/bundle frame
 * @param imagePoints  corresponding pixel coordinates
 * @param fx,fy,cx,cy  pinhole camera intrinsics
 * @param distCoeffs   distortion coefficients, forwarded to cv::solvePnP
 *                     (4, 5, 8, 12 or 14 elements)
 * @return homogeneous transform taking tag-frame points to the camera frame
 */
Eigen::Matrix4d TagDetector::getRelativeTransform(
    std::vector<cv::Point3d > objectPoints,
    std::vector<cv::Point2d > imagePoints,
    double fx, double fy, double cx, double cy, cv::Mat_<double> distCoeffs)
{
  // perform Perspective-n-Point camera pose estimation using the
  // above 3D-2D point correspondences
  // TODO Perhaps something like SOLVEPNP_EPNP would be faster? Would
  // need to first check WHAT is a bottleneck in this code, and only
  // do this if PnP solution is the bottleneck.
  cv::Mat rvec, tvec;
  cv::Matx33d cameraMatrix(fx,  0, cx,
                           0,  fy, cy,
                           0,   0,  1);
  cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

  // Rodrigues rotation vector -> rotation matrix.
  cv::Matx33d R;
  cv::Rodrigues(rvec, R);
  Eigen::Matrix3d wRo;
  wRo << R(0,0), R(0,1), R(0,2), R(1,0), R(1,1), R(1,2), R(2,0), R(2,1), R(2,2);

  // Assemble the homogeneous transformation matrix.
  Eigen::Matrix4d T;
  T.topLeftCorner(3, 3) = wRo;
  T.col(3).head(3) <<
      tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);
  T.row(3) << 0,0,0,1;

  // Outlier rejection against the previous frame (may overwrite T).
  TransDifference(T);
  return T;
}

/**
 * Invert a camera->tag pose into a tag->camera pose.
 *
 * When use_tag_frame_id_ is false the input is returned untouched.
 * Otherwise the inverse is computed with the strategy selected by
 * inverse_method_:
 *   0 - quaternion algebra: rotate the negated translation by the inverse
 *       quaternion;
 *   1 - matrix algebra: R_inv = R^-1, t_inv = -R_inv * t;
 *   otherwise - look the transform up from TF between "tag_6" and
 *       camera_tf_frame_.
 * The roll/pitch/yaw of the resulting orientation (in degrees) are stored
 * in covariance[0..2] for downstream consumers.
 *
 * @param poseOrign camera-frame pose to invert
 * @return the inverted pose (same header, covariance[0..2] = RPY degrees)
 */
geometry_msgs::PoseWithCovarianceStamped TagDetector::inversePose(
  geometry_msgs::PoseWithCovarianceStamped poseOrign) {

  // Pass-through when the caller wants the original camera-frame pose.
  if (!use_tag_frame_id_) {
    return poseOrign;
  }
  geometry_msgs::PoseWithCovarianceStamped inverse_pose;
  inverse_pose.header = poseOrign.header;
  auto pose = poseOrign.pose.pose;
  auto poseCovariance = poseOrign.pose;
  Eigen::Quaterniond q(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z);
  q.normalize();
  // transform holds the forward pose; matrix accumulates the inverse.
  Eigen::Matrix4d transform = Eigen::Matrix4d::Identity();
  Eigen::Matrix4d matrix = Eigen::Matrix4d::Identity();
  Eigen::Matrix3d R = q.toRotationMatrix();
  transform.block<3, 3>(0, 0) = R;
  transform(0, 3) = pose.position.x;
  transform(1, 3) = pose.position.y;
  transform(2, 3) = pose.position.z;
  int method = inverse_method_;
  if (method == 0) {
    // Quaternion inverse (equals the conjugate for a unit quaternion).
    Eigen::Quaterniond q_inv = q.inverse();
    q_inv.normalize();
    // Negated translation vector.
    Eigen::Vector3d t = transform.block<3, 1>(0, 3) * -1;
    // Embed the translation as a pure quaternion (0, tx, ty, tz).
    Eigen::Quaterniond t_q(0, t.x(), t.y(), t.z());
    // Rotate the translation: q_inv * t_q * (q_inv)^-1, and (q_inv)^-1 == q.
    Eigen::Quaterniond rotated_t_q = q_inv * t_q * q;
    // Extract the rotated translation vector.
    Eigen::Vector3d rotated_t(rotated_t_q.x(), rotated_t_q.y(), rotated_t_q.z());
    matrix.block<3, 1>(0, 3) = rotated_t;
    q = q_inv;
  } else if (method == 1) {
    // Matrix form of the rigid-transform inverse.
    // (A plain transform.inverse() is avoided on purpose: it does not
    // guarantee the rotation block stays orthonormal.)
    Eigen::Matrix3d R_inv = R.inverse();
    Eigen::Vector3d t_inv = -R_inv * transform.block<3, 1>(0, 3);
    matrix.block<3, 3>(0, 0) = R_inv;
    matrix.block<3, 1>(0, 3) = t_inv;
    q = Eigen::Quaterniond(R_inv);
  } else {
    // Look the inverse transform up from TF instead of computing it.
    tf::StampedTransform transformBA;
    tf::StampedTransform transformAB;
    try {
        listener.lookupTransform(camera_tf_frame_, "tag_6", ros::Time(0), transformAB);
        listener.lookupTransform("tag_6", camera_tf_frame_, ros::Time(0), transformBA);
    } catch (tf::TransformException &ex) {
        ROS_WARN("%s", ex.what());
        ros::Duration(1.0).sleep();
    }
    // NOTE(review): when the lookup throws, transformBA is still used below
    // in its default-constructed state — confirm this fallback is intended.
    ROS_DEBUG_STREAM("Translation from B to A: " << transformBA.getOrigin().x() << ", "
                     << transformBA.getOrigin().y() << ", " << transformBA.getOrigin().z());
    ROS_DEBUG_STREAM("Translation from A to B: " << transformAB.getOrigin().x() << ", "
                     << transformAB.getOrigin().y() << ", " << transformAB.getOrigin().z());
    ROS_DEBUG_STREAM("Rotation from B to A: " << transformBA.getRotation().x() << ", "
                     << transformBA.getRotation().y() << ", " << transformBA.getRotation().z() << ", "
                     << transformBA.getRotation().w());

    Eigen::Quaterniond q_inv(transformBA.getRotation().w(),
    transformBA.getRotation().x(), transformBA.getRotation().y(), transformBA.getRotation().z());

    Eigen::Vector3d t_inv(transformBA.getOrigin().x(), transformBA.getOrigin().y(), transformBA.getOrigin().z());
    matrix.block<3, 1>(0, 3) = t_inv;
    q = q_inv;
  }

  // Write the inverted orientation and translation back into the message.
  q.normalize();
  pose.orientation.w = q.w();
  pose.orientation.x = q.x();
  pose.orientation.y = q.y();
  pose.orientation.z = q.z();
  pose.position.x = matrix(0, 3);
  pose.position.y = matrix(1, 3);
  pose.position.z = matrix(2, 3);

  // Convert the final orientation to roll/pitch/yaw (tf's RPY order is the
  // reverse of Eigen's eulerAngles(2,1,0) ordering).
  tf::Quaternion quat;
  tf::quaternionMsgToTF(pose.orientation, quat);
  quat.normalize();
  double roll, pitch, yaw;
  tf::Matrix3x3(quat).getRPY(roll, pitch, yaw);
  // covariance[0..2] are repurposed to carry RPY in degrees
  // (pose.pose.covariance is the usual 6x6 matrix).
  poseCovariance.covariance[0] = roll / M_PI *180;
  poseCovariance.covariance[1] = pitch/ M_PI *180;
  poseCovariance.covariance[2] = yaw/ M_PI *180;
  poseCovariance.pose = pose;
  inverse_pose.pose = poseCovariance;

  return inverse_pose;
 }

/**
 * Build a PoseWithCovarianceStamped message from a tag transform.
 *
 * @param transform      4x4 homogeneous tag pose (camera frame)
 * @param rot_quaternion rotation part of the pose (normalized here)
 * @param header         stamp/frame copied into the message header
 * @param translate      tag-center offset; when all_tag_into_together_ is
 *                       set, the reported position is shifted so that all
 *                       tags are expressed relative to a common center
 * @return pose message; covariance[0..2] additionally carry the
 *         roll/pitch/yaw of the orientation in degrees
 */
geometry_msgs::PoseWithCovarianceStamped TagDetector::makeTagPose(
    const Eigen::Matrix4d& transform,
    Eigen::Quaternion<double> rot_quaternion,
    const std_msgs::Header& header,
    Eigen::Vector3d translate)
{
  geometry_msgs::PoseWithCovarianceStamped pose;
  pose.header = header;

  rot_quaternion.normalize();
  //===== Position and orientation
  // Optionally align all tags to a common center by removing the rotated
  // per-tag offset from the translation.
  Eigen::Vector3d t_c;
  if (all_tag_into_together_) {
    t_c = transform.block<3, 1>(0, 3) - transform.block<3, 3>(0, 0) * translate;
  } else {
    t_c = transform.block<3, 1>(0, 3);
  }
  pose.pose.pose.position.x    = t_c.x();
  pose.pose.pose.position.y    = t_c.y();
  pose.pose.pose.position.z    = t_c.z();
  pose.pose.pose.orientation.x = rot_quaternion.x();
  pose.pose.pose.orientation.y = rot_quaternion.y();
  pose.pose.pose.orientation.z = rot_quaternion.z();
  pose.pose.pose.orientation.w = rot_quaternion.w();

  {
    // Store roll/pitch/yaw (degrees) in the first entries of the 6x6
    // covariance matrix for downstream consumers.
    tf::Quaternion quat;
    tf::quaternionMsgToTF(pose.pose.pose.orientation, quat);
    quat.normalize();
    double roll, pitch, yaw;
    tf::Matrix3x3(quat).getRPY(roll, pitch, yaw);
    pose.pose.covariance[0] = roll / M_PI *180;
    pose.pose.covariance[1] = pitch/ M_PI *180;
    pose.pose.covariance[2] = yaw/ M_PI *180;
  }

  return pose;
}

/**
 * Overlay the current detections on the given image.
 *
 * Only tags that are configured (either standalone or as a bundle member)
 * are drawn; unknown ("rogue") IDs are skipped. Each drawn tag gets a
 * colored outline and its ID printed at its center. Finally, the stored
 * corner-cloud points are drawn as filled circles.
 */
void TagDetector::drawDetections (cv_bridge::CvImagePtr image)
{
  const int num_detections = zarray_size(detections_);
  for (int det_idx = 0; det_idx < num_detections; det_idx++)
  {
    apriltag_detection_t *detection;
    zarray_get(detections_, det_idx, &detection);

    const int tag_id = detection->id;

    // Determine whether this ID belongs to one of the configured bundles
    // (see config/tags.yaml).
    bool in_bundle = false;
    for (unsigned int b = 0; b < tag_bundle_descriptions_.size(); b++)
    {
      TagBundleDescription bundle = tag_bundle_descriptions_[b];
      if (bundle.id2idx_.find(tag_id) != bundle.id2idx_.end())
      {
        in_bundle = true;
        break;
      }
    }

    // Neither part of a bundle nor a configured standalone tag -> this is a
    // "rogue" detection; do not draw it.
    StandaloneTagDescription* standalone_description;
    if (!in_bundle &&
        !findStandaloneTagDescription(tag_id, standalone_description, false))
    {
      continue;
    }

    // Outline the tag. cv::Scalar is (Blue, Green, Red); the four edges are
    // drawn green, red, blue, blue.
    const int thickness = 2;
    const cv::Point corner0((int)detection->p[0][0], (int)detection->p[0][1]);
    const cv::Point corner1((int)detection->p[1][0], (int)detection->p[1][1]);
    const cv::Point corner2((int)detection->p[2][0], (int)detection->p[2][1]);
    const cv::Point corner3((int)detection->p[3][0], (int)detection->p[3][1]);
    line(image->image, corner0, corner1, cv::Scalar(0, 0xff, 0), thickness); // green
    line(image->image, corner0, corner3, cv::Scalar(0, 0, 0xff), thickness); // red
    line(image->image, corner1, corner2, cv::Scalar(0xff, 0, 0), thickness); // blue
    line(image->image, corner2, corner3, cv::Scalar(0xff, 0, 0), thickness); // blue

    // Print the tag ID centered on the detection.
    std::stringstream id_stream;
    id_stream << detection->id;
    cv::String label = id_stream.str();
    int fontface = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
    double fontscale = 2;
    int baseline;
    cv::Size label_size = cv::getTextSize(label, fontface,
                                          fontscale, 2, &baseline);
    cv::putText(image->image, label,
                cv::Point((int)(detection->c[0]-label_size.width/2),
                          (int)(detection->c[1]+label_size.height/2)),
                fontface, fontscale, cv::Scalar(0xff, 0x99, 0), 2);
  }

  // Draw the stored corner points as filled circles (negative thickness
  // means "filled") using each point's own RGB color.
  for (auto p : pcl_corner_cloud_) {
    cv::circle(image->image, cv::Point((int)(p.x), (int)(p.y)),
               5, cv::Scalar(p.b, p.g, p.r), -1);
  }
}

// Parse standalone tag descriptions
/**
 * Parse the standalone tag descriptions from the ROS parameter server.
 *
 * @param standalone_tags XmlRpc array of tag structs; each entry must carry
 *        an integer "id" and a double "size", and may carry an optional
 *        string "name" plus an optional pose (x, y, z, qw, qx, qy, qz).
 * @return map from tag ID to its StandaloneTagDescription.
 */
std::map<int, StandaloneTagDescription> TagDetector::parseStandaloneTags (
    XmlRpc::XmlRpcValue& standalone_tags)
{
  std::map<int, StandaloneTagDescription> descriptions;
  // The parameter must be a YAML list of tag descriptions
  ROS_ASSERT(standalone_tags.getType() == XmlRpc::XmlRpcValue::TypeArray);

  for (int32_t idx = 0; idx < standalone_tags.size(); idx++)
  {
    // idx-th tag description; must be a struct with an integer "id" and a
    // double "size"
    XmlRpc::XmlRpcValue& tag = standalone_tags[idx];
    ROS_ASSERT(tag.getType() == XmlRpc::XmlRpcValue::TypeStruct);
    ROS_ASSERT(tag["id"].getType() == XmlRpc::XmlRpcValue::TypeInt);
    ROS_ASSERT(tag["size"].getType() == XmlRpc::XmlRpcValue::TypeDouble);

    int id = (int)tag["id"];
    // Tag size (square, side length in meters)
    double size = (double)tag["size"];

    // Frame name: use the explicit "name" field when given, otherwise
    // default to "tag_<id>"
    std::string frame_name;
    if (tag.hasMember("name"))
    {
      ROS_ASSERT(tag["name"].getType() == XmlRpc::XmlRpcValue::TypeString);
      frame_name = (std::string)tag["name"];
    }
    else
    {
      std::ostringstream frame_name_builder;
      frame_name_builder << "tag_" << id;
      frame_name = frame_name_builder.str();
    }

    // Optional pose of the tag (defaults: origin, identity quaternion).
    // Only the translation is used by the description; the quaternion is
    // read for logging.
    double x  = xmlRpcGetDoubleWithDefault(tag, "x", 0.);
    double y  = xmlRpcGetDoubleWithDefault(tag, "y", 0.);
    double z  = xmlRpcGetDoubleWithDefault(tag, "z", 0.);
    double qw = xmlRpcGetDoubleWithDefault(tag, "qw", 1.);
    double qx = xmlRpcGetDoubleWithDefault(tag, "qx", 0.);
    double qy = xmlRpcGetDoubleWithDefault(tag, "qy", 0.);
    double qz = xmlRpcGetDoubleWithDefault(tag, "qz", 0.);

    Eigen::Vector3d translate(x,y,z);

    ROS_INFO_STREAM(" " << idx << ") id: " << id << ", size: " << size << ", "
                        << "p = [" << x << "," << y << "," << z << "], "
                        << "q = [" << qw << "," << qx << "," << qy << ","
                        << qz << "]");

    StandaloneTagDescription description(id, size, frame_name, translate);
    ROS_INFO_STREAM("Loaded tag config: " << id << ", size: " <<
                    size << ", frame_name: " << frame_name.c_str());
    // Register this tag's description in the output map
    descriptions.insert(std::make_pair(id, description));
  }

  return descriptions;
}

// parse tag bundle descriptions
/**
 * Parse the tag bundle descriptions from the ROS parameter server.
 *
 * @param tag_bundles XmlRpc array of bundle structs; each bundle may carry
 *        an optional string "name" and must carry a "layout" array of member
 *        tags (integer "id", double "size", optional pose fields).
 * @return vector of parsed TagBundleDescription objects.
 */
std::vector<TagBundleDescription > TagDetector::parseTagBundles (
    XmlRpc::XmlRpcValue& tag_bundles)
{
  std::vector<TagBundleDescription > descriptions;
  ROS_ASSERT(tag_bundles.getType() == XmlRpc::XmlRpcValue::TypeArray);

  for (int32_t bundle_idx = 0; bundle_idx < tag_bundles.size(); bundle_idx++)
  {
    // bundle_idx-th bundle description; must be a struct
    XmlRpc::XmlRpcValue& bundle_cfg = tag_bundles[bundle_idx];
    ROS_ASSERT(bundle_cfg.getType() == XmlRpc::XmlRpcValue::TypeStruct);

    // Bundle name: explicit "name" field when given, else "bundle_<index>"
    std::string bundleName;
    if (bundle_cfg.hasMember("name"))
    {
      ROS_ASSERT(bundle_cfg["name"].getType() ==
                 XmlRpc::XmlRpcValue::TypeString);
      bundleName = (std::string)bundle_cfg["name"];
    }
    else
    {
      std::ostringstream bundle_name_builder;
      bundle_name_builder << "bundle_" << bundle_idx;
      bundleName = bundle_name_builder.str();
    }
    TagBundleDescription bundle_i(bundleName);
    ROS_INFO("Loading tag bundle '%s'",bundle_i.name().c_str());

    // The "layout" field lists the member tags of this bundle
    ROS_ASSERT(bundle_cfg["layout"].getType() ==
               XmlRpc::XmlRpcValue::TypeArray);
    XmlRpc::XmlRpcValue& layout = bundle_cfg["layout"];

    for (int32_t member_idx = 0; member_idx < layout.size(); member_idx++)
    {
      XmlRpc::XmlRpcValue& tag = layout[member_idx];
      ROS_ASSERT(tag.getType() == XmlRpc::XmlRpcValue::TypeStruct);

      ROS_ASSERT(tag["id"].getType() == XmlRpc::XmlRpcValue::TypeInt);
      int id = tag["id"];

      ROS_ASSERT(tag["size"].getType() == XmlRpc::XmlRpcValue::TypeDouble);
      double size = tag["size"];

      // If this tag is also configured standalone, its sizes must agree
      StandaloneTagDescription* standalone_description;
      if (findStandaloneTagDescription(id, standalone_description, false))
      {
        ROS_ASSERT(size == standalone_description->size());
      }

      // Pose of this tag with respect to the bundle origin
      // (defaults: origin, identity quaternion)
      double x  = xmlRpcGetDoubleWithDefault(tag, "x", 0.);
      double y  = xmlRpcGetDoubleWithDefault(tag, "y", 0.);
      double z  = xmlRpcGetDoubleWithDefault(tag, "z", 0.);
      double qw = xmlRpcGetDoubleWithDefault(tag, "qw", 1.);
      double qx = xmlRpcGetDoubleWithDefault(tag, "qx", 0.);
      double qy = xmlRpcGetDoubleWithDefault(tag, "qy", 0.);
      double qz = xmlRpcGetDoubleWithDefault(tag, "qz", 0.);
      Eigen::Quaterniond q_tag(qw, qx, qy, qz);
      q_tag.normalize();
      Eigen::Matrix3d R_oi = q_tag.toRotationMatrix();

      // Homogeneous rigid transform from tag frame to the bundle origin
      cv::Matx44d T_mj(R_oi(0,0), R_oi(0,1), R_oi(0,2), x,
                       R_oi(1,0), R_oi(1,1), R_oi(1,2), y,
                       R_oi(2,0), R_oi(2,1), R_oi(2,2), z,
                       0,         0,         0,         1);

      // Register this member tag with the bundle
      bundle_i.addMemberTag(id, size, T_mj);
      ROS_INFO_STREAM(" " << member_idx << ") id: " << id << ", size: " << size << ", "
                          << "p = [" << x << "," << y << "," << z << "], "
                          << "q = [" << qw << "," << qx << "," << qy << ","
                          << qz << "]");
    }
    descriptions.push_back(bundle_i);
  }
  return descriptions;
}

/**
 * Read a numeric field from an XmlRpc struct as a double.
 *
 * Asserts that the field is either a double or an int; an int is widened
 * to double before returning.
 */
double TagDetector::xmlRpcGetDouble (XmlRpc::XmlRpcValue& xmlValue,
                                     std::string field) const
{
  // Grab the field once and branch on its actual type
  XmlRpc::XmlRpcValue& value = xmlValue[field];
  ROS_ASSERT((value.getType() == XmlRpc::XmlRpcValue::TypeDouble) ||
             (value.getType() == XmlRpc::XmlRpcValue::TypeInt));
  if (value.getType() == XmlRpc::XmlRpcValue::TypeInt)
  {
    // Widen the integer to double
    return (double)(int)value;
  }
  return (double)value;
}

/**
 * Read a numeric field from an XmlRpc struct as a double, falling back to
 * a default when the field is absent.
 *
 * @param xmlValue struct to read from
 * @param field name of the field to read
 * @param defaultValue returned when the field is not present
 * @return the field's value as a double, or defaultValue if missing
 */
double TagDetector::xmlRpcGetDoubleWithDefault (XmlRpc::XmlRpcValue& xmlValue,
                                                std::string field,
                                                double defaultValue) const
{
  if (xmlValue.hasMember(field))
  {
    // Delegate to xmlRpcGetDouble, which asserts the field is numeric
    // (int or double) and converts it — avoids duplicating that logic here.
    return xmlRpcGetDouble(xmlValue, field);
  }
  return defaultValue;
}

/**
 * Look up the description of a standalone tag by its ID.
 *
 * @param id tag ID to search for
 * @param descriptionContainer set to point at the stored description when
 *        the ID is found; untouched otherwise
 * @param printWarning when true, emit a throttled warning on a miss
 * @return true if the ID is configured as a standalone tag, false otherwise
 */
bool TagDetector::findStandaloneTagDescription (
    int id, StandaloneTagDescription*& descriptionContainer, bool printWarning)
{
  auto it = standalone_tag_descriptions_.find(id);
  if (it != standalone_tag_descriptions_.end())
  {
    // Found: hand back a pointer into the descriptions map
    descriptionContainer = &(it->second);
    return true;
  }
  if (printWarning)
  {
    ROS_WARN_THROTTLE(10.0, "Requested description of standalone tag ID [%d],"
                      " but no description was found...",id);
  }
  return false;
}

} // namespace apriltag_ros
