/*
 * COPYRIGHT AND PERMISSION NOTICE
 * Penn Software MSCKF_VIO
 * Copyright (C) 2017 The Trustees of the University of Pennsylvania
 * All rights reserved.
 */

#include <iostream>
#include <algorithm>
#include <set>
#include <Eigen/Dense>

#include <sensor_msgs/image_encodings.h>
#include <random_numbers/random_numbers.h>

#include <msckf_vio/CameraMeasurement.h>
#include <msckf_vio/TrackingInfo.h>
#include <msckf_vio/image_processor.h>
#include <msckf_vio/utils.h>

using namespace std;
using namespace cv;
using namespace Eigen;

namespace msckf_vio {
ImageProcessor::ImageProcessor(ros::NodeHandle& n) :
    nh(n),
    is_first_img(true),
    //img_transport(n),
    stereo_sub(10),
    prev_features_ptr(new GridFeatures()),
    curr_features_ptr(new GridFeatures()) {
    return;
}

ImageProcessor::~ImageProcessor()
{
    destroyAllWindows();
    //ROS_INFO("Feature lifetime statistics:");
    //featureLifetimeStatistics();
    return;
}

/**
 * @brief Loads parameters from the ROS parameter server.
 *
 * The configuration files live under /config/.
 * Reads the camera distortion model, the camera intrinsics, the
 * camera-IMU extrinsics, and the image resolution (width and height).
 */
bool ImageProcessor::loadParameters()
{
    // Camera calibration parameters
    nh.param<string>("cam0/distortion_model",
      cam0_distortion_model, string("radtan"));
    nh.param<string>("cam1/distortion_model",
      cam1_distortion_model, string("radtan"));

    vector<int> cam0_resolution_temp(2);
    nh.getParam("cam0/resolution", cam0_resolution_temp);
    cam0_resolution[0] = cam0_resolution_temp[0];
    cam0_resolution[1] = cam0_resolution_temp[1];

    vector<int> cam1_resolution_temp(2);
    nh.getParam("cam1/resolution", cam1_resolution_temp);
    cam1_resolution[0] = cam1_resolution_temp[0];
    cam1_resolution[1] = cam1_resolution_temp[1];

    vector<double> cam0_intrinsics_temp(4);
    nh.getParam("cam0/intrinsics", cam0_intrinsics_temp);
    cam0_intrinsics[0] = cam0_intrinsics_temp[0];
    cam0_intrinsics[1] = cam0_intrinsics_temp[1];
    cam0_intrinsics[2] = cam0_intrinsics_temp[2];
    cam0_intrinsics[3] = cam0_intrinsics_temp[3];

    vector<double> cam1_intrinsics_temp(4);
    nh.getParam("cam1/intrinsics", cam1_intrinsics_temp);
    cam1_intrinsics[0] = cam1_intrinsics_temp[0];
    cam1_intrinsics[1] = cam1_intrinsics_temp[1];
    cam1_intrinsics[2] = cam1_intrinsics_temp[2];
    cam1_intrinsics[3] = cam1_intrinsics_temp[3];

    vector<double> cam0_distortion_coeffs_temp(4);
    nh.getParam("cam0/distortion_coeffs",
      cam0_distortion_coeffs_temp);
    cam0_distortion_coeffs[0] = cam0_distortion_coeffs_temp[0];
    cam0_distortion_coeffs[1] = cam0_distortion_coeffs_temp[1];
    cam0_distortion_coeffs[2] = cam0_distortion_coeffs_temp[2];
    cam0_distortion_coeffs[3] = cam0_distortion_coeffs_temp[3];

    vector<double> cam1_distortion_coeffs_temp(4);
    nh.getParam("cam1/distortion_coeffs",
      cam1_distortion_coeffs_temp);
    cam1_distortion_coeffs[0] = cam1_distortion_coeffs_temp[0];
    cam1_distortion_coeffs[1] = cam1_distortion_coeffs_temp[1];
    cam1_distortion_coeffs[2] = cam1_distortion_coeffs_temp[2];
    cam1_distortion_coeffs[3] = cam1_distortion_coeffs_temp[3];

    cv::Mat     T_imu_cam0 = utils::getTransformCV(nh, "cam0/T_cam_imu");
    cv::Matx33d R_imu_cam0(T_imu_cam0(cv::Rect(0,0,3,3)));
    cv::Vec3d   t_imu_cam0 = T_imu_cam0(cv::Rect(3,0,1,3));
    R_cam0_imu = R_imu_cam0.t();
    t_cam0_imu = -R_imu_cam0.t() * t_imu_cam0;
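
    // Following the Kalibr convention, T_cam_imu maps points from the IMU
    // frame into the camera frame: p_cam0 = R_imu_cam0 * p_imu + t_imu_cam0.
    // The stored quantities invert that rigid transform:
    // R_cam0_imu = R_imu_cam0^T, t_cam0_imu = -R_imu_cam0^T * t_imu_cam0.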

    cv::Mat T_cam0_cam1 = utils::getTransformCV(nh, "cam1/T_cn_cnm1");
    cv::Mat T_imu_cam1 = T_cam0_cam1 * T_imu_cam0;
    cv::Matx33d R_imu_cam1(T_imu_cam1(cv::Rect(0,0,3,3)));
    cv::Vec3d   t_imu_cam1 = T_imu_cam1(cv::Rect(3,0,1,3));
    R_cam1_imu = R_imu_cam1.t();
    t_cam1_imu = -R_imu_cam1.t() * t_imu_cam1;

    // Processor parameters
    nh.param<int>("grid_row", processor_config.grid_row, 4);
    nh.param<int>("grid_col", processor_config.grid_col, 4);
    nh.param<int>("grid_min_feature_num",
      processor_config.grid_min_feature_num, 2);
    nh.param<int>("grid_max_feature_num",
      processor_config.grid_max_feature_num, 4);
    nh.param<int>("pyramid_levels",
      processor_config.pyramid_levels, 3);
    nh.param<int>("patch_size",
      processor_config.patch_size, 31);
    nh.param<int>("fast_threshold",
      processor_config.fast_threshold, 20);
    nh.param<int>("max_iteration",
      processor_config.max_iteration, 30);
    nh.param<double>("track_precision",
      processor_config.track_precision, 0.01);
    nh.param<double>("ransac_threshold",
      processor_config.ransac_threshold, 3);
    nh.param<double>("stereo_threshold",
      processor_config.stereo_threshold, 3);
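
    // For reference, a cam0 entry in the YAML config under /config/ might
    // look like the following (illustrative values only, not taken from
    // this repository):
    //
    //   cam0:
    //     distortion_model: radtan
    //     resolution: [752, 480]
    //     intrinsics: [458.654, 457.296, 367.215, 248.375]
    //     distortion_coeffs: [-0.28, 0.07, 0.0002, 0.00002]
    //     T_cam_imu: [[...], [...], [...], [0.0, 0.0, 0.0, 1.0]]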

    ROS_INFO("===========================================");
    ROS_INFO("cam0_resolution: %d, %d",
      cam0_resolution[0], cam0_resolution[1]);
    ROS_INFO("cam0_intrinscs: %f, %f, %f, %f",
      cam0_intrinsics[0], cam0_intrinsics[1],
      cam0_intrinsics[2], cam0_intrinsics[3]);
    ROS_INFO("cam0_distortion_model: %s",
      cam0_distortion_model.c_str());
    ROS_INFO("cam0_distortion_coefficients: %f, %f, %f, %f",
      cam0_distortion_coeffs[0], cam0_distortion_coeffs[1],
      cam0_distortion_coeffs[2], cam0_distortion_coeffs[3]);

    ROS_INFO("cam1_resolution: %d, %d",
      cam1_resolution[0], cam1_resolution[1]);
    ROS_INFO("cam1_intrinscs: %f, %f, %f, %f",
      cam1_intrinsics[0], cam1_intrinsics[1],
      cam1_intrinsics[2], cam1_intrinsics[3]);
    ROS_INFO("cam1_distortion_model: %s",
      cam1_distortion_model.c_str());
    ROS_INFO("cam1_distortion_coefficients: %f, %f, %f, %f",
      cam1_distortion_coeffs[0], cam1_distortion_coeffs[1],
      cam1_distortion_coeffs[2], cam1_distortion_coeffs[3]);

    cout << R_imu_cam0 << endl;
    cout << t_imu_cam0.t() << endl;

    ROS_INFO("grid_row: %d",
      processor_config.grid_row);
    ROS_INFO("grid_col: %d",
      processor_config.grid_col);
    ROS_INFO("grid_min_feature_num: %d",
      processor_config.grid_min_feature_num);
    ROS_INFO("grid_max_feature_num: %d",
      processor_config.grid_max_feature_num);
    ROS_INFO("pyramid_levels: %d",
      processor_config.pyramid_levels);
    ROS_INFO("patch_size: %d",
      processor_config.patch_size);
    ROS_INFO("fast_threshold: %d",
      processor_config.fast_threshold);
    ROS_INFO("max_iteration: %d",
      processor_config.max_iteration);
    ROS_INFO("track_precision: %f",
      processor_config.track_precision);
    ROS_INFO("ransac_threshold: %f",
      processor_config.ransac_threshold);
    ROS_INFO("stereo_threshold: %f",
      processor_config.stereo_threshold);
    ROS_INFO("===========================================");
    return true;
}

/**
 * @brief Creates the ROS publishers and subscribers.
 *
 * Publishers: camera feature measurements and tracking info.
 * Subscribers: the two camera image streams and the IMU.
 */
bool ImageProcessor::createRosIO()
{
    feature_pub = nh.advertise<CameraMeasurement>(
      "features", 3);
    tracking_info_pub = nh.advertise<TrackingInfo>(
      "tracking_info", 1);
    image_transport::ImageTransport it(nh);
    debug_stereo_pub = it.advertise("debug_stereo_image", 1);

    cam0_img_sub.subscribe(nh, "cam0_image", 10);
    cam1_img_sub.subscribe(nh, "cam1_image", 10);
    stereo_sub.connectInput(cam0_img_sub, cam1_img_sub);
    stereo_sub.registerCallback(&ImageProcessor::stereoCallback, this);
    imu_sub = nh.subscribe("imu", 50,
      &ImageProcessor::imuCallback, this);

    return true;
}

/**
 * @brief Initializes the visual front end.
 *
 * Loads the parameters from the ROS parameter server, creates the
 * feature detector, and sets up the ROS publishers and subscribers.
 */
bool ImageProcessor::initialize()
{
    if (!loadParameters()) return false;
    ROS_INFO("Finish loading ROS parameters...");

    // Create feature detector.
    detector_ptr = FastFeatureDetector::create(
      processor_config.fast_threshold);

    if (!createRosIO()) return false;
    ROS_INFO("Finish creating ROS IO...");

    return true;
}

/**
 * @brief Callback for synchronized stereo image pairs.
 *
 * Builds the image pyramids, detects or tracks features, and
 * publishes the feature measurements of the current frame.
 */
void ImageProcessor::stereoCallback(const sensor_msgs::ImageConstPtr& cam0_img, const sensor_msgs::ImageConstPtr& cam1_img)
{

    //cout << "==================================" << endl;

    // Get the current image.
    cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img,
      sensor_msgs::image_encodings::MONO8);
    cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
      sensor_msgs::image_encodings::MONO8);

    //! Step 1: Build the image pyramids of the current frame.
    // Build the image pyramids once since they're used at multiple places
    createImagePyramids();

    //! Step 2: Track features.
    //! Step 2.1: If this is the first image, detect features and initialize.
    // Detect features in the first frame.
    if (is_first_img)
    {
        ros::Time start_time = ros::Time::now();
        initializeFirstFrame();
        //ROS_INFO("Detection time: %f",
        //    (ros::Time::now()-start_time).toSec());
        is_first_img = false;

        // Draw results.
        start_time = ros::Time::now();
        drawFeaturesStereo();
        //ROS_INFO("Draw features: %f",
        //    (ros::Time::now()-start_time).toSec());
    }
    //! Step 2.2: Otherwise, track the previous features into the current frame.
    else
    {
        // Track the feature in the previous image.
        ros::Time start_time = ros::Time::now();
        trackFeatures();
        //ROS_INFO("Tracking time: %f",
        //    (ros::Time::now()-start_time).toSec());

        // Add new features into the current image.
        start_time = ros::Time::now();
        addNewFeatures();
        //ROS_INFO("Addition time: %f",
        //    (ros::Time::now()-start_time).toSec());

        // Prune the features so each grid cell stays within its limit.
        start_time = ros::Time::now();
        pruneGridFeatures();
        //ROS_INFO("Prune grid features: %f",
        //    (ros::Time::now()-start_time).toSec());

        // Draw results.
        start_time = ros::Time::now();
        drawFeaturesStereo();
        //ROS_INFO("Draw features: %f",
        //    (ros::Time::now()-start_time).toSec());
    }

    //ros::Time start_time = ros::Time::now();
    //updateFeatureLifetime();
    //ROS_INFO("Statistics: %f",
    //    (ros::Time::now()-start_time).toSec());

    // Publish features in the current image.
    ros::Time start_time = ros::Time::now();
    publish();
    //ROS_INFO("Publishing: %f",
    //    (ros::Time::now()-start_time).toSec());

    // Update the previous image and previous features.
    cam0_prev_img_ptr = cam0_curr_img_ptr;
    prev_features_ptr = curr_features_ptr;
    std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_);

    // Initialize the current features to empty vectors.
    curr_features_ptr.reset(new GridFeatures());
    for (int code = 0; code < processor_config.grid_row*processor_config.grid_col; ++code)
    {
        (*curr_features_ptr)[code] = vector<FeatureMetaData>(0);
    }

    return;
}

void ImageProcessor::imuCallback(const sensor_msgs::ImuConstPtr& msg)
{
    // Wait for the first image to be set.
    if (is_first_img) return;
    imu_msg_buffer.push_back(*msg);
    return;
}

/*
 * Builds the optical-flow pyramids of the current stereo pair.
 */
void ImageProcessor::createImagePyramids()
{
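    // Arguments to cv::buildOpticalFlowPyramid below: the KLT window size,
    // the number of pyramid levels, withDerivatives (true, so the image
    // derivatives are precomputed once and reused by every subsequent
    // calcOpticalFlowPyrLK call on this pyramid), the pyramid and
    // derivative border modes, and tryReuseInputImage (false, so the
    // input image is always copied into the pyramid).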
    const Mat& curr_cam0_img = cam0_curr_img_ptr->image;
    buildOpticalFlowPyramid(
      curr_cam0_img, curr_cam0_pyramid_,
      Size(processor_config.patch_size, processor_config.patch_size),
      processor_config.pyramid_levels, true, BORDER_REFLECT_101,
      BORDER_CONSTANT, false);

    const Mat& curr_cam1_img = cam1_curr_img_ptr->image;
    buildOpticalFlowPyramid(
      curr_cam1_img, curr_cam1_pyramid_,
      Size(processor_config.patch_size, processor_config.patch_size),
      processor_config.pyramid_levels, true, BORDER_REFLECT_101,
      BORDER_CONSTANT, false);
}

/**
 * @brief Initializes features on the first frame.
 *
 * Detects FAST corners on cam0 and matches them to cam1 with KLT
 * optical flow, then divides the image into a grid and keeps a
 * limited number of features per grid cell.
 */
void ImageProcessor::initializeFirstFrame()
{
    //! Step 1: Compute the grid cell size.
    // Size of each grid.
    const Mat& img = cam0_curr_img_ptr->image;
    static int grid_height = img.rows / processor_config.grid_row;
    static int grid_width = img.cols / processor_config.grid_col;

    //! Step 2: Detect FAST corners.
    // Detect new features on the first image.
    vector<KeyPoint> new_features(0);
    detector_ptr->detect(img, new_features);

    //! Step 2.1: Copy the detected keypoint locations into cam0_points.
    // Find the stereo matched points for the newly
    // detected features.
    vector<cv::Point2f> cam0_points(new_features.size());
    for (int i = 0; i < new_features.size(); ++i)
        cam0_points[i] = new_features[i].pt;

    //! Step 3: Find the cam1 matches with KLT optical flow.
    vector<cv::Point2f> cam1_points(0);
    vector<unsigned char> inlier_markers(0);
    stereoMatch(cam0_points, cam1_points, inlier_markers);

    //! Step 4: Keep only the stereo-matching inliers.
    vector<cv::Point2f> cam0_inliers(0);
    vector<cv::Point2f> cam1_inliers(0);
    vector<float> response_inliers(0);
    for (int i = 0; i < inlier_markers.size(); ++i)
    {
        if (inlier_markers[i] == 0) continue;
        cam0_inliers.push_back(cam0_points[i]);
        cam1_inliers.push_back(cam1_points[i]);
        response_inliers.push_back(new_features[i].response);
    }

    //! Step 5: Assign each feature to its grid cell.
    // Group the features into grids
    GridFeatures grid_new_features;
    for (int code = 0; code < processor_config.grid_row*processor_config.grid_col; ++code)
        grid_new_features[code] = vector<FeatureMetaData>(0);

    for (int i = 0; i < cam0_inliers.size(); ++i)
    {
        const cv::Point2f& cam0_point = cam0_inliers[i];
        const cv::Point2f& cam1_point = cam1_inliers[i];
        const float& response = response_inliers[i];

        int row = static_cast<int>(cam0_point.y / grid_height);
        int col = static_cast<int>(cam0_point.x / grid_width);
        int code = row*processor_config.grid_col + col;

        FeatureMetaData new_feature;
        new_feature.response = response;
        new_feature.cam0_point = cam0_point;
        new_feature.cam1_point = cam1_point;
        grid_new_features[code].push_back(new_feature);
    }

    //! Step 6: Sort the features in each grid by detector response.
    // Sort the new features in each grid based on their responses.
    for (auto& item : grid_new_features)
        std::sort(item.second.begin(), item.second.end(), &ImageProcessor::featureCompareByResponse);

    //! Step 7: Keep the strongest corners in each grid and store them
    //! in curr_features_ptr.
    // Collect new features within each grid with high response.
    for (int code = 0; code < processor_config.grid_row*processor_config.grid_col; ++code)
    {
        vector<FeatureMetaData>& features_this_grid = (*curr_features_ptr)[code];
        vector<FeatureMetaData>& new_features_this_grid = grid_new_features[code];

        for (int k = 0; k < processor_config.grid_min_feature_num && k < new_features_this_grid.size(); ++k)
        {
            features_this_grid.push_back(new_features_this_grid[k]);
            features_this_grid.back().id = next_feature_id++;
            features_this_grid.back().lifetime = 1;
        }
    }

    return;
}

/*
 * Predicts the feature locations in the current frame from the
 * relative rotation, assuming zero translation between the frames.
 */
void ImageProcessor::predictFeatureTracking(const vector<cv::Point2f>& input_pts, const cv::Matx33f& R_p_c,
                                            const cv::Vec4d& intrinsics, vector<cv::Point2f>& compensated_pts)
{
    // Return directly if there are no input features.
    if (input_pts.size() == 0)
    {
        compensated_pts.clear();
        return;
    }
    compensated_pts.resize(input_pts.size());

    // Intrinsic matrix.
    cv::Matx33f K(intrinsics[0], 0.0, intrinsics[2],
                    0.0, intrinsics[1], intrinsics[3],
                    0.0, 0.0, 1.0);

    //! Assuming zero translation, the inter-frame motion reduces to a homography.
    cv::Matx33f H = K * R_p_c * K.inv();
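    // For a pure rotation R_p_c between the frames, a pixel p_p in the
    // previous frame transfers to the current frame as
    // p_c ~ K * R_p_c * K^-1 * p_p (the infinite homography); it is exact
    // for points at infinity and a reasonable prior for distant points.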

    for (int i = 0; i < input_pts.size(); ++i)
    {
        cv::Vec3f p1(input_pts[i].x, input_pts[i].y, 1.0f);
        cv::Vec3f p2 = H * p1;
        compensated_pts[i].x = p2[0] / p2[2];
        compensated_pts[i].y = p2[1] / p2[2];
    }

    return;
}

/**
 * @brief Tracks features from the previous frame into the current one.
 *
 * Predicts the feature motion with the IMU-integrated rotation,
 * tracks with KLT optical flow, then rejects outliers via stereo
 * matching and two-point RANSAC on both cameras.
 */
void ImageProcessor::trackFeatures()
{
    // Size of each grid.
    static int grid_height = cam0_curr_img_ptr->image.rows / processor_config.grid_row;
    static int grid_width = cam0_curr_img_ptr->image.cols / processor_config.grid_col;

    // Compute a rough relative rotation which takes a vector
    // from the previous frame to the current frame.
    //! Step 1: Integrate the IMU gyro measurements to get a rotation prior.
    Matx33f cam0_R_p_c;  // R_p_c: rotation from the previous frame to the current one.
    Matx33f cam1_R_p_c;
    integrateImuData(cam0_R_p_c, cam1_R_p_c);

    // Organize the features in the previous image.
    vector<FeatureIDType> prev_ids(0);
    vector<int> prev_lifetime(0);
    vector<Point2f> prev_cam0_points(0);
    vector<Point2f> prev_cam1_points(0);

    //! Step 2: Collect the previous features of both cameras into flat vectors.
    for (const auto& item : *prev_features_ptr)
    {
        for (const auto& prev_feature : item.second)
        {
            prev_ids.push_back(prev_feature.id);
            prev_lifetime.push_back(prev_feature.lifetime);
            prev_cam0_points.push_back(prev_feature.cam0_point);
            prev_cam1_points.push_back(prev_feature.cam1_point);
        }
    }

    // Number of the features before tracking.
    before_tracking = prev_cam0_points.size();

    // Abort tracking if there are no features in
    // the previous frame.
    if (prev_ids.size() == 0)
        return;

    // Track features using LK optical flow method.
    vector<Point2f> curr_cam0_points(0);
    vector<unsigned char> track_inliers(0);

    //! Step 3: Predict the current-frame feature locations from the rotation prior.
    predictFeatureTracking(prev_cam0_points, cam0_R_p_c, cam0_intrinsics, curr_cam0_points);

    //! Step 4: Track with KLT optical flow, seeded with the predicted locations.
    calcOpticalFlowPyrLK(prev_cam0_pyramid_, curr_cam0_pyramid_, prev_cam0_points, curr_cam0_points,
                        track_inliers, noArray(), Size(processor_config.patch_size, processor_config.patch_size),
                        processor_config.pyramid_levels, TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
                        processor_config.max_iteration, processor_config.track_precision), cv::OPTFLOW_USE_INITIAL_FLOW);
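
    // With cv::OPTFLOW_USE_INITIAL_FLOW, calcOpticalFlowPyrLK uses the
    // predicted curr_cam0_points as the initial estimate and refines them
    // in place, rather than starting the search at prev_cam0_points.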

    //! Step 5: Remove outliers.
    // Mark those tracked points out of the image region
    // as untracked.
    //! Step 5.1: Reject points that fell outside the image.
    for (int i = 0; i < curr_cam0_points.size(); ++i)
    {
        if (track_inliers[i] == 0)
            continue;
        if (curr_cam0_points[i].y < 0 || curr_cam0_points[i].y > cam0_curr_img_ptr->image.rows-1 ||
        curr_cam0_points[i].x < 0 || curr_cam0_points[i].x > cam0_curr_img_ptr->image.cols-1)
            track_inliers[i] = 0;
    }

    // Collect the tracked points.
    vector<FeatureIDType> prev_tracked_ids(0);
    vector<int> prev_tracked_lifetime(0);
    vector<Point2f> prev_tracked_cam0_points(0);
    vector<Point2f> prev_tracked_cam1_points(0);
    vector<Point2f> curr_tracked_cam0_points(0);

    removeUnmarkedElements(prev_ids, track_inliers, prev_tracked_ids);
    removeUnmarkedElements(prev_lifetime, track_inliers, prev_tracked_lifetime);
    removeUnmarkedElements(prev_cam0_points, track_inliers, prev_tracked_cam0_points);
    removeUnmarkedElements(prev_cam1_points, track_inliers, prev_tracked_cam1_points);
    removeUnmarkedElements(curr_cam0_points, track_inliers, curr_tracked_cam0_points);

    // Number of features left after tracking.
    after_tracking = curr_tracked_cam0_points.size();


    // Outlier removal involves three steps, which form a closed
    // loop between the previous and current frames of cam0 (left)
    // and cam1 (right). Assuming the stereo matching between the
    // previous cam0 and cam1 images is correct, the three steps are:
    //
    // prev frames cam0 ----------> cam1
    //              |                |
    //              |ransac          |ransac
    //              |   stereo match |
    // curr frames cam0 ----------> cam1
    //
    // 1) Stereo matching between current images of cam0 and cam1.
    // 2) RANSAC between previous and current images of cam0.
    // 3) RANSAC between previous and current images of cam1.
    //
    // For Step 3, tracking between the images is no longer needed.
    // The stereo matching results are directly used in the RANSAC.

    // Step 1: stereo matching.
    //! Step 5.2: Remove outliers via stereo matching between cam0 and cam1.
    vector<Point2f> curr_cam1_points(0);
    vector<unsigned char> match_inliers(0);
    stereoMatch(curr_tracked_cam0_points, curr_cam1_points, match_inliers);

    vector<FeatureIDType> prev_matched_ids(0);
    vector<int> prev_matched_lifetime(0);
    vector<Point2f> prev_matched_cam0_points(0);
    vector<Point2f> prev_matched_cam1_points(0);
    vector<Point2f> curr_matched_cam0_points(0);
    vector<Point2f> curr_matched_cam1_points(0);

    removeUnmarkedElements(prev_tracked_ids, match_inliers, prev_matched_ids);
    removeUnmarkedElements(prev_tracked_lifetime, match_inliers, prev_matched_lifetime);
    removeUnmarkedElements(prev_tracked_cam0_points, match_inliers, prev_matched_cam0_points);
    removeUnmarkedElements(prev_tracked_cam1_points, match_inliers, prev_matched_cam1_points);
    removeUnmarkedElements(curr_tracked_cam0_points, match_inliers, curr_matched_cam0_points);
    removeUnmarkedElements(curr_cam1_points, match_inliers, curr_matched_cam1_points);

    // Number of features left after stereo matching.
    after_matching = curr_matched_cam0_points.size();

    //! Step 5.3: RANSAC between the previous and current cam0 frames.
    // Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
    vector<int> cam0_ransac_inliers(0);
    twoPointRansac(prev_matched_cam0_points, curr_matched_cam0_points, cam0_R_p_c, cam0_intrinsics, cam0_distortion_model,
                    cam0_distortion_coeffs, processor_config.ransac_threshold,
                    0.99, cam0_ransac_inliers);

    //! Step 5.4: RANSAC between the previous and current cam1 frames.
    vector<int> cam1_ransac_inliers(0);
    twoPointRansac(prev_matched_cam1_points, curr_matched_cam1_points, cam1_R_p_c, cam1_intrinsics, cam1_distortion_model,
                    cam1_distortion_coeffs, processor_config.ransac_threshold,
                    0.99, cam1_ransac_inliers);

    // Number of features after ransac.
    after_ransac = 0;

    for (int i = 0; i < cam0_ransac_inliers.size(); ++i)
    {
        if (cam0_ransac_inliers[i] == 0 ||
        cam1_ransac_inliers[i] == 0) continue;
        int row = static_cast<int>(
        curr_matched_cam0_points[i].y / grid_height);
        int col = static_cast<int>(
        curr_matched_cam0_points[i].x / grid_width);
        int code = row*processor_config.grid_col + col;
        (*curr_features_ptr)[code].push_back(FeatureMetaData());

        FeatureMetaData& grid_new_feature = (*curr_features_ptr)[code].back();
        grid_new_feature.id = prev_matched_ids[i];
        grid_new_feature.lifetime = ++prev_matched_lifetime[i];
        grid_new_feature.cam0_point = curr_matched_cam0_points[i];
        grid_new_feature.cam1_point = curr_matched_cam1_points[i];

        ++after_ransac;
    }

    // Compute the tracking rate.
    int prev_feature_num = 0;
    for (const auto& item : *prev_features_ptr)
        prev_feature_num += item.second.size();

    int curr_feature_num = 0;
    for (const auto& item : *curr_features_ptr)
        curr_feature_num += item.second.size();

    ROS_INFO_THROTTLE(0.5,
    "\033[0;32m candidates: %d; track: %d; match: %d; ransac: %d/%d=%f\033[0m",
    before_tracking, after_tracking, after_matching,
    curr_feature_num, prev_feature_num,
    static_cast<double>(curr_feature_num)/
    (static_cast<double>(prev_feature_num)+1e-5));
    //printf(
    //    "\033[0;32m candidates: %d; raw track: %d; stereo match: %d; ransac: %d/%d=%f\033[0m\n",
    //    before_tracking, after_tracking, after_matching,
    //    curr_feature_num, prev_feature_num,
    //    static_cast<double>(curr_feature_num)/
    //    (static_cast<double>(prev_feature_num)+1e-5));

    return;
}

/**
 * @brief Matches features from cam0 to cam1 and rejects outliers
 *        with the epipolar constraint.
 * @param cam0_points Feature locations in the cam0 image.
 * @return cam1_points Matched feature locations in the cam1 image.
 * @return inlier_markers 1 for a successful match, 0 otherwise.
 */
void ImageProcessor::stereoMatch(const vector<cv::Point2f>& cam0_points, vector<cv::Point2f>& cam1_points, vector<unsigned char>& inlier_markers)
{

    if (cam0_points.size() == 0)
        return;

    //! Step 1: Undistort the cam0 points, rotate them into the cam1 frame,
    //! and re-distort them with the cam1 parameters as an initial guess.
    if(cam1_points.size() == 0)
    {
        // Initialize cam1_points by projecting cam0_points to cam1 using the
        // rotation from stereo extrinsics
        const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu;
        vector<cv::Point2f> cam0_points_undistorted;
        undistortPoints(cam0_points, cam0_intrinsics, cam0_distortion_model,
                        cam0_distortion_coeffs, cam0_points_undistorted,
                        R_cam0_cam1);
        cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics,
                                    cam1_distortion_model, cam1_distortion_coeffs);
    }

    //! Step 2: Track the cam0 points into the cam1 image with KLT optical flow.
    // Track features using LK optical flow method.
    calcOpticalFlowPyrLK(curr_cam0_pyramid_, curr_cam1_pyramid_, cam0_points, cam1_points,
      inlier_markers, noArray(), Size(processor_config.patch_size, processor_config.patch_size),
      processor_config.pyramid_levels,
      TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
                   processor_config.max_iteration,
                   processor_config.track_precision),
      cv::OPTFLOW_USE_INITIAL_FLOW);

    //! Step 3: Remove outliers.
    //! Step 3.1: Reject points that fall outside the cam1 image.
    // Mark those tracked points out of the image region
    // as untracked.
    for (int i = 0; i < cam1_points.size(); ++i)
    {
        if (inlier_markers[i] == 0)
            continue;
        if (cam1_points[i].y < 0 ||
            cam1_points[i].y > cam1_curr_img_ptr->image.rows-1 ||
            cam1_points[i].x < 0 ||
            cam1_points[i].x > cam1_curr_img_ptr->image.cols-1)
          inlier_markers[i] = 0;
    }

    //! Step 3.2.1: Compute the cam0-to-cam1 extrinsics and from them the essential matrix.
    // Compute the relative rotation between the cam0
    // frame and cam1 frame.
    const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu;
    const cv::Vec3d t_cam0_cam1 = R_cam1_imu.t() * (t_cam0_imu-t_cam1_imu);
    // Compute the essential matrix.
    const cv::Matx33d t_cam0_cam1_hat(
      0.0, -t_cam0_cam1[2], t_cam0_cam1[1],
      t_cam0_cam1[2], 0.0, -t_cam0_cam1[0],
      -t_cam0_cam1[1], t_cam0_cam1[0], 0.0);
    const cv::Matx33d E = t_cam0_cam1_hat * R_cam0_cam1;
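
    // With E = [t]x R, a cam0 point x0 (in normalized coordinates) maps to
    // the epipolar line l = E * x0 = (a, b, c)^T in cam1, and the distance
    // of a cam1 point x1 to that line is |x1^T l| / sqrt(a^2 + b^2), which
    // is the residual tested below.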

    //! Step 3.2.2: Further reject outliers with the epipolar constraint.
    // Further remove outliers based on the known
    // essential matrix.
    vector<cv::Point2f> cam0_points_undistorted(0);
    vector<cv::Point2f> cam1_points_undistorted(0);
    undistortPoints(
      cam0_points, cam0_intrinsics, cam0_distortion_model,
      cam0_distortion_coeffs, cam0_points_undistorted);
    undistortPoints(
      cam1_points, cam1_intrinsics, cam1_distortion_model,
      cam1_distortion_coeffs, cam1_points_undistorted);
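
    // The undistorted points above live on the normalized image plane
    // (undistortPoints is called without new_intrinsics here, and its
    // default projection is assumed to leave points normalized), so a
    // pixel threshold must be divided by the focal length:
    // norm_pixel_unit is the reciprocal of the mean of the four focal lengths.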

    double norm_pixel_unit = 4.0 / (
      cam0_intrinsics[0]+cam0_intrinsics[1]+
      cam1_intrinsics[0]+cam1_intrinsics[1]);

    for (int i = 0; i < cam0_points_undistorted.size(); ++i)
    {
        if (inlier_markers[i] == 0) continue;
        cv::Vec3d pt0(cam0_points_undistorted[i].x,
            cam0_points_undistorted[i].y, 1.0);
        cv::Vec3d pt1(cam1_points_undistorted[i].x,
            cam1_points_undistorted[i].y, 1.0);
        cv::Vec3d epipolar_line = E * pt0;

        //! Distance from the point to the epipolar line.
        double error = fabs((pt1.t() * epipolar_line)[0]) / sqrt(
            epipolar_line[0]*epipolar_line[0]+
            epipolar_line[1]*epipolar_line[1]);
        if (error > processor_config.stereo_threshold*norm_pixel_unit)
          inlier_markers[i] = 0;
    }

    return;
}

/*
 * Detects new corners while tracking so that each grid cell
 * keeps at least the minimum number of features.
 */
void ImageProcessor::addNewFeatures()
{
    const Mat& curr_img = cam0_curr_img_ptr->image;

    //! Step 1: Build a detection mask so existing features are not re-detected.
    // Size of each grid.
    static int grid_height =
    cam0_curr_img_ptr->image.rows / processor_config.grid_row;
    static int grid_width =
    cam0_curr_img_ptr->image.cols / processor_config.grid_col;

    // Create a mask to avoid redetecting existing features.
    Mat mask(curr_img.rows, curr_img.cols, CV_8U, Scalar(1));

    for (const auto& features : *curr_features_ptr)
    {
        for (const auto& feature : features.second)
        {
            const int y = static_cast<int>(feature.cam0_point.y);
            const int x = static_cast<int>(feature.cam0_point.x);

            //! Mask out a 5x5 region around each existing feature.
            int up_lim = y-2, bottom_lim = y+3, left_lim = x-2, right_lim = x+3;
            if (up_lim < 0)up_lim = 0;
            if (bottom_lim > curr_img.rows)
                bottom_lim = curr_img.rows;
            if (left_lim < 0) left_lim = 0;
            if (right_lim > curr_img.cols) right_lim = curr_img.cols;

            Range row_range(up_lim, bottom_lim);
            Range col_range(left_lim, right_lim);
            mask(row_range, col_range) = 0;
        }
    }

    //! Step 2: Detect new corners outside the masked regions.
    // Detect new features.
    vector<KeyPoint> new_features(0);
    detector_ptr->detect(curr_img, new_features, mask);

    //! Step 3: Within each grid cell, keep only the corners with the
    //! strongest response.
    // Collect the newly detected features based on the grid.
    // Select the ones with the top response within each grid afterwards.
    vector<vector<KeyPoint> > new_feature_sieve(
    processor_config.grid_row*processor_config.grid_col);
    for (const auto& feature : new_features)
    {
        int row = static_cast<int>(feature.pt.y / grid_height);
        int col = static_cast<int>(feature.pt.x / grid_width);
        new_feature_sieve[row*processor_config.grid_col+col].push_back(feature);
    }

    new_features.clear();
    for (auto& item : new_feature_sieve)
    {
        if (item.size() > processor_config.grid_max_feature_num)
        {
            std::sort(item.begin(), item.end(), &ImageProcessor::keyPointCompareByResponse);
            item.erase(item.begin()+processor_config.grid_max_feature_num, item.end());
        }
        new_features.insert(new_features.end(), item.begin(), item.end());
    }

    int detected_new_features = new_features.size();

    //! Step 4: Find the cam1 matches for the newly detected features.
    // Find the stereo matched points for the newly
    // detected features.
    vector<cv::Point2f> cam0_points(new_features.size());
    for (int i = 0; i < new_features.size(); ++i)
        cam0_points[i] = new_features[i].pt;

    vector<cv::Point2f> cam1_points(0);
    vector<unsigned char> inlier_markers(0);
    stereoMatch(cam0_points, cam1_points, inlier_markers);

    //! Step 5: Keep only the stereo-matching inliers.
    vector<cv::Point2f> cam0_inliers(0);
    vector<cv::Point2f> cam1_inliers(0);
    vector<float> response_inliers(0);
    for (int i = 0; i < inlier_markers.size(); ++i)
    {
        if (inlier_markers[i] == 0)
            continue;
        cam0_inliers.push_back(cam0_points[i]);
        cam1_inliers.push_back(cam1_points[i]);
        response_inliers.push_back(new_features[i].response);
    }

    //! Warn if the stereo matching quality is suspiciously low.
    int matched_new_features = cam0_inliers.size();
    if (matched_new_features < 5 &&
        static_cast<double>(matched_new_features)/
        static_cast<double>(detected_new_features) < 0.1)
        ROS_WARN("Images at [%f] seem unsynced...",
                 cam0_curr_img_ptr->header.stamp.toSec());

    //! Step 6: Assign the new features to their grid cells.
    // Group the features into grids
    GridFeatures grid_new_features;
    for (int code = 0; code < processor_config.grid_row*processor_config.grid_col; ++code)
        grid_new_features[code] = vector<FeatureMetaData>(0);

    for (int i = 0; i < cam0_inliers.size(); ++i)
    {
        const cv::Point2f& cam0_point = cam0_inliers[i];
        const cv::Point2f& cam1_point = cam1_inliers[i];
        const float& response = response_inliers[i];

        int row = static_cast<int>(cam0_point.y / grid_height);
        int col = static_cast<int>(cam0_point.x / grid_width);
        int code = row*processor_config.grid_col + col;

        FeatureMetaData new_feature;
        new_feature.response = response;
        new_feature.cam0_point = cam0_point;
        new_feature.cam1_point = cam1_point;
        grid_new_features[code].push_back(new_feature);
    }

    //! Step 7: Sort the new features in each grid by detector response.
    // Sort the new features in each grid based on their responses.
    for (auto& item : grid_new_features)
        std::sort(item.second.begin(), item.second.end(), &ImageProcessor::featureCompareByResponse);

    //! Step 8: Fill each grid up to the minimum count with the strongest new features.
    int new_added_feature_num = 0;
    // Collect new features within each grid with high response.
    for (int code = 0; code <
    processor_config.grid_row*processor_config.grid_col; ++code)
    {
        vector<FeatureMetaData>& features_this_grid = (*curr_features_ptr)[code];
        vector<FeatureMetaData>& new_features_this_grid = grid_new_features[code];

        //! Skip this grid if it already holds enough features.
        if (features_this_grid.size() >= processor_config.grid_min_feature_num)
            continue;

        int vacancy_num = processor_config.grid_min_feature_num -
        features_this_grid.size();
        for (int k = 0; k < vacancy_num && k < new_features_this_grid.size(); ++k)
        {
            features_this_grid.push_back(new_features_this_grid[k]);
            features_this_grid.back().id = next_feature_id++;
            features_this_grid.back().lifetime = 1;

            ++new_added_feature_num;
        }
    }

    //printf("\033[0;33m detected: %d; matched: %d; new added feature: %d\033[0m\n",
    //    detected_new_features, matched_new_features, new_added_feature_num);

    return;
}

/*
 * Prunes grids that exceed the maximum feature count, keeping the
 * longest-tracked features.
 */
void ImageProcessor::pruneGridFeatures()
{
    for (auto& item : *curr_features_ptr)
    {
        auto& grid_features = item.second;
        // Continue if the number of features in this grid does
        // not exceed the upper bound.
        if (grid_features.size() <= processor_config.grid_max_feature_num)
            continue;

        std::sort(grid_features.begin(), grid_features.end(), &ImageProcessor::featureCompareByLifetime);

        grid_features.erase(grid_features.begin()+ processor_config.grid_max_feature_num, grid_features.end());
    }
    return;
}

/**
 * @brief Undistorts keypoints from the raw image.
 * @param pts_in Keypoint locations in the raw image.
 * @param intrinsics Camera intrinsics (fx, fy, cx, cy).
 * @param distortion_model Distortion model ("radtan" or "equidistant").
 * @param distortion_coeffs Distortion coefficients.
 * @param rectification_matrix Optional rectifying rotation, e.g. the
 *        rotation between the two cameras.
 * @param new_intrinsics Intrinsics used to re-project the undistorted
 *        points.
 * @return pts_out Undistorted keypoint locations.
 */
void ImageProcessor::undistortPoints(const vector<cv::Point2f>& pts_in, const cv::Vec4d& intrinsics,
            const string& distortion_model, const cv::Vec4d& distortion_coeffs,
            vector<cv::Point2f>& pts_out, const cv::Matx33d &rectification_matrix, const cv::Vec4d &new_intrinsics)
{

    if (pts_in.size() == 0)
        return;

    const cv::Matx33d K(
      intrinsics[0], 0.0, intrinsics[2],
      0.0, intrinsics[1], intrinsics[3],
      0.0, 0.0, 1.0);

    const cv::Matx33d K_new(
      new_intrinsics[0], 0.0, new_intrinsics[2],
      0.0, new_intrinsics[1], new_intrinsics[3],
      0.0, 0.0, 1.0);

    if (distortion_model == "radtan")
    {
        cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                        rectification_matrix, K_new);
    }
    else if (distortion_model == "equidistant")
    {
        cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                                 rectification_matrix, K_new);
    }
    else
    {
        ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...", distortion_model.c_str());
        cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                        rectification_matrix, K_new);
    }

    return;
}

vector<cv::Point2f> ImageProcessor::distortPoints(
    const vector<cv::Point2f>& pts_in,
    const cv::Vec4d& intrinsics,
    const string& distortion_model,
    const cv::Vec4d& distortion_coeffs) {

  const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
                      0.0, intrinsics[1], intrinsics[3],
                      0.0, 0.0, 1.0);

  vector<cv::Point2f> pts_out;
  if (distortion_model == "radtan") {
    vector<cv::Point3f> homogenous_pts;
    cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
    cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
                      distortion_coeffs, pts_out);
  } else if (distortion_model == "equidistant") {
    cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
  } else {
    ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
                  distortion_model.c_str());
    vector<cv::Point3f> homogenous_pts;
    cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
    cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
                      distortion_coeffs, pts_out);
  }

  return pts_out;
}

/*
 * Integrates the gyro measurements between the previous and current
 * frames to obtain the relative camera rotations.
 */
void ImageProcessor::integrateImuData(Matx33f& cam0_R_p_c, Matx33f& cam1_R_p_c)
{
    //! Step 1: Find the IMU messages spanning the two frames.
    //! Step 1.1: Find the first message near the previous image stamp.
    // Find the start and the end limit within the imu msg buffer.
    auto begin_iter = imu_msg_buffer.begin();
    while (begin_iter != imu_msg_buffer.end())
    {
        if ((begin_iter->header.stamp - cam0_prev_img_ptr->header.stamp).toSec() < -0.01)
            ++begin_iter;
        else
            break;
    }

    //! Step 1.2: Find the last message near the current image stamp.
    auto end_iter = begin_iter;
    while (end_iter != imu_msg_buffer.end())
    {
        if ((end_iter->header.stamp- cam0_curr_img_ptr->header.stamp).toSec() < 0.005)
          ++end_iter;
        else
          break;
    }

    //! Step 2: Compute the mean angular velocity over the interval.
    // Compute the mean angular velocity in the IMU frame.
    Vec3f mean_ang_vel(0.0, 0.0, 0.0);
    for (auto iter = begin_iter; iter < end_iter; ++iter)
        mean_ang_vel += Vec3f(iter->angular_velocity.x, iter->angular_velocity.y, iter->angular_velocity.z);
    if (end_iter-begin_iter > 0)
        mean_ang_vel *= 1.0f / (end_iter-begin_iter);

    //! Rotate the mean angular velocity into the two camera frames.
    // Transform the mean angular velocity from the IMU
    // frame to the cam0 and cam1 frames.
    Vec3f cam0_mean_ang_vel = R_cam0_imu.t() * mean_ang_vel;
    Vec3f cam1_mean_ang_vel = R_cam1_imu.t() * mean_ang_vel;

    //! Step 3: Convert the rotation vector into a rotation matrix with the Rodrigues formula, see https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?highlight=rodrigues#rodrigues
    // Compute the relative rotation.
    double dtime = (cam0_curr_img_ptr->header.stamp- cam0_prev_img_ptr->header.stamp).toSec();
    Rodrigues(cam0_mean_ang_vel*dtime, cam0_R_p_c);
    Rodrigues(cam1_mean_ang_vel*dtime, cam1_R_p_c);
    cam0_R_p_c = cam0_R_p_c.t();
    cam1_R_p_c = cam1_R_p_c.t();
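
    // Rodrigues(w * dt) returns the rotation that re-expresses a vector
    // from the current camera frame in the previous one; its transpose is
    // R_p_c, which takes vectors from the previous frame to the current
    // frame, matching the convention used in trackFeatures().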

    // Delete the useless and used imu messages.
    imu_msg_buffer.erase(imu_msg_buffer.begin(), end_iter);
    return;
}

//! Rescales the points so that their mean distance from the origin is sqrt(2),
//! like the scaling step of the normalization used when estimating H or F.
void ImageProcessor::rescalePoints(vector<Point2f>& pts1, vector<Point2f>& pts2, float& scaling_factor)
{
    scaling_factor = 0.0f;

    for (int i = 0; i < pts1.size(); ++i)
    {
        scaling_factor += sqrt(pts1[i].dot(pts1[i]));
        scaling_factor += sqrt(pts2[i].dot(pts2[i]));
    }

    scaling_factor = (pts1.size()+pts2.size()) / scaling_factor * sqrt(2.0f);
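
    // With scaling_factor = sqrt(2) * N / sum(||p_i||), where N is the
    // total number of points, the rescaled points have a mean norm of
    // exactly sqrt(2).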

    for (int i = 0; i < pts1.size(); ++i)
    {
        pts1[i] *= scaling_factor;
        pts2[i] *= scaling_factor;
    }

    return;
}

/**
 * @brief Two-point RANSAC for rejecting temporal tracking outliers.
 * @param pts1 Keypoint locations in the previous frame.
 * @param pts2 Tracked keypoint locations in the current frame.
 * @param R_p_c Relative rotation from the previous to the current
 *        camera frame, computed from the IMU (R_kk-1).
 * @param intrinsics Camera intrinsics.
 * @param distortion_model, distortion_coeffs Camera distortion model
 *        and coefficients.
 * @param inlier_error Acceptable inlier threshold (keypoint distance).
 * @param success_probability Desired probability of RANSAC success.
 * @return inlier_markers 1 for inliers, 0 for outliers.
 */
void ImageProcessor::twoPointRansac(const vector<Point2f>& pts1, const vector<Point2f>& pts2, const cv::Matx33f& R_p_c, const cv::Vec4d& intrinsics,
    const std::string& distortion_model, const cv::Vec4d& distortion_coeffs,
    const double& inlier_error, const double& success_probability, vector<int>& inlier_markers)
{

    // Make sure the two input point sets have the same size.
    if (pts1.size() != pts2.size())
        ROS_ERROR("Sets of different size (%lu and %lu) are used...",
                  pts1.size(), pts2.size());

    //! Compute the outlier threshold (in normalized units) and the number
    //! of RANSAC iterations.
    double norm_pixel_unit = 2.0 / (intrinsics[0]+intrinsics[1]);
    int iter_num = static_cast<int>(ceil(log(1-success_probability) / log(1-0.7*0.7)));
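    // This is the standard RANSAC iteration count k = log(1 - p) /
    // log(1 - w^n) for a 2-point model (n = 2) with an assumed inlier
    // ratio w = 0.7; for the p = 0.99 passed by the callers,
    // k = ceil(log(0.01) / log(0.51)) = 7 iterations.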

    // Initially, mark all points as inliers.
    inlier_markers.clear();
    inlier_markers.resize(pts1.size(), 1);

    //! Undistort all the points.
    // Undistort all the points.
    vector<Point2f> pts1_undistorted(pts1.size());
    vector<Point2f> pts2_undistorted(pts2.size());
    undistortPoints(pts1, intrinsics, distortion_model, distortion_coeffs, pts1_undistorted);
    undistortPoints(pts2, intrinsics, distortion_model, distortion_coeffs, pts2_undistorted);

    //! Compensate the previous-frame points with the IMU-derived rotation.
    // Compensate the points in the previous image with
    // the relative rotation.
    for (auto& pt : pts1_undistorted)
    {
        Vec3f pt_h(pt.x, pt.y, 1.0f);
        //Vec3f pt_hc = dR * pt_h;
        Vec3f pt_hc = R_p_c * pt_h;
        pt.x = pt_hc[0];
        pt.y = pt_hc[1];
    }

    //! Rescale the points so their mean distance from the origin is sqrt(2).
    // Normalize the points to gain numerical stability.
    float scaling_factor = 0.0f;
    rescalePoints(pts1_undistorted, pts2_undistorted, scaling_factor);
    norm_pixel_unit *= scaling_factor;

    //! Compute the flow vectors (point differences).
    // Compute the difference between previous and current points,
    // which will be used frequently later.
    vector<Point2d> pts_diff(pts1_undistorted.size());
    for (int i = 0; i < pts1_undistorted.size(); ++i)
        pts_diff[i] = pts1_undistorted[i] - pts2_undistorted[i];

    //! Reject pairs with a very large flow and compute the mean flow
    //! distance of the rest.
    // Mark the point pairs with large difference directly.
    // BTW, the mean distance of the rest of the point pairs
    // is computed.
    double mean_pt_distance = 0.0;
    int raw_inlier_cntr = 0;
    for (int i = 0; i < pts_diff.size(); ++i)
    {
        double distance = sqrt(pts_diff[i].dot(pts_diff[i]));
        // A 50 pixel distance is a pretty large tolerance for normal motion.
        // However, to be used with aggressive motion, this tolerance should
        // be increased significantly to match the usage.
        if (distance > 50.0*norm_pixel_unit)
        {
            inlier_markers[i] = 0;
        }
        else
        {
            mean_pt_distance += distance;
            ++raw_inlier_cntr;
        }
    }
    mean_pt_distance /= raw_inlier_cntr;

    //! With fewer than 3 raw inliers, mark everything as an outlier and return.
    // If the current number of inliers is less than 3, just mark
    // all input as outliers. This case can happen with fast
    // rotation where very few features are tracked.
    if (raw_inlier_cntr < 3)
    {
        for (auto& marker : inlier_markers) marker = 0;
            return;
    }

    // Before doing 2-point RANSAC, we have to check if the motion
    // is degenerated, meaning that there is no translation between
    // the frames, in which case, the model of the RANSAC does not
    // work. If so, the distance between the matched points will
    // be almost 0.
    //if (mean_pt_distance < inlier_error*norm_pixel_unit) {

    //! If the motion is degenerate (almost no translation, e.g. the sensor
    //! is static or purely rotating), fall back to a stricter flow test.
    if (mean_pt_distance < norm_pixel_unit)
    {
        //ROS_WARN_THROTTLE(1.0, "Degenerated motion...");
        for (int i = 0; i < pts_diff.size(); ++i)
        {
            if (inlier_markers[i] == 0)
                continue;
            if (sqrt(pts_diff[i].dot(pts_diff[i])) > inlier_error*norm_pixel_unit)
                inlier_markers[i] = 0;
        }
        return;
    }

    /* From the epipolar constraint  x2^T E x1 = 0              (1)
     * with E = [t]x R:  x2^T [t]x (R x1) = 0. Writing x1' = R x1 and
     * using the scalar triple product x2 . (t x x1') = t . (x1' x x2),
     * the constraint becomes
     *        (x1' x x2)^T t = 0                                (2)
     * i.e. each point pair contributes one linear equation in t.
     */
    // In the case of general motion, the RANSAC model can be applied.
    // The three columns correspond to tx, ty, and tz respectively.
    MatrixXd coeff_t(pts_diff.size(), 3);
    for (int i = 0; i < pts_diff.size(); ++i)
    {
        coeff_t(i, 0) = pts_diff[i].y;
        coeff_t(i, 1) = -pts_diff[i].x;
        coeff_t(i, 2) = pts1_undistorted[i].x*pts2_undistorted[i].y - pts1_undistorted[i].y*pts2_undistorted[i].x;
    }
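
    // Sanity check: with the compensated previous point x1' = (x1, y1, 1)
    // and the current point x2 = (x2, y2, 1),
    //   x1' x x2 = (y1 - y2, x2 - x1, x1*y2 - y1*x2),
    // which is exactly the row filled in above, so each row of coeff_t
    // encodes the constraint (x1' x x2)^T t = 0 from (2).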

    //! Collect the indices of the raw inliers.
    vector<int> raw_inlier_idx;
    for (int i = 0; i < inlier_markers.size(); ++i)
    {
        if (inlier_markers[i] != 0)
            raw_inlier_idx.push_back(i);
    }

    vector<int> best_inlier_set;
    double best_error = 1e10;
    random_numbers::RandomNumberGenerator random_gen;

    //! Run the RANSAC iterations.
    for (int iter_idx = 0; iter_idx < iter_num; ++iter_idx)
    {
        // Randomly select two point pairs.
        // Although this is an unusual way of selecting two pairs, it
        // efficiently avoids picking the same pair twice.
        //! Step 1: Randomly pick two distinct point pairs.
        int select_idx1 = random_gen.uniformInteger(0, raw_inlier_idx.size()-1);
        int select_idx_diff = random_gen.uniformInteger(1, raw_inlier_idx.size()-1);

        int select_idx2 = select_idx1+select_idx_diff<raw_inlier_idx.size() ?
                            select_idx1+select_idx_diff :
                            select_idx1+select_idx_diff-raw_inlier_idx.size();
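
        // Because select_idx_diff is drawn from [1, N-1] and wrapped
        // modulo N, select_idx2 always differs from select_idx1, so the
        // same pair is never sampled twice in one iteration.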

        int pair_idx1 = raw_inlier_idx[select_idx1];
        int pair_idx2 = raw_inlier_idx[select_idx2];

        // Construct the model.
        Vector2d coeff_tx(coeff_t(pair_idx1, 0), coeff_t(pair_idx2, 0));
        Vector2d coeff_ty(coeff_t(pair_idx1, 1), coeff_t(pair_idx2, 1));
        Vector2d coeff_tz(coeff_t(pair_idx1, 2), coeff_t(pair_idx2, 2));

        //! Compute the l1 norm of each coefficient column.
        vector<double> coeff_l1_norm(3);
        coeff_l1_norm[0] = coeff_tx.lpNorm<1>();
        coeff_l1_norm[1] = coeff_ty.lpNorm<1>();
        coeff_l1_norm[2] = coeff_tz.lpNorm<1>();

        //! Pick the column with the smallest l1 norm.
        int base_indicator = min_element(coeff_l1_norm.begin(), coeff_l1_norm.end())-coeff_l1_norm.begin();

        /*
         * Solve for t up to scale. Since t carries an unknown scale
         * factor, fix the entry whose coefficient column has the
         * smallest l1 norm to 1 and solve (2) for the other two
         * entries using the two sampled point pairs. For example,
         * with t = (1, t1, t2)^T:
         *  | y1-y2    x2-x1    x1*y2-y1*x2     |   | 1  |
         *  | y1'-y2'  x2'-x1'  x1'*y2'-y1'*x2' | * | t1 | = 0
         *                                          | t2 |
         * where the primed coordinates belong to the second sampled pair.
         */

        Vector3d model(0.0, 0.0, 0.0);
        if (base_indicator == 0)
        {
            Matrix2d A;
            A << coeff_ty, coeff_tz;
            Vector2d solution = A.inverse() * (-coeff_tx);
            model(0) = 1.0;
            model(1) = solution(0);
            model(2) = solution(1);
        }
        else if (base_indicator == 1)
        {
            Matrix2d A;
            A << coeff_tx, coeff_tz;
            Vector2d solution = A.inverse() * (-coeff_ty);
            model(0) = solution(0);
            model(1) = 1.0;
            model(2) = solution(1);
        }
        else
        {
            Matrix2d A;
            A << coeff_tx, coeff_ty;
            Vector2d solution = A.inverse() * (-coeff_tz);
            model(0) = solution(0);
            model(1) = solution(1);
            model(2) = 1.0;
        }
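
        // Fixing the unknown whose coefficient column has the smallest l1
        // norm to 1 presumably keeps the 2x2 system built from the two
        // larger columns, making A less likely to be near-singular.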

        //! Evaluate the epipolar error of all points under the candidate t.
        // Find all the inliers among point pairs.
        VectorXd error = coeff_t * model;

        //! Collect the point pairs whose error is within the threshold.
        vector<int> inlier_set;
        for (int i = 0; i < error.rows(); ++i)
        {
            if (inlier_markers[i] == 0)
                continue;
            if (std::abs(error(i)) < inlier_error*norm_pixel_unit)
                inlier_set.push_back(i);
        }

        // If the number of inliers is small, the current
        // model is probably wrong.
        if (inlier_set.size() < 0.2*pts1_undistorted.size())
            continue;

        //! Stack the coefficients of the candidate inliers.
        // Refit the model using all of the possible inliers.
        VectorXd coeff_tx_better(inlier_set.size());
        VectorXd coeff_ty_better(inlier_set.size());
        VectorXd coeff_tz_better(inlier_set.size());
        for (int i = 0; i < inlier_set.size(); ++i)
        {
            coeff_tx_better(i) = coeff_t(inlier_set[i], 0);
            coeff_ty_better(i) = coeff_t(inlier_set[i], 1);
            coeff_tz_better(i) = coeff_t(inlier_set[i], 2);
        }

        //! Re-solve for t over the inlier set with least squares.
        Vector3d model_better(0.0, 0.0, 0.0);
        if (base_indicator == 0)
        {
            MatrixXd A(inlier_set.size(), 2);
            A << coeff_ty_better, coeff_tz_better;
            Vector2d solution = (A.transpose() * A).inverse() * A.transpose() * (-coeff_tx_better);
            model_better(0) = 1.0;
            model_better(1) = solution(0);
            model_better(2) = solution(1);
        }
        else if (base_indicator == 1)
        {
            MatrixXd A(inlier_set.size(), 2);
            A << coeff_tx_better, coeff_tz_better;
            Vector2d solution = (A.transpose() * A).inverse() * A.transpose() * (-coeff_ty_better);
            model_better(0) = solution(0);
            model_better(1) = 1.0;
            model_better(2) = solution(1);
        }
        else
        {
            MatrixXd A(inlier_set.size(), 2);
            A << coeff_tx_better, coeff_ty_better;
            Vector2d solution = (A.transpose() * A).inverse() * A.transpose() * (-coeff_tz_better);
            model_better(0) = solution(0);
            model_better(1) = solution(1);
            model_better(2) = 1.0;
        }

        //! Re-evaluate the epipolar error and keep the better model.
        // Compute the error and update the best model if possible.
        VectorXd new_error = coeff_t * model_better;

        double this_error = 0.0;
        for (const auto& inlier_idx : inlier_set)
            this_error += std::abs(new_error(inlier_idx));
        this_error /= inlier_set.size();

        if (inlier_set.size() > best_inlier_set.size())
        {
            best_error = this_error;
            best_inlier_set = inlier_set;
        }
    }

    // Fill in the markers.
    inlier_markers.clear();
    inlier_markers.resize(pts1.size(), 0);
    for (const auto& inlier_idx : best_inlier_set)
        inlier_markers[inlier_idx] = 1;

    //printf("inlier ratio: %lu/%lu\n",
    //    best_inlier_set.size(), inlier_markers.size());

    return;
}

void ImageProcessor::publish()
{

    // Publish features.
    CameraMeasurementPtr feature_msg_ptr(new CameraMeasurement);
    feature_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp;

    vector<FeatureIDType> curr_ids(0);
    vector<Point2f> curr_cam0_points(0);
    vector<Point2f> curr_cam1_points(0);

    for (const auto& grid_features : (*curr_features_ptr))
    {
        for (const auto& feature : grid_features.second)
        {
            curr_ids.push_back(feature.id);
            curr_cam0_points.push_back(feature.cam0_point);
            curr_cam1_points.push_back(feature.cam1_point);
        }
    }

    vector<Point2f> curr_cam0_points_undistorted(0);
    vector<Point2f> curr_cam1_points_undistorted(0);

    undistortPoints(
    curr_cam0_points, cam0_intrinsics, cam0_distortion_model,
    cam0_distortion_coeffs, curr_cam0_points_undistorted);
    undistortPoints(
    curr_cam1_points, cam1_intrinsics, cam1_distortion_model,
    cam1_distortion_coeffs, curr_cam1_points_undistorted);

    for (int i = 0; i < curr_ids.size(); ++i)
    {
        feature_msg_ptr->features.push_back(FeatureMeasurement());
        feature_msg_ptr->features[i].id = curr_ids[i];
        feature_msg_ptr->features[i].u0 = curr_cam0_points_undistorted[i].x;
        feature_msg_ptr->features[i].v0 = curr_cam0_points_undistorted[i].y;
        feature_msg_ptr->features[i].u1 = curr_cam1_points_undistorted[i].x;
        feature_msg_ptr->features[i].v1 = curr_cam1_points_undistorted[i].y;
    }

    feature_pub.publish(feature_msg_ptr);

    // Publish tracking info.
    //! Publish the tracking info, mainly the feature counts at each stage.
    TrackingInfoPtr tracking_info_msg_ptr(new TrackingInfo());
    tracking_info_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp;
    tracking_info_msg_ptr->before_tracking = before_tracking;
    tracking_info_msg_ptr->after_tracking = after_tracking;
    tracking_info_msg_ptr->after_matching = after_matching;
    tracking_info_msg_ptr->after_ransac = after_ransac;
    tracking_info_pub.publish(tracking_info_msg_ptr);

    return;
}

void ImageProcessor::drawFeaturesMono() {
  // Colors for different features.
  Scalar tracked(0, 255, 0);
  Scalar new_feature(0, 255, 255);

  static int grid_height =
    cam0_curr_img_ptr->image.rows / processor_config.grid_row;
  static int grid_width =
    cam0_curr_img_ptr->image.cols / processor_config.grid_col;

  // Create an output image.
  int img_height = cam0_curr_img_ptr->image.rows;
  int img_width = cam0_curr_img_ptr->image.cols;
  Mat out_img(img_height, img_width, CV_8UC3);
  cvtColor(cam0_curr_img_ptr->image, out_img, CV_GRAY2RGB);

  // Draw grids on the image.
  for (int i = 1; i < processor_config.grid_row; ++i) {
    Point pt1(0, i*grid_height);
    Point pt2(img_width, i*grid_height);
    line(out_img, pt1, pt2, Scalar(255, 0, 0));
  }
  for (int i = 1; i < processor_config.grid_col; ++i) {
    Point pt1(i*grid_width, 0);
    Point pt2(i*grid_width, img_height);
    line(out_img, pt1, pt2, Scalar(255, 0, 0));
  }

  // Collect features ids in the previous frame.
  vector<FeatureIDType> prev_ids(0);
  for (const auto& grid_features : *prev_features_ptr)
    for (const auto& feature : grid_features.second)
      prev_ids.push_back(feature.id);

  // Collect feature points in the previous frame.
  map<FeatureIDType, Point2f> prev_points;
  for (const auto& grid_features : *prev_features_ptr)
    for (const auto& feature : grid_features.second)
      prev_points[feature.id] = feature.cam0_point;

  // Collect feature points in the current frame.
  map<FeatureIDType, Point2f> curr_points;
  for (const auto& grid_features : *curr_features_ptr)
    for (const auto& feature : grid_features.second)
      curr_points[feature.id] = feature.cam0_point;

  // Draw tracked features.
  for (const auto& id : prev_ids) {
    if (prev_points.find(id) != prev_points.end() &&
        curr_points.find(id) != curr_points.end()) {
      cv::Point2f prev_pt = prev_points[id];
      cv::Point2f curr_pt = curr_points[id];
      circle(out_img, curr_pt, 3, tracked);
      line(out_img, prev_pt, curr_pt, tracked, 1);

      prev_points.erase(id);
      curr_points.erase(id);
    }
  }

  // Draw new features.
  for (const auto& new_curr_point : curr_points) {
    cv::Point2f pt = new_curr_point.second;
    circle(out_img, pt, 3, new_feature, -1);
  }

  imshow("Feature", out_img);
  waitKey(5);
}

void ImageProcessor::drawFeaturesStereo() {

  if(debug_stereo_pub.getNumSubscribers() > 0)
  {
    // Colors for different features.
    Scalar tracked(0, 255, 0);
    Scalar new_feature(0, 255, 255);

    static int grid_height =
      cam0_curr_img_ptr->image.rows / processor_config.grid_row;
    static int grid_width =
      cam0_curr_img_ptr->image.cols / processor_config.grid_col;

    // Create an output image.
    int img_height = cam0_curr_img_ptr->image.rows;
    int img_width = cam0_curr_img_ptr->image.cols;
    Mat out_img(img_height, img_width*2, CV_8UC3);
    cvtColor(cam0_curr_img_ptr->image,
             out_img.colRange(0, img_width), CV_GRAY2RGB);
    cvtColor(cam1_curr_img_ptr->image,
             out_img.colRange(img_width, img_width*2), CV_GRAY2RGB);

    // Draw grids on the image.
    for (int i = 1; i < processor_config.grid_row; ++i) {
      Point pt1(0, i*grid_height);
      Point pt2(img_width*2, i*grid_height);
      line(out_img, pt1, pt2, Scalar(255, 0, 0));
    }
    for (int i = 1; i < processor_config.grid_col; ++i) {
      Point pt1(i*grid_width, 0);
      Point pt2(i*grid_width, img_height);
      line(out_img, pt1, pt2, Scalar(255, 0, 0));
    }
    for (int i = 1; i < processor_config.grid_col; ++i) {
      Point pt1(i*grid_width+img_width, 0);
      Point pt2(i*grid_width+img_width, img_height);
      line(out_img, pt1, pt2, Scalar(255, 0, 0));
    }

    // Collect features ids in the previous frame.
    vector<FeatureIDType> prev_ids(0);
    for (const auto& grid_features : *prev_features_ptr)
      for (const auto& feature : grid_features.second)
        prev_ids.push_back(feature.id);

    // Collect feature points in the previous frame.
    map<FeatureIDType, Point2f> prev_cam0_points;
    map<FeatureIDType, Point2f> prev_cam1_points;
    for (const auto& grid_features : *prev_features_ptr)
      for (const auto& feature : grid_features.second) {
        prev_cam0_points[feature.id] = feature.cam0_point;
        prev_cam1_points[feature.id] = feature.cam1_point;
      }

    // Collect feature points in the current frame.
    map<FeatureIDType, Point2f> curr_cam0_points;
    map<FeatureIDType, Point2f> curr_cam1_points;
    for (const auto& grid_features : *curr_features_ptr)
      for (const auto& feature : grid_features.second) {
        curr_cam0_points[feature.id] = feature.cam0_point;
        curr_cam1_points[feature.id] = feature.cam1_point;
      }

    // Draw tracked features.
    for (const auto& id : prev_ids) {
      if (prev_cam0_points.find(id) != prev_cam0_points.end() &&
          curr_cam0_points.find(id) != curr_cam0_points.end()) {
        cv::Point2f prev_pt0 = prev_cam0_points[id];
        cv::Point2f prev_pt1 = prev_cam1_points[id] + Point2f(img_width, 0.0);
        cv::Point2f curr_pt0 = curr_cam0_points[id];
        cv::Point2f curr_pt1 = curr_cam1_points[id] + Point2f(img_width, 0.0);

        circle(out_img, curr_pt0, 3, tracked, -1);
        circle(out_img, curr_pt1, 3, tracked, -1);
        line(out_img, prev_pt0, curr_pt0, tracked, 1);
        line(out_img, prev_pt1, curr_pt1, tracked, 1);

        prev_cam0_points.erase(id);
        prev_cam1_points.erase(id);
        curr_cam0_points.erase(id);
        curr_cam1_points.erase(id);
      }
    }

    // Draw new features.
    for (const auto& new_cam0_point : curr_cam0_points) {
      cv::Point2f pt0 = new_cam0_point.second;
      cv::Point2f pt1 = curr_cam1_points[new_cam0_point.first] +
        Point2f(img_width, 0.0);

      circle(out_img, pt0, 3, new_feature, -1);
      circle(out_img, pt1, 3, new_feature, -1);
    }

    cv_bridge::CvImage debug_image(cam0_curr_img_ptr->header, "bgr8", out_img);
    debug_stereo_pub.publish(debug_image.toImageMsg());
  }
  //imshow("Feature", out_img);
  //waitKey(5);

  return;
}

void ImageProcessor::updateFeatureLifetime() {
  for (int code = 0; code <
      processor_config.grid_row*processor_config.grid_col; ++code) {
    vector<FeatureMetaData>& features = (*curr_features_ptr)[code];
    for (const auto& feature : features) {
      if (feature_lifetime.find(feature.id) == feature_lifetime.end())
        feature_lifetime[feature.id] = 1;
      else
        ++feature_lifetime[feature.id];
    }
  }

  return;
}

void ImageProcessor::featureLifetimeStatistics() {

  map<int, int> lifetime_statistics;
  for (const auto& data : feature_lifetime) {
    if (lifetime_statistics.find(data.second) ==
        lifetime_statistics.end())
      lifetime_statistics[data.second] = 1;
    else
      ++lifetime_statistics[data.second];
  }

  for (const auto& data : lifetime_statistics)
    cout << data.first << " : " << data.second << endl;

  return;
}

} // end namespace msckf_vio
