#include"visual_odom/frontend.h"

namespace test_visual_odom{

/// Build the frontend from configuration and the calibrated stereo pair.
/// Reads the feature budgets from config, creates the GFTT corner detector,
/// stores both camera models, and starts with a fresh, empty map.
Frontend::Frontend(Config::Ptr p_config, Camera::Ptr p_camera_left, Camera::Ptr p_camera_right){
    // Feature budgets: minimum stereo matches to initialize, and the
    // per-frame detection target.
    num_features_init_ = p_config->num_features_init_;
    num_features_ = p_config->num_features_;

    // GFTT corner detector: quality level 0.01, minimum distance 20 px.
    gftt_ = cv::GFTTDetector::create(p_config->num_features_, 0.01, 20);

    // Stereo rig; the left camera is the reference for the frame pose.
    p_camera_left_ = p_camera_left;
    p_camera_right_ = p_camera_right;

    // Fresh map owned (shared) by the frontend.
    map_ = std::make_shared<Map>();
}

/// Main entry point: process one new stereo frame according to the current
/// tracking status, publish visualization output, and remember the frame as
/// the "last frame" for the next call. Always returns true.
bool Frontend::addFrame(Frame::Ptr frame){
    p_current_frame_ = frame;
    switch (status_)
    {
    case FrontendStatus::INITING:
        stereoInit();
        break;
    case FrontendStatus::TRACKING_GOOD:
        ROS_DEBUG("Tracking good");
        // NOTE: intentional fallthrough — both GOOD and BAD statuses run the
        // same left-image tracking; GOOD only adds the debug log above.
    case FrontendStatus::TRACKING_BAD:
        leftImgTrack();
        break;
    case FrontendStatus::LOST:
            ROS_DEBUG("track lost");
            break;
    default:
        break;
    }
    
    // Publish the frame and landmarks for visualization. This runs regardless
    // of tracking outcome (even when LOST or when stereoInit failed).
    p_show_pub_->pub_frame(p_current_frame_);
    p_show_pub_->pub_landmark();
    
    p_last_frame_ = p_current_frame_;
    return true;
}

bool Frontend::stereoInit() {
    //获取左右目之间对应上的特征点
    int num_features_left = detectFeatures();
    int num_coor_features = findFeaturesInRight();
    if (num_coor_features < num_features_init_) {
        return false;
    }

    //利用双目相机之间的标定好的变换矩阵，三角化得到路标，设定初始位姿(左目)为原点  
    bool build_map_success = buildInitMap();
    if (build_map_success) {
        status_ = FrontendStatus::TRACKING_GOOD;
        return true;
    }
    return false;
}

int Frontend::detectFeatures() {
    cv::Mat mask(p_current_frame_->left_img_.size(), CV_8UC1, 255);
    //帧的特征点跟踪：这个mask是用来将当前帧和之前帧成功跟踪特征点进行掩盖的，不希望再检测到这些特征点，此时，需要新的landmark
    for (auto &feat : p_current_frame_->feature_points_left_) {
        cv::rectangle(mask, feat->keyPoint2D_.pt - cv::Point2f(10, 10),
                      feat->keyPoint2D_.pt + cv::Point2f(10, 10), 0, CV_FILLED);
    }

    vector<cv::KeyPoint> keypoints;
    gftt_->detect(p_current_frame_->left_img_, keypoints, mask);
    int cnt_detected = 0;
    for (auto &kp : keypoints) {
        p_current_frame_->feature_points_left_.emplace_back(
            FeaturePoint::Ptr(new FeaturePoint(kp, p_current_frame_)));
        cnt_detected++;
    }

    return cnt_detected;
}

int Frontend::findFeaturesInRight() {
    //使用光流跟踪，实现特征点筛选
    std::vector<cv::Point2f> kps_left, kps_right;
    for (auto &kp : p_current_frame_->feature_points_left_) {
        kps_left.emplace_back(kp->keyPoint2D_.pt);
        //帧跟踪：先是解决当前帧与上一帧的特帧跟踪问题，如果跟踪成功，则会使用之前的landmark，做一个右目的特征跟踪判断的初始值
        auto mp = kp->p_landmark_.lock();
        if (mp) {
            // use projected points as initial guess
            auto px =
                p_camera_right_->world2pixel(mp->pos_world_, p_current_frame_->getPose());
            kps_right.push_back(cv::Point2f(px[0], px[1]));
        } else {
            //使用和左目一样的特征点位置，这用于初始化
            kps_right.emplace_back(kp->keyPoint2D_.pt);
        }
    }

    std::vector<uchar> status;
    cv::Mat error;
    //光流跟踪，筛选它特征点
    cv::calcOpticalFlowPyrLK(
        p_current_frame_->left_img_, p_current_frame_->right_img_, kps_left,
        kps_right, status, error, cv::Size(11, 11), 3,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
                         0.01),
        cv::OPTFLOW_USE_INITIAL_FLOW);

    int num_good_pts = 0;
    for (size_t i = 0; i < status.size(); ++i) {
        if (status[i]) {
            cv::KeyPoint kp(kps_right[i], 7);
            FeaturePoint::Ptr feat(new FeaturePoint(kp,p_current_frame_));
            p_current_frame_->feature_points_right_.emplace_back(feat);
            num_good_pts++;
        } else{
            p_current_frame_->feature_points_right_.emplace_back(nullptr);
        }
    }
    return num_good_pts;
}

/// Build the initial map by triangulating the stereo feature pairs of the
/// current frame with the calibrated camera extrinsics, then register the
/// frame as the first keyframe. Assumes the current frame's pose is the
/// world origin (initialization), so triangulated points are already in
/// world coordinates. Always returns true.
bool Frontend::buildInitMap() {
    std::vector<SE3> poses{p_camera_left_->pose(), p_camera_right_->pose()};
    size_t cnt_init_landmarks = 0;
    // feature_points_right_ is index-aligned with feature_points_left_
    // (findFeaturesInRight inserts nullptr for failed matches).
    for (size_t i = 0; i < p_current_frame_->feature_points_left_.size(); ++i) {
        if (p_current_frame_->feature_points_right_[i] == nullptr){
            // For efficiency we could skip assigning the null landmark, but
            // the commented line below documents the intent explicitly.
            // p_current_frame_->feature_points_left_[i]->p_landmark_ = nullptr;
            continue;  
        } 
        
        // create map point from triangulation
        std::vector<Eigen::Vector3d> points{
            p_camera_left_->pixel2camera(
                Eigen::Vector2d(p_current_frame_->feature_points_left_[i]->keyPoint2D_.pt.x,
                     p_current_frame_->feature_points_left_[i]->keyPoint2D_.pt.y)),
            p_camera_right_->pixel2camera(
                Eigen::Vector2d(p_current_frame_->feature_points_right_[i]->keyPoint2D_.pt.x,
                     p_current_frame_->feature_points_right_[i]->keyPoint2D_.pt.y))};
        Eigen::Vector3d pworld = Eigen::Vector3d::Zero();

        // Accept only successful triangulations with positive depth.
        if (triangulation(poses, points, pworld) && pworld[2] > 0) {
            auto new_landmark = LandMark::createNewLandmark();
            new_landmark->setPos(pworld);
            // Link landmark <-> features in both directions (both views).
            new_landmark->addFeaturePoint2D(p_current_frame_->feature_points_left_[i]);
            new_landmark->addFeaturePoint2D(p_current_frame_->feature_points_right_[i]);
            p_current_frame_->feature_points_left_[i]->p_landmark_ = new_landmark;
            p_current_frame_->feature_points_right_[i]->p_landmark_ = new_landmark;
            cnt_init_landmarks++;
            map_->insertLandMark(new_landmark);
        }
    }
    // The initializing frame becomes the first keyframe.
    p_current_frame_->setKeyFrame();
    map_->insertKeyFrame(p_current_frame_);
    p_show_pub_->updateMap(map_);

    ROS_DEBUG_STREAM("Set frame " <<p_current_frame_->frame_id_ << " as keyframe "
            << p_current_frame_->keyframe_id_);

    // Log via ROS.
    ROS_DEBUG_STREAM("Initial map created with " << cnt_init_landmarks
              << " landmarks");

    return true;
}

/// Track the previous frame's features into the current left image, estimate
/// the current pose, classify tracking quality, and (when the inlier count
/// drops) insert a keyframe. Always returns true.
bool Frontend::leftImgTrack() {
    // Constant-velocity motion model: predict the current pose from the last
    // inter-frame relative motion and the last frame's absolute pose.
    if (p_last_frame_) {
        p_current_frame_->setPose(relative_motion_ * p_last_frame_->getPose());
    }

    trackLastFrame();
    tracking_inliers_ = estimateCurrentPose();

    // Classify tracking quality by the number of pose-estimation inliers.
    if (tracking_inliers_ > num_features_tracking_) {
        status_ = FrontendStatus::TRACKING_GOOD;
    } else if (tracking_inliers_ > num_features_tracking_bad_) {
        status_ = FrontendStatus::TRACKING_BAD;
    } else {
        status_ = FrontendStatus::LOST;
    }

    featurePointsFitForKeyframe();

    // Update the motion prior for the next frame. Guard the dereference the
    // same way as the prediction above: the original code checked
    // p_last_frame_ before using it once but not here.
    if (p_last_frame_) {
        relative_motion_ = p_current_frame_->getPose() * p_last_frame_->getPose().inverse();
    }

    return true;
}

/// Track last frame's left-image features into the current left image with
/// LK optical flow, carrying each feature's landmark association forward.
/// Returns the number of successfully tracked points.
int Frontend::trackLastFrame() {
    // use LK flow to estimate points in the current left image
    std::vector<cv::Point2f> kps_last, kps_current;
    for (auto &kp : p_last_frame_->feature_points_left_) {
        kps_last.push_back(kp->keyPoint2D_.pt);
        // Lock the weak landmark pointer once and reuse it (the original
        // locked twice, doing redundant refcount work and risking the two
        // locks observing different states).
        if (auto mp = kp->p_landmark_.lock()) {
            // Project the landmark with the predicted pose as the flow's
            // initial guess; backend optimization may have adjusted poses, so
            // the projection can differ slightly from the detected pixel.
            auto px =
                p_camera_left_->world2pixel(mp->pos_world_, p_current_frame_->getPose());
            kps_current.push_back(cv::Point2f(px[0], px[1]));
        } else {
            // No landmark: start the search at the previous pixel location.
            kps_current.push_back(kp->keyPoint2D_.pt);
        }
    }

    std::vector<uchar> status;
    cv::Mat error;
    cv::calcOpticalFlowPyrLK(
        p_last_frame_->left_img_, p_current_frame_->left_img_, kps_last,
        kps_current, status, error, cv::Size(11, 11), 3,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
                         0.01),
        cv::OPTFLOW_USE_INITIAL_FLOW);

    int num_good_pts = 0;

    for (size_t i = 0; i < status.size(); ++i) {
        if (status[i]) {
            cv::KeyPoint kp(kps_current[i], 7);
            FeaturePoint::Ptr feature(new FeaturePoint(kp, p_current_frame_));
            // Inherit the landmark association from the tracked feature.
            feature->p_landmark_ = p_last_frame_->feature_points_left_[i]->p_landmark_;
            p_current_frame_->feature_points_left_.push_back(feature);
            num_good_pts++;
        }
    }

    ROS_DEBUG_STREAM("Find " << num_good_pts << " in the last image.");
    return num_good_pts;
}

/// Optimize the current frame's pose with g2o (pose-only bundle adjustment
/// over reprojection errors of the associated landmarks), iteratively
/// rejecting outliers with a chi-square gate. Returns the inlier count and
/// writes the refined pose back into the frame; outlier features lose their
/// landmark association.
int Frontend::estimateCurrentPose() {
    // setup g2o
    typedef g2o::BlockSolver_6_3 BlockSolverType;
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType>
        LinearSolverType;
    auto solver = new g2o::OptimizationAlgorithmLevenberg(
        g2o::make_unique<BlockSolverType>(
            g2o::make_unique<LinearSolverType>()));
    g2o::SparseOptimizer optimizer;
    optimizer.setAlgorithm(solver);

    // vertex: the single optimized variable is the camera pose.
    VertexPose *vertex_pose = new VertexPose();  // camera vertex_pose
    vertex_pose->setId(0);
    vertex_pose->setEstimate(p_current_frame_->getPose());
    optimizer.addVertex(vertex_pose);

    // edges: one unary reprojection edge per feature with a live landmark.
    // `features` and `edges` stay index-aligned for the outlier loop below.
    int index = 1;
    std::vector<EdgeProjectionPoseOnly *> edges;
    std::vector<FeaturePoint::Ptr> features;
    for (size_t i = 0; i < p_current_frame_->feature_points_left_.size(); ++i) {
        auto mp = p_current_frame_->feature_points_left_[i]->p_landmark_.lock();
        if (mp) {
            features.push_back(p_current_frame_->feature_points_left_[i]);
            EdgeProjectionPoseOnly *edge =
                new EdgeProjectionPoseOnly(mp->pos_world_, p_camera_left_->getCameraIntrinsics());
            edge->setId(index);
            edge->setVertex(0, vertex_pose);
            edge->setMeasurement(Eigen::Vector2d(p_current_frame_->feature_points_left_[i]->keyPoint2D_.pt.x, 
                                                                                               p_current_frame_->feature_points_left_[i]->keyPoint2D_.pt.y ));
            edge->setInformation(Eigen::Matrix2d::Identity());
            edge->setRobustKernel(new g2o::RobustKernelHuber);
            edges.push_back(edge);
            optimizer.addEdge(edge);
            index++;
        }
    }

    // estimate the Pose then determine the outliers:
    // 4 rounds of (optimize 10 iterations, re-gate every edge). Each round
    // restarts from the frame's predicted pose.
    const double chi2_th = 5.991;  // chi-square 95% threshold, 2 DoF
    int cnt_outlier = 0;
    for (int iteration = 0; iteration < 4; ++iteration) {
        vertex_pose->setEstimate(p_current_frame_->getPose());
        optimizer.initializeOptimization();
        optimizer.optimize(10);
        cnt_outlier = 0;

        // count the outliers
        for (size_t i = 0; i < edges.size(); ++i) {
            auto e = edges[i];
            // Edges demoted to level 1 were excluded from optimization, so
            // their error is stale; recompute before re-gating them.
            if (features[i]->is_outlier_) {
                e->computeError();
            }
            if (e->chi2() > chi2_th) {
                features[i]->is_outlier_ = true;
                e->setLevel(1);  // level 1: excluded from the next round
                cnt_outlier++;
            } else {
                features[i]->is_outlier_ = false;
                e->setLevel(0);  // level 0: participates in optimization
            };

            // After the third round, drop the robust kernel so the final
            // round optimizes the plain (unweighted) reprojection error.
            if (iteration == 2) {
                e->setRobustKernel(nullptr);
            }
        }
    }

    ROS_DEBUG_STREAM( "Outlier/Inlier in pose estimating: " << cnt_outlier << "/"
              << features.size() - cnt_outlier);
    // Set pose and outlier
    p_current_frame_->setPose(vertex_pose->estimate());

    ROS_DEBUG_STREAM("Current Pose = \n" << p_current_frame_->getPose().matrix());

    // Detach outlier features from their landmarks but keep the features
    // themselves; they may be re-associated later.
    for (auto &feat : features) {
        if (feat->is_outlier_) {
            feat->p_landmark_.reset();
            feat->is_outlier_ = false;  // maybe we can still use it in future
        }
    }
    return features.size() - cnt_outlier;
}

/// Decide whether the current frame should become a keyframe. If tracking
/// still has enough inliers, do nothing and return false; otherwise promote
/// the frame to a keyframe, register its landmark observations, replenish
/// the feature set, and triangulate new landmarks. Returns true when a
/// keyframe was inserted.
bool Frontend::featurePointsFitForKeyframe() {
    // Plenty of inliers left: no keyframe needed.
    if (tracking_inliers_ >= num_features_needed_for_keyframe_) {
        return false;
    }

    // Promote the current frame to a keyframe.
    p_current_frame_->setKeyFrame();
    map_->insertKeyFrame(p_current_frame_);

    // Register this frame's observations with their landmarks. Deferred to
    // keyframe creation (rather than pose estimation) to save computation on
    // ordinary frames; these features were tracked by optical flow from
    // earlier keyframe detections.
    for (auto &feat : p_current_frame_->feature_points_left_) {
        if (auto landmark = feat->p_landmark_.lock()) {
            landmark->addFeaturePoint2D(feat);
        }
    }

    ROS_DEBUG_STREAM("insert frame " <<p_current_frame_->frame_id_<< " for map to judege keyframe "
              << p_current_frame_->keyframe_id_);

    // Replenish features and extend the map.
    detectFeatures();        // detect new corners in the left image
    findFeaturesInRight();   // stereo-match them into the right image
    triangulateNewPoints();  // triangulate pairs that still lack a landmark

    return true;
}

/// Triangulate stereo feature pairs of the current frame that have no
/// associated landmark yet, lift the points into the world frame, and insert
/// the new landmarks into the map. Returns the number of new landmarks.
int Frontend::triangulateNewPoints() {
    std::vector<SE3> poses{p_camera_left_->pose(), p_camera_right_->pose()};
    // Triangulation yields points in the current camera (stereo-rig) frame;
    // T_wc lifts them into the world frame.
    SE3 current_pose_Twc = p_current_frame_->getPose().inverse();
    int cnt_triangulated_pts = 0;
    for (size_t i = 0; i < p_current_frame_->feature_points_left_.size(); ++i) {
        // Only left features without a landmark that have a valid right match.
        if (p_current_frame_->feature_points_left_[i]->p_landmark_.expired() &&
            p_current_frame_->feature_points_right_[i] != nullptr) {
            std::vector<Eigen::Vector3d> points{
                p_camera_left_->pixel2camera(
                    Eigen::Vector2d(p_current_frame_->feature_points_left_[i]->keyPoint2D_.pt.x,
                         p_current_frame_->feature_points_left_[i]->keyPoint2D_.pt.y)),
                p_camera_right_->pixel2camera(
                    Eigen::Vector2d(p_current_frame_->feature_points_right_[i]->keyPoint2D_.pt.x,
                         p_current_frame_->feature_points_right_[i]->keyPoint2D_.pt.y))};
            Eigen::Vector3d pworld = Eigen::Vector3d::Zero();

            // Accept only successful triangulations with positive depth.
            if (triangulation(poses, points, pworld) && pworld[2] > 0) {
                auto new_landmark = LandMark::createNewLandmark();
                pworld = current_pose_Twc * pworld;
                new_landmark->setPos(pworld);
                // BUGFIX: the original registered the LEFT feature twice and
                // never linked the RIGHT one. Link both views, matching the
                // pattern used in buildInitMap.
                new_landmark->addFeaturePoint2D(
                    p_current_frame_->feature_points_left_[i]);
                new_landmark->addFeaturePoint2D(
                    p_current_frame_->feature_points_right_[i]);

                p_current_frame_->feature_points_left_[i]->p_landmark_ = new_landmark;
                p_current_frame_->feature_points_right_[i]->p_landmark_ = new_landmark;
                map_->insertLandMark(new_landmark);
                cnt_triangulated_pts++;
            }
        }
    }
    p_show_pub_->updateMap(map_);
    ROS_DEBUG_STREAM("new landmarks: " << cnt_triangulated_pts);
    return cnt_triangulated_pts;
}

}