//
// Created by gaoxiang on 19-5-2.
//

#include <opencv2/opencv.hpp>


#include "feature.h"
#include "frontend.h"
#include <cstring>


namespace myslam {

    // Construct the frontend: create the SIFT detector/descriptor extractor
    // and a FLANN-based matcher used for frame-to-frame descriptor matching.
    Frontend::Frontend() {
        matcher_ = cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);
        sift_ = cv::SIFT::create();
    }

    // Entry point for every incoming frame: remember the previous frame,
    // make the new one current, and dispatch on the tracking status.
    // Always returns true.
    bool Frontend::AddFrame(myslam::Frame::Ptr frame) {
        last_frame_ = current_frame_;
        current_frame_ = frame;

        if (status_ == FrontendStatus::INITING) {
            // First frame(s): try to initialize.
            StereoInit();
        } else if (status_ == FrontendStatus::TRACKING_GOOD ||
                   status_ == FrontendStatus::TRACKING_BAD) {
            // Normal operation: track against the previous frame.
            Track();
        }
        // FrontendStatus::LOST / Reset() handling is currently disabled:
        //case FrontendStatus::LOST:
        //    Reset();
        //    break;

        return true;
    }

    // Track the current frame against the last one: detect fresh SIFT
    // features on the current frame, then match them to the last frame.
    // Pose estimation and status transitions are currently disabled
    // (kept below as commented-out code). Always returns true.
    bool Frontend::Track() {
        //if (last_frame_) {
        //    current_frame_->SetPose(relative_motion_ * last_frame_->Pose());
        //}

        // Detect features first so the current frame has descriptors for
        // matching. The count itself is not used here (TrackLastFrame
        // reports the match count), so the return value is discarded.
        DetectFeatures();

        int num_track_last = TrackLastFrame();

        // glog appends a newline per message itself; std::endl would add a
        // spurious blank line, so it is intentionally omitted.
        LOG(INFO) << "当前帧:  " << current_frame_->id_ << " 与上一帧:  " << last_frame_->id_ << "有 " << num_track_last << " 对匹配特征点";

        ////tracking_inliers_ = EstimateCurrentPose();

        //if (tracking_inliers_ > num_features_tracking_) {
        //    // tracking good
        //    status_ = FrontendStatus::TRACKING_GOOD;
        //}
        //else if (tracking_inliers_ > num_features_tracking_bad_) {
        //    // tracking bad
        //    status_ = FrontendStatus::TRACKING_BAD;
        //}
        //else {
        //    // lost
        //    status_ = FrontendStatus::LOST;
        //}

        ////InsertKeyframe();

        return true;
    }




    // First frame: run SIFT detection only. Initialization succeeds (and the
    // status becomes TRACKING_GOOD) when enough features are detected;
    // otherwise it fails and the status is left unchanged (still INITING).
    bool Frontend::StereoInit() {
        int num_features_left = DetectFeatures();
        //int num_coor_features = FindFeaturesInRight();

        // Too few features on the first frame -> initialization failed.
        if (num_features_left < num_features_init_) {
            return false;
        }

        status_ = FrontendStatus::TRACKING_GOOD;
        return true;
    }


    

    //把检测到的特征点给current_frame_->features_left_.push_back
    //以及计算描述子current_frame_->descriptors
    int Frontend::DetectFeatures() {

        //cv::Mat mask(current_frame_->left_img_.size(), CV_8UC1, 255);
        //for (auto& feat : current_frame_->features_left_) {
        //    cv::rectangle(mask, feat->position_.pt - cv::Point2f(10, 10),
        //        feat->position_.pt + cv::Point2f(10, 10), 0, cv::FILLED);
        //}

        std::vector<cv::KeyPoint> keypoints;
        sift_->detect(current_frame_->left_img_, keypoints, current_frame_->masks);
        int cnt_detected = 0;
        for (auto& kp : keypoints) {
            current_frame_->features_left_.push_back(
                    Feature::Ptr(new Feature(current_frame_, kp)));
            cnt_detected++;
        }

        sift_->compute(current_frame_->left_img_, keypoints, current_frame_->descriptors);

        LOG(INFO) << "Detect " << cnt_detected << " new features";
        return cnt_detected;
    }


    //这一段是为了寻找当前帧和上一帧对应的特征点，kps_last[i]和 kps_current[i]一 一对应
    int Frontend::TrackLastFrame() {
        int num_good_pts = 0;
        std::vector<std::vector<cv::DMatch> > knn_matches;

        std::vector<cv::DMatch> good_matches;

        //Knn匹配两帧的特征点，并筛选好的匹配
        matcher_->knnMatch(last_frame_->descriptors, current_frame_->descriptors, knn_matches, 2);
        for (auto& knn_matche : knn_matches) {
            if (knn_matche[0].distance < ratio_thresh_ * knn_matche[1].distance) {
                good_matches.push_back(knn_matche[0]);
                num_good_pts++;
            }
        }

        //把筛选好的匹配，重新装入每帧的特征点，之前没筛选的特征点丢弃
        std::vector<cv::KeyPoint> last_keypoints;
        std::vector<cv::KeyPoint> current_keypoints;
        for (auto& knn_matche : good_matches) {
            last_keypoints.push_back((last_frame_->features_left_[knn_matche.queryIdx])->position_);
            current_keypoints.push_back((current_frame_->features_left_[knn_matche.trainIdx])->position_);

        }
        last_frame_->features_left_.clear();
        current_frame_->features_left_.clear();
        for (auto& kp : last_keypoints) {
            last_frame_->features_left_.push_back(
                    Feature::Ptr(new Feature(last_frame_, kp)));

        }
        for (auto& kp : current_keypoints) {
            current_frame_->features_left_.push_back(
                    Feature::Ptr(new Feature(current_frame_, kp)));
        }
        //根据新的特征点重新计算描述子
        sift_->compute(current_frame_->left_img_, current_keypoints, current_frame_->descriptors);
        sift_->compute(last_frame_->left_img_, last_keypoints, last_frame_->descriptors);

        if (is_display_match)
        {
            std::vector<cv::DMatch> matches;
            matcher_->match(last_frame_->descriptors, current_frame_->descriptors, matches);
            cv::Mat img_matches_bf;
            cv::drawMatches(last_frame_->left_img_, last_keypoints, current_frame_->left_img_, current_keypoints, matches, img_matches_bf);
            std::string save_name = ("matches" + std::to_string(current_frame_->id_) + std::to_string(last_frame_->id_) + ".jpg");
            cv::imwrite(save_name, img_matches_bf);
            //LOG(INFO) << "Find " << matches.size() << " match.";


        }

        //LOG(INFO) << "Find " << num_good_pts << " in the last image.";
        return num_good_pts;
    }

    int Frontend::FindFeaturesInRight() {
        // use LK flow to estimate points in the right image
        std::vector<cv::Point2f> kps_left, kps_right;
        for (auto& kp : current_frame_->features_left_) {
            kps_left.push_back(kp->position_.pt);
            auto mp = kp->map_point_.lock();
            if (mp) {
                //// use projected points as initial guess
                //auto px =
                //    camera_right_->world2pixel(mp->pos_, current_frame_->Pose());
                //kps_right.push_back(cv::Point2f(px[0], px[1]));
            }
            else {
                // use same pixel in left iamge
                kps_right.push_back(kp->position_.pt);
            }
        }

        std::vector<uchar> status;
        Mat error;
        cv::calcOpticalFlowPyrLK(
                current_frame_->left_img_, current_frame_->right_img_, kps_left,
                kps_right, status, error, cv::Size(11, 11), 3,
                cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
                                 0.01),
                cv::OPTFLOW_USE_INITIAL_FLOW);

        int num_good_pts = 0;
        for (size_t i = 0; i < status.size(); ++i) {
            if (status[i]) {
                cv::KeyPoint kp(kps_right[i], 7);
                Feature::Ptr feat(new Feature(current_frame_, kp));
                feat->is_on_left_image_ = false;
                current_frame_->features_right_.push_back(feat);
                num_good_pts++;
            }
            else {
                current_frame_->features_right_.push_back(nullptr);
            }
        }
        LOG(INFO) << "Find " << num_good_pts << " in the right image.";
        return num_good_pts;
    }



    // Relocalization after tracking loss — currently a stub that only logs
    // and reports success.
    bool Frontend::Reset() {
        LOG(INFO) << "Reset is not implemented. ";
        return true;
    }

}  // namespace myslam