
#include "feature/feature_matcher.h"

using namespace feature;

// Constructs the matcher around an existing feature database.
// The default search window is +-60 pixels around a feature's previous
// image location (see DescriptorMatch / OrbDescriptorMatch).
FeatureMatcher::FeatureMatcher(std::shared_ptr<FeatureDatabase> feature_database_ptr)
    : feature_database_ptr_(feature_database_ptr) {
  window_size_ = 60;
}

/**
 * @brief Tracks features between the previous and the current frame by
 *        matching floating-point descriptors inside a square search window.
 *
 * For every feature of the previous frame, the best and second-best
 * descriptor distances among current-frame candidates within
 * +-window_size_ pixels are found. A candidate is accepted when the best
 * distance is below an absolute threshold (1.0) and passes a ratio test
 * against the second-best distance. Surviving matches are verified with a
 * fundamental-matrix RANSAC before the database is updated.
 *
 * @param feature_id     [in/out] Running track-id counter; incremented for
 *                       every current feature that starts a new track.
 * @param camera_id      Camera the features belong to.
 * @param frame_id       Id of the current frame.
 * @param last_frame_id  Id of the previous frame to match against.
 * @param feature_data   Detected features of the current frame.
 */
void FeatureMatcher::DescriptorMatch(uint64_t& feature_id, const uint8_t& camera_id, const uint64_t& frame_id, const uint64_t& last_frame_id, const std::vector<FeaturePoint>& feature_data) {
	// Recover the previous frame's features from the database.
	std::map<uint64_t, FeaturePoint> last_feature_point = feature_database_ptr_->GetOneFrameFeaturePoints(camera_id, last_frame_id);

	// Accepted matches as (current feature index, last feature id), plus the
	// paired normalized points used for the fundamental-matrix check.
	std::vector<std::pair<uint64_t, uint64_t>> matched_point_idx;
    std::vector<Vector3d> last_point, curr_point;
	for(const auto& last_feature : last_feature_point) {
		const Vector2d& last_pt = last_feature.second.image_point;
		// Square search window centered on the feature's previous location.
		const auto curr_ptx_lower_boundary = last_pt[0] - window_size_;
        const auto curr_ptx_upper_boundary = last_pt[0] + window_size_;
        const auto curr_pty_lower_boundary = last_pt[1] - window_size_;
        const auto curr_pty_upper_boundary = last_pt[1] + window_size_;
        double best_dist = 1000.0;
        double second_best_dist = 1000.0;
        int64_t best_idx_2 = -1;  // index of the best candidate, -1 = none

        const Vector128d& last_descriptor = last_feature.second.descriptor;

        uint64_t idx_2 = 0;
		for(const auto& curr_feature : feature_data) {
			const Vector2d& curr_pt = curr_feature.image_point;
			++idx_2;  // (idx_2 - 1) is curr_feature's index in feature_data
			// Discard features outside the search window.
            if (curr_pt[0] > curr_ptx_upper_boundary || curr_pt[0] < curr_ptx_lower_boundary || 
            	curr_pt[1] < curr_pty_lower_boundary || curr_pt[1] > curr_pty_upper_boundary) {
                continue;
            }
            const Vector128d& curr_descriptor = curr_feature.descriptor;

            double dist = ComputeDescriptorDistance(last_descriptor, curr_descriptor);

            // Track the two smallest distances for the ratio test below.
            if(dist < best_dist) {
            	second_best_dist = best_dist;
                best_dist = dist;
                best_idx_2 = idx_2-1;
            } else if (dist < second_best_dist) {
                second_best_dist = dist;
            }
		}

        // No candidate inside the window.
        if(best_idx_2 < 0) {
            continue;
        }

        // Absolute descriptor-distance threshold.
        if (best_dist  >= 1.0) {
            continue;
        }
        // Ratio test: best must be clearly better than the runner-up.
        if (second_best_dist * 0.995 < best_dist) {
            continue;
        }

        // Keep matching one-to-one: reject the match if this current feature
        // was already claimed by an earlier last-frame feature.
        // NOTE: the previous code compared the full (curr, last) pair, which
        // can never repeat because every last-feature id is unique per
        // iteration, so duplicates were never actually filtered.
        auto iter = std::find_if(matched_point_idx.begin(), matched_point_idx.end(),
                                 [&](const std::pair<uint64_t, uint64_t>& m) {
                                     return m.first == static_cast<uint64_t>(best_idx_2);
                                 });
        if (iter != matched_point_idx.end()) {
            continue;
        }

        // Store (current feature index, last feature id).
        matched_point_idx.push_back({(uint64_t)best_idx_2, last_feature.first});
        last_point.push_back(last_feature.second.normalized_point);
        curr_point.push_back(feature_data[best_idx_2].normalized_point);
    }

	if(matched_point_idx.empty()) {
		return;
	}
    // Geometric verification: RANSAC fundamental matrix on normalized points.
    std::vector<uchar> inliers;
    FilterByFundamentalMatrix(inliers, last_point, curr_point);

    // current feature index -> matched last feature id (inliers only).
    std::map<uint64_t, uint64_t> inliers_ids;
    for(uint32_t i = 0; i < inliers.size(); ++i) {
        if(!inliers[i]) {
            continue;
        }
        const std::pair<uint64_t, uint64_t>& id_pair = matched_point_idx[i];
        inliers_ids[id_pair.first] = id_pair.second;
    }

    // Matched features continue their existing track (reuse the last frame's
    // feature id); unmatched features open a new track with a fresh id.
    uint64_t current_idx = 0;
    for(const feature::FeaturePoint& feat_point : feature_data) {
      auto iter_inlier = inliers_ids.find(current_idx);
      if(iter_inlier != inliers_ids.end()) {
         feature_database_ptr_->UpdateFeatureDatabase(camera_id, frame_id, iter_inlier->second, feat_point);
      } else {
         ++feature_id;
         feature_database_ptr_->UpdateFeatureDatabase(camera_id, frame_id, feature_id, feat_point);
      }
      ++current_idx;
    }
}

void FeatureMatcher::OrbDescriptorMatch(uint64_t& feature_id, const uint8_t& camera_id, const uint64_t& frame_id, const uint64_t& last_frame_id, const std::vector<FeaturePoint>& feature_data) {
    // recovery last frame 
    std::map<uint64_t, FeaturePoint> last_feature_point = feature_database_ptr_->GetOneFrameFeaturePoints(camera_id, last_frame_id);
    // LOG(INFO) << " last_feature_point ===== : " << last_feature_point.size() << std::endl;
    
    std::vector<std::pair<uint64_t, uint64_t>> matched_point_idx;
    std::vector<Vector3d> last_point, curr_point;
    for(const auto& last_feature : last_feature_point) {
        const Vector2d& last_pt = last_feature.second.image_point;
        const auto curr_ptx_lower_boundary = last_pt[0] - window_size_;
        const auto curr_ptx_upper_boundary = last_pt[0] + window_size_;
        const auto curr_pty_lower_boundary = last_pt[1] - window_size_;
        const auto curr_pty_upper_boundary = last_pt[1] + window_size_;
        unsigned int best_dist = 1000;
        unsigned int second_best_dist = 1000;
        int64_t best_idx_2 = -1;

        const auto& last_descriptor = last_feature.second.descriptor_mat;

        // LOG(INFO) << " last_descriptor ===== : " << last_descriptor.transpose() << std::endl;

        uint64_t idx_2 = 0;
        for(const auto& curr_feature : feature_data) {
            const Vector2d& curr_pt = curr_feature.image_point;
            ++idx_2;
            // discard features who is out of searched window size
            if (curr_pt[0] > curr_ptx_upper_boundary || curr_pt[0] < curr_ptx_lower_boundary || 
                curr_pt[1] < curr_pty_lower_boundary || curr_pt[1] > curr_pty_upper_boundary) {
                continue;
            }
            const auto& curr_descriptor = curr_feature.descriptor_mat;

            // LOG(INFO) << " curr_descriptor ===== : " << curr_descriptor.transpose() << std::endl;

            unsigned int dist = ComputeDescriptorDistance(last_descriptor, curr_descriptor);

            // LOG(INFO) << " dist ===== : " << dist << std::endl;

            if(dist < best_dist) {
                second_best_dist = best_dist;
                best_dist = dist;
                best_idx_2 = idx_2-1;
            } else if (dist < second_best_dist) {
                second_best_dist = dist;
            }
        }

        LOG(INFO) << " best_dist ===== : " << best_dist << std::endl;
        // LOG(INFO) << " second_best_dist ===== : " << second_best_dist << std::endl;

        if(best_idx_2 < 0) {
            continue;
        }

        if (best_dist  >= 75) {
            continue;
        }
        // ratio test
        if (second_best_dist * 0.995 < best_dist) {
            continue;
        }
        
        std::pair<uint64_t, uint64_t> target = std::make_pair(best_idx_2, last_feature.first);
        auto iter = std::find(matched_point_idx.begin(), matched_point_idx.end(), target);
        if (iter != matched_point_idx.end()) { 
            continue;
        }

        // matched_point_idx[last_feature.first] = (uint64_t)best_idx_2; // current_feature id
        // matched_point_idx.push_back({last_feature.first, (uint64_t)best_idx_2}); // current_feature id
        
        // current feature id and last feature id
        matched_point_idx.push_back({(uint64_t)best_idx_2, last_feature.first}); // current_feature id
        last_point.push_back(last_feature.second.normalized_point);
        curr_point.push_back(feature_data[best_idx_2].normalized_point);
    }
    
    LOG(INFO) << " matched_point_idx ===== : " << matched_point_idx.size() << std::endl;

    if(matched_point_idx.empty()) {
        return;
    }
    std::vector<uchar> inliers;
    FilterByFundamentalMatrix(inliers, last_point, curr_point);

    std::map<uint64_t, uint64_t> inliers_ids;
    for(uint32_t i = 0; i < inliers.size(); ++i) {

        LOG(INFO) << " inliers[i] ===== : " << (int)inliers[i] << std::endl;

        if(!inliers[i]) {
            continue;
        }
        std::pair<uint64_t, uint64_t> id_pair = matched_point_idx[i];
        inliers_ids[id_pair.first] = id_pair.second;
    }

    LOG(INFO) << " inliers_ids ===== : " << inliers_ids.size() << std::endl;
    
    // LOG(INFO) << " feature_id 0===== : " << feature_id << std::endl;

    uint64_t current_idx = 0;
    for(const feature::FeaturePoint& feat_point : feature_data) {
      auto iter_inlier = inliers_ids.find(current_idx);
      if(iter_inlier != inliers_ids.end()) {
         feature_database_ptr_->UpdateFeatureDatabase(camera_id, frame_id, iter_inlier->second, feat_point);
      } else {
         ++feature_id;
         feature_database_ptr_->UpdateFeatureDatabase(camera_id, frame_id, feature_id, feat_point);
      }
      ++current_idx;
    } 
}

void FeatureMatcher::FilterByFundamentalMatrix(std::vector<uchar>& inliers, const std::vector<Vector3d>& last_point, const std::vector<Vector3d>& curr_point) {
    std::vector<cv::Point2f> last_normalized_points;
    std::vector<cv::Point2f> curr_normalized_points;
    for(int i = 0; i < curr_point.size(); ++i) {
        Vector3d last_pt = last_point[i];
        Vector3d curr_pt = curr_point[i];

        last_normalized_points.emplace_back(last_pt[0], last_pt[1]);
        curr_normalized_points.emplace_back(curr_pt[0], curr_pt[1]);
    }

    cv::Mat fundamental_matrix = cv::findFundamentalMat(
        last_normalized_points, curr_normalized_points, 
        cv::FM_RANSAC, 
        3.0,  // param1
        0.99, // param2
        inliers
    );

    (void)fundamental_matrix;
    
    // LOG(INFO) << " fundamental_matrix ===== : " << fundamental_matrix << std::endl;
}

void FeatureMatcher::RobustDescriptorMatch(uint64_t& feature_id, const uint8_t& camera_id, const uint64_t& frame_id, const uint64_t& last_frame_id, const std::vector<FeaturePoint>& feature_data) {
        // recovery last frame 
    std::map<uint64_t, FeaturePoint> last_feature_point = feature_database_ptr_->GetOneFrameFeaturePoints(camera_id, last_frame_id);
    // LOG(INFO) << " last_feature_point ===== : " << last_feature_point.size() << std::endl;
    
    std::vector<Vector2d> last_undistorted_point, curr_undistorted_point;
    std::vector<cv::KeyPoint> last_point, curr_point;
    std::vector<cv::Mat> last_vec_desc, curr_vec_desc;

    for(const auto& last_feature : last_feature_point) {
        last_point.push_back(last_feature.second.keypoint);
        last_vec_desc.push_back(last_feature.second.descriptor_mat);
        last_undistorted_point.push_back(last_feature.second.undistorted_point);
    }

    cv::Mat last_descriptor, curr_descriptor;
    cv::vconcat(last_vec_desc, last_descriptor);   

    for(const auto& curr_feature : feature_data) {    
        curr_point.push_back(curr_feature.keypoint);
        curr_vec_desc.push_back(curr_feature.descriptor_mat);
        curr_undistorted_point.push_back(curr_feature.undistorted_point);
    }
    cv::vconcat(curr_vec_desc, curr_descriptor);   

    std::vector<cv::DMatch> matches;
    RobustMatch(last_point, curr_point, last_descriptor, curr_descriptor, last_undistorted_point, curr_undistorted_point, matches);
}


/**
 * @brief Robustly matches two descriptor sets: mutual 2-NN matching,
 *        ratio test, symmetry test, then fundamental-matrix RANSAC on the
 *        undistorted points.
 *
 * @param pts0    Keypoints of the previous frame (rows of desc0).
 * @param pts1    Keypoints of the current frame (rows of desc1).
 * @param desc0   Descriptor matrix of the previous frame.
 * @param desc1   Descriptor matrix of the current frame.
 * @param last_undistorted_point  Undistorted points aligned with pts0.
 * @param curr_undistorted_point  Undistorted points aligned with pts1.
 *        NOTE(review): passed by value in the declared signature — consider
 *        changing to const& in the header to avoid copying per call.
 * @param matches [out] Matches that survived all filters.
 */
void FeatureMatcher::RobustMatch(const std::vector<cv::KeyPoint> &pts0, const std::vector<cv::KeyPoint> &pts1, const cv::Mat &desc0,
                    const cv::Mat &desc1, const std::vector<Vector2d>& last_undistorted_point, const std::vector<Vector2d> curr_undistorted_point,
                    std::vector<cv::DMatch> &matches) {
  // Our 1to2 and 2to1 match vectors
  std::vector<std::vector<cv::DMatch>> matches0to1, matches1to0;

  // Match descriptors (return 2 nearest neighbours)
  matcher_->knnMatch(desc0, desc1, matches0to1, 2);
  matcher_->knnMatch(desc1, desc0, matches1to0, 2);

  // Do a ratio test for both matches
  RobustRatioTest(matches0to1);
  RobustRatioTest(matches1to0);

  // Finally do a symmetry test
  std::vector<cv::DMatch> matches_good;
  RobustSymmetryTest(matches0to1, matches1to0, matches_good);

  // Convert points into points for RANSAC
  std::vector<cv::Point2f> pts0_rsc, pts1_rsc;
  for (size_t i = 0; i < matches_good.size(); i++) {
    // Get our ids
    int index_pt0 = matches_good.at(i).queryIdx;
    int index_pt1 = matches_good.at(i).trainIdx;
    // Push back just the 2d point
    pts0_rsc.push_back(pts0[index_pt0].pt);
    pts1_rsc.push_back(pts1[index_pt1].pt);
  }

  // If we don't have enough points for ransac just return empty
  if (pts0_rsc.size() < 10)
    return;

  // Normalize these points, so we can then do ransac
  // We don't want to do ransac on distorted image uvs since the mapping is nonlinear
  std::vector<cv::Point2f> pts0_n, pts1_n;
  for (size_t i = 0; i < pts0_rsc.size(); i++) {
    Vector2d last_undist_pt = last_undistorted_point.at(matches_good.at(i).queryIdx);
    Vector2d curr_undist_pt = curr_undistorted_point.at(matches_good.at(i).trainIdx);
    cv::Point2f un_dist_pt0;
    un_dist_pt0.x = last_undist_pt[0];
    un_dist_pt0.y = last_undist_pt[1];
    cv::Point2f un_dist_pt1;
    un_dist_pt1.x = curr_undist_pt[0];
    un_dist_pt1.y = curr_undist_pt[1];
    pts0_n.push_back(un_dist_pt0);
    // BUGFIX: the current-frame point must go into pts1_n; previously
    // un_dist_pt0 was pushed into both sets, so RANSAC compared a point
    // set against itself and the epipolar check was meaningless.
    pts1_n.push_back(un_dist_pt1);
  }

  // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized cords)
  std::vector<uchar> mask_rsc;
  cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 3.0, 0.995, mask_rsc);

  // Loop through all good matches, and only append ones that have passed RANSAC
  for (size_t i = 0; i < matches_good.size(); i++) {
    // Skip if bad ransac id
    if (mask_rsc[i] != 1)
      continue;
    // Else, lets append this match to the return array!
    matches.push_back(matches_good.at(i));
  }

  // BUGFIX: was LOG(FATAL), which aborts the whole process on every call —
  // clearly leftover debugging. Demoted to an informational log.
  LOG(INFO) << " matches0to1 ===== : " << matches.size() << ", " << matches0to1.size() << " , " << matches1to0.size() << std::endl;
}

// Lowe-style ratio test: a candidate list survives only when it holds two
// nearest neighbours and the closest one is clearly better (distance ratio
// <= 0.65) than the runner-up; rejected lists are emptied in place.
void FeatureMatcher::RobustRatioTest(std::vector<std::vector<cv::DMatch>> &matches) {
  for (auto &candidates : matches) {
    // A valid entry needs both nearest neighbours present.
    if (candidates.size() <= 1) {
      candidates.clear();
      continue;
    }
    // Ambiguous match: best is not sufficiently better than second best.
    const float ratio = candidates[0].distance / candidates[1].distance;
    if (ratio > 0.65) {
      candidates.clear();
    }
  }
}

// Cross-check (symmetry) test: keep a forward match (image1 -> image2) only
// when the corresponding backward match (image2 -> image1) points straight
// back at it. Entries emptied by the ratio test are skipped.
void FeatureMatcher::RobustSymmetryTest(std::vector<std::vector<cv::DMatch>> &matches1, std::vector<std::vector<cv::DMatch>> &matches2,
                                           std::vector<cv::DMatch> &good_matches) {
  for (auto &forward : matches1) {
    // Removed by the ratio test (or never had 2 neighbours).
    if (forward.size() < 2)
      continue;
    for (auto &backward : matches2) {
      if (backward.size() < 2)
        continue;
      // Mutual nearest neighbours: each one's best match is the other.
      const bool mutual = forward[0].queryIdx == backward[0].trainIdx &&
                          backward[0].queryIdx == forward[0].trainIdx;
      if (mutual) {
        good_matches.emplace_back(cv::DMatch(forward[0].queryIdx, forward[0].trainIdx, forward[0].distance));
        // Move on to the next forward match.
        break;
      }
    }
  }
}