#include "feature/extractor.h"

using namespace feature;
/**
* @brief Compare keypoints based on their response value.
* @param first First keypoint
* @param second Second keypoint
*
* We want to have the keypoints with the highest values!
* See: https://stackoverflow.com/a/10910921
*/

static bool CompareResponse(cv::KeyPoint first, cv::KeyPoint second) { 
  return first.response > second.response; 
}

std::vector<FeaturePoint> Extractor::ExtractFeature(const cv::Mat& image) {

  std::vector<FeaturePoint> extracted;

  // Work on a single-channel image; convert if we were handed a color one
  cv::Mat mono = image;
  if (image.channels() != 1) {
    cv::cvtColor(image, mono, cv::COLOR_BGR2GRAY);
  }

  // Apply the configured histogram equalization method
  cv::Mat equalized, mask;
  switch (histmethod_) {
    case HistogramMethod::HISTOGRAM:
      cv::equalizeHist(mono, equalized);
      break;
    case HistogramMethod::CLAHE: {
      const double eq_clip_limit = 10.0;
      const cv::Size eq_win_size(8, 8);
      cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
      clahe->apply(mono, equalized);
      break;
    }
    default:
      equalized = mono;
      break;
  }

  // Detect keypoints spread evenly over the image via the extraction grid
  std::vector<cv::KeyPoint> raw_kpts;
  PerformGriding(equalized, mask, raw_kpts, 1000, true);
  if (raw_kpts.empty()) {
    return extracted;
  }

  // Compute an ORB descriptor for every detected keypoint
  cv::Mat descriptors;
  this->orb->compute(equalized, raw_kpts, descriptors);

  // Down-scaled occupancy grid: one cell per min_px_dist pixels. A keypoint
  // that lands in an already-claimed cell is rejected, which enforces a
  // minimum pixel spacing between the features we keep.
  const int grid_w = (int)((float)equalized.cols / (float)min_px_dist);
  const int grid_h = (int)((float)equalized.rows / (float)min_px_dist);
  cv::Mat occupancy = cv::Mat::zeros(cv::Size(grid_w, grid_h), CV_8UC1);

  // Append every keypoint that survives the bounds and spacing checks.
  // NOTE: if this were multi-threaded the acceptance order would become
  // non-deterministic; downstream selection of update features is id-ordered
  // (oldest / smallest id first), so iteration order matters here.
  for (size_t idx = 0; idx < raw_kpts.size(); idx++) {
    const cv::KeyPoint &kp = raw_kpts.at(idx);
    const int px = (int)kp.pt.x;
    const int py = (int)kp.pt.y;
    const int cx = (int)(kp.pt.x / (float)min_px_dist);
    const int cy = (int)(kp.pt.y / (float)min_px_dist);

    // Reject anything outside the image or outside the occupancy grid
    const bool inside_grid = cx >= 0 && cx < grid_w && cy >= 0 && cy < grid_h;
    const bool inside_img = px >= 0 && px < equalized.cols && py >= 0 && py < equalized.rows;
    if (!inside_grid || !inside_img) {
      continue;
    }

    // Reject if another accepted feature already claimed this cell
    if (occupancy.at<uint8_t>(cy, cx) > 127) {
      continue;
    }

    // Keep this keypoint together with its descriptor row
    FeaturePoint fp;
    fp.image_point << kp.pt.x, kp.pt.y;
    fp.keypoint = kp;
    fp.descriptor_mat = descriptors.row((int)idx);
    extracted.push_back(fp);

    // Claim the cell so nearby keypoints are skipped from now on
    occupancy.at<uint8_t>(cy, cx) = 255;
  }

  return extracted;
}

/**
 * @brief Extract FAST keypoints evenly distributed over an image grid.
 * @param img Input single-channel image to detect features in.
 * @param mask Optional mask; pixels with value > 127 are rejected (empty = no mask).
 * @param pts Output vector that detected (sub-pixel refined) keypoints are appended to.
 * @param num_features Total number of features desired across the whole image.
 * @param nonmax_suppression Whether cv::FAST applies non-maximal suppression.
 *
 * The image is split into grid_x * grid_y cells, FAST runs per cell in
 * parallel, and the strongest responses of each cell are kept. All surviving
 * points are then refined with cv::cornerSubPix.
 */
void Extractor::PerformGriding(const cv::Mat &img, const cv::Mat &mask, std::vector<cv::KeyPoint> &pts, int num_features, bool nonmax_suppression) {
    // We want to have equally distributed features
    // NOTE: If we have more grids than number of total points, we calc the biggest grid we can do
    // NOTE: Thus if we extract 1 point per grid we have
    // NOTE:    -> 1 = num_features / (grid_x * grid_y))
    // NOTE:    -> grid_x = ratio * grid_y (keep the original grid ratio)
    // NOTE:    -> grid_y = sqrt(num_features / ratio)
    if (num_features < grid_x * grid_y) {
      double ratio = (double)grid_x / (double)grid_y;
      grid_y = std::ceil(std::sqrt(num_features / ratio));
      grid_x = std::ceil(grid_y * ratio);
    }
    int num_features_grid = (int)((double)num_features / (double)(grid_x * grid_y)) + 1;
    assert(grid_x > 0);
    assert(grid_y > 0);
    assert(num_features_grid > 0);

    // Calculate the size our extraction boxes should be
    int size_x = img.cols / grid_x;
    int size_y = img.rows / grid_y;

    // Make sure our sizes are not zero
    assert(size_x > 0);
    assert(size_y > 0);

    // Parallelize our 2d grid extraction!!
    // NOTE: integer division already floors, so no std::floor is needed here
    int ct_cols = img.cols / size_x;
    int ct_rows = img.rows / size_y;
    std::vector<std::vector<cv::KeyPoint>> collection(ct_cols * ct_rows);
    parallel_for_(cv::Range(0, ct_cols * ct_rows), LambdaBody([&](const cv::Range &range) {
                    for (int r = range.start; r < range.end; r++) {
                      // Calculate what cell xy value we are in
                      int x = r % ct_cols * size_x;
                      int y = r / ct_cols * size_y;

                      // Skip if we are out of bounds
                      if (x + size_x > img.cols || y + size_y > img.rows)
                        continue;

                      // Calculate where we should be extracting from
                      cv::Rect img_roi = cv::Rect(x, y, size_x, size_y);

                      // Extract FAST features for this part of the image
                      std::vector<cv::KeyPoint> pts_new;
                      cv::FAST(img(img_roi), pts_new, threshold, nonmax_suppression);

                      // Now lets get the top number from this (strongest response first)
                      std::sort(pts_new.begin(), pts_new.end(), CompareResponse);

                      // Append the "best" ones to our vector
                      // Note that we need to "correct" the point u,v since we extracted it in a ROI
                      // So we should append the location of that ROI in the image
                      for (size_t i = 0; i < (size_t)num_features_grid && i < pts_new.size(); i++) {

                        // Create keypoint
                        cv::KeyPoint pt_cor = pts_new.at(i);
                        pt_cor.pt.x += (float)x;
                        pt_cor.pt.y += (float)y;

                        // Reject if out of bounds (shouldn't be possible...)
                        // BUGFIX: the check must be >= cols/rows — valid pixel indices
                        // are 0..cols-1 / 0..rows-1, and the old `>` comparison let
                        // x == img.cols (or y == img.rows) through, which would make
                        // the mask.at access below read out of bounds
                        if ((int)pt_cor.pt.x < 0 || (int)pt_cor.pt.x >= img.cols || (int)pt_cor.pt.y < 0 || (int)pt_cor.pt.y >= img.rows)
                          continue;

                        // Check if it is in the mask region
                        // NOTE: mask has max value of 255 (white) if it should be removed
                        if (!mask.empty()) {
                          if (mask.at<uint8_t>((int)pt_cor.pt.y, (int)pt_cor.pt.x) > 127) {
                            continue;
                          }
                        }

                        collection.at(r).push_back(pt_cor);
                      }
                    }
                  }));

    // Combine all the collections into our single vector
    for (size_t r = 0; r < collection.size(); r++) {
      pts.insert(pts.end(), collection.at(r).begin(), collection.at(r).end());
    }

    // Return if no points
    if (pts.empty())
      return;

    // Sub-pixel refinement parameters
    cv::Size win_size = cv::Size(5, 5);
    cv::Size zero_zone = cv::Size(-1, -1);
    cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 0.001);

    // Get vector of points
    std::vector<cv::Point2f> pts_refined;
    for (size_t i = 0; i < pts.size(); i++) {
      pts_refined.push_back(pts.at(i).pt);
    }

    // Finally get sub-pixel for all extracted features
    // NOTE(review): cornerSubPix may nudge points slightly; refined locations are
    // not re-checked against the image bounds here — confirm downstream tolerates this
    cv::cornerSubPix(img, pts_refined, win_size, zero_zone, term_crit);

    // Save the refined points!
    for (size_t i = 0; i < pts.size(); i++) {
      pts.at(i).pt = pts_refined.at(i);
    }
}
