/*
 * Copyright 2025 The Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *@file    : extractor.h
 *@brief   : feature extractor
 *@authors : zhanglei, zhanglei_723@126.com
 *@version : v1.0
 *@date    : 2025/10/19
 *
 */

#ifndef EXTRACTOR_H_
#define EXTRACTOR_H_

#include <functional>
#include <utility>
#include <vector>

#include <opencv2/opencv.hpp>

#include "common/type.h"
#include "feature/feature.h"

namespace feature {

/**
 * @brief Helper class to do OpenCV parallelization
 *
 * This is a utility class required to build with older version of opencv
 * On newer versions this doesn't seem to be needed, but here we just use it to ensure we can work for more opencv version.
 * https://answers.opencv.org/question/65800/how-to-use-lambda-as-a-parameter-to-parallel_for_/?answer=130691#post-id-130691
 */
class LambdaBody : public cv::ParallelLoopBody {
public:
  /**
   * @brief Wraps an arbitrary callable so it can be passed to cv::parallel_for_.
   * @param body Callable invoked with the sub-range each worker thread processes.
   *
   * Taking the callable by value and moving it into the member avoids the
   * default-construct-then-assign of the original (callers are unaffected).
   */
  explicit LambdaBody(std::function<void(const cv::Range &)> body) : _body(std::move(body)) {}

  /// Invoked by OpenCV's parallel framework once per assigned sub-range.
  void operator()(const cv::Range &range) const override { _body(range); }

private:
  // Stored callable executed for each range handed out by cv::parallel_for_.
  std::function<void(const cv::Range &)> _body;
};

/**
 * @brief Grid-based feature extraction with ORB descriptors
 *
 * Here we extract FAST features over an image grid so detections are spread
 * across the whole image, and compute ORB descriptors for them.
 * We use ORB as we have found it is the fastest when computing descriptors.
 * Candidate matches can later be rejected with a kNN ratio test (see knn_ratio).
 */
class Extractor {
public:
  /**
   * @brief Desired pre-processing image method applied before extraction.
   */
  enum HistogramMethod { NONE, HISTOGRAM, CLAHE };

  Extractor() = default;

  /**
   * @brief Extract features from a single monocular image.
   * @param image Input image we will extract features from
   * @return Vector of extracted feature points
   */
  std::vector<FeaturePoint> ExtractFeature(const cv::Mat &image);

protected:
  /**
   * @brief This function will perform grid extraction using FAST.
   * @param img Image we will do FAST extraction on
   * @param mask Region of the image we do not want to extract features in (255 = do not detect features)
   * @param pts Vector of extracted keypoints we will return
   * @param num_features Max number of features we want to extract
   * @param nonmax_suppression If FAST should perform non-max suppression (true normally)
   *
   * The image is divided into a grid_x by grid_y grid (see the members below) and FAST
   * features are extracted from each cell with the member `threshold`, so the best
   * responses from every cell are returned instead of clustering in one region.
   */
  void PerformGriding(const cv::Mat &img, const cv::Mat &mask, std::vector<cv::KeyPoint> &pts, int num_features,
                      bool nonmax_suppression = true);

  // Our orb extractor
  cv::Ptr<cv::ORB> orb = cv::ORB::create();

  // Parameters for our FAST grid detector
  int threshold = 20;  // FAST threshold parameter (10-20 is a good range normally)
  int grid_x = 5;      // number of grid cells in the x-direction / u-direction
  int grid_y = 5;      // number of grid cells in the y-direction / v-direction

  // Minimum pixel distance to be "far away enough" to be a different extracted feature
  int min_px_dist = 10;

  // The ratio between two kNN matches; if that ratio is larger than this threshold
  // then the two features are too close, so should be considered an ambiguous/bad match
  double knn_ratio = 0.85;

  // Histogram pre-processing method applied to the input image before extraction.
  HistogramMethod histmethod_ = HistogramMethod::CLAHE;
};

} // namespace feature

#endif  // EXTRACTOR_H_
