/*!
  @file feature_extractor.cpp
  @brief Extracts, matches, and triangulates stereo image features for visual odometry
  @author Kyohei Otsu <kyohei@kth.se>
  @date 2013
*/

#include <algorithm>
#include <cmath>
#include <fstream>
#include <limits>
#include <vector>

#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <visualization_msgs/Marker.h>

#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>

#include <boost/foreach.hpp>

#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <sensor_msgs/PointCloud2.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/ros/conversions.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/passthrough.h>

#include "mmo_vo/CameraMatrices.h"
#include "mmo_vo/DetectorInquiry.h"
#include "mmo_vo/marker_generator.h"

using namespace std;
using namespace mmo_vo;

namespace vo
{

//! Stereo feature extraction node
/*! Detects keypoints on time-synchronized stereo image pairs, matches them
    between the left/right views and across consecutive frames, triangulates
    stereo matches to 3D, and logs tracked 2D/3D correspondences to disk
    for downstream visual odometry. */
class FeatureExtractor {
 private:
  // typedefs
  typedef sensor_msgs::PointCloud2 PC2MSG;
  typedef pcl::PointXYZ POINT;
  typedef pcl::PointCloud<POINT> CLOUD;


  //! ROS node handler
  ros::NodeHandle nh_;
  //! ROS node handler in the private namespace
  ros::NodeHandle private_nh_;
  //! Image transport helper bound to nh_
  image_transport::ImageTransport it_;
  //! Image subscription (only relevant to the obsolete mono callback; currently unsubscribed)
  image_transport::CameraSubscriber img_sub_;
  //! Marker publisher
  ros::Publisher marker_pub_;
  //! Image publisher
  image_transport::CameraPublisher img_pub_;

  //! Time synchronized subscriber (left camera)
  message_filters::Subscriber<sensor_msgs::Image> imgL_sub_;
  //! Time synchronized subscriber (right camera)
  message_filters::Subscriber<sensor_msgs::Image> imgR_sub_;
  //! Time synchronizer pairing left/right frames by identical timestamp
  boost::shared_ptr< message_filters::TimeSynchronizer<sensor_msgs::Image, sensor_msgs::Image> > sync_;

  //! Camera intrinsic matrix (3x3, loaded from yaml)
  cv::Mat A_;
  //! Camera distortion matrix (loaded from yaml)
  cv::Mat D_;
  //! Camera baseline [same unit as the yaml value]
  double baseline_;
  //! Camera sensor size (loaded from yaml but currently unused)
  double sensor_len_;

  //! keypoints output directory (empty when the parameter is not set)
  std::string kp_out_dir_;
  //! frame counter, incremented once per stereo callback
  int frame_cnt;


  //--- private functions

  //! update previous keypoints and descriptor
  void filter_keypoints(std::vector<cv::KeyPoint> &kp, cv::Mat &desc, std::vector<int> &indices, std::vector<cv::KeyPoint> &kp_new, cv::Ptr<cv::Mat> &desc_new);

  //! draw line between keypoints
  void draw_line_between_keypoints(cv::Mat &src, std::vector<cv::KeyPoint> &kp1, std::vector<cv::KeyPoint> &kp2, std::vector<cv::DMatch> &matches, cv::Mat &dst, cv::Scalar color = cv::Scalar(0, 0, 255), float scale = 1.0);

  //! show scaled image
  void imshow(const char* winname, cv::Mat &img, float scale);

 protected:
  //! Pointer to feature detector
  cv::Ptr<cv::FeatureDetector> detector;
  //! Pointer to descriptor extractor
  cv::Ptr<cv::DescriptorExtractor> descriptor;
  //! Pointer to descriptor matcher
  cv::Ptr<cv::DescriptorMatcher> matcher;

  //! Compute matches between descriptor sets
  int find_good_matches(cv::Mat &query, cv::Mat &train, std::vector<cv::DMatch> &matches);

  //! Compute 3D position of features
  bool stereo_triangulation(std::vector< std::vector<cv::KeyPoint> > &vkp, std::vector<cv::DMatch> &matches, CLOUD::Ptr &cloud, std::vector<int> &indices);

  //! radius filter for tracking
  int filter_radius(std::vector<cv::KeyPoint> &kp1, std::vector<cv::KeyPoint> &kp2, std::vector<cv::DMatch> &matches, float radius=50);

 public:
  //! Constructor: wires up pub/sub and loads camera parameters from yaml.
  //! Publishes annotated images on "image_out" and markers on "marker_out";
  //! subscribes to the synchronized "imageL_in"/"imageR_in" pair.
  FeatureExtractor(ros::NodeHandle &nh) :
      nh_(nh), private_nh_("~"), it_(nh),
      // defaults so the members are never read uninitialized when the yaml
      // file is missing or unreadable
      baseline_(0.), sensor_len_(0.), frame_cnt(0),
      detector( cv::FeatureDetector::create("GridHARRIS") ),
      descriptor( cv::DescriptorExtractor::create("BRIEF") ),
      matcher( cv::DescriptorMatcher::create("BruteForce-Hamming") )
      //matcher( cv::DescriptorMatcher::create("FlannBased") )
  {
    // initializing pub/sub, server
    //img_sub_ = it_.subscribeCamera(nh_.resolveName("image_in"), 5, &FeatureExtractor::image_cb, this);
    img_pub_ = it_.advertiseCamera(nh_.resolveName("image_out"), 5);
    marker_pub_ = nh_.advertise<visualization_msgs::Marker>(nh_.resolveName("marker_out"), 10);
    imgL_sub_.subscribe(nh_, nh_.resolveName("imageL_in"), 5);
    imgR_sub_.subscribe(nh_, nh_.resolveName("imageR_in"), 5);
    sync_.reset(new message_filters::TimeSynchronizer<sensor_msgs::Image, sensor_msgs::Image>(imgL_sub_, imgR_sub_, 10));
    sync_->registerCallback(boost::bind(&FeatureExtractor::stereo_image_cb, this, _1, _2));

    // initializing optional parameters
    // e.g.) private_nh_.param<TYPE>("PARAM_NAME", PARAM_VAR, DEFAULT_VALUE);
    std::string camera_yaml;
    private_nh_.param<std::string>("camera_yaml", camera_yaml, "");
    if(camera_yaml.empty()) ROS_ERROR("Please specify 'camera_yaml' parameter.");
    private_nh_.param<std::string>("keypoints_out_dir", kp_out_dir_, "");

    // initializing member variables
    cv::FileStorage camera_yaml_fs(camera_yaml, cv::FileStorage::READ);
    if(!camera_yaml_fs.isOpened())
    {
      ROS_ERROR("Camera yaml file open failed");
    }
    else
    {
      camera_yaml_fs["A"] >> A_;
      camera_yaml_fs["D"] >> D_;
      camera_yaml_fs["baseline"] >> baseline_;
      camera_yaml_fs["sensor_len"] >> sensor_len_;
      ROS_INFO_STREAM("Reading camera params");
      ROS_INFO_STREAM("--> A: " << A_);
      ROS_INFO_STREAM("--> D: " << D_);
      ROS_INFO_STREAM("--> baseline: " << baseline_);
      ROS_INFO_STREAM("--> sensor_len: " << sensor_len_); // not used
    }
  }

  //! Destructor
  ~FeatureExtractor()
  {
  }

  //! image callback (obsolete mono path)
  void image_cb(const sensor_msgs::Image::ConstPtr &msg, const sensor_msgs::CameraInfo::ConstPtr &info);
  //! stereo image callback (main processing path)
  void stereo_image_cb(const sensor_msgs::Image::ConstPtr &, const sensor_msgs::Image::ConstPtr &);

};

//! Obsolete mono-image callback, superseded by stereo_image_cb().
/*! Queries the active detector via the "detector_inquiry" service, detects
    HARRIS keypoints on the incoming image, draws them, and republishes the
    annotated image on img_pub_. */
void FeatureExtractor::image_cb(const sensor_msgs::Image::ConstPtr &msg, const sensor_msgs::CameraInfo::ConstPtr &info)
{
  //-- ask the current algorithm
  ROS_INFO("Image callback called. Asking current algorithm..");
  ros::ServiceClient detector_client = nh_.serviceClient<DetectorInquiry>(nh_.resolveName("detector_inquiry"));
  DetectorInquiry inquiry;
  inquiry.request.id = inquiry.request.INQUIRY;
  static int prev_id = -1;  // last reported id; log only on change
  if(detector_client.call(inquiry))
  {
    if(prev_id != inquiry.response.id)
    {
      ROS_INFO_STREAM("Current client: " << inquiry.response.id);
      prev_id = inquiry.response.id;
    }
  }
  else
  {
    ROS_ERROR("Detector inquiry failed");
  }

  //-- convert image type
  cv_bridge::CvImage::Ptr cv_ptr;
  try
  {
    cv_ptr = cv_bridge::toCvCopy(msg, "bgr8");
  }
  catch(cv_bridge::Exception &e)
  {
    // drop this frame instead of aborting the whole node (was ROS_BREAK),
    // consistent with the error handling in stereo_image_cb()
    ROS_ERROR("cv_bridge: conversion error");
    return;
  }

  //-- preparing objects
  // Local detector deliberately shadows the member: this obsolete path
  // always uses a plain (non-grid) HARRIS detector.
  cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("HARRIS");

  //-- set parameters
  switch(inquiry.response.id)
  {
    case 0:
    //case inquiry.response.FEATURE_HARRIS:
      detector->set("nfeatures", 1000);
      detector->set("qualityLevel", 0.01);
      detector->set("minDistance", 5.0);
      //detector->set("blockSize", 7);
      detector->set("useHarrisDetector", true);
      detector->set("k", 0.04);
      break;
    default:
      break;
  }

  //-- extracting features
  vector<cv::KeyPoint> keypoints;
  detector->detect(cv_ptr->image, keypoints);

  ROS_INFO_STREAM("Num of features: " << keypoints.size());

  //-- display: draw each keypoint as a filled blue circle, then republish
  {
    BOOST_FOREACH(const cv::KeyPoint &kp, keypoints)
    {
      cv::circle(cv_ptr->image, kp.pt, 3, cv::Scalar(255, 0, 0), -1);
    }
    img_pub_.publish(cv_ptr->toImageMsg(), info);
    /* not working. will be fixed later..?
       MarkerGenerator mg;
       visualization_msgs::Marker::Ptr marker = mg.generate_feature_pts(keypoints);
       marker->header.stamp = msg->header.stamp; 
       marker->header.frame_id = msg->header.frame_id;
       marker_pub_.publish(marker);
     */
  }
}

void FeatureExtractor::stereo_image_cb(const sensor_msgs::Image::ConstPtr &imgL_msg, const sensor_msgs::Image::ConstPtr &imgR_msg)
{
  //! Main callback for a time-synchronized stereo pair.
  /*! Pipeline: (1) ask the "detector_inquiry" service which detector is
      active and re-apply parameters on change, (2) detect/describe features
      on both images (lower 3/4 only, via mask), (3) match left vs right and
      triangulate to a 3D cloud, (4) match surviving left keypoints against
      the previous frame (tracking), (5) append tracked 2D/3D pairs to a
      per-frame TSV file under kp_out_dir_. */
  ROS_INFO("Frame: %05d", frame_cnt);
  //-- ask the current algorithm
  ROS_INFO("Image callback called. Asking current algorithm..");
  ros::ServiceClient detector_client = nh_.serviceClient<DetectorInquiry>(nh_.resolveName("detector_inquiry"));
  DetectorInquiry inquiry;
  inquiry.request.id = inquiry.request.INQUIRY;
  // static: remembers the detector id across callbacks so parameters are
  // only re-applied when the service reports a change
  static int prev_id = -1;
  bool detector_changed = false;
  if(detector_client.call(inquiry))
  {
    if(prev_id != inquiry.response.id)
    {
      ROS_INFO_STREAM("Current crient: " << inquiry.response.id);
      prev_id = inquiry.response.id;
      detector_changed = true;
    }
  }
  else
  {
    // on failure the previously configured detector keeps being used
    ROS_ERROR("Detector inquiry failed");
  }

  ROS_INFO("Extracting feature points");

  //-- convert image type 
  cv_bridge::CvImage::Ptr cv_ptrL;
  cv_bridge::CvImage::Ptr cv_ptrR;
  try
  {
    cv_ptrL = cv_bridge::toCvCopy(imgL_msg, "bgr8");
    cv_ptrR = cv_bridge::toCvCopy(imgR_msg, "bgr8");
  }
  catch(cv_bridge::Exception &e)
  {
    // skip this frame on conversion failure
    ROS_ERROR("cv_bridge: conversion error");
    return;
  }
  // images[0] = left, images[1] = right — this ordering is assumed below
  vector<cv::Mat> images;
  images.push_back(cv_ptrL->image);
  images.push_back(cv_ptrR->image);
  // mask out the top quarter of each image so no features are detected
  // there (presumably sky/far field — TODO confirm tuning rationale)
  vector<cv::Mat> mask_images;
  cv::Mat mask = cv::Mat::zeros(images[0].size(), CV_8UC1);
  cv::rectangle(mask, cv::Point(0, 0.25*mask.rows), cv::Point(mask.cols, mask.rows), 255, -1);
  mask_images.push_back(mask);
  mask_images.push_back(mask);


  //-- set parameters
  // only re-applied when the inquiry service reported a different id
  if(detector_changed)
  {
    switch(inquiry.response.id)
    {
      case 0: // harris
        //case inquiry.response.FEATURE_HARRIS:
        detector->set("nfeatures", 1000);
        detector->set("qualityLevel", 0.01);
        detector->set("minDistance", 5.0);
        //detector->set("blockSize", 7);
        detector->set("useHarrisDetector", true);
        detector->set("k", 0.04);
        break;
      case 3: // SIFT
        // NOTE(review): SIFT case currently changes nothing (creation line
        // commented out) — the previous detector keeps running
        ROS_INFO("SIFT parameters");
        //detector = cv::FeatureDetector::create("GridSIFT");
        break;
      default:
        break;
    }
    ROS_INFO("detector updated");
  }

  //-- Depth measurement and Feature Tracking

  // extract features
  // keypoints on current frames (left and right)
  vector< vector<cv::KeyPoint> > vkp;
  vector<cv::Mat> vdesc;
  CLOUD::Ptr cloud(new CLOUD);
  // keypoints on previous frame (left); function-statics carry the previous
  // frame's state across callbacks (empty on the very first frame, so the
  // tracking match below yields no matches then)
  static vector<cv::KeyPoint> kp_prL;
  static cv::Ptr<cv::Mat> desc_prL(new cv::Mat);
  static CLOUD::Ptr cloud_pr(new CLOUD);

  // batch overloads: detect/describe on both images at once
  detector->detect(images, vkp, mask_images);
  descriptor->compute(images, vkp, vdesc);
  ROS_INFO_STREAM("--> Num of feature left : " << vkp[0].size());
  ROS_INFO_STREAM("--> Num of feature right: " << vkp[1].size());

  // find matches (stereo): left descriptors are the query, right the train
  vector<cv::DMatch> stereo_matches;
  find_good_matches(vdesc[0], vdesc[1], stereo_matches);
  ROS_INFO_STREAM("--> --> Num of stereo matches: " << stereo_matches.size());

  // compute 3D position of features; indicesL are the left-keypoint indices
  // that survived the depth passthrough filter
  vector<int> indicesL;
  vector<cv::KeyPoint> kp_filteredL;
  cv::Ptr<cv::Mat> desc_filteredL;
  if(!stereo_triangulation(vkp, stereo_matches, cloud, indicesL)) return;
  filter_keypoints(vkp[0], vdesc[0], indicesL, kp_filteredL, desc_filteredL);
  ROS_INFO_STREAM("--> Num of filtered points: " << kp_filteredL.size());

  // find matches (track): current filtered left frame vs previous left frame
  vector<cv::DMatch> track_matches;
  find_good_matches(*desc_filteredL, *desc_prL, track_matches);
  filter_radius(kp_filteredL, kp_prL, track_matches, 200); // no sense
  ROS_INFO_STREAM("--> Num of tracking: " << track_matches.size());

  // write tracked points to file: one row per track with previous 2D/3D and
  // current 2D/3D coordinates, tab-separated
  // NOTE(review): assumes kp_out_dir_ is set and writable; ofstream errors
  // are silently ignored — TODO verify
  char filename[128];
  sprintf(filename, "%s/q1Q1q2Q2_L_%05d.tsv", kp_out_dir_.c_str(), frame_cnt++);
  ofstream ofs(filename);
  BOOST_FOREACH(cv::DMatch match, track_matches)
  {
    int ci = match.queryIdx;  // index into current frame arrays
    int pi = match.trainIdx;  // index into previous frame arrays
    std::string TAB("\t");
    ofs << kp_prL[pi].pt.x << TAB << kp_prL[pi].pt.y << TAB
        << cloud_pr->points[pi].x << TAB << cloud_pr->points[pi].y << TAB << cloud_pr->points[pi].z << TAB
        << kp_filteredL[ci].pt.x << TAB << kp_filteredL[ci].pt.y  << TAB
        << cloud->points[ci].x << TAB << cloud->points[ci].y << TAB << cloud->points[ci].z
        << endl;
  }

  // publish only tracked features (disabled, unfinished)
  PC2MSG::Ptr src_cloud(new PC2MSG);
  PC2MSG::Ptr dst_cloud(new PC2MSG);
#if 0
  src_cloud->width = track_matches.size();
  src_cloud->height = 1;
  src_cloud->is_dense = false;
  src_cloud->points.resize(src_cloud->width * src_cloud->height);
  dst_cloud->width = track_matches.size();
  dst_cloud->height = 1;
  dst_cloud->is_dense = false;
  dst_cloud->points.resize(dst_cloud->width * dst_cloud->height);
  for(int i = 0; i < (int)track_matches.size(); i++)
  {
    cv::Ptr<cv::DMatch> m(&track_matches[i]);
    src_cloud->points[i] = cloud_pr->points[m->trainIdx];
    dst_cloud->points[i] = cloud->points[m->queryIdx];
  }
#endif
  // TODO
  
  //-- display (all debug visualization currently compiled out)
  {
#if 0
    cv::Mat track_disp = images[0].clone();
    draw_line_between_keypoints(track_disp, kp_filteredL, kp_prL, track_matches, track_disp);
    imshow("track", track_disp, 0.25);

    cv::Mat img_matches1;
    cv::drawMatches(images[0], kp_prL, images[0], kp_filteredL,
        track_matches, img_matches1, cv::Scalar::all(-1), cv::Scalar::all(-1),
        vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    imshow("track_match", img_matches1, 0.25);

    cv::Mat img_matches;
    cv::drawMatches(images[0], vkp[0], images[1], vkp[1],
        stereo_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
        vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    imshow("match", img_matches, 0.25);
    cv::waitKey(30);
#endif
#if 0
    vector<cv::KeyPoint>::iterator itr = keypoints.begin();
    while(itr != keypoints.end())
    {
      cv::circle(cv_ptr->image, itr->pt, 3, cv::Scalar(255, 0, 0), -1);
      itr++;
    }
    img_pub_.publish(cv_ptr->toImageMsg(), info);
#endif
    /* not working. will be fixed later..?
       MarkerGenerator mg;
       visualization_msgs::Marker::Ptr marker = mg.generate_feature_pts(keypoints);
       marker->header.stamp = msg->header.stamp; 
       marker->header.frame_id = msg->header.frame_id;
       marker_pub_.publish(marker);
     */
  }

  // update previous keypoints: keep the current (filtered) left frame as the
  // reference for the next callback's tracking step
  kp_prL = kp_filteredL;
  desc_prL = desc_filteredL;
  cloud_pr = cloud;

}

//! Match query descriptors against train descriptors, keeping each train
//! descriptor's single best match (mutual-consistency-style pruning).
/*! @param query   descriptor matrix, one row per keypoint (query side)
    @param train   descriptor matrix, one row per keypoint (train side)
    @param matches output: pruned matches (cleared first)
    @return number of surviving matches, or -1 if either side is empty */
int FeatureExtractor::find_good_matches(cv::Mat &query, cv::Mat &train, std::vector<cv::DMatch> &matches)
{
  if(query.rows == 0 || train.rows == 0) return -1;
  matches.clear();

  std::vector<cv::DMatch> tmp_matches;
  matcher->match(query, train, tmp_matches);

  //-- consistency check: several query descriptors may claim the same train
  //   descriptor; keep only the closest claimant for each train index.
  //   (std::vector replaces the previous non-standard VLAs, and an index
  //   sentinel of -1 replaces the float-max distance sentinel.)
  std::vector<int> best_match(train.rows, -1);  // index into tmp_matches
  std::vector<double> best_dist(train.rows, std::numeric_limits<double>::max());
  for(int i = 0; i < (int)tmp_matches.size(); i++)
  {
    int idx = tmp_matches[i].trainIdx;
    if(best_dist[idx] > tmp_matches[i].distance)
    {
      best_match[idx] = i;
      best_dist[idx] = tmp_matches[i].distance;
    }
  }
  for(int i = 0; i < train.rows; i++)
  {
    if(best_match[i] >= 0) matches.push_back(tmp_matches[ best_match[i] ]);
  }

  return (int)matches.size();
}

//! Triangulate stereo matches into a 3D point cloud.
/*! Depth is computed from horizontal disparity assuming rectified images:
    s = fx * baseline / disparity, X = s * A^-1 * [u v 1]^T.
    Points failing the epipolar check or the depth passthrough filter
    (z outside [0.5, 1000]) are dropped.
    @param vkp     keypoints; vkp[0] = left image, vkp[1] = right image
    @param matches stereo matches (queryIdx -> left, trainIdx -> right)
    @param cloud   output: filtered 3D points in the left camera frame
    @param indices output: left-keypoint indices of the surviving points
    @return always true (kept for interface compatibility) */
bool FeatureExtractor::stereo_triangulation(std::vector< std::vector<cv::KeyPoint> > &vkp, std::vector<cv::DMatch> &matches, CLOUD::Ptr &cloud, std::vector<int> &indices)
{
  //-- prepare pcl object
  CLOUD::Ptr cloud_ptr(new CLOUD);
  cloud_ptr->width = matches.size();
  cloud_ptr->height = 1;
  cloud_ptr->is_dense = false;
  cloud_ptr->points.resize(cloud_ptr->width * cloud_ptr->height);

  //-- calculate depth
  std::vector<cv::KeyPoint> &lkp = vkp[0];
  std::vector<cv::KeyPoint> &rkp = vkp[1];
  // loop-invariant: hoist the matrix inverse and fx*baseline out of the loop
  const cv::Mat A_inv = A_.inv();
  const double fx_baseline = A_.at<double>(0, 0) * baseline_;
  for(int i = 0; i < (int)matches.size(); i++)
  {
    const cv::DMatch &match = matches[i];
    int li = match.queryIdx;
    int ri = match.trainIdx;

    // epipolar constraint: rows must roughly agree. |dy| is used so that
    // matches displaced upward are rejected too (the old signed check let
    // arbitrarily large negative offsets through). Zero disparity would
    // divide by zero, so it is rejected here as well.
    const double max_y_dist = 100.; // @todo to be tuned
    double y_dist = std::abs(lkp[li].pt.y - rkp[ri].pt.y);
    double disparity = lkp[li].pt.x - rkp[ri].pt.x;
    if(y_dist > max_y_dist || disparity == 0.)
    {
      cloud_ptr->points[i].z = -1000.; // will be removed by the passthrough filter
      continue;
    }

    //double s = (A_.at<double>(0, 0) * baseline_) / (disparity * sensor_len_);
    double s = fx_baseline / disparity;
    cv::Mat U = (cv::Mat_<double>(3, 1) << lkp[li].pt.x, lkp[li].pt.y, 1.);
    cv::Mat X = s * A_inv * U; // 3x1 point in the left camera frame

    cloud_ptr->points[i].x = X.at<double>(0, 0);
    cloud_ptr->points[i].y = X.at<double>(1, 0);
    cloud_ptr->points[i].z = X.at<double>(2, 0);
  }

  //-- pass filter: keep points with plausible depth only
  pcl::PassThrough<POINT> pass;
  double pass_z_min_ = 0.5;   double pass_z_max_ = 1000.;

  vector<int> ind;
  pass.setInputCloud(cloud_ptr);
  pass.setFilterFieldName("z");
  pass.setFilterLimits(pass_z_min_, pass_z_max_);
  pass.filter(*cloud);   // filtered cloud
  pass.filter(ind);      // indices of the survivors in cloud_ptr

  BOOST_FOREACH(int i, ind)
  {
    // push matched indices of left image
    indices.push_back(matches[i].queryIdx);
  }

  return true;
}

//! Keep only the keypoints/descriptors whose indices are listed.
/*! @param kp       source keypoints
    @param desc     source descriptors, one row per keypoint
    @param indices  rows of kp/desc to keep, in output order
    @param kp_new   output: selected keypoints (cleared first)
    @param desc_new output: freshly allocated matrix of selected descriptor rows */
void FeatureExtractor::filter_keypoints(std::vector<cv::KeyPoint> &kp, cv::Mat &desc, std::vector<int> &indices, std::vector<cv::KeyPoint> &kp_new, cv::Ptr<cv::Mat> &desc_new)
{
  kp_new.clear();
  desc_new = new cv::Mat(cv::Size(desc.cols, indices.size()), desc.type());
  for(size_t row = 0; row < indices.size(); ++row)
  {
    const int src = indices[row];
    kp_new.push_back(kp[src]);
    desc.row(src).copyTo(desc_new->row((int)row));
  }
}

//! Remove matches whose endpoints are farther apart than 'radius' pixels.
/*! @param kp1     keypoints indexed by match.queryIdx
    @param kp2     keypoints indexed by match.trainIdx
    @param matches in/out: filtered in place
    @param radius  maximum allowed pixel distance (default 50)
    @return number of surviving matches */
int FeatureExtractor::filter_radius(std::vector<cv::KeyPoint> &kp1, std::vector<cv::KeyPoint> &kp2, std::vector<cv::DMatch> &matches, float radius)
{
  std::vector<cv::DMatch> tmp_matches;
  tmp_matches.swap(matches);  // swap avoids copying the whole vector
  const float radius2 = radius * radius;
  BOOST_FOREACH(const cv::DMatch &m, tmp_matches)
  {
    float dx = kp1[m.queryIdx].pt.x - kp2[m.trainIdx].pt.x;
    float dy = kp1[m.queryIdx].pt.y - kp2[m.trainIdx].pt.y;
    // compare squared distances: x*x beats powf(x,2) and skips the sqrt
    if(dx * dx + dy * dy < radius2) matches.push_back(m);
  }
  return (int)matches.size();
}

//! Draw a line between the two endpoints of every match.
/*! @param src     source image the lines are drawn over
    @param kp1     keypoints indexed by match.queryIdx
    @param kp2     keypoints indexed by match.trainIdx
    @param matches pairs of endpoints to connect
    @param dst     output image (may alias src)
    @param color   line color (default red, BGR)
    @param scale   output scale factor (default 1.0)
    Scale support implements the former @todo: for scale != 1.0, dst was
    previously left uninitialized, crashing cv::line on an empty matrix. */
void FeatureExtractor::draw_line_between_keypoints(cv::Mat &src, std::vector<cv::KeyPoint> &kp1, std::vector<cv::KeyPoint> &kp2, std::vector<cv::DMatch> &matches, cv::Mat &dst, cv::Scalar color, float scale)
{
  if(scale == 1.0)
  {
    dst = src.clone();  // unchanged default behavior
  }
  else
  {
    // draw on a resized copy; keypoint coordinates are scaled to match
    cv::resize(src, dst, cv::Size(src.cols*scale, src.rows*scale));
  }

  std::vector<cv::DMatch>::iterator itr = matches.begin();
  while(itr != matches.end())
  {
    int i1 = itr->queryIdx;
    int i2 = itr->trainIdx;
    cv::Point2f p1(kp1[i1].pt.x * scale, kp1[i1].pt.y * scale);
    cv::Point2f p2(kp2[i2].pt.x * scale, kp2[i2].pt.y * scale);
    cv::line(dst, p1, p2, color, 2);
    itr++;
  }
}

//! Show 'img' in window 'winname', resized by 'scale', and pump the GUI.
/*! @param winname window title passed to cv::imshow
    @param img     image to display (any type cv::resize accepts)
    @param scale   resize factor applied to both dimensions */
void FeatureExtractor::imshow(const char* winname, cv::Mat &img, float scale)
{
  cv::Size sz(img.cols*scale, img.rows*scale);
  // let cv::resize allocate the output: the previous explicit CV_8UC3
  // allocation was discarded by resize anyway and was wrong for
  // non-3-channel inputs
  cv::Mat small_img;
  cv::resize(img, small_img, sz);
  cv::imshow(winname, small_img);
  cv::waitKey(1);  // 1 ms event pump so the window actually repaints
}

} // end of namespace

//! Entry point: bring up ROS, construct the extractor node, spin until shutdown.
int main(int ac, char **av)
{
  ros::init(ac, av, "FeatureExtractor");
  ros::NodeHandle node;
  vo::FeatureExtractor extractor(node);
  ros::spin();
  return 0;
}

