/*!
  @file visod.h
  @copyright 2013 Kubota Lab. All rights reserved.
*/

#ifndef _OPENLAB2013_VISOD_H_
#define _OPENLAB2013_VISOD_H_

#include <fstream>

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <message_filters/subscriber.h> 
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/PointCloud2.h>
#include <std_msgs/Int8.h>
#include <std_msgs/Int32.h>
#include <nav_msgs/Path.h>
#include <tf/transform_broadcaster.h>

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>

#include <Eigen/Geometry>

#include "openlab2013/sliding_window.h"

namespace mrover 
{

/*! 
 *  @brief  The visual odometry class
 *  @author Kyohei Otsu <kyon@ac.jaxa.jp>
 *  @date   2013
 *  
 *  This is a detailed description of the class 
 */

class VisOd 
{
 public:
  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> SyncPolicy;

  typedef pcl::PointXYZ PointType;
  typedef pcl::PointCloud<PointType> Cloud;
  typedef typename Cloud::Ptr CloudPtr;
  typedef std::vector<int> Indices;
  typedef typename boost::shared_ptr<Indices> IndicesPtr;
  typedef std::vector<cv::KeyPoint> KeyPoints;
  typedef typename boost::shared_ptr<KeyPoints> KeyPointsPtr;


  VisOd(ros::NodeHandle &nh) 
      : RANSAC_ITERATIONS(100)
      , RANSAC_THRESH(0.3 * 0.3) // m
      , RANSAC_INLIER_THRESH(0.5)
      , nh_(nh)
      , private_nh_("~")
      , it_(nh_)
      , frame_cnt_(0)
      , counters_(new Indices)
      , reasons_(new Indices)
      , sw_(4)
      , mot_thresh_(0.20)
  {
    // initializing pub/sub, server
    imgL_sub_.subscribe(nh_, nh_.resolveName("imageL_in"), 3);
    imgR_sub_.subscribe(nh_, nh_.resolveName("imageR_in"), 3);
    infoL_sub_.subscribe(nh_, nh_.resolveName("camera_infoL_in"), 3);
    infoR_sub_.subscribe(nh_, nh_.resolveName("camera_infoR_in"), 3);
    sync_.reset(new message_filters::Synchronizer<SyncPolicy>(SyncPolicy(10), imgL_sub_, imgR_sub_, infoL_sub_, infoR_sub_));
    sync_->registerCallback(boost::bind(&VisOd::stereoCameraCB, this, _1, _2, _3, _4));
    cloud_pub_ = nh_.advertise<sensor_msgs::PointCloud2>(nh_.resolveName("cloud_out"), 3);
    path_pub_ = nh_.advertise<nav_msgs::Path>(nh_.resolveName("path_out"), 3);
    inlier_img_pub_ = it_.advertise(nh_.resolveName("inlier_image_out"), 2);
    reset_sub_ = nh_.subscribe(nh_.resolveName("reset_in"), 1, &VisOd::resetCB, this);
    trigger_pub_ = nh_.advertise<std_msgs::Int8>(nh_.resolveName("trigger"), 3);

    // initializing optional parameters
    // e.g.) private_nh_.param<TYPE>("PARAM_NAME", PARAM_VAR, DEFAULT_VALUE);
    std::string traj_file;
    private_nh_.param<std::string>("traj_file", traj_file, "");
    if (traj_file.length() > 0)
    {
      traj_ofs_.open(traj_file.c_str());
    }
    private_nh_.param<int>("method", method_, 1);
    std::vector<std::string> methodname(4);
    methodname[0] = "GaoP3P";
    methodname[1] = "KneipP3P";
    methodname[2] = "2PT";
    methodname[3] = "Arun3PT";
    ROS_WARN("METHOD: %s", methodname[method_].c_str());

    private_nh_.param<std::string>("work_dir", work_dir_, "sba_tmp");
    private_nh_.param<double>("motion_threshold", mot_thresh_, mot_thresh_);
    ROS_WARN("MOTION THRESHOLD: %.3f", mot_thresh_);

    /* send initial transform */
    publishInitialTF();
  }

  ~VisOd() 
  {
    if (traj_ofs_.is_open()) traj_ofs_.close();
  }

  void stereoCameraCB(const sensor_msgs::ImageConstPtr &imgL, const sensor_msgs::ImageConstPtr &imgR, const sensor_msgs::CameraInfoConstPtr &infoL, const sensor_msgs::CameraInfoConstPtr &infoR);

  void resetCB(const std_msgs::Int32ConstPtr &msg)
  {
    mTc_ = cTb_.inv();
    rect_mTc_ = cTb_.inv();
    path_.poses.clear();
    publishInitialTF();
  }

  template <typename Type>
  Eigen::Transform<Type, 3, Eigen::Affine> eigen2cvM(const cv::Mat &m)
  {
    Eigen::Transform<Type, 3, Eigen::Affine> e;
    for (int i = 0; i < 4; ++i)
      for (int j = 0; j < 4; ++j)
        e.matrix()(i, j) = m.at<Type>(i, j);
    return e;
  }
  template <typename Type>
  cv::Mat cv2eigenM(const Eigen::Transform<Type, 3, Eigen::Affine> &e)
  {
    cv::Mat m(4, 4, CV_32F);
    for (int i = 0; i < 4; ++i)
      for (int j = 0; j < 4; ++j)
        m.at<float>(i, j) = e.matrix()(i, j);
    return m;
  }
  tf::Transform cv2tfM(const cv::Mat &m)
  {
    tf::Vector3 position(m.at<float>(0, 3), m.at<float>(1, 3), m.at<float>(2, 3));
    tf::Matrix3x3 rotation;
    tf::Quaternion quaternion;
    for (int i = 0; i < 3; i++)
      for (int j = 0; j < 3; j++)
        rotation[i][j] = m.at<float>(i, j);
    rotation.getRotation(quaternion);

    tf::Transform pose_tf(quaternion, position);
    return pose_tf;
  }

 private:

  class ImageQueue
  {
   public:
    ImageQueue(size_t qsize=10) 
        : qsize_(qsize)
        , queue_(qsize_)
        , flag_(qsize_, 0)
        , pos_(0)
    { }
    ~ImageQueue() { }

    void push(const cv::Mat &img) 
    { 
      pos_ = getNextPos();
      queue_[pos_] = img;
      if (!flag_[pos_]) flag_[pos_] = 1;
    }
    void clear()
    {
      std::vector<int> zero_vec(qsize_, 0);
      flag_.swap(zero_vec);
    }

    cv::Mat &getCurrImg() throw (int) { return getImg(pos_); }
    cv::Mat &getPrevImg(size_t step=1) throw (int) { return getImg(getPrevPos(step)); }

   private:
    int getNextPos() { return (pos_ + 1) % qsize_; }
    int getPrevPos(size_t step=1) 
    {
      if (step > qsize_)
      {
        ROS_ERROR("Invalid step setting: %lu of queue size %lu", step, qsize_);
        return -1;
      }
      return (pos_ - step + qsize_) % qsize_; 
    }
    inline cv::Mat &getImg(int pos) throw (int)
    {
      if (!flag_[pos]) throw 0;
      else return queue_[pos];
    }
    
    size_t qsize_;
    std::vector<cv::Mat> queue_;
    std::vector<int> flag_;
    int pos_;
  };

  const size_t RANSAC_ITERATIONS;
  const double RANSAC_THRESH;
  const double RANSAC_INLIER_THRESH;


  void process(cv::Mat &pTc);
  void extractFeatures(const cv::Mat &img, std::vector<cv::KeyPoint> &kp, const std::string &detector_type="GridHARRIS", bool use_subpixel=true);
  void matchFeatures(const cv::Mat &imgL, const cv::Mat &imgR, std::vector<cv::KeyPoint> &kpL, std::vector<cv::KeyPoint> &kpR, std::vector<cv::DMatch> &match, const std::string &descriptor_type="BRIEF", const std::string &matcher_type="BruteForce-Hamming");
  void triangulate(const cv::Mat &imgL, const cv::Mat &imgR, std::vector<cv::KeyPoint> &kpL, Cloud &cloud);

  void projectPointsOntoGround(const std::vector<cv::KeyPoint> &kp, Cloud &cloud);
  void trackLK(const cv::Mat &img0, const cv::Mat &img1, KeyPoints &kp0, KeyPoints &kp1)
  {
    Indices indices(kp0.size());
    trackLK(img0, img1, kp0, kp1, indices);
  }
  void trackLK(const cv::Mat &img0, const cv::Mat &img1, KeyPoints &kp0, KeyPoints &kp1, Indices &indices);


  void estimateMotion(const Cloud &cloud0, const std::vector<cv::KeyPoint> &kp1, cv::Mat &pTc, std::vector<int> &inliers);
  void estimateMotion2PT(const Cloud &cloud0, const Cloud &cloud1, const std::vector<cv::KeyPoint> &kp0, const std::vector<cv::KeyPoint> &kp1, cv::Mat &pTc, std::vector<int> &inliers);
  void estimateMotion3PT(const Cloud &cloud0, const Cloud &cloud1, cv::Mat &pTc, std::vector<int> &inliers);
  void estimateMotionP3P(const Cloud &cloud0, const std::vector<cv::KeyPoint> &kp1, cv::Mat &pTc, std::vector<int> &inliers);

  void adjust(cv::Mat &pTc, size_t w_size=4);
  void adjustPath(const Cloud &cloud0, const std::vector<cv::KeyPoint> &kp1, cv::Mat &pTc, std::vector<int> &inliers);

  void applyPlanarApproximation(cv::Mat &mTc);

  void rodrigues(const Eigen::Vector3f &n, const double theta, Eigen::Matrix3f &R)
  {
    double c = cos(theta);
    double s = sin(theta);
    Eigen::Matrix3f n_ss;
    n_ss << 0, n(2), -n(1), -n(2), 0, n(0), n(1), -n(0), 0;
    //R = Eigen::Matrix3f::Identity() + s * n_ss + (1 - c) * n_ss.transpose() * n_ss;
    R = Eigen::Matrix3f::Identity() + s * n_ss + (1 - c) * n_ss * n_ss;
  }

  inline void publishInitialTF()
  {
    tf::Transform pose_tf(tf::Quaternion(1, 0, 0, 0), tf::Vector3(0, 0, 0));
    tf_pub_.sendTransform(tf::StampedTransform(pose_tf, ros::Time::now(), "/map", "/rover/base_link"));
  }
  void publishCurrentTF(const bool publish_path=false);

  template <typename T>
  void filterByIndices(std::vector<T> &vec, std::vector<int> &indices)
  {
    if (indices.size() == 0) return;
    std::vector<T> tmp;
    tmp.swap(vec);
    vec.resize(indices.size());
    for (size_t i = 0; i < indices.size(); ++i) vec[i] = tmp[indices[i]];
  }

  void sendStereoTrigger()
  {
    std_msgs::Int8Ptr msg(new std_msgs::Int8);
    trigger_pub_.publish(msg);
  }


  ros::NodeHandle nh_;
  ros::NodeHandle private_nh_;
  image_transport::ImageTransport it_;
  message_filters::Subscriber<sensor_msgs::Image> imgL_sub_;
  message_filters::Subscriber<sensor_msgs::Image> imgR_sub_;
  message_filters::Subscriber<sensor_msgs::CameraInfo> infoL_sub_;
  message_filters::Subscriber<sensor_msgs::CameraInfo> infoR_sub_;
  boost::shared_ptr< message_filters::Synchronizer<SyncPolicy> > sync_;
  ros::Publisher cloud_pub_;
  ros::Publisher path_pub_;
  ros::Subscriber reset_sub_;
  ros::Publisher trigger_pub_;
  tf::TransformBroadcaster tf_pub_; 
  image_transport::Publisher inlier_img_pub_;

  int method_;
  size_t frame_cnt_;
  IndicesPtr counters_;
  IndicesPtr reasons_;

  ImageQueue queueL_, queueR_;
  cv::Mat PL_, PR_;
  cv::Mat H_;
  cv::Mat mTc_;
  cv::Mat rect_mTc_;
  cv::Mat cTb_;
  Eigen::Affine3f transform_;
  nav_msgs::Path path_;
  SlidingWindow sw_;
  double mot_thresh_;

  std::ofstream traj_ofs_;
  std::string work_dir_;
};

} // namespace mrover

#endif  // _OPENLAB2013_VISOD_H_
