#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include <boost/format.hpp>
#include <Eigen/Dense>
#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>

#define IMG_SIZE 2
#define VAR_SIZE 6
#define ERR_SIZE 1

/// vertex and edges used in g2o ba
/// g2o vertex holding the camera pose estimate as an SE(3) element (6 DoF).
class VertexPose: public g2o::BaseVertex<VAR_SIZE, Sophus::SE3d> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  /// Reset the estimate to the identity transform (called by g2o).
  virtual void setToOriginImpl() override {
    _estimate = Sophus::SE3d();
  }

  /// Apply an update step as a left multiplication on SE(3):
  /// T <- exp(update) * T, where update = (rho, phi) in R^6.
  virtual void oplusImpl(const double *input) override {
    Eigen::Matrix<double, 6, 1> update;
    update << input[0], input[1], input[2], input[3], input[4], input[5];
    _estimate = Sophus::SE3d::exp(update) * _estimate;
  }

  // Serialization is not implemented; return false instead of falling off
  // the end of a non-void function (which is undefined behavior).
  virtual bool read(std::istream &in) override { return false; }

  virtual bool write(std::ostream &out) const override { return false; }
};

/// Unary edge encoding the epipolar (essential-matrix) constraint between a
/// point correspondence (x1, measurement x2) in normalized camera coordinates:
///   error = x2^T * [t]_x * R * x1
/// which is zero for a perfect correspondence. The optimized vertex is a
/// correction pose composed with the fixed prior transform _Tc21.
class EdgeProjection : public g2o::BaseUnaryEdge<ERR_SIZE, Eigen::Vector3d, VertexPose> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  /// @param x1   normalized camera coordinate of the point in image 1
  /// @param Tc21 prior relative transform from camera 1 to camera 2
  EdgeProjection(const Eigen::Vector3d &x1, const Sophus::SE3d &Tc21)
   : _x1(x1), _Tc21(Tc21) {}

  virtual void computeError() override {
    const VertexPose *v = static_cast<const VertexPose *>(_vertices[0]);
    Sophus::SE3d T = v->estimate();
    // NOTE(review): the commented lines below once zeroed selected components
    // (angles around z and x in the camera frame: 1-y, 2-z, 3-pitch, 4-yaw,
    // 5-roll); kept for reference.
    // est(5, 0) = 0;
    // est(3, 0) = 0;
    T = _Tc21 * T;
    Eigen::Matrix3d R = T.rotationMatrix();
    Eigen::Vector3d t = T.translation();
    // Essential-matrix constraint: scalar residual stored in the 1x1 _error.
    _error = _measurement.transpose() * Sophus::SO3d::hat(t) * R * _x1;
  }

  // Serialization is not implemented; return false instead of falling off
  // the end of a non-void function (which is undefined behavior).
  virtual bool read(std::istream &in) override { return false; }

  virtual bool write(std::ostream &out) const override { return false; }

private:
  Eigen::Vector3d _x1;   // point in image 1 (normalized camera coordinates)
  Sophus::SE3d _Tc21;    // fixed prior transform camera1 -> camera2
};


/// Read a whitespace-separated 4x4 matrix from `filename` into m_matrix,
/// row-major. On failure (file missing or malformed) an error is printed to
/// stderr and m_matrix may be left partially unmodified.
void readT(std::string filename, Eigen::Matrix4d &m_matrix)
{
  // operator>> performs formatted (text) extraction, so open in text mode;
  // the original std::ios::binary flag was inconsistent with that.
  std::ifstream fin(filename);
  if (!fin)
  {
    std::cerr << "readT: cannot open " << filename << std::endl;
    return;
  }

  const int numRow = m_matrix.rows();
  const int numCol = m_matrix.cols();
  for (int r = 0; r < numRow; ++r)       // one line of the file per row
  {
    for (int c = 0; c < numCol; ++c)     // numCol values per row
    {
      if (!(fin >> m_matrix(r, c)))
      {
        std::cerr << "readT: malformed matrix file " << filename << std::endl;
        return;
      }
    }
  }
}

/// Back-project a homogeneous pixel coordinate through the intrinsic matrix K
/// and rescale so the third (depth) component equals 1.
Eigen::Vector3d pixel2cam(const Eigen::Vector3d &p, const Eigen::Matrix3d &K) {
  const Eigen::Vector3d cam = K.inverse() * p;
  return cam / cam(2);
}

int main(int argn, char** argc)
{

    using std::endl;
    using std::cout;
    int idx_first = 2;
    if (argn == 2)
    {
      idx_first = std::stoi(argc[1]);
    }
    std::cout << "Start from " << idx_first << " image." << std::endl;

    // const variables
    const double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;
    std::vector<std::vector<int>> bbox_list = {
        {534, 193, 553, 220},
        {537, 194, 556, 221},
        {539, 195, 556, 220},
        {543, 195, 562, 220},
        {547, 194, 566, 221},
    };
    boost::format fmt_others("../%06d.png");    // other files
    cv::Ptr<cv::Feature2D> detector = cv::ORB::create(10);
    cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

    // image relevant things
    std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<double>> T_list;
    T_list.resize(IMG_SIZE);
    std::vector<cv::Mat> img_list;
    img_list.resize(IMG_SIZE);
    std::vector<std::vector<cv::KeyPoint>> kps_list;
    kps_list.resize(IMG_SIZE);
    std::vector<cv::Mat> dscp_list;
    dscp_list.resize(IMG_SIZE);

    for (int i = idx_first; i < idx_first + 2; i++) {
        const int j = i - idx_first;
        cv::Mat img = cv::imread((fmt_others % i).str(), 0);
        img_list[j] = img;

        // detect the key points
        cv::Point2i p1(bbox_list[i-1][0], bbox_list[i-1][1]);
        cv::Point2i p2(bbox_list[i-1][2], bbox_list[i-1][3]);

        cv::Mat mask = cv::Mat::zeros(img.rows, img.cols, CV_8UC1);
        cv::rectangle(mask, p1, p2, (255), -1);
        detector->detect(img, kps_list[j], mask);
        descriptor->compute(img, kps_list[j], dscp_list[j]);
        std::cout << "Img " << i << "; N keypoints: " << kps_list[j].size() << std::endl;

        // plot
        cv::Mat color_bg;
        cv::cvtColor(img, color_bg, CV_GRAY2BGR);
        cv::rectangle(color_bg, p1, p2, {0, 255, 0}, 1, 1);
        cv::drawKeypoints(color_bg, kps_list[j], color_bg, cv::Scalar::all(-1), cv::DrawMatchesFlags::DEFAULT);
        cv::imshow("key points", color_bg);
        cv::waitKey();

        // read the T matrix
        readT(std::to_string(i) + ".bin", T_list[j]);
    }
    // calculate the descriptor
    std::vector<cv::DMatch> matches;
    matcher->match(dscp_list[0], dscp_list[1], matches);


    cv::Mat img_match;
    cv::drawMatches(img_list[0], kps_list[0], img_list[1], kps_list[1], matches, img_match);
    cv::imshow("all matches", img_match);
    cv::waitKey();

    // K
    Eigen::Matrix3d K;
    K <<
      fx, 0, cx,
      0, fy, cy,
      0, 0, 1;

    // edges
    const Eigen::Matrix4d Tc1W = T_list[0];
    const Eigen::Matrix4d Tc2W = T_list[1];
    const Eigen::Matrix4d Tc21 = Tc2W * Tc1W.inverse();
    cout << "Tc1w: \n" << Tc1W << endl;
    cout << "Tc2W: \n" << Tc2W << endl;   
    cout << "Camera RT: \n" << Tc21 << endl;

    // use thriangular measurement
    std::vector<cv::Point2f> points1;
    std::vector<cv::Point2f> points2;
    for (size_t i = 0; i < matches.size(); ++i) {
        auto p1_hat = kps_list[0][matches[i].queryIdx].pt;
        auto p2_hat = kps_list[1][matches[i].trainIdx].pt;
        points1.push_back(p1_hat);
        points2.push_back(p2_hat);
    }

    // G2O method
    // begin to estimate the position
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<VAR_SIZE, ERR_SIZE>> BlockSolverType;
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // 线性求解器类型

    // 梯度下降方法，可以从GN, LM, DogLeg 中选
    auto solver = new g2o::OptimizationAlgorithmGaussNewton(
        g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
    g2o::SparseOptimizer optimizer;     // 图模型
    optimizer.setAlgorithm(solver);   // 设置求解器
    optimizer.setVerbose(true);       // 打开调试输出

    // vertex
    VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose
    vertex_pose->setId(0);
    vertex_pose->setToOriginImpl();
    optimizer.addVertex(vertex_pose);
    
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(Tc21.block(0, 0, 3, 3), Eigen::ComputeThinU | Eigen::ComputeThinV);
    auto U = svd.matrixU();
    auto V = svd.matrixV(); 
    Sophus::SE3d T21(U * V, Tc21.block(0, 3, 3, 1));
    std::cout << "T21: " << Tc21 << std::endl;

    int index = 1;
    for (size_t i = 0; i < matches.size(); ++i) {
        const auto p1_hat = kps_list[0][matches[i].queryIdx].pt;
        const auto p2_hat = kps_list[1][matches[i].trainIdx].pt;
        Eigen::Vector3d p1;
        p1 << p1_hat.x, p1_hat.y, 1;
        Eigen::Vector3d p2;
        p2 << p2_hat.x, p2_hat.y, 1;

        // static check 
        p1 = K.inverse() * p1;
        p2 = K.inverse() * p2;

        EdgeProjection *edge = new EdgeProjection(p1, T21);
        edge->setId(index);
        edge->setVertex(0, vertex_pose);
        edge->setMeasurement(p2);
        edge->setInformation(Eigen::Matrix<double, ERR_SIZE, ERR_SIZE>::Identity());
        optimizer.addEdge(edge);
        index++;
    }
    
    // the estimation core
    optimizer.setVerbose(true);
    optimizer.initializeOptimization();
    optimizer.optimize(15);
    std::cout << "Est: \n" << vertex_pose->estimate().matrix() << std::endl;
}