// C++ standard library
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Eigen
#include "Eigen/Dense"

// OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/core/eigen.hpp>  // NOTE: duplicate of the line above (harmless, header is guarded)
#include <opencv2/features2d/features2d.hpp>

// Boost
#include <boost/format.hpp>

// g2o (not currently used in this file)
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>

// Sophus (not currently used in this file)
#include <sophus/se3.hpp>

#define IMG_SIZE 2

/// @brief Read a whitespace-separated matrix from a text file, row by row,
///        into the given (pre-sized) Eigen matrix.
/// @param filename path of the text file containing rows*cols numbers
/// @param m_matrix output matrix; its current dimensions dictate how many
///                 values are read. Left unchanged if the file cannot be opened.
void readT(std::string filename, Eigen::Matrix4d &m_matrix)
{
	// The file contains formatted text parsed with operator>>, so the stream is
	// opened in the default text mode (the original passed std::ios::binary,
	// which is meaningless for formatted extraction).
	std::ifstream fin(filename);
	if (!fin)
	{
		// Report the failure instead of silently leaving the matrix stale.
		std::cerr << "readT: cannot open " << filename << std::endl;
		return;
	}

	const int numRow = m_matrix.rows();
	const int numCol = m_matrix.cols();

	for (int j = 0; j < numRow; j++)       // one pass per row
	{
		for (int i = 0; i < numCol; i++)   // numCol values form one row
		{
			fin >> m_matrix(j, i);
		}
	}
}

// Back-project a pixel coordinate onto the normalized camera plane using the
// intrinsic matrix K: x_n = (u - cx) / fx, y_n = (v - cy) / fy.
cv::Point2f pixel2cam(const cv::Point2d &p, const cv::Mat &K) {
  const double fx = K.at<double>(0, 0);
  const double fy = K.at<double>(1, 1);
  const double cx = K.at<double>(0, 2);
  const double cy = K.at<double>(1, 2);
  return cv::Point2f((p.x - cx) / fx, (p.y - cy) / fy);
}

int main(int argn, char** argc)
{

    using std::endl;
    using std::cout;
    int idx_first = 2;
    if (argn == 2)
    {
      idx_first = std::stoi(argc[1]);
    }
    std::cout << "Start from " << idx_first << " image." << std::endl;

    // const variables
    const double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;
    std::vector<std::vector<int>> bbox_list = {
        {534, 193, 553, 220},
        {537, 194, 556, 221},
        {539, 195, 556, 220},
        {543, 195, 562, 220},
        {547, 194, 566, 221},
    };
    boost::format fmt_others("../%06d.png");    // other files
    cv::Ptr<cv::Feature2D> detector = cv::ORB::create(10);
    cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

    // image relevant things
    std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<double>> T_list;
    T_list.resize(IMG_SIZE);
    std::vector<cv::Mat> img_list;
    img_list.resize(IMG_SIZE);
    std::vector<std::vector<cv::KeyPoint>> kps_list;
    kps_list.resize(IMG_SIZE);
    std::vector<cv::Mat> dscp_list;
    dscp_list.resize(IMG_SIZE);

    for (int i = idx_first; i < idx_first + 2; i++) {
        const int j = i - idx_first;
        cv::Mat img = cv::imread((fmt_others % i).str(), 0);
        img_list[j] = img;

        // detect the key points
        cv::Point2i p1(bbox_list[i-1][0], bbox_list[i-1][1]);
        cv::Point2i p2(bbox_list[i-1][2], bbox_list[i-1][3]);

        cv::Mat mask = cv::Mat::zeros(img.rows, img.cols, CV_8UC1);
        cv::rectangle(mask, p1, p2, (255), -1);
        detector->detect(img, kps_list[j], mask);
        descriptor->compute(img, kps_list[j], dscp_list[j]);
        std::cout << "Img " << i << "; N keypoints: " << kps_list[j].size() << std::endl;

        // plot
        cv::Mat color_bg;
        cv::cvtColor(img, color_bg, CV_GRAY2BGR);
        cv::rectangle(color_bg, p1, p2, {0, 255, 0}, 1, 1);
        cv::drawKeypoints(color_bg, kps_list[j], color_bg, cv::Scalar::all(-1), cv::DrawMatchesFlags::DEFAULT);
        cv::imshow("key points", color_bg);
        cv::waitKey();

        // read the T matrix
        readT(std::to_string(i) + ".bin", T_list[j]);
    }
    // calculate the descriptor
    std::vector<cv::DMatch> matches;
    matcher->match(dscp_list[0], dscp_list[1], matches);


    cv::Mat img_match;
    cv::drawMatches(img_list[0], kps_list[0], img_list[1], kps_list[1], matches, img_match);
    cv::imshow("all matches", img_match);
    cv::waitKey();

    // K
    Eigen::Matrix3d K;
    K <<
      fx, 0, cx,
      0, fy, cy,
      0, 0, 1;

    // edges
    const Eigen::Matrix4d Tc1W = T_list[0];
    const Eigen::Matrix4d Tc2W = T_list[1];
    const Eigen::Matrix4d Tc21 = Tc2W * Tc1W.inverse();
    cout << "Tc1w: \n" << Tc1W << endl;
    cout << "Tc2W: \n" << Tc2W << endl;   
    cout << "Camera RT: \n" << Tc21 << endl;

    // use thriangular measurement
    std::vector<cv::Point2f> points1;
    std::vector<cv::Point2f> points2;
    for (size_t i = 0; i < matches.size(); ++i) {
        auto p1_hat = kps_list[0][matches[i].queryIdx].pt;
        auto p2_hat = kps_list[1][matches[i].trainIdx].pt;
        points1.push_back(p1_hat);
        points2.push_back(p2_hat);
    }

    //-- 计算本质矩阵
    cv::Point2d principal_point(cx, cy);  //相机光心, TUM dataset标定值
    double focal_length = fx;      //相机焦距, TUM dataset标定值
    cv::Mat essential_matrix;
    essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point);
    std::cout << "essential_matrix is " << std::endl << essential_matrix << std::endl;
    cv::Mat R(3, 3, CV_64F);
    cv::Mat t(3, 1, CV_64F);;
    cv::recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point);
    std::cout << "R is " << std::endl << R << std::endl;
    std::cout << "t is " << std::endl << t << std::endl;

    Eigen::Matrix4d Tabs;
    Eigen::Matrix3d Reig;
    Eigen::Vector3d teig;
    cv::cv2eigen(R, Reig);
    cv::cv2eigen(t, teig);
    Tabs.setZero();
    Tabs.block(0, 0, 3, 3) = Reig;
    Tabs.block(0, 3, 3, 1) = teig;
    Tabs(3, 3) = 1;
    cout << "Tabs: \n" << Tabs << endl;
    cout << "Trel: \n" << Tc21.inverse() * Tabs << endl;

    // estimate the distance
    cv::Mat T1 = (cv::Mat_<float>(3, 4) <<
    1, 0, 0, 0,
    0, 1, 0, 0,
    0, 0, 1, 0);

    cv::Mat T2 = (cv::Mat_<float>(3, 4) <<
      R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
      R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
      R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
    );

    std::vector<cv::Point2f> pts_1, pts_2;
    cv::Mat Kmat(3, 3, CV_64F);
    cv::eigen2cv(K, Kmat);
    for (cv::DMatch m:matches) {
      // 将像素坐标转换至相机坐标
      pts_1.push_back(pixel2cam(kps_list[0][m.queryIdx].pt, Kmat));
      pts_2.push_back(pixel2cam(kps_list[1][m.trainIdx].pt, Kmat));
    }

    cv::Mat pts_4d;
    cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);

    // 转换成非齐次坐标
    cout << "Dist: \n" << endl;
    for (int i = 0; i < pts_4d.cols; i++) {
      cv::Mat x = pts_4d.col(i);
      x /= x.at<float>(3, 0); // 归一化
      cv::Point3d p(
        x.at<float>(0, 0),
        x.at<float>(1, 0),
        x.at<float>(2, 0)
      );
      cout << p << endl;
    }
}