#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Geometry>
#include <eigen3/Eigen/SVD>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <chrono>
#include <sophus/se3.hpp>

using namespace cv;
using namespace std;

// Find ORB feature matches between the two input images.
void find_feature_matches(
        const Mat &img_1, const Mat &img_2,
        std::vector<KeyPoint> &keypoints_1,
        std::vector<KeyPoint> &keypoints_2,
        std::vector<DMatch> &matches);  // fixed: DMatch (was misspelled Dmatch)

// Convert pixel coordinates to normalized camera-plane coordinates.
Point2d pixel2cam(const Point2d &p, const Mat &K);

// Closed-form ICP via SVD: solves p1 = R * p2 + t for the two point sets.
void pose_estimation_3d3d(
        const vector<Point3f> &pts1,
        const vector<Point3f> &pts2,
        Mat &R, Mat &t);

// Refine the SE(3) pose with g2o bundle adjustment (pose-only, 3D-3D edges).
void bundleAdjustment(
        const vector<Point3f> &pts1,
        const vector<Point3f> &pts2,
        Mat &R, Mat &t);
// Vertex used in the g2o BA: camera pose as Sophus::SE3d with a
// 6-dof minimal parameterization (se(3) twist), updated by left multiplication.
class VertexPose: public g2o::BaseVertex<6, Sophus::SE3d>{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

    // Reset the estimate to the identity transform.
    virtual void setToOriginImpl() override{
        _estimate=Sophus::SE3d();
    }

    // Left multiplication on SE(3): T <- exp(update) * T.
    virtual void oplusImpl(const double *update) override{
        Eigen::Matrix<double,6,1> update_eigen;
        // fixed: 'uodate[3]' typo -> update[3]
        update_eigen<<update[0],update[1],update[2],update[3],update[4],update[5];
        _estimate=Sophus::SE3d::exp(update_eigen)*_estimate;
    }

    // (De)serialization is unused here; return false instead of falling
    // off the end of a non-void function (undefined behavior).
    virtual bool read(istream &in) override{ return false; }
    virtual bool write(ostream &out) const override{ return false; }
};

//g2o edeg
class EdgeProjectXYZRGBPoseOnly: public g2o:;BaseUnaryEdge<3, Eigen::Vector3d,VertexPose>
{
    public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
    EdgeProjectXYZRGBPoseOnly(const Eigen::Vertor3d &point): _point(point){}
    virtual void computeError() override{
        const VertexPose *pose =static_cast<const VertexPose *>(_vertices[0]);
        _error=_measuremanet-pose->estimate() * _point;
    }
    virtual void linearizeOplus() override{
        VertexPose *pose=static_cast<VertexPose *>(_vertices[0]);
        Sophus::SE3d T=pose->estimate();
        Eigen::Vector3d xyz_trans=T*_point;
        _jacobianOplusXi.block<3,3>(0,0)=-Eigen::Matrix3d::Identity();
        _jacobianOplusXi.block<3,3>(0,3)=Sophus::SO3d::hat(xyz_trans);
    }
    bool read(istream &in){}
    bool write(ostream &out){}

    protected:
    Eigen::Vector3d _point;
};


// Entry point: load two RGB-D frames, match ORB features, build 3D-3D point
// pairs from the depth maps, solve ICP by SVD, then refine with g2o BA.
int main(int argc, char** argv)
{
    if(argc!=5)
    {
        cout<<"usage: pose_estimation_3d3d img1 img2 depth1 depth2"<<endl;
        return 1;
    }

    //-- load the two color images
    Mat img_1=imread(argv[1],IMREAD_COLOR);
    Mat img_2=imread(argv[2],IMREAD_COLOR);

    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);
    cout<<"the number of the match points is "<<matches.size()<<endl;

    // build 3D point pairs (depth images are 16-bit, TUM scale factor 5000)
    Mat depth1=imread(argv[3],IMREAD_UNCHANGED);
    Mat depth2=imread(argv[4],IMREAD_UNCHANGED);
    Mat K=(Mat_<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
    vector<Point3f> pts1,pts2;

    for(DMatch m:matches)
    {
        // fixed: keypoint_1/keypoint_2 typos -> keypoints_1/keypoints_2,
        // and the second image must be indexed with m.trainIdx, not m.queryIdx.
        ushort d1=depth1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))
                [int(keypoints_1[m.queryIdx].pt.x)];
        ushort d2=depth2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))
                [int(keypoints_2[m.trainIdx].pt.x)];
        if(d1==0||d2==0)  //bad depth
            continue;
        // fixed: pixelcam -> pixel2cam (the declared helper)
        Point2d p1=pixel2cam(keypoints_1[m.queryIdx].pt,K);
        Point2d p2=pixel2cam(keypoints_2[m.trainIdx].pt,K);
        float dd1=float(d1)/5000.0;
        float dd2=float(d2)/5000.0;
        pts1.push_back(Point3f(p1.x*dd1,p1.y*dd1,dd1));
        // fixed: the second point must use its own depth dd2 (was dd1)
        pts2.push_back(Point3f(p2.x*dd2,p2.y*dd2,dd2));
    }

    cout<<"3d-3d pairs: "<<pts1.size()<<endl;
    Mat R,t;
    pose_estimation_3d3d(pts1,pts2,R,t);
    cout<<"ICP via SVD results: "<<endl;
    cout<<" R= "<<R<<endl;
    cout<<"t ="<<t<<endl;
    cout<<"R_inv= "<<R.t()<<endl;
    cout<<"t_inv= "<<-R.t()*t<<endl;

    cout<<"calling bundle adjustment"<<endl;

    bundleAdjustment(pts1,pts2,R,t);

    //verify p1=R*p2+t on the first few pairs
    for(int i=0;i<5;i++)
    {
        cout<<"p1= "<<pts1[i]<<endl;
        cout<<"p2= "<<pts2[i]<<endl;
        cout<<"(R*p2+t)= "<<
              R*(Mat_<double>(3,1)<<pts2[i].x,pts2[i].y,pts2[i].z)+t
           <<endl;
        cout<<endl;
    }
}

// Detect ORB keypoints in both images, compute descriptors, brute-force
// match them (Hamming), and keep only matches whose descriptor distance is
// small (<= max(2*min_dist, 30)).
void find_feature_matches(const Mat &img_1,
                          const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches)  // fixed: DMatch typo
{
    // initialization
    Mat descriptors_1,descriptors_2;
    Ptr<FeatureDetector> detector=ORB::create();
    Ptr<DescriptorExtractor> descriptor=ORB::create();

    // detect keypoints
    detector->detect(img_1,keypoints_1);
    detector->detect(img_2,keypoints_2);

    // compute descriptors at the keypoints
    descriptor->compute(img_1,keypoints_1,descriptors_1);
    descriptor->compute(img_2,keypoints_2,descriptors_2);

    // match descriptors between the two images
    vector<DMatch> match;
    BFMatcher matcher(NORM_HAMMING);
    // fixed: matcher is an object, not a Ptr -> use '.' not '->'
    matcher.match(descriptors_1,descriptors_2,match);

    // filter the matches
    double min_dist=10000, max_dist=0;

    // find min/max distance over all matches
    for(int i=0;i<descriptors_1.rows;++i)
    {
        double dist=match[i].distance;  // fixed: 'dist' was undeclared
        if(dist<min_dist) min_dist=dist;
        if(dist>max_dist) max_dist=dist;
    }

    printf(" ---Max dist : %f \n", max_dist);
    printf(" ---Min dist : %f \n", min_dist);  // fixed: printed max twice

    // reject a match when its distance exceeds twice the minimum distance
    // (with an empirical lower bound of 30)
    for(int i=0;i<descriptors_1.rows;i++)
    {
        // fixed: condition was inverted ('>' kept only the BAD matches);
        // also use 30.0 so max() compares two doubles
        if(match[i].distance<=max(2*min_dist,30.0)) matches.push_back(match[i]);
    }
 }

// Back-project a pixel (u, v) onto the normalized image plane using the
// intrinsic matrix K: x = (u - cx) / fx, y = (v - cy) / fy.
Point2d pixel2cam(const Point2d &p, const Mat &K) {
  const double fx = K.at<double>(0, 0);
  const double fy = K.at<double>(1, 1);
  const double cx = K.at<double>(0, 2);
  const double cy = K.at<double>(1, 2);
  return Point2d((p.x - cx) / fx, (p.y - cy) / fy);
}

 // Closed-form ICP via SVD (Arun/Umeyama): finds R, t minimizing
 // sum_i || p1_i - (R * p2_i + t) ||^2 over the matched point sets.
 void pose_estimation_3d3d(const vector<Point3f> &pts1,
                           const vector<Point3f> &pts2,
                           Mat &R, Mat &t)
 {
     // centroids of both point sets
     Point3f p1,p2;
     int N=pts1.size();
     for(int i=0;i<N;i++)
     {
         p1+=pts1[i];
         p2+=pts2[i];
     }
     p1=Point3f(Vec3f(p1)/N);
     p2=Point3f(Vec3f(p2)/N);  // fixed: vec3f typo -> Vec3f

     vector<Point3f> q1(N),q2(N); //remove the center
     for(int i=0;i<N;i++)
     {
         q1[i]=pts1[i]-p1;
         q2[i]=pts2[i]-p2;
     }

     //compute W = sum q1*q2^T
     Eigen::Matrix3d W=Eigen::Matrix3d::Zero();
     for(int i=0;i<N;i++)
     {
         W+=Eigen::Vector3d(q1[i].x,q1[i].y,q1[i].z)*Eigen::Vector3d(q2[i].x,q2[i].y,q2[i].z).transpose();
     }
     cout<<" W = "<<W<<endl;

     //SVD on W
     Eigen::JacobiSVD<Eigen::Matrix3d> svd(W,Eigen::ComputeFullU|Eigen::ComputeFullV);
     Eigen::Matrix3d U=svd.matrixU();
     Eigen::Matrix3d V=svd.matrixV();

     cout<<" U= "<<U<<endl;
     cout<<" V= "<<V<<endl;

     Eigen::Matrix3d R_=U*(V.transpose());
     if(R_.determinant()<0) R_=-R_;  // ensure a proper rotation (det = +1)
     // fixed: t must be computed with the Eigen rotation R_, not the (still
     // empty) cv::Mat R — the original did not even compile
     Eigen::Vector3d t_=Eigen::Vector3d(p1.x,p1.y,p1.z)-R_*Eigen::Vector3d(p2.x,p2.y,p2.z);

     //convert to cv::Mat
     R = (Mat_<double>(3, 3) <<
       R_(0, 0), R_(0, 1), R_(0, 2),
       R_(1, 0), R_(1, 1), R_(1, 2),
       R_(2, 0), R_(2, 1), R_(2, 2)
     );
      t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
 }

 // Refine the frame-to-frame pose with g2o: a single SE(3) vertex and one
 // 3D-3D edge per matched point pair; overwrites R, t with the result.
 void bundleAdjustment(
         const vector<Point3f> &pts1,
         const vector<Point3f> &pts2,
         Mat &R, Mat &t)
 {
     // set up the g2o optimizer
     typedef g2o::BlockSolverX BlockSolverType;
     typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // linear solver

     // gradient descent method: choose from GN, LM, Dogleg
     auto solver=new g2o::OptimizationAlgorithmLevenberg(
                 g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
     g2o::SparseOptimizer optimizer; // graph model
     optimizer.setAlgorithm(solver); // set the solver
     optimizer.setVerbose(true);     // enable debug output

    //vertex
     VertexPose *pose=new VertexPose();  //camera pose
     pose->setId(0);
     pose->setEstimate(Sophus::SE3d());
     optimizer.addVertex(pose);

     //edges: one 3D-3D constraint per matched point pair
     for(size_t i=0;i<pts1.size();i++)
     {
         EdgeProjectXYZRGBPoseOnly *edge=new EdgeProjectXYZRGBPoseOnly(
            Eigen::Vector3d(pts2[i].x,pts2[i].y,pts2[i].z));
         edge->setVertex(0,pose);
         edge->setMeasurement(Eigen::Vector3d(
              pts1[i].x,pts1[i].y,pts1[i].z));
         edge->setInformation(Eigen::Matrix3d::Identity());
         optimizer.addEdge(edge);
     }
     // fixed: stray 's' token removed here (syntax error)

     chrono::steady_clock::time_point t1=chrono::steady_clock::now();
     optimizer.initializeOptimization();
     optimizer.optimize(10);
     chrono::steady_clock::time_point t2=chrono::steady_clock::now();
     chrono::duration<double> time_used=chrono::duration_cast<chrono::duration<double>>(t2-t1);
     // fixed: time_used.count()() double call -> count()
     cout<<"optimization costs time: "<<time_used.count()<<" seconds. "<<endl;

     cout<<endl<<"after optimization: "<<endl;
     cout<<"T=\n"<<pose->estimate().matrix()<<endl;

     //convert to cv::Mat
     Eigen::Matrix3d R_=pose->estimate().rotationMatrix();
     // fixed: translation is a 3-vector, not a Matrix3d
     Eigen::Vector3d t_=pose->estimate().translation();
     R=(Mat_<double>(3,3)<<
        R_(0, 0), R_(0, 1), R_(0, 2),
        R_(1, 0), R_(1, 1), R_(1, 2),
        R_(2, 0), R_(2, 1), R_(2, 2)
        );
     // fixed: Mat<double> -> Mat_<double>
     t=(Mat_<double>(3,1)<< t_(0, 0), t_(1, 0), t_(2, 0));
 }


