#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

using namespace std;
using namespace cv;

/**********************************************************
 * Demo: estimating camera motion from 2D-2D feature matches
 * (epipolar geometry: fundamental / essential / homography)
 * ************************************************************/
// Detect ORB keypoints in both images and return the filtered descriptor matches.
void find_feature_matches(
    const Mat &img_1,const Mat &img_2,
    std::vector<KeyPoint> &keypoints_1,
    std::vector<KeyPoint> &keypoints_2,
    std::vector<DMatch> &matches);

// Estimate relative camera motion (rotation R, translation t) from the 2D-2D matches.
void pose_estimation_2d2d(
    std::vector<KeyPoint> keypoints_1,
    std::vector<KeyPoint> keypoints_2,
    std::vector<DMatch> matches,
    Mat &R, Mat &t);

// Convert a pixel coordinate to a normalized camera-plane coordinate using intrinsics K.
Point2d pixel2cam(const Point2d &p,const Mat &K);

int main(int argc,char **argv)
{
    if(argc!=3)
    {
        cout<<"usage: pose_estimation_2d2d img1,img2"<<endl;
        return 1;  // bug fix: without this we fall through and read missing argv entries
    }

    //-- Load the two input images
    Mat img_1=imread(argv[1],IMREAD_COLOR);
    Mat img_2=imread(argv[2],IMREAD_COLOR);
    assert(img_1.data&&img_2.data&& "can't load the images");

    vector<KeyPoint> keypoints_1,keypoints_2;
    vector<DMatch> matches;
    find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);
    cout<<"find the keypint's number"<<matches.size()<<endl;

    //-- Estimate the motion between the two frames
    Mat R,t;
    pose_estimation_2d2d(keypoints_1,keypoints_2,matches,R,t);

    //-- Verify E = t^ R * scale, where t^ is the skew-symmetric matrix of t:
    //      [  0  -t2   t1 ]
    //      [  t2   0  -t0 ]
    //      [ -t1  t0    0 ]
    // bug fix: original had a misplaced ')' in the (0,2) entry, a syntax error
    Mat t_x=
        (Mat_<double>(3,3)<< 0,                  -t.at<double>(2,0),  t.at<double>(1,0),
                             t.at<double>(2,0),   0,                 -t.at<double>(0,0),
                            -t.at<double>(1,0),   t.at<double>(0,0),  0);

    cout<<"t^R="<<endl<<t_x*R<<endl;

    //-- Verify the epipolar constraint y2^T * t^ * R * y1 ≈ 0 for every match
    Mat K=(Mat_<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
    for(DMatch m: matches)
    {
        Point2d pt1=pixel2cam(keypoints_1[m.queryIdx].pt,K);
        Mat y1=(Mat_<double>(3,1)<<pt1.x,pt1.y,1);
        // bug fix: the second image's keypoint is indexed by trainIdx, not queryIdx
        Point2d pt2=pixel2cam(keypoints_2[m.trainIdx].pt,K);
        // bug fix: 'pt2,y' typo -> 'pt2.y'
        Mat y2=(Mat_<double>(3,1)<<pt2.x,pt2.y,1);
        Mat d=y2.t()*t_x*R*y1;  // scalar residual; should be close to zero
        cout<<"epipolar constraint = "<<d<<endl;
    }
    return 0;
}

void find_feature_matches(const Mat &img_1,const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches)
{
    //--初始化
    Mat descriptors_1,descriptors_2;
    Ptr<FeatureDetector> detector=ORB::create();
    Ptr<FeatureDetector> descriptor=ORB::create();
    
    Ptr<DescriptorMatcher> matcher=DescriptorMatcher::create("BruteForce-Hamming");
    
    //第一步--检测oriented FAST角点位置
    detector->detect(img_1,keypoints_1);
    detector->detect(img_2,keypoints_2);
    
    //第二--根据角点位置计算BRIEF描述子
    descriptor->compute(img_1,keypoints_1,descriptors_1);
    descriptor->compute(img_2,keypoints_2,descriptors_2);
    
    //第三--对两幅图像的BRIEF描述子进行匹配，使用汉明距离
    vector<DMatch> match;
    //BFMatcher matcher (Norm_Hamming);
    matcher->match((descriptors_1,descriptors_2,match);
    
    //第四--匹配点对筛选
    double min_dist=10000,max_dist=0;
    
    //找出所有匹配点之间的最小距离，最大距离，即最相似与最不相似的两组点之间的距离
    for(int i=0;i<descriptors_1.rows;i++)
    {
        double dist=match[i].distance;
        if(dist<min_dist) min_dist=dist;
        if(dist>max_dist) max_dist=dist;
    }
    
    printf("--max_dist : %f \n",max_dist);
    printf("--min_dist : %f \n",min_dist);
    
    //描述子之间的距离大于距离最大值两倍时即认为匹配有误，防止最大值过小需要设置经验值30为下限
    for(int i=0;i<descriptors_1.rows;i++)
    {
        if(match[i].distance<=max(2*min_dist,30.0)
        {
            matches.push_back(match[i]);
        }
    }
}

// Convert a pixel coordinate p to a normalized camera-plane coordinate:
//   x = (u - cx) / fx,  y = (v - cy) / fy
// where K = [fx 0 cx; 0 fy cy; 0 0 1].
Point2d pixel2cam(const cv::Point2d& p, const cv::Mat& K)
{
    // bug fix: original computed p.y - (cy / fy) — the parenthesis must close
    // after the subtraction so the whole difference is divided by fy
    return Point2d((p.x-K.at<double>(0,2))/K.at<double>(0,0),
                   (p.y-K.at<double>(1,2))/K.at<double>(1,1));
}
   

void pose_estimation_2d2d(std::vector<Keypoint> keypoints_1,
                         std::vector<Keypoint> keypoints_2,
                         std::vector<DMatch> matches,
                         cv::Mat &R, cv::Mat &t)
{
    cv::Mat K=(Mat<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
    
    //将匹配点转换为vector<Point2f>的形式
    vector<Point2f> points1;
    vector<Point2f> points2;
    
    for(int i=0;i<(int)matches.size();i++)
    {
        points1.push_back(keypoints_1[matches[i].queryIdx].pt);
        points2.push_back(keypoints_2[matches[i].queryIdx].pt);
    }
    
    //计算基础矩阵
  cv::Mat fundamental_matrix;
  fundamental_matrix=findFundamentalMat(points1,points2,CV_FM_8POINT);
  cout<<"fundamental_matrix is"<<endl<<fundamental_matrix<<endl;
  
  //计算本质矩阵
  cv::Point2d principal_point(325.1,249.7);  //相机光心
  double focal_length=521;
  cv::Mat essenyial_matrix;
  essenyial_matrix=findEssentialMat(points1,points2,focal_length,principal_point);
  cout<<"essenyial_matrix is "<<endl<<essenyial_matrix<<endl;
  
  //--计算单应矩阵
  cv::Mat homography_matrix;
  homography_matrix=findHomography(points1,points2,RANSAC,3);
  cout<<"homography_matrix is"<<endl<<homography_matrix<<endl;
  
  //--获取旋转与平移信息
  recoverPose(essenyial_matrix,points1,points2, R, t, focal_length, principal_point);
  cout<<"R is "<<endl<<R<<endl;
  cout<<"t is "<<endl<<t<<endl;
}
        

