﻿#include <iostream>  
#include <opencv2/core/core.hpp>  
#include <opencv2/features2d/features2d.hpp>   
#include "opencv2/calib3d/calib3d.hpp"
#include <opencv2/opencv.hpp>
#include"opencv2/imgproc/imgproc.hpp"
#include"opencv2/highgui/highgui.hpp"
#include "Matrix.h"
#include "readini.cpp" 
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>

using namespace cv::xfeatures2d;



using namespace cv;
using namespace std;


// Function declarations.
// (The helper that extracted keypoints and the descriptor matches between two
// images is kept below, commented out, for reference.)
//void find_feature_matches(const Mat& img_1, const Mat& img_2,
//	std::vector<KeyPoint>& keypoints_1,
//	std::vector<KeyPoint>& keypoints_2,
//	std::vector< DMatch >& matches)
//{
//	//-- 初始化
//
//	Mat descriptors_1, descriptors_2;
//	// used in OpenCV3 
//	Ptr<FeatureDetector> detector = ORB::create(100, 2.f, 5, 50, 0, 3, ORB::HARRIS_SCORE, 31, 50);
//	Ptr<DescriptorExtractor> descriptor = ORB::create();
//	// use this if you are in OpenCV2 
//	// Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
//	// Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
//	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(4);
//	//-- 第一步:检测 Oriented FAST 角点位置
//	detector->detect(img_1, keypoints_1);
//	detector->detect(img_2, keypoints_2);
//
//	//-- 第二步:根据角点位置计算 BRIEF 描述子
//	descriptor->compute(img_1, keypoints_1, descriptors_1);
//	descriptor->compute(img_2, keypoints_2, descriptors_2);
//
//	//-- 第三步:对两幅图像中的BRIEF描述子进行匹配，使用 Hamming 距离
//	vector<DMatch> match;
//	//BFMatcher matcher ( NORM_HAMMING );
//	matcher->match(descriptors_1, descriptors_2, match);
//
//	Mat outimg1;
//	drawKeypoints(img_1, keypoints_1, outimg1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
//	imshow("1.png的ORB特征点", outimg1);
//	Mat outimg2;
//	drawKeypoints(img_2, keypoints_2, outimg2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
//	imshow("2.png的ORB特征点", outimg2);
//	//-- 第四步:匹配点对筛选
//	double min_dist = 10000, max_dist = 0;
//
//	//找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
//	
//	for (int i = 0; i < descriptors_1.rows; i++)
//	{
//		double dist = match[i].distance;
//		if (dist < min_dist) min_dist = dist;
//		if (dist > max_dist) max_dist = dist;
//	}
//
//	printf("-- Max dist : %f \n", max_dist);
//	printf("-- Min dist : %f \n", min_dist);
//
//	//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
//	vector<DMatch> good_matches;
//	for (int i = 0; i < descriptors_1.rows; i++)
//	{
//		if (match[i].distance <=50)
//		{
//			matches.push_back(match[i]);
//		}
//	}
//
//	//第5步：绘制匹配结果
//	Mat img_match;
//	Mat img_goodmatch;
//	drawMatches(img_1, keypoints_1, img_2, keypoints_2, match, img_match);
//	drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_goodmatch);
//	imshow("所有匹配点对", img_match);
//	imshow("优化后匹配点对", img_goodmatch);
//}

void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts);

bool refineMatchesWithHomography(
	const std::vector<cv::KeyPoint>& queryKeypoints,
	const std::vector<cv::KeyPoint>& trainKeypoints,
	float reprojectionThreshold, std::vector<cv::DMatch>& matches,
	cv::Mat& homography);

// Convert a pixel coordinate into a normalized camera coordinate using the
// intrinsic matrix K: x = (u - cx) / fx, y = (v - cy) / fy.
// K must be a CV_64F 3x3 intrinsic matrix.
Point2d pixel2cam(const Point2d& p, const Mat& K)
{
	const double fx = K.at<double>(0, 0);
	const double fy = K.at<double>(1, 1);
	const double cx = K.at<double>(0, 2);
	const double cy = K.at<double>(1, 2);
	return Point2d((p.x - cx) / fx, (p.y - cy) / fy);
}
void triangulation(
	const vector<KeyPoint>& keypoint_1,
	const vector<KeyPoint>& keypoint_2,
	const std::vector< DMatch >& matches,
	const Mat& R, const Mat& t,
	vector<Point3d>& points
);

void pose_estimation_2d2d(
	const std::vector<KeyPoint>& keypoints_1,
	const std::vector<KeyPoint>& keypoints_2,
	std::vector< DMatch >& matches,
	Mat& R, Mat& t);


int main()
{
	// Stereo pipeline: load calibration from an ini-style text file, undistort a
	// left/right image pair, match ORB+FREAK features, estimate the epipolar
	// geometry, triangulate, then rectify and display the pair side by side.
	//test
	const int imageWidth = 1280;                             // camera resolution
	const int imageHeight = 720;
	// Valid region left after rectification cropping.
	// NOTE(review): validROIL/validROIR are never written below -- stereoRectify
	// fills validRoiL/validRoiR (different capitalization), so the vroiL/vroiR
	// rectangles computed near the end are always empty. Looks like a naming bug.
	Rect validROIL;
	Rect validROIR;
	Size imageSize = Size(imageWidth, imageHeight);
	Mat R1, R2, P1, P2, Q;
	Mat rectifyImageL, rectifyImageR;
	Rect validRoiL;
	Rect validRoiR;
	vector<double>   nums;   // scratch vector refilled by each readConfigFile call
	//Mat cameraMatrixtest = (Mat_<double>(3, 3) << nums);

	/*Mat cameraMatrixL = (Mat_<double>(3, 3) << 370.282287905271,	1.10679185793897,	379.988405692572,
		0,	372.781703155405,	246.538800237802,
		0,	0,	1);*/
		//Mat distCoeffL = (Mat_<double>(5, 1) << -0.327568650914029,	0.135366425692623,   7.64512723951968e-05,	0.00167380068823004, -0.0297344285153459
		//	);

		//Mat cameraMatrixR = (Mat_<double>(3, 3) << 370.016952068867,	0.526124285315244,	386.350542491014,
		//	0,	372.439848623195,	235.876505533511,
		//	0,	0,	1
		//	);
		//Mat distCoeffR = (Mat_<double>(5, 1) << -0.338374177297428,	0.149410313558290, -0.000577521792721289, -0.000806106103027259 ,-0.0360972147978469);

		//Mat T = (Mat_<double>(3, 1) << 702.411972065748, -24.8106993171000, -3.30636052236160);//T translation vector
		//Mat R = (Mat_<double>(3, 3) << 0.998853234551588,	0.0430784560711605, -0.0208916837128037,
		//	-0.0397953762112985,	0.989637375324846,	0.137964464237539,
		//	0.0266184871476939, -0.136974858943804,	0.990216816742320);//rotation matrix

		//Mat cameraMatrixL = Mat::zeros(3, 3, CV_64F);

	// Raw storage for the calibration data; the cv::Mat headers below wrap
	// these stack arrays WITHOUT copying, so the Mats stay valid only while
	// main() runs.
	double cameraMatrixL_t[3][3];
	double distCoeffL_t[5][1];

	double cameraMatrixR_t[3][3];
	double distCoeffR_t[5][1];

	double T_t[3][1];// T: translation vector (right camera w.r.t. left)
	double R_t[3][3];// R: rotation matrix
	                 // NOTE(review): R_t is declared but never used; "R" is read
	                 // into R1_t below instead.

	double P1_t[3][3];
	double P2_t[3][3];
	double R1_t[3][3];
	double R2_t[3][3];
	//read configure txt
	// NOTE(review): each memcpy copies nums.size() elements -- if a config entry
	// holds more values than the destination array, this overruns the stack
	// buffer. Consider clamping to the destination size.
	readConfigFile("081901/readini_stereo_cal.txt", "cameraMatrixL", nums);
	memcpy(cameraMatrixL_t, &nums[0], nums.size() * sizeof(nums[0]));
	Mat cameraMatrixL = Mat(3, 3, CV_64F, cameraMatrixL_t);
	readConfigFile("081901/readini_stereo_cal.txt", "distCoeffL", nums);
	memcpy(distCoeffL_t, &nums[0], nums.size() * sizeof(nums[0]));
	Mat distCoeffL = Mat(5, 1, CV_64F, distCoeffL_t);
	readConfigFile("081901/readini_stereo_cal.txt", "cameraMatrixR", nums);
	memcpy(cameraMatrixR_t, &nums[0], nums.size() * sizeof(nums[0]));
	Mat cameraMatrixR = Mat(3, 3, CV_64F, cameraMatrixR_t);
	readConfigFile("081901/readini_stereo_cal.txt", "distCoeffR", nums);
	memcpy(distCoeffR_t, &nums[0], nums.size() * sizeof(nums[0]));
	Mat distCoeffR = Mat(5, 1, CV_64F, distCoeffR_t);
	readConfigFile("081901/readini_stereo_cal.txt", "T", nums);
	memcpy(T_t, &nums[0], nums.size() * sizeof(nums[0]));
	Mat T = Mat(3, 1, CV_64F, T_t);
	readConfigFile("081901/readini_stereo_cal.txt", "R", nums);
	memcpy(R1_t, &nums[0], nums.size() * sizeof(nums[0]));
	Mat R = Mat(3, 3, CV_64F, R1_t);

	//double LR[5][1];
	//for (int i = 0; i < 5; i++) {
	//	for (int j = 0; j < 1; j++) {
	//		LR[i][j] = (distCoeffL_t[i][j] + distCoeffR_t[i][j])/2;
	//		cout << LR[i][j]<<"\t";
	//	}
	//	cout << endl;
	//}

	// Load the image pair. CV_8UC1 (== 0) happens to equal IMREAD_GRAYSCALE, so
	// these load single-channel; the proper flag name would be IMREAD_GRAYSCALE.
	Mat Image1 =imread("081901/left_1.jpg", CV_8UC1);
	Mat Image2= imread("081901/3right_1.jpg", CV_8UC1);
	/*Mat Image1= imread("img_1.jpg", CV_8UC1);
	Mat Image2= imread("img_2.jpg", CV_8UC1);*/
	imshow("img1", Image1);
	imshow("img2", Image2);
	cv::Mat image_undistort1;
	cv::Mat image_undistort2;
	undistort(Image1, image_undistort1, cameraMatrixL, distCoeffL);
	undistort(Image2, image_undistort2, cameraMatrixR, distCoeffR);
	Mat img1, img2;
	// Histogram equalization to boost contrast before feature detection.
	equalizeHist(image_undistort1, img1);
	equalizeHist(image_undistort2, img2);
	cv::Ptr<cv::FeatureDetector> detector = ORB::create(500);						// ORB keypoint detector (up to 500 features)
	Ptr<FREAK> extractor = FREAK::create();   	// FREAK binary descriptor at the detected keypoints
	cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING,	// brute-force matcher on Hamming distance,
		true);																		// cross-check enabled

	vector<KeyPoint> keypoints1;	// keypoints found in each image
	vector<KeyPoint> keypoints2;
	Mat descriptors1;				// descriptors of those keypoints
	Mat descriptors2;

	detector->detect(img1, keypoints1);		// detect keypoints in both images
	detector->detect(img2, keypoints2);

	extractor->compute(img1, keypoints1, descriptors1);		// compute descriptors at the keypoint locations
	extractor->compute(img2, keypoints2, descriptors2);

	vector<DMatch> matches;
	matcher->match(descriptors1, descriptors2, matches);

	Mat imResultOri;
	drawMatches(img1, keypoints1, img2, keypoints2, matches, imResultOri,
		CV_RGB(0, 255, 0), CV_RGB(0, 255, 0));
	cout << "[Info] # of matches : " << matches.size() << endl;


	//Mat matHomo = (Mat_<double>(3, 3) << 0.999216713000942 ,-0.00183605348938677 ,-1.32134489888860e-06, -0.0148199281982147,	0.986148982269145, -2.69063150511836e-05,
	//	-86.5444047830319,	137.514722616882,	1.01123246606138);
	Mat matHomo;
	// Filter the raw matches down to homography inliers (3 px reprojection threshold).
	refineMatchesWithHomography(keypoints1, keypoints2, 3, matches, matHomo);
	cout << "[Info] Homography T : " << matHomo << endl;
	cout << "[Info] # of matches : " << matches.size() << endl;

	/*vector<Mat> R_H, T_H, h1;
	decomposeHomographyMat(matHomo, cameraMatrixL, R_H, T_H, h1);*/
	

	Mat imResult;
	drawMatches(img1, keypoints1, img2, keypoints2, matches, imResult,
		CV_RGB(0, 255, 0), CV_RGB(0, 255, 0));

	// Compute optical flow (pyramidal Lucas-Kanade) from img1 to img2.
	vector<uchar> vstatus;
	vector<float> verrs;
	vector<Point2f> points1;
	vector<Point2f> points2;
	vector<Point2f>  distortedPoints1;
	vector<Point2f>  distortedPoints2;
	KeyPointsToPoints(keypoints1, points1);
	KeyPointsToPoints(keypoints2, points2);

	//for (int i = 0; i < matches.size(); i++)
	//{
	//	points1.push_back(keypoints1[matches[i].queryIdx].pt);//queryIdx: index into the first image
	//	points2.push_back(keypoints2[matches[i].trainIdx].pt);//trainIdx: index into the second image
	//}
	// NOTE(review): this OVERWRITES points2 (the detected keypoint positions in
	// img2) with the flow-tracked positions of points1. From here on, points1
	// and points2 are index-aligned tracks, NOT match-aligned via `matches`.
	calcOpticalFlowPyrLK(img1, img2, points1, points2, vstatus, verrs);
	//undistortPoints(distortedPoints1, points1, cameraMatrixL, distCoeffL);
	//undistortPoints(distortedPoints2, points2, cameraMatrixR, distCoeffR);

	// Visualize the flow: a line from old to new position for every track that
	// succeeded with tracking error below 15.
	Mat imOFKL = img1.clone();
	for (int i = 0; i < vstatus.size(); i++) {
		if (vstatus[i] && verrs[i] < 15) {
			line(imOFKL, points1[i], points2[i], CV_RGB(255, 255, 255), 1, 8, 0);
			circle(imOFKL, points2[i], 3, CV_RGB(255, 255, 255), 1, 8, 0);
		}
	}

	imwrite("opt.jpg", imOFKL);
	imwrite("re1.jpg", imResultOri);
	imwrite("re2.jpg", imResult);

	imshow("Optical Flow", imOFKL);
	imshow("origin matches", imResultOri);
	imshow("refined matches", imResult);
	////3. Estimate the motion R, t between the two images






	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	Mat R_G, t;
	pose_estimation_2d2d(keypoints1, keypoints2, matches, R_G, t);
	Mat newimage;
	//newimage=image_undistort2*R_G;
	vector<Point3d> points;
	// NOTE(review): this mixes the calibration rotation R (from the config file)
	// with the translation t recovered by pose_estimation_2d2d. Verify this
	// pairing is intended -- R_G would be the rotation matching t.
	triangulation(keypoints1, keypoints2, matches, R, t, points);

	//4. Verify E = t^ * R (up to scale); t^ is the skew-symmetric matrix of t.
	// NOTE(review): `t.at<double>(1.0)` below uses a single double index --
	// almost certainly `(1, 0)` was meant. It happens to address the same
	// element for a 3x1 Mat, but should be fixed for clarity.
	Mat t_x = (Mat_<double>(3, 3) <<
		0, -t.at<double>(2, 0), t.at<double>(1, 0),
		t.at<double>(2, 0), 0, -t.at<double>(0, 0),
		-t.at<double>(1.0), t.at<double>(0, 0), 0);
	cout << "t^R=" << endl << t_x * R << endl;
	//5. Verify the epipolar constraint y2^T * t^ * R * y1 ~= 0 for each match.
	Mat K = cameraMatrixL;
	for (DMatch m : matches)
	{
		Point2d pt1 = pixel2cam(keypoints1[m.queryIdx].pt, K);
		Mat y1 = (Mat_<double>(3, 1) << pt1.x, pt1.y, 1);
		Point2d pt2 = pixel2cam(keypoints2[m.trainIdx].pt, K);
		Mat y2 = (Mat_<double>(3, 1) << pt2.x, pt2.y, 1);
		Mat d = y2.t() * t_x * R * y1;   // should be close to 0 if the geometry is consistent
		cout << "epipolar constraint = " << d << endl;
	}
	// Cross-check each triangulated point against both camera frames.
	for (int i = 0; i < matches.size(); i++)
	{
		Point2d pt1_cam = pixel2cam(keypoints1[matches[i].queryIdx].pt, K);
		Point2d pt1_cam_3d(
			points[i].x / points[i].z,
			points[i].y / points[i].z
		);

		cout << "point in the first camera frame: " << pt1_cam << endl;
		cout << "point projected from 3D " << pt1_cam_3d << ", d=" << points[i].z << endl;

		// second image
		Point2f pt2_cam = pixel2cam(keypoints2[matches[i].trainIdx].pt, K);
		Mat pt2_trans = R * (Mat_<double>(3, 1) << points[i].x, points[i].y, points[i].z) + t;
		pt2_trans /= pt2_trans.at<double>(2, 0);
		cout << "point in the second camera frame: " << pt2_cam << endl;
		cout << "point reprojected from second frame: " << pt2_trans.t() << endl;
		cout << endl;
	}
	
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

	//focal_length, principal_point, CV_RANSAC
	Mat cameraMatrix;
	//cameraMatrix = (cameraMatrixL + cameraMatrixR) / 2;
	// Essential matrix from the tracked point lists (RANSAC, prob 0.999,
	// 0.5 px inlier threshold).
	Mat E = cv::findEssentialMat(points1, points2, cameraMatrixL, CV_RANSAC,0.999,0.5);
	//Mat E = cv::findEssentialMat(points1, points2, 1., Point2d(0, 0), RANSAC, 0.999, 1);
	/*Mat  E = (Mat_<double>(3, 3) << -3.29084046882178, - 0.860764296618868,	70.5018695044452,
		19.2228892561082, - 24.9838494581201, - 152.776063777723,
		- 67.7703075754156,	153.740035138250, - 28.1304686880302);*/

	// Fundamental matrix (least-median-of-squares) and the epipolar lines it
	// induces in each image from the other image's points.
	Mat fundamental_matrix;
	fundamental_matrix = findFundamentalMat(points1, points2, FM_LMEDS);
	vector<Vec3f> linesl;
	computeCorrespondEpilines(points1, 1, fundamental_matrix, linesl);

	for (auto it = linesl.begin(); it != linesl.end(); it++)
	{
		line(img1, Point(0, -(*it)[2] / (*it)[1]), Point(img1.cols, -((*it)[2] + (*it)[0] * img1.cols) / (*it)[1]), Scalar(255, 255, 255));
	}
	imshow("第一幅图像的对极线", img1);

	vector<Vec3f> lines2;
	computeCorrespondEpilines(points2, 2, fundamental_matrix, lines2);
	for (auto it = lines2.begin(); it != lines2.end(); it++)
	{
		line(img2, Point(0, -(*it)[2] / (*it)[1]), Point(img2.cols, -((*it)[2] + (*it)[0] * img2.cols) / (*it)[1]), Scalar(255, 255, 255));
	}
	imshow("第二幅图像的对极线", img2);




	// Recover a pose from E directly, and also decompose E into its two
	// candidate rotations (R11, R12) and translation up to sign.
	Mat R_r, t_r, R11, R12, t_d, R_d;
	recoverPose(E, points1, points2, cameraMatrixR, R_r, t_r);
	decomposeEssentialMat(E, R11, R12, t_d);
	R_d = R12.clone();
	t_d = -t_d.clone();
	//invert(R_r, R_r);
	//t_r = -R_r * t_r;
	//cvMul(&T, &t_r, &T);
	//T_M=T.mul(t_r);

	
	

	// Stereo rectification from the file calibration.
	// NOTE(review): -T negates the calibrated baseline -- confirm this sign flip
	// is intentional (it mirrors the rectified geometry).
	stereoRectify(cameraMatrixL, distCoeffL,
		cameraMatrixR, distCoeffR,
		imageSize, R, -T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, imageSize, &validRoiL, &validRoiR);




	// Per-camera undistort+rectify lookup maps.
	Mat rmap[2][2];
	initUndistortRectifyMap(cameraMatrixL, distCoeffL, R1, P1, imageSize, CV_32FC1, rmap[0][0], rmap[0][1]);
	initUndistortRectifyMap(cameraMatrixR, distCoeffR, R2, P2, imageSize, CV_32FC1, rmap[1][0], rmap[1][1]);


	//read capture imageL and imageR
	
	remap(Image1, rectifyImageL, rmap[0][0], rmap[0][1], INTER_LINEAR);
	remap(Image2, rectifyImageR, rmap[1][0], rmap[1][1], INTER_LINEAR);
	//const std::string file_path1 = "C:/Users/leixuehui/Desktop/matlab/test1.raw";
	//std::ifstream fin1;
	//// open in binary mode
	//fin1.open(file_path1, std::ios::binary);
	//if (!fin1) {
	//	std::cerr << "open failed: " << file_path1 << std::endl;
	//}
	//// seek to the end of the stream
	//fin1.seekg(0, fin1.end);
	//// tellg reports the total byte count from start to the marker
	//int length1 = fin1.tellg();
	//// move the marker back to the start of the stream
	//fin1.seekg(0, fin1.beg);

	//// load buffer
	//char* buffer1 = new char[length1];

	//const std::string file_path = "C:/Users/leixuehui/Desktop/matlab/test11.raw";
	//std::ifstream fin;
	//// open in binary mode
	//fin.open(file_path, std::ios::binary);
	//if (!fin) {
	//	std::cerr << "open failed: " << file_path << std::endl;
	//}
	//// seek to the end of the stream
	//fin.seekg(0, fin.end);
	//// tellg reports the total byte count from start to the marker
	//int length = fin.tellg();
	//// move the marker back to the start of the stream
	//fin.seekg(0, fin.beg);

	//// load buffer
	//char* buffer = new char[length];
	//cv::Mat Image1(cv::Size(752, 480), CV_8UC1, buffer1);
	//cv::Mat Image2(cv::Size(752, 480), CV_8UC1, buffer);
	//
	//char *ImageL = new char[length1];
	//char *ImageR = new char[length1];
	//cv::Mat rectifyImageL(cv::Size(752, 480), CV_8UC1, Image1);
	//cv::Mat rectifyImageR(cv::Size(752, 480), CV_8UC1, Image2);


	// Compose a side-by-side canvas of the rectified pair, scaled so the longer
	// image side becomes 600 px.
	Mat canvas;
	double sf;
	int w, h;
	sf = 600. / MAX(imageSize.width, imageSize.height);
	w = cvRound(imageSize.width * sf);
	h = cvRound(imageSize.height * sf);
	canvas.create(h, w * 2, CV_8UC1);   // note: single channel

	// Draw the left image onto the canvas.
	Mat canvasPart = canvas(Rect(w * 0, 0, w, h));                                // left half of the canvas
	resize(rectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);     // scale the image into canvasPart
	Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),                // scaled valid region; NOTE(review): validROIL is never filled (see declaration)
		cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
	//rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);                      // outline the valid region
	cout << "Painted ImageL" << endl;

	// Draw the right image onto the canvas.
	canvasPart = canvas(Rect(w, 0, w, h));                                      // right half of the canvas
	resize(rectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
	Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
		cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
	//rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
	cout << "Painted ImageR" << endl;

	// Draw horizontal scan lines: after rectification, corresponding points
	// should lie on the same row in both halves.
	for (int i = 0; i < canvas.rows; i += 16)
		line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
	imshow("rectified", canvas);

	waitKey(100000);







//stereoRectify1(cameraMatrixL, distCoeffL,
//		cameraMatrixR, distCoeffR,
//		imageSize, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 0, imageSize, &validRoiL, &validRoiR);



}

bool refineMatchesWithHomography(
	const std::vector<cv::KeyPoint>& queryKeypoints,
	const std::vector<cv::KeyPoint>& trainKeypoints,
	float reprojectionThreshold, std::vector<cv::DMatch>& matches,
	cv::Mat& homography) {
	const int minNumberMatchesAllowed = 8;

	if (matches.size() < minNumberMatchesAllowed)
		return false;

	// Prepare data for cv::findHomography
	std::vector<cv::Point2f> srcPoints(matches.size());
	std::vector<cv::Point2f> dstPoints(matches.size());

	for (size_t i = 0; i < matches.size(); i++) {
		srcPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
		dstPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
	}

	// Find homography matrix and get inliers mask
	std::vector<unsigned char> inliersMask(srcPoints.size());
	homography = cv::findHomography(srcPoints, dstPoints, CV_FM_RANSAC,
		reprojectionThreshold, inliersMask);

	std::vector<cv::DMatch> inliers;
	for (size_t i = 0; i < inliersMask.size(); i++) {
		if (inliersMask[i])
			inliers.push_back(matches[i]);
	}

	matches.swap(inliers);
	return matches.size() > minNumberMatchesAllowed;
}

// Append the pixel coordinate (.pt) of every keypoint in `kpts` to `pts`.
// Existing contents of `pts` are preserved (the function appends).
// The by-value parameter is kept to match the forward declaration above;
// fixes vs. original: reserve up front, range-for instead of a signed index
// compared against the unsigned size(), and the redundant `return` dropped.
void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts) {
	pts.reserve(pts.size() + kpts.size());
	for (const KeyPoint& kp : kpts) {
		pts.push_back(kp.pt);
	}
}



// Estimate the relative pose (R, t) between two views from matched keypoints:
// computes and logs the homography H, fundamental matrix F, and essential
// matrix E, then recovers R and t from E via cv::recoverPose.
// R and t are output parameters; matches is read-only in practice.
void pose_estimation_2d2d(
	const std::vector<KeyPoint>& keypoints_1,
	const std::vector<KeyPoint>& keypoints_2,
	 std::vector< DMatch >& matches,
	Mat& R, Mat& t)
{
	float reprojectionThreshold = 3.0;   // NOTE(review): declared but never used
	vector<double>   nums;
	double cameraMatrixL_t1[3][3];
	// NOTE(review): reads the "cameraMatrixR" entry although the variable is
	// named cameraMatrixL1 -- confirm which camera's intrinsics are intended.
	// Also, memcpy copies nums.size() elements; a config entry with more than
	// 9 values would overrun the stack array.
	readConfigFile("081901/readini_stereo_cal.txt", "cameraMatrixR", nums);
	memcpy(cameraMatrixL_t1, &nums[0], nums.size() * sizeof(nums[0]));
	Mat cameraMatrixL1 = Mat(3, 3, CV_64F, cameraMatrixL_t1);   // wraps the stack array, no copy
	// camera intrinsic matrix K
	Mat K = cameraMatrixL1;

	//-- Align the matched pairs and convert them (via .pt) to plain Point2f pixel coordinates.
	vector<Point2f> points1;
	vector<Point2f> points2;
	for (int i = 0; i < (int)matches.size(); i++)
	{
		points1.push_back(keypoints_1[matches[i].queryIdx].pt);// queryIdx indexes the first image
		points2.push_back(keypoints_2[matches[i].trainIdx].pt);// trainIdx indexes the second image
	}
	//-- Compute the homography matrix (RANSAC, 3 px threshold) -- logged only.
	Mat homography_matrix;
	homography_matrix = findHomography(points1, points2, RANSAC, 3);
	cout << "homography_matrix is " << endl << homography_matrix << endl;
	//-- Compute the fundamental matrix F (RANSAC, 3 px, 0.99 confidence) -- logged only.
	Mat fundamental_matrix;
	fundamental_matrix = findFundamentalMat(points1, points2, FM_RANSAC,3,0.99);
	cout << "fundamental_matrix is " << endl << fundamental_matrix << endl;
//findEssentialMat
	//-- Compute the essential matrix E.
	Mat essential_matrix;
	essential_matrix = findEssentialMat(points1, points2,K,CV_RANSAC);
	cout << "essential_matrix is " << endl << essential_matrix << endl;
	

	//-- Recover the rotation and translation from the essential matrix E
	//   (cheirality check selects the one valid (R, t) decomposition).
	recoverPose(essential_matrix, points1, points2,K, R, t);
	cout << "R is " << endl << R << endl;
	cout << "t is " << endl << t << endl;
}

// Triangulate the matched keypoints into 3D points expressed in the first
// camera's frame, given the pose (R, t) of the second camera relative to the
// first. Results are APPENDED to `points` (one Point3d per match, in match
// order). Depth scale is only defined up to the scale of t.
void triangulation(
	const vector< KeyPoint >& keypoint_1,
	const vector< KeyPoint >& keypoint_2,
	const std::vector< DMatch >& matches,
	const Mat& R, const Mat& t,
	vector< Point3d >& points)
{
	// Projection matrix of the first camera: [I | 0] (no K -- the input points
	// are converted to normalized camera coordinates below).
	Mat T1 = (Mat_<float>(3, 4) <<
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0);
	// Projection matrix of the second camera: [R | t], downcast from double to
	// float because triangulatePoints works in single precision here.
	Mat T2 = (Mat_<float>(3, 4) <<
		R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
		R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
		R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
		);
	vector<double>   nums;
	double cameraMatrixL_t1[3][3];
	// NOTE(review): reads the "cameraMatrixR" entry into a variable named
	// cameraMatrixL1 -- confirm which camera's intrinsics are intended. memcpy
	// copies nums.size() elements and would overrun the 3x3 array for a longer
	// config entry.
	readConfigFile("081901/readini_stereo_cal.txt", "cameraMatrixR", nums);
	memcpy(cameraMatrixL_t1, &nums[0], nums.size() * sizeof(nums[0]));
	Mat cameraMatrixL1 = Mat(3, 3, CV_64F, cameraMatrixL_t1);   // wraps the stack array, no copy
	// camera intrinsic matrix K
	Mat K = cameraMatrixL1;
	vector<Point2f> pts_1, pts_2;
	for (DMatch m : matches)
	{
		// Convert pixel coordinates to normalized camera coordinates so they
		// are consistent with the K-free projection matrices T1/T2 above.
		pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt, K));
		pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt, K));
	}

	Mat pts_4d;   // 4xN homogeneous coordinates, one column per match
	cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);

	// Convert from homogeneous to inhomogeneous coordinates.
	for (int i = 0; i < pts_4d.cols; i++)
	{
		Mat x = pts_4d.col(i);
		x /= x.at<float>(3, 0); // divide by w: homogeneous -> inhomogeneous (NOT a projection onto the normalized image plane)
		Point3d p(
			x.at<float>(0, 0),
			x.at<float>(1, 0),
			x.at<float>(2, 0)
		);
		points.push_back(p);
	}
}

