#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\features2d\features2d.hpp>
#include<opencv2\opencv.hpp>
#include <vector>
#include <opencv2/core.hpp>    
#include <opencv2/videoio.hpp> 
#include <iostream> 
#include <string>   
#include <cstdlib>
#include <ctime>
#define random(a,b) (rand()%(b-a)+a)
#include <string.h>
using namespace std;
using namespace cv;

// Path of the input video to stabilize.
String source = "D:\\a6500\\2020\\MAH07095.avi";
VideoCapture inputVideo(source);//capture used by fetureByORB() and doStab()
VideoCapture inputVideo2(source);//capture used by trackerVideo()
VideoCapture inputVideo3(source);//extra capture; only referenced in commented-out visualization code

								 //Aliases documenting which routine a capture belongs to.
typedef VideoCapture FeatureOrbCapture_;
typedef VideoCapture FeatureRansacCapture_;

typedef VideoCapture ModelMatchCapture_;


// Window names / trackbar state for the template-matching demo UI.
const char* image_window = "Source Image";
const char* result_window = "Result window";
int match_method;
int max_Trackbar = 5;
void MatchingMethod(int, void*);

// Globals used by template matching.
// Current video frame.
Mat img;
// Template patch.
Mat templ;
// Matching result map.
Mat result;

Mat temp2;

// Histogram matching always compares against the first-frame target patch.
Mat firstTarget;

// Per-frame template-matching result patches (filled by ModelMatch — defined elsewhere).
vector<Mat> tempPoint;
// Per-frame top-left corner of the matched template box.
vector<Point2f> modelPointMovexy;

// Target-box geometry used inside MatchingMethod(int, void*).
int x2;
int y2;
int width2;
int height2;

//
// Keypoints and matches kept after RANSAC removes mismatched points
// (RR_KP1/RR_KP2/RR_matches are filled by myRansac).
vector <KeyPoint> RR_KP1, RR_KP2;
vector <DMatch> RR_matches;

// Current top-left corner of the ORB tracking box (updated each frame by ORBMatch).
float orbX;
float orbY;


// Zoom level of the local region.
int level = 0;

// Forward declarations.
void imshowMany(std::string& _winName, vector<Mat>& ployImages);
vector<Point2f> fetureByORB();
Point2f totalFit(vector<Point2f>& point1, vector<Point2f>& point2, int n);
int doStab(vector<Point2f> xyMove);
void imshowMany(const std::string& _winName, const vector<Mat>& ployImages);
void quickSort(int left, int right, vector<DMatch>& arr);
vector <DMatch> myRansac(Mat img, vector<KeyPoint> keypoints01, vector<KeyPoint> keypoints02, vector<DMatch> matches);
void imshowMove(Mat img, vector<KeyPoint>RR_KP1, vector<KeyPoint>RR_KP2, vector <DMatch>  RR_matches);
double comparByHistogram(Mat src_base, Mat src_test1);
vector<Point2f> trackerVideo(int x, int y, int length, int width);
Rect trackingByHistogram(Mat src, Mat next, int x, int y, int length, int width);
int ModelMatch(int x, int y, int width, int height);
vector<Point2f> fetureByORB(vector<Mat>& tempPoint);
vector<Point2f> ModelMatchMovexy(vector<Point2f>& modelPointMovexy);
vector<Point2f> ORBMatch(float x, float y, float width, float height);

// One candidate position inside the histogram search window together with
// its match score (lower is better for the distance-based compare method
// used by trackingByHistogram).
class Matchinfo {

public:
	// Horizontal offset (pixels) inside the search range.
	int i = 0;
	// Vertical offset (pixels) inside the search range.
	int j = 0;
	// Histogram comparison score for the patch at (i, j).
	// Members are brace-initialized so a default-constructed Matchinfo is
	// never read as indeterminate values.
	double matchNum = 0.0;

	// Record a candidate's offsets and its match score.
	void	setMatchinfo(int i, int j, double Num) {
		this->i = i;
		this->j = j;
		this->matchNum = Num;
	}

};



int main(int argc, char* argv[])
{
	// Initial target box (would normally come from the UI).
	int x = 830;
	int y = 420;
	int width = 180;
	int height = 400;
	int level = 0;	// local zoom level; shadows the global of the same name

	// Shrink the box symmetrically by `level` on each side for the supported
	// zoom steps (20/40/60), but only while the box stays large enough
	// (>= 3*level in each dimension, i.e. 60/120/180 respectively).
	// With level == 0 this is currently a no-op.
	if ((level == 20 || level == 40 || level == 60)
		&& width >= 3 * level && height >= 3 * level) {
		x += level;
		y += level;
		width -= level;
		height -= level;
	}

	// Motion-target based stabilization: the UI would choose which matcher
	// to run; anything other than 1/2/3 falls back to region-limited ORB.
	int switch_on = 666;

	vector<Point2f> xyMove;
	vector<Mat> wrap;
	switch (switch_on)
	{
	case 1:
		// Track the selected moving target via histogram matching, then
		// compensate with the resulting dx/dy series.
		// Sample boxes: 800,400,250,500 | 1200,450,100,100 | 35,590,100,90 |
		// 990,520,120,80 (car)
		xyMove = trackerVideo(860, 430, 160, 400);	// box from UI
		doStab(xyMove);
		break;
	case 2:
		// Template-match the selected target and compensate with dx/dy.
		ModelMatch(960, 480, 180, 180);	// box from UI
		xyMove = ModelMatchMovexy(modelPointMovexy);
		doStab(xyMove);
		break;
	case 3:
		// Template-match the target, then run ORB on the matched patches
		// to derive the dx/dy compensation.
		ModelMatch(920, 440, 50, 120);	// box from UI
		xyMove = fetureByORB(tempPoint);
		doStab(xyMove);
		break;
	default:
		// ORB matching restricted to the selected region.
		xyMove = ORBMatch(860, 430, 160, 400);
		doStab(xyMove);
		break;
	}

	return 0;
}


//Track the target box through the video by histogram matching.
//(x, y, width, height): target rectangle in the first frame.
//Returns one (dx, dy) per processed frame pair: the offset of the matched
//box relative to its predicted position; empty on failure.
vector<Point2f> trackerVideo(int x, int y, int width, int height)
{
	Mat img01;
	Mat img02;

	//Per-frame offsets of the matched box (the return value).
	vector<Point2f> xyMove;

	//Absolute top-left position of the tracked box in every frame.
	vector<Point2f> trackLocation;

	Mat current;

	if (!inputVideo2.isOpened()) {
		cout << " trackerVideo: trackLocation为空" << endl;
		system("pause");
		return xyMove;
	}
	//Video fps.
	double rate = inputVideo2.get(CV_CAP_PROP_FPS);
	//Frame size.
	//BUGFIX: read the dimensions from inputVideo2 — the capture actually
	//used below — not inputVideo, so the clamping bounds match the frames.
	int frameWidth = (int)inputVideo2.get(CV_CAP_PROP_FRAME_WIDTH);
	int frameHeight = (int)inputVideo2.get(CV_CAP_PROP_FRAME_HEIGHT);
	//Writer for the annotated tracking video.
	cv::VideoWriter w_cap("track_video.avi", CV_FOURCC('M', 'J', 'P', 'G'), rate, cv::Size(1920, 1080));

	int frameCount = (int)inputVideo2.get(CAP_PROP_FRAME_COUNT);
	cout << "总帧数：" << frameCount << endl;
	VideoWriter outputVideo;

	int endTime = frameCount;//number of frames to process
	int start = 1;

	//Accumulated drift of the box relative to its initial (x, y).
	int detailX = 0;
	int detailY = 0;


	while (1) {

		if (start == endTime) break;
		if (start == 1) {
			//First iteration: read two consecutive frames.
			//Stop cleanly if the video runs out of frames.
			if (!inputVideo2.read(img01) || !inputVideo2.read(img02)) break;
			firstTarget = img01(Rect(x, y, width, height));
			imshow("第一帧目标", firstTarget);
			waitKey(0);
		}
		else
		{
			//Later iterations: read one frame and slide the pair forward.
			if (!inputVideo2.read(img01)) break;
			Mat temp = img01;
			img01 = img02;
			img02 = temp;
		}

		//Predicted box position = initial position + accumulated drift.
		int realx = x + detailX;
		int realy = y + detailY;
		//Clamp the predicted box so it stays fully inside the frame.
		if (realx < 0) {
			realx = 0;
		}
		if (realy < 0) {
			realy = 0;
		}
		if (realx > frameWidth - width) {
			realx = frameWidth - width;
		}
		//BUGFIX: clamp against frameHeight - height (the bottom edge of the
		//box), mirroring the horizontal clamp; the old bound of frameHeight
		//allowed an out-of-range Rect.
		if (realy > frameHeight - height) {
			realy = frameHeight - height;
		}
		//Match the target in the next frame.
		Rect target = trackingByHistogram(img01, img02, realx, realy, width, height);
		//Remember the absolute position of this match.
		trackLocation.push_back(Point2f(target.x, target.y));
		int xMove = target.x - realx;
		int yMove = target.y - realy;
		xyMove.push_back(Point2f(xMove, yMove));
		cout << "匹配目标位置" << target.x << "," << target.y << endl;
		Mat dispaly = img02;
		rectangle(dispaly, Point(target.x, target.y), Point(target.x + target.width, target.y + target.height), Scalar(255, 255, 0), 3, 8, 0);
		//BUGFIX: the drift must be cumulative (relative to the initial box),
		//not the last single-step offset — otherwise the prediction realx =
		//x + detailX lags behind a continuously moving target.
		detailX = target.x - x;
		detailY = target.y - y;

		w_cap.write(dispaly);
		cv::namedWindow("跟踪", CV_WINDOW_AUTOSIZE);
		imshow("跟踪", dispaly);
		waitKey(1);
		start++;

	}

	return xyMove;

}

//Find, in `nextimg`, the region best matching the first-frame target by
//histogram comparison. (x, y, width, height): predicted box position.
//Returns the best-matching rectangle; falls back to the predicted box if
//the clipped search window yields no candidates.
//Test data: 800, 400, 250, 500.
Rect trackingByHistogram(Mat src, Mat nextimg, int x, int y, int width, int height) {

	//Candidate positions and their match scores.
	vector<Matchinfo> match;
	//Target patch cut from the current frame at the predicted position.
	Mat target;
	Rect rect(x, y, width, height);
	target = src(rect);
	cv::namedWindow("目标", CV_WINDOW_AUTOSIZE);
	imshow("目标", target);
	waitKey(1);


	//Search a neighbourhood around the predicted box in the next frame.
	//BUGFIX: to search 50 px on *both* sides of the box the window must be
	//100 px wider/taller; the old 50+width/50+height window only covered
	//offsets in [-50, 0] and could never follow motion to the right/down.
	Rect range(x - 50, y - 50, 100 + width, 100 + height);
	range &= Rect(0, 0, nextimg.cols, nextimg.rows);//clip to the image to avoid out-of-bounds patches


	//Scan the search window in 4-px steps.
	for (int i = 0;i < range.width - width;i += 4) {
		for (int j = 0;j < range.height - height;j += 4) {

			//Candidate patch position.
			int realx = range.x + i;
			int realy = range.y + j;
			Rect rect(realx, realy, width, height);
			Mat temp = nextimg(rect);  //candidate patch

			//Histogram distance to the first-frame target (lower is better
			//for the distance-based compare method in comparByHistogram).
			double matchNum = comparByHistogram(firstTarget, temp);
			//Keep valid scores only (comparByHistogram returns -1 on error).
			if (matchNum >= 0) {
				Matchinfo m;
				m.setMatchinfo(i, j, matchNum);
				match.push_back(m);
			}
		}
	}

	//BUGFIX: if the clipped search window is too small to hold the box, no
	//candidate was evaluated; return the predicted box instead of reading
	//match[0] out of bounds.
	if (match.empty()) {
		cout << "trackingByHistogram: no candidates in search window" << endl;
		return rect;
	}

	//Keep the candidate with the smallest histogram distance.
	Matchinfo bestMatch = match[0];
	for (size_t i = 0;i < match.size();i++) {
		bestMatch = (match[i].matchNum < bestMatch.matchNum ? match[i] : bestMatch);
	}

	Rect best(range.x + bestMatch.i, range.y + bestMatch.j, width, height);
	cout << "最佳匹配度和i,j： " << bestMatch.matchNum << " " << bestMatch.i << "," << bestMatch.j << endl;

	return best;
}

//Compare two image patches by their 2-D hue/saturation histograms.
//Returns the histogram distance for compare method 3 (Bhattacharyya:
//0 = identical, larger = more different), or -1 if either input is empty.
double comparByHistogram(Mat src_base, Mat src_test1)
{
	Mat  hsv_base;
	Mat  hsv_test1;


	if (src_base.empty() || src_test1.empty())
	{
		cout << "读取帧失败，无法进行直方图比较！" << endl;
		return -1;
	}
	//HSV is more robust to lighting changes than BGR for matching.
	cvtColor(src_base, hsv_base, COLOR_BGR2HSV);
	cvtColor(src_test1, hsv_test1, COLOR_BGR2HSV);


	int h_bins = 50; int s_bins = 60;
	int histSize[] = { h_bins, s_bins };
	//Hue ranges 0..179, saturation 0..255.
	float h_ranges[] = { 0, 180 };
	float s_ranges[] = { 0, 256 };
	//Value range of each histogram dimension.
	const float* ranges[] = { h_ranges, s_ranges };

	//Use the H (0) and S (1) channels for the 2-D histogram.
	//(Trimmed from {0,1,2}: only `dims` = 2 entries are read by calcHist.)
	int channels[] = { 0, 1 };
	MatND hist_base;
	MatND hist_test1;

	//BUGFIX: compute the histograms on the HSV-converted images. The old
	//code passed the raw BGR inputs, leaving hsv_base/hsv_test1 unused and
	//making the hue range above meaningless.
	calcHist(&hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false);
	calcHist(&hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false);

	//Normalize so patch size does not bias the comparison.
	normalize(hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat());
	normalize(hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat());

	int compare_method = 3;//0..3: correlation, chi-square, intersection, Bhattacharyya
						   //Compare the target histogram with the candidate's.
	double base_test1 = compareHist(hist_base, hist_test1, compare_method);

	return base_test1;
}


//ORB feature matching restricted to a (moving) target box.
//(x, y, width, height): initial target rectangle; the tracked box corner
//(orbX, orbY) is advanced every frame by the estimated offset.
//Returns one fitted (dx, dy) per processed frame pair; empty on failure.
vector<Point2f> ORBMatch(float x, float y, float width, float height)
{
	orbX = x;
	orbY = y;

	FeatureOrbCapture_ FeatureOrbCapture(source);
	//Frame buffers.
	Mat img01;
	Mat img02;
	//Detected keypoints of the two frames.
	vector<KeyPoint> keypoints01;
	vector<KeyPoint> keypoints02;
	//ORB descriptors.
	Mat  descriptors1;
	Mat  descriptors2;
	//Raw matches.
	vector<DMatch> matches;
	//Matched point coordinates, passed to the offset estimator.
	vector<Point2f> point1;
	vector<Point2f> point2;
	//Scratch per-pair offsets.
	vector<Point2f> xyMove;
	//One fitted (dx, dy) per frame pair — the return value.
	vector<Point2f> optimal_xyMove;
	if (!FeatureOrbCapture.isOpened()) {
		cout << "输入视频无法打开: " << source << endl;
		return xyMove;
	}
	int frameCount = FeatureOrbCapture.get(CAP_PROP_FRAME_COUNT);
	//Video fps, used for the output writer.
	double rate = inputVideo.get(CV_CAP_PROP_FPS);
	cout << "总帧数：" << frameCount << endl;
	int endTime = 60;//number of frames to process
	cv::VideoWriter w_cap("ORBMatchvideo.avi", CV_FOURCC('M', 'J', 'P', 'G'), rate, cv::Size(1920, 1080));
	int start = 1;
	while (1) {
		if (start == endTime) break;
		if (start == 1) {
			//First iteration: read two consecutive frames.
			//Stop cleanly if the video runs out of frames.
			if (!FeatureOrbCapture.read(img01) || !FeatureOrbCapture.read(img02)) break;
			imshow("第一帧框选", img01(Rect(x, y, width, height)));
			waitKey(0);
		}
		else {
			//Later iterations: read one frame and slide the pair forward.
			if (!FeatureOrbCapture.read(img01)) break;
			Mat temp = img01;
			img01 = img02;
			img02 = temp;
		}
		//ORB detector, up to 5000 keypoints.
		Ptr<ORB> orb = ORB::create(5000);
		//Detect keypoints.
		orb->detect(img01, keypoints01);
		orb->detect(img02, keypoints02);
		//Compute binary descriptors.
		orb->compute(img01, keypoints01, descriptors1);
		orb->compute(img02, keypoints02, descriptors2);
		BFMatcher matcher(NORM_HAMMING);//Hamming distance for binary ORB descriptors
		matcher.match(descriptors1, descriptors2, matches);

		//Keep matches whose source keypoint lies inside the tracked box and
		//whose descriptor distance is small.
		vector<DMatch> filterMatches;

		for (size_t i = 0; i < matches.size(); i++) {
			if (keypoints01[matches[i].queryIdx].pt.x <= orbX || keypoints01[matches[i].queryIdx].pt.y <= orbY)
				continue;
			//BUGFIX: compare against the tracked box bottom (orbY + height),
			//not the initial y + height — orbY moves with the target.
			if (keypoints01[matches[i].queryIdx].pt.x >= (orbX + width) || keypoints01[matches[i].queryIdx].pt.y >= (orbY + height))
				continue;

			if (matches[i].distance < 30)
				filterMatches.push_back(matches[i]);
		}

		//No usable matches: record zero compensation for this pair.
		if (filterMatches.size() == 0) {
			Mat img_display;
			img02.copyTo(img_display);
			rectangle(img_display, Point2f(x, y), Point2f(x + width, y + height), Scalar(0, 255, 0), 2, 8, 0);
			//BUGFIX: `start` was incremented twice on this path (once inside
			//the cout, once after), silently skipping a frame index.
			cout << "fetureByORB第" << start << "无匹配点" << endl;
			optimal_xyMove.push_back(Point2f(0, 0));
			start++;
			keypoints01.clear();
			keypoints02.clear();
			matches.clear();
			xyMove.clear();
			point1.clear();//reset previous-frame coordinates
			point2.clear();//reset next-frame coordinates for the next pair
			continue;
		}

		cout << "前帧特征点数：" << keypoints01.size() << endl;
		cout << "后帧特征点数：" << keypoints02.size() << endl;
		cout << "匹配个数" << filterMatches.size() << endl;
		//Collect the matched coordinates of both frames.
		for (size_t i = 0; i < filterMatches.size(); i++) {
			point1.push_back(Point2f(keypoints01[filterMatches[i].queryIdx].pt.x, keypoints01[filterMatches[i].queryIdx].pt.y));
			point2.push_back(Point2f(keypoints02[filterMatches[i].trainIdx].pt.x, keypoints02[filterMatches[i].trainIdx].pt.y));
		}
		//Fit a single (dx, dy) from all matched pairs.
		Point2f ORBOffset = totalFit(point1, point2, filterMatches.size());
		optimal_xyMove.push_back(ORBOffset);
		Mat img_display;
		img02.copyTo(img_display);
		//Advance the tracked box along the estimated motion.
		orbX += ORBOffset.x;
		orbY += ORBOffset.y;
		rectangle(img_display, Point2f(orbX, orbY), Point2f(orbX + width, orbY + height), Scalar(0, 255, 0), 2, 8, 0);
		w_cap.write(img_display);


		keypoints01.clear();
		keypoints02.clear();
		matches.clear();
		xyMove.clear();
		point1.clear();//reset previous-frame coordinates
		point2.clear();//reset next-frame coordinates for the next pair

		cout << "fetureByORB计算了" << start << "次" << endl;
		start++;
	}
	return optimal_xyMove;
}


//Apply the per-frame (dx, dy) compensation: each frame is pasted into a
//double-sized canvas at a position shifted opposite to the measured motion,
//and the canvases are written to "stab_video2.avi".
//Returns 0 on success, non-zero on error.
int doStab(vector<Point2f> xyMove)
{
	//Nothing to compensate.
	if (xyMove.size() == 0) {
		cout << " doStab参数为空" << endl;
		system("pause");
		return -1;
	}


	int xMove;
	int yMove;


	Mat current;
	if (!inputVideo.isOpened()) {
		cout << "fail to load video";
		cout << " doStab中inputvideo为空" << endl;
		system("pause");
		return 1;
	}
	//Video fps.
	double rate = inputVideo.get(CV_CAP_PROP_FPS);
	//Source frame size.
	int width = inputVideo.get(CV_CAP_PROP_FRAME_WIDTH);
	int height = inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT);
	//Output canvas is twice the frame size so any shifted frame fits.
	cv::VideoWriter w_cap("stab_video2.avi", CV_FOURCC('M', 'J', 'P', 'G'), rate, cv::Size(width * 2, height * 2));


	Mat imgROI;

	//Frame 0 gets no compensation; frame i (i >= 1) is shifted by xyMove[i-1].
	for (int i = 0;i <= (int)xyMove.size();i++) {

		//BUGFIX: stop cleanly when the video runs out of frames instead of
		//resizing an empty Mat (the old unchecked read crashed at the end).
		if (!inputVideo.read(current)) break;
		Mat dispImg;

		//Skip the first frame: no offset is defined for it.
		if (i == 0) {
			continue;
		}


		dispImg.create(Size(2 * width, 2 * height), CV_8UC3);//canvas, sized w*2 x h*2


		xMove = xyMove[i - 1].x;
		yMove = xyMove[i - 1].y;
		cout << "xMove " << xMove << "yMove " << yMove << endl;
		//Clamp the ROI so the shifted frame stays inside the canvas.
		int realx = width / 2 - xMove;
		int realy = height / 2 - yMove;
		if (realx<0) {
			realx = 0;
			cout << "补偿到边界了" << endl;
		}
		if (realx > width) {
			realx = width;
			cout << "补偿到边界了" << endl;
		}
		if (realy<0) {
			realy = 0;
			cout << "补偿到边界了" << endl;
		}
		if (realy > height) {
			realy = height;
			cout << "补偿到边界了" << endl;
		}

		Rect roi(realx, realy, width, height);


		imgROI = dispImg(roi);//ROI view into the canvas

							  //resize() writes the frame in place because the
							  //destination ROI already has the right size/type.
		resize(current, imgROI, cv::Size(width, height));

		w_cap.write(dispImg);

		cout << "doStab补偿了" << i << "次" << endl;
	}
	return 0;
}

//ORB feature matching on whole consecutive frames, with RANSAC outlier
//removal; returns one fitted (dx, dy) per frame pair.
vector<Point2f> fetureByORB()
{

	//Frame buffers.
	Mat img01;
	Mat img02;
	//Detected keypoints of the two frames.
	vector<KeyPoint> keypoints01;
	vector<KeyPoint> keypoints02;

	//ORB descriptors.
	Mat  descriptors1;
	Mat  descriptors2;

	//Raw matches.
	vector<DMatch> matches;
	//Matches surviving RANSAC filtering.
	vector<DMatch> finalMatches;

	//Matched coordinates, fed to the offset estimator.
	vector<Point2f> point1;
	vector<Point2f> point2;

	//One (dx, dy) per frame pair — the return value.
	vector<Point2f> xyMove;

	if (!inputVideo.isOpened()) {

		cout << "输入视频无法打开: " << source << endl;
		return xyMove;
	}

	int frameCount = inputVideo.get(CAP_PROP_FRAME_COUNT);
	cout << "总帧数：" << frameCount << endl;
	VideoWriter outputVideo;

	int endTime = 50;//number of frames to process
	int start = 1;

	while (1) {

		if (start == endTime) break;
		if (start == 1) {
			//First iteration: read two consecutive frames.
			//Stop cleanly if the video runs out of frames.
			if (!inputVideo.read(img01) || !inputVideo.read(img02)) break;
		}
		else {
			//Later iterations: read one frame and slide the pair forward.
			if (!inputVideo.read(img01)) break;
			Mat temp = img01;
			img01 = img02;
			img02 = temp;
		}

		//ORB detector, up to 5000 keypoints.
		Ptr<ORB> orb = ORB::create(5000);

		//Detect keypoints.
		orb->detect(img01, keypoints01);
		orb->detect(img02, keypoints02);
		//Compute binary descriptors.
		orb->compute(img01, keypoints01, descriptors1);
		orb->compute(img02, keypoints02, descriptors2);

		//Brute-force Hamming matching for binary ORB descriptors.
		BFMatcher matcher(NORM_HAMMING);
		matcher.match(descriptors1, descriptors2, matches);

		//Remove mismatches with RANSAC. NOTE(review): myRansac fills the
		//globals RR_KP1/RR_KP2, and the returned matches appear to index
		//into those re-packed keypoint vectors (myRansac is only partially
		//visible here — confirm against its full definition).
		finalMatches = myRansac(img01, keypoints01, keypoints02, matches);



		cout << "前帧特征点数：" << keypoints01.size() << endl;
		cout << "后帧特征点数：" << keypoints02.size() << endl;

		cout << "匹配个数" << finalMatches.size() << endl;


		for (size_t i = 0; i < finalMatches.size(); i++) {

			cout << "前匹配点id:" << finalMatches[i].queryIdx <<
				" 坐标： （" << RR_KP1[finalMatches[i].queryIdx].pt.x << "," << RR_KP1[finalMatches[i].queryIdx].pt.y << ")" << endl <<
				"后匹配点id:" << finalMatches[i].trainIdx <<
				" 坐标： （" << RR_KP2[finalMatches[i].trainIdx].pt.x << "," << RR_KP2[finalMatches[i].trainIdx].pt.y << ")" << endl <<
				"汉明距离： " << finalMatches[i].distance
				<< endl << endl;

			//BUGFIX: point1 must take the *previous*-frame keypoints
			//(RR_KP1, via queryIdx) — the old code read RR_KP2 here, so the
			//offset was computed between two points of the same frame.
			point1.push_back(Point2f(RR_KP1[finalMatches[i].queryIdx].pt.x, RR_KP1[finalMatches[i].queryIdx].pt.y));
			point2.push_back(Point2f(RR_KP2[finalMatches[i].trainIdx].pt.x, RR_KP2[finalMatches[i].trainIdx].pt.y));

		}

		//Fit one (dx, dy) from all surviving matches.
		xyMove.push_back(totalFit(point1, point2, finalMatches.size()));

		point1.clear();//reset previous-frame coordinates
		point2.clear();//reset next-frame coordinates for the next pair
		keypoints01.clear();
		keypoints02.clear();
		matches.clear();
		finalMatches.clear();

		cout << "fetureByORB计算了" << start << "次" << endl;
		start++;

	}


	return xyMove;
}

//Run ORB matching on the per-frame target patches produced by ModelMatch()
//and return one fitted (dx, dy) per consecutive patch pair.
vector<Point2f> fetureByORB(vector<Mat>& tempPoint) {


	//Patch buffers.
	Mat img01;
	Mat img02;
	//Detected keypoints of the two patches.
	vector<KeyPoint> keypoints01;
	vector<KeyPoint> keypoints02;

	//ORB descriptors.
	Mat  descriptors1;
	Mat  descriptors2;

	//Raw matches.
	vector<DMatch> matches;

	//Matched coordinates, fed to the offset estimator.
	vector<Point2f> point1;
	vector<Point2f> point2;

	//Scratch per-pair offsets.
	vector<Point2f> xyMove;

	//One fitted (dx, dy) per patch pair — the return value.
	vector<Point2f> optimal_xyMove;

	int endTime = tempPoint.size();//number of patches to process
	int start = 1, Framelocation = 0;

	while (1) {

		if (start == endTime) break;


		if (start == 1) {
			//First iteration: take the first two patches.
			img01 = tempPoint[Framelocation];
			Framelocation++;
			img02 = tempPoint[Framelocation];
		}

		else {
			//Later iterations: advance to the next patch and slide the pair.
			Framelocation++;
			img01 = tempPoint[Framelocation];
			Mat temp = img01;
			img01 = img02;
			img02 = temp;
		}


		//ORB detector, up to 1000 keypoints (patches are small).
		Ptr<ORB> orb = ORB::create(1000);
		//Detect keypoints.
		orb->detect(img01, keypoints01);
		orb->detect(img02, keypoints02);
		//Compute binary descriptors.
		orb->compute(img01, keypoints01, descriptors1);
		orb->compute(img02, keypoints02, descriptors2);


		//Brute-force Hamming matching for binary ORB descriptors.
		BFMatcher matcher(NORM_HAMMING);
		matcher.match(descriptors1, descriptors2, matches);

		//Keep only close matches (Hamming distance < 30).
		vector<DMatch> filterMatches;
		for (size_t i = 0; i < matches.size(); i++) {
			if (matches[i].distance < 30)
				filterMatches.push_back(matches[i]);

		}


		cout << "前帧特征点数：" << keypoints01.size() << endl;
		cout << "后帧特征点数：" << keypoints02.size() << endl;
		cout << "匹配个数" << filterMatches.size() << endl;


		//No usable matches: record zero compensation for this pair.
		if (filterMatches.size() == 0) {

			optimal_xyMove.push_back(Point2f(0, 0));
			keypoints01.clear();
			keypoints02.clear();

			matches.clear();
			xyMove.clear();

			point1.clear();//reset previous-frame coordinates
			point2.clear();//reset next-frame coordinates for the next pair
			start++;
			continue;

		}


		//Collect the matched coordinates of both patches.
		for (size_t i = 0; i < filterMatches.size(); i++) {

			cout << "前匹配点id:" << filterMatches[i].queryIdx <<
				" 坐标： （" << keypoints01[filterMatches[i].queryIdx].pt.x << "," << keypoints01[filterMatches[i].queryIdx].pt.y << ")" << endl;
			cout << "后匹配点id:" << filterMatches[i].trainIdx << " 坐标： （" << keypoints02[filterMatches[i].trainIdx].pt.x
				<< "," << keypoints02[filterMatches[i].trainIdx].pt.y << ")" << endl;
			cout << "汉明码距离： " << filterMatches[i].distance << endl << endl;

			point1.push_back(Point2f(keypoints01[filterMatches[i].queryIdx].pt.x, keypoints01[filterMatches[i].queryIdx].pt.y));
			point2.push_back(Point2f(keypoints02[filterMatches[i].trainIdx].pt.x, keypoints02[filterMatches[i].trainIdx].pt.y));

		}

		//Fit one (dx, dy) from the filtered matches.
		//CONSISTENCY FIX: pass filterMatches.size() — the number of point
		//pairs actually collected — instead of matches.size() (totalFit
		//clamps internally, but the old argument was misleading).
		optimal_xyMove.push_back(totalFit(point1, point2, filterMatches.size()));

		//Visualize the per-match motion vectors on the later patch.
		imshowMove(img02, keypoints01, keypoints02, filterMatches);
		waitKey(0);

		keypoints01.clear();
		keypoints02.clear();

		matches.clear();
		xyMove.clear();

		point1.clear();//reset previous-frame coordinates
		point2.clear();//reset next-frame coordinates for the next pair

		cout << "fetureByORB计算了" << start << "次" << endl;
		start++;

	}


	return optimal_xyMove;
}



//Estimate one (dx, dy) offset by averaging the displacement of every
//matched point pair.
//point1: previous-frame coordinates; point2: next-frame coordinates;
//n: number of pairs to use (clamped to the shorter vector).
//Returns (0, 0) when there are no pairs to average.
Point2f totalFit(vector<Point2f>& point1, vector<Point2f>& point2, int n) {
	//Fitted global offset.
	float detalX = 0;
	float detalY = 0;

	//Accumulated per-pair displacements.
	float localDeltaX = 0;
	float localDeltaY = 0;

	//Never read past either vector, whatever n claims.
	int smaller = point1.size() < point2.size() ? (int)point1.size() : (int)point2.size();
	smaller = smaller < n ? smaller : n;

	//BUGFIX: guard against division by zero (NaN offsets) when called with
	//no usable pairs.
	if (smaller <= 0) {
		return Point2f(0, 0);
	}

	//Sum the displacement of every matched pair.
	for (int i = 0;i < smaller;i++) {
		localDeltaX += point2[i].x - point1[i].x;
		localDeltaY += point2[i].y - point1[i].y;
	}

	//Average displacement = global offset.
	detalX = localDeltaX / smaller;
	detalY = localDeltaY / smaller;
	cout << "整体偏移 " << detalX << "," << detalY << endl;
	//Package the resulting dx/dy.
	Point2f xyMove(detalX, detalY);

	return xyMove;
}


//RANSAC-based match filtering.
//img: frame kept only for interface compatibility with historical debug
//drawing; unused here.
//keypoints01/keypoints02: keypoints of the two frames.
//matches: raw descriptor matches to filter.
//Appends the surviving inlier keypoints/matches to the globals
//RR_KP1/RR_KP2/RR_matches and returns RR_matches.
//NOTE(review): the RR_* globals are never cleared here, so calling this
//for several frame pairs accumulates results across calls — confirm that
//callers reset them between frames.
vector <DMatch> myRansac(Mat img, vector<KeyPoint> keypoints01, vector<KeyPoint> keypoints02, vector<DMatch> matches)
{
	//Guard: the min/max scan below reads matches[0]; an empty input would
	//be undefined behaviour.
	if (matches.empty())
	{
		cout << "ransac匹配点较少！" << endl;
		return RR_matches;
	}

	//Find the smallest and largest descriptor distances among the matches.
	double min_dist = matches[0].distance, max_dist = matches[0].distance;
	for (size_t m = 0; m < matches.size(); m++)
	{
		if (matches[m].distance < min_dist)
		{
			min_dist = matches[m].distance;
		}
		if (matches[m].distance > max_dist)
		{
			max_dist = matches[m].distance;
		}
	}
	cout << "最小距离=" << min_dist << endl;
	cout << "最大距离=" << max_dist << endl;

	//Pre-filter: keep only matches whose distance is well below the maximum.
	vector<DMatch> goodMatches;
	for (size_t m = 0; m < matches.size(); m++)
	{
		if (matches[m].distance < 0.6 * max_dist)
		{
			goodMatches.push_back(matches[m]);
		}
	}
	cout << "较好的匹配点个数:" << goodMatches.size() << endl;

	//RANSAC stage.
	vector<DMatch> m_Matches = goodMatches;
	int ptCount = (int)goodMatches.size();
	if (ptCount < 100)
	{
		cout << "ransac匹配点较少！" << endl;
	}

	//Collect the matched keypoints in match order; goodMatches stores the
	//index pairs into keypoints01/keypoints02.
	vector <KeyPoint> RAN_KP1, RAN_KP2;
	for (size_t i = 0; i < m_Matches.size(); i++)
	{
		RAN_KP1.push_back(keypoints01[goodMatches[i].queryIdx]);
		RAN_KP2.push_back(keypoints02[goodMatches[i].trainIdx]);
	}
	//Convert to plain 2D coordinates for findFundamentalMat.
	vector <Point2f> p01, p02;
	for (size_t i = 0; i < m_Matches.size(); i++)
	{
		p01.push_back(RAN_KP1[i].pt);
		p02.push_back(RAN_KP2[i].pt);
	}

	//findFundamentalMat with FM_RANSAC needs at least 8 point pairs; with
	//fewer the call can throw or return empty, so skip the filtering.
	if (p01.size() < 8)
	{
		cout << "ransac匹配点较少！" << endl;
		return RR_matches;
	}

	//Estimate the 3x3 fundamental matrix; RansacStatus marks each input
	//pair as inlier (non-zero) or outlier (0).
	vector<uchar> RansacStatus;
	Mat Fundamental = findFundamentalMat(p01, p02, RansacStatus, FM_RANSAC);

	//Keep the inliers, re-indexing queryIdx/trainIdx so the surviving
	//matches refer to positions inside RR_KP1/RR_KP2.
	int index = 0;
	for (size_t i = 0; i < m_Matches.size(); i++)
	{
		if (RansacStatus[i] != 0)
		{
			RR_KP1.push_back(RAN_KP1[i]);
			RR_KP2.push_back(RAN_KP2[i]);
			m_Matches[i].queryIdx = index;
			m_Matches[i].trainIdx = index;
			RR_matches.push_back(m_Matches[i]);
			index++;
		}
	}
	cout << "RANSAC后匹配点数" << RR_matches.size() << endl;

	return RR_matches;
}

//自定义一个窗口显示多图函数 ?
void imshowMany(const std::string& _winName, const vector<Mat>& ployImages)
{
	int nImg = (int)ployImages.size();//获取在同一画布中显示多图的数目 ?


	Mat dispImg;


	int size;
	int x, y;
	//若要在OpenCV实现同一窗口显示多幅图片，图片要按矩阵方式排列，类似于Matlab中subplot(); ? ??
	//多图按矩阵排列的行数 ?，h: 多图按矩阵排列的的数 ? ?
	int w, h;


	float scale;//缩放比例 ?
	int max;


	if (nImg <= 0)
	{
		printf("Number of arguments too small....\n");
		return;
	}
	else if (nImg > 12)
	{
		printf("Number of arguments too large....\n");
		return;
	}


	else if (nImg == 1)
	{
		w = h = 1;
		size = 600;
	}
	else if (nImg == 2)
	{
		w = 2; h = 1;//2x1 ?
		size = 600;
	}
	else if (nImg == 3 || nImg == 4)
	{
		w = 2; h = 2;//2x2 ?
		size = 600;
	}
	else if (nImg == 5 || nImg == 6)
	{
		w = 3; h = 2;//3x2 ?
		size = 600;
	}
	else if (nImg == 7 || nImg == 8)
	{
		w = 4; h = 2;//4x2 ?
		size = 600;
	}
	else
	{
		w = 4; h = 3;//4x3 ?
		size = 600;
	}


	dispImg.create(Size(100 + size*w, 30 + size*h), CV_8UC3);//根据图片矩阵w*h，创建画布，可线的图片数量为w*h ?


	for (int i = 0, m = 20, n = 20; i < nImg; i++, m += (20 + size))
	{
		x = ployImages[i].cols;//第(i+1)张子图像的宽度列数） ?
		y = ployImages[i].rows;//第(i+1)张子图像的高度（行数） ?


		max = (x > y) ? x : y;//比较每张图片的行数和列数，取大值 ?
		scale = (float)((float)max / size);//计算缩放比例 ?


		if (i%w == 0 && m != 20)
		{
			m = 20;
			n += 20 + size;
		}


		Mat imgROI = dispImg(Rect(m, n, (int)(x / scale), (int)(y / scale)));//在画布中划分ROI
		resize(ployImages[i], imgROI, Size((int)(x / scale), (int)(y / scale))); //将要显示的图像设置为ROI区域大小 ?
	}
	namedWindow(_winName);
	imshow(_winName, dispImg);
}

//Render the motion-vector visualization for one frame pair.
//Draws the previous-frame keypoints onto img, then connects each matched
//pair with a blue segment (the displacement) and shows the result.
void imshowMove(Mat img, vector<KeyPoint>RR_KP1, vector<KeyPoint>RR_KP2, vector <DMatch>  RR_matches) {

	//Frame to be displayed, with the overlay drawn on it.
	Mat canvas;
	//Overlay the previous-frame keypoints.
	drawKeypoints(img, RR_KP1, canvas);
	//One line per match: from the previous-frame point to the next-frame point.
	for (const DMatch& match : RR_matches) {
		const Point2f& from = RR_KP1[match.queryIdx].pt;
		const Point2f& to = RR_KP2[match.trainIdx].pt;
		//args: target image, start point, end point, colour (blue)
		line(canvas, from, to, Scalar(255, 0, 0));
	}
	imshow("运动矢量图", canvas);

}


//为仿射变换选择三个bestMatchPoint的快速排序,通过比较distance（从小到大）
void quickSort(int left, int right, vector<DMatch>& arr)
{
	if (left >= right)
		return;
	int i, j;
	DMatch base, temp;
	i = left, j = right;
	base = arr[left];  //取最左边的match为基准
	while (i < j)
	{
		while (arr[j].distance >= base.distance && i < j)
			j--;
		while (arr[i].distance <= base.distance && i < j)
			i++;
		if (i < j)
		{
			temp = arr[i];
			arr[i] = arr[j];
			arr[j] = temp;
		}
	}
	//基准数归位
	arr[left] = arr[i];
	arr[i] = base;
	quickSort(left, i - 1, arr);//递归左边
	quickSort(i + 1, right, arr);//递归右边
}


//Template matching to track a local target through the whole video.
//(x, y, width, height): the target box in the first frame (assumed to be
//pre-selected, e.g. from the UI — initial values defined by the caller).
//Side effects: sets the globals x2/y2/width2/height2 read by
//MatchingMethod, appends each frame's crop to tempPoint and each match
//position to modelPointMovexy, and writes the crops to "aimedvideo.avi".
//Returns 0 on success, -1 when the video or a frame cannot be read.
int ModelMatch(int x, int y, int width, int height)
{
	//Publish the box for MatchingMethod, which operates on globals.
	x2 = x;
	y2 = y;
	width2 = width;
	height2 = height;

	ModelMatchCapture_ ModelMatchCapture(source);

	if (!ModelMatchCapture.isOpened()) {

		cout << "输入视频无法打开: " << source << endl;
		return -1;
	}
	//Query the input fps so the output video plays back at the same rate.
	double rate = ModelMatchCapture.get(CV_CAP_PROP_FPS);
	cv::VideoWriter w_cap("aimedvideo.avi", CV_FOURCC('M', 'J', 'P', 'G'), rate, cv::Size(width, height));

	int frameCount = ModelMatchCapture.get(CAP_PROP_FRAME_COUNT);
	cout << "总帧数：" << frameCount << endl;

	int endTime = frameCount;//how many frames to process

							 //Build the matching template from the selected region of frame 1.
	Mat imgTest;
	ModelMatchCapture.read(imgTest);


	templ = imgTest(Rect(x, y, width, height));//assume the target box was obtained

	resize(templ, templ, cv::Size(width, height));//crop the target out of the current frame
												  //imshow("框选目标", templ);
												  //waitKey(0);

	modelPointMovexy.push_back(Point2f(x, y));

	tempPoint.push_back(templ);

	int start = 1;
	while (1) {

		if (start == endTime) break;
		//Read the next frame to serve as the search image.
		ModelMatchCapture.read(img);

		//imshow("test", img);
		if (img.empty() || templ.empty())
		{
			cout << "模板匹配或者源图像获取失败" << endl;
			return -1;
		}
		//namedWindow("test", CV_WINDOW_NORMAL);
		//namedWindow(image_window, CV_WINDOW_NORMAL);
		//namedWindow(result_window, CV_WINDOW_NORMAL);
		//A window trackbar could be used to tune the matching method:
		//const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
		//createTrackbar(trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod);
		//Run one matching pass (fills the globals temp2 and modelPointMovexy).
		MatchingMethod(0, NULL);
		resize(temp2, temp2, cv::Size(width, height));
		//imshow("temp2", temp2);
		//waitKey(0);
		//imshow(image_window, img_display);
		tempPoint.push_back(temp2);

		w_cap.write(tempPoint[start]);
		cout << "匹配" << start << endl;
		start++;




	}

	return 0;

}


//Template-matching callback: match the global template templ against the
//current global frame img, record the best-match location in
//modelPointMovexy, and crop the target ROI into the global temp2.
//The global match_method selects the OpenCV comparison method.
void MatchingMethod(int, void*)
{
	//Work on a copy so cropping/drawing does not modify the source frame.
	Mat img_display;
	img.copyTo(img_display);

	//Score-matrix size: one score per valid template placement.
	int result_cols = img.cols - templ.cols + 1;
	int result_rows = img.rows - templ.rows + 1;
	//Mat::create takes (rows, cols, type); the original had the two
	//swapped. matchTemplate re-allocates result anyway, but keep the
	//pre-allocated shape correct.
	result.create(result_rows, result_cols, CV_32FC1);

	matchTemplate(img, templ, result, match_method);

	normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat());

	double minVal;
	double maxVal;
	Point minLoc;
	Point maxLoc;
	Point matchLoc;
	//Locate the extrema of the score matrix R.
	minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, Mat());

	//For (normalized) squared-difference methods the best match is the
	//minimum; for correlation-style methods it is the maximum.
	if (match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED)
	{
		matchLoc = minLoc;
	}
	else
	{
		matchLoc = maxLoc;
	}

	//Draw the match box onto the score image (debug visualisation).
	rectangle(result, matchLoc, Point(matchLoc.x + templ.cols, matchLoc.y + templ.rows), Scalar::all(0), 2, 8, 0);

	//Crop the target region out of the frame.
	//NOTE(review): this crops at the fixed initial box (x2, y2), not at
	//matchLoc — confirm whether the crop should follow the match location.
	temp2 = img_display(Rect(x2, y2, width2, height2));

	//Record this frame's best-match position.
	modelPointMovexy.push_back(matchLoc);

	return;
}

//Template matching step 2: per-frame offsets (dx, dy), each computed as
//the next frame's target position minus the previous frame's.
//Returns an empty vector when there are fewer than two positions — the
//original's `i < size() - 1` underflowed (size_t wrap) on an empty input
//and read out of bounds.
vector<Point2f> ModelMatchMovexy(vector<Point2f>& modelPointMovexy) {

	vector<Point2f> optimal_xyMove;
	//`i + 1 < size()` avoids the unsigned wrap-around of `size() - 1`.
	for (size_t i = 0; i + 1 < modelPointMovexy.size(); i++) {
		optimal_xyMove.push_back(Point2f(modelPointMovexy[i + 1].x - modelPointMovexy[i].x, modelPointMovexy[i + 1].y - modelPointMovexy[i].y));
	}
	return optimal_xyMove;
}
