/*
 *  stereo_match.cpp
 *  calibration
 *
 *  Created by Victor  Eruhimov on 1/18/10.
 *  Copyright 2010 Argus Corp. All rights reserved.
 *
 */

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/utility.hpp"
#include <omp.h>  // enable multi-core CPU (OpenMP) computation

#include <stdio.h>
#include <iostream>
#include <vector>
#include <map>

using namespace std;
using namespace cv;

const double FRAME_WIDTH = 1280; //宽度 
const double FRAME_HEIGHT = 480; //高度

enum { STEREO_BM = 0, STEREO_SGBM = 1, STEREO_HH = 2, STEREO_VAR = 3, STEREO_3WAY = 4 };

static int64 _tick_count = getTickCount();

const static float DISTANCE_PPROPORTION = 0.043;

static vector<Vec3f> VECXYZ;

static void print_help()
{
	printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
	printf("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|sgbm3way] [--blocksize=<block_size>]\n"
		"[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i=<intrinsic_filename>] [-e=<extrinsic_filename>]\n"
		"[--no-display] [-o=<disparity_image>] [-p=<point_cloud_file>]\n");
}

static float GetDistance(float z) {
	return z * DISTANCE_PPROPORTION;
}

static void saveXYZ(const char* filename, const Mat& mat)
{
	const double max_z = 1.0e4;
	FILE* fp = fopen(filename, "wt");
	for (int y = 0; y < mat.rows; y++)
	{
		for (int x = 0; x < mat.cols; x++)
		{
			Vec3f point = mat.at<Vec3f>(y, x);
			if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
			fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
		}
	}
	fclose(fp);
}

Vec3f GetMinXYZ(const Mat& mat)
{
	const double max_z = 1.0e4;
	Vec3f min_point;
	for (int y = 0; y < mat.rows; y++)
	{
		for (int x = 0; x < mat.cols; x++)
		{
			Vec3f point = mat.at<Vec3f>(y, x);
			if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;

			if (min_point[0] == 0) {
				min_point = point;
			}
			else {
				min_point = min_point[2] < point[2] ? min_point : point;
			}
		}
	}

	min_point[2] = GetDistance(min_point[2]);

	return min_point;
}

/*给深度图上色*/
void GenerateFalseMap(cv::Mat &src, cv::Mat &disp)
{
	// color map  
	float max_val = 255.0f;
	float map[8][4] = { { 0,0,0,114 },{ 0,0,1,185 },{ 1,0,0,114 },{ 1,0,1,174 },
	{ 0,1,0,114 },{ 0,1,1,185 },{ 1,1,0,114 },{ 1,1,1,0 } };
	float sum = 0;
	for (int i = 0; i < 8; i++)
		sum += map[i][3];

	float weights[8]; // relative   weights  
	float cumsum[8];  // cumulative weights  
	cumsum[0] = 0;
	for (int i = 0; i < 7; i++) {
		weights[i] = sum / map[i][3];
		cumsum[i + 1] = cumsum[i] + map[i][3] / sum;
	}

	int height_ = src.rows;
	int width_ = src.cols;
	// for all pixels do  
	for (int v = 0; v < height_; v++) {
		for (int u = 0; u < width_; u++) {

			// get normalized value  
			float val = min(max(src.data[v*width_ + u] / max_val, 0.0f), 1.0f);

			// find bin  
			int i;
			for (i = 0; i < 7; i++)
				if (val < cumsum[i + 1])
					break;

			// compute red/green/blue values  
			float   w = 1.0 - (val - cumsum[i])*weights[i];
			uchar r = (uchar)((w*map[i][0] + (1.0 - w)*map[i + 1][0]) * 255.0);
			uchar g = (uchar)((w*map[i][1] + (1.0 - w)*map[i + 1][1]) * 255.0);
			uchar b = (uchar)((w*map[i][2] + (1.0 - w)*map[i + 1][2]) * 255.0);
			//rgb内存连续存放  
			disp.data[v*width_ * 3 + 3 * u + 0] = b;
			disp.data[v*width_ * 3 + 3 * u + 1] = g;
			disp.data[v*width_ * 3 + 3 * u + 2] = r;
		}
	}
}

Vec3f ProcessDistance() {

	float min_distance = 0;

	for (vector<Vec3f>::iterator iter = VECXYZ.begin(); iter != VECXYZ.end(); iter++) {

		Vec3f point = Vec3f(*iter);

		if (min_distance == 0) {
			min_distance = point[2];
		}
		else {
			min_distance = min_distance < point[2] ? min_distance : point[2];
		}
	}

	map<float, vector<Vec3f> > distance_map;
	float distance_qj = 0.2;

	for (vector<Vec3f>::iterator iter = VECXYZ.begin(); iter != VECXYZ.end(); iter++) {

		Vec3f point = Vec3f(*iter);

		float t1 = point[2] - min_distance;
		if (t1 < distance_qj) {
			distance_map[min_distance].push_back(point);
		}
		else {
			int t2 = t1 / distance_qj;
			float t3 = min_distance + (t2 * distance_qj);
			distance_map[t3].push_back(point);
		}
	}

	VECXYZ.clear();

	int max_distance_vec_size = 0;
	float max_distance_map_key;

	for (map<float, vector<Vec3f> >::iterator iter = distance_map.begin(); iter != distance_map.end(); iter++) {

		if (iter->second.size() > max_distance_vec_size) {
			max_distance_vec_size = iter->second.size();
			max_distance_map_key = iter->first;
		}
	}

	Vec3f _min_point = 0;

	for (vector<Vec3f>::iterator iter = distance_map[max_distance_map_key].begin();
		iter != distance_map[max_distance_map_key].end(); iter++) {

		Vec3f point = Vec3f(*iter);

		if (_min_point[0] == 0 && _min_point[1] == 0 && _min_point[2] == 0) {
			_min_point = point;
		}
		else {
			_min_point = _min_point[2] < point[2] ? _min_point : point;
		}
	}

	return _min_point;
}

int ProcessIMG(Mat img1, Mat img2, int alg, int blocksize, int numberOfDisparities, bool no_display, float scale,
	string intrinsic_filename, string extrinsic_filename,
	string disparity_filename, string point_cloud_filename) {

	if (img1.empty())
	{
		printf("Command-line parameter error: could not load the first input image file\n");
		return -1;
	}
	if (img2.empty())
	{
		printf("Command-line parameter error: could not load the second input image file\n");
		return -1;
	}

	//imshow("左边的摄像头", img1); //显示当前帧
	//imshow("右边的摄像头", img2);

	if (scale != 1.f)
	{
		Mat temp1, temp2;
		int method = scale < 1 ? INTER_AREA : INTER_CUBIC;
		resize(img1, temp1, Size(), scale, scale, method);
		img1 = temp1;
		resize(img2, temp2, Size(), scale, scale, method);
		img2 = temp2;
	}

	Size img_size = img1.size();

	Rect roi1, roi2;
	Mat Q;

	if (!intrinsic_filename.empty())
	{
		// reading intrinsic parameters
		FileStorage fs(intrinsic_filename, FileStorage::READ);
		if (!fs.isOpened())
		{
			printf("Failed to open file %s\n", intrinsic_filename.c_str());
			return -1;
		}

		Mat M1, D1, M2, D2;
		fs["M1"] >> M1;
		fs["D1"] >> D1;
		fs["M2"] >> M2;
		fs["D2"] >> D2;

		M1 *= scale;
		M2 *= scale;

		fs.open(extrinsic_filename, FileStorage::READ);
		if (!fs.isOpened())
		{
			printf("Failed to open file %s\n", extrinsic_filename.c_str());
			return -1;
		}

		Mat R, T, R1, P1, R2, P2;
		fs["R"] >> R;
		fs["T"] >> T;
		fs["R1"] >> R1;
		fs["P1"] >> P1;
		fs["R2"] >> R2;
		fs["P2"] >> P2;
		fs["Q"] >> Q;

		stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2);

		Mat map11, map12, map21, map22;
		initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
		initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

		Mat img1r, img2r;
		remap(img1, img1r, map11, map12, INTER_LINEAR);
		remap(img2, img2r, map21, map22, INTER_LINEAR);

		img1 = img1r;
		img2 = img2r;

		//imshow("img1r", img1r);
		//imshow("img2r", img2r);
	}

	numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width / 8) + 15) & -16;

	Ptr<StereoBM> bm = StereoBM::create(16, 9);
	Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 16, 3);

	bm->setROI1(roi1);
	bm->setROI2(roi2);
	bm->setPreFilterCap(31);
	bm->setBlockSize(blocksize > 0 ? blocksize : 9); //SAD窗口大小，5~21之间为宜
	bm->setMinDisparity(0); //最小视差，默认值为0, 可以是负值，int型
	bm->setNumDisparities(numberOfDisparities); //视差窗口，即最大视差值与最小视差值之差,窗口大小必须是16的整数倍，int型
	bm->setTextureThreshold(10);
	bm->setUniquenessRatio(15);
	bm->setSpeckleWindowSize(100);
	bm->setSpeckleRange(32);
	bm->setDisp12MaxDiff(1);

	sgbm->setPreFilterCap(63);
	int sgbmWinSize = blocksize > 0 ? blocksize : 3;
	sgbm->setBlockSize(sgbmWinSize);

	int cn = img1.channels();

	sgbm->setP1(8 * cn*sgbmWinSize*sgbmWinSize);
	sgbm->setP2(32 * cn*sgbmWinSize*sgbmWinSize);
	sgbm->setMinDisparity(0);
	sgbm->setNumDisparities(numberOfDisparities);
	sgbm->setUniquenessRatio(15);
	sgbm->setSpeckleWindowSize(100);
	sgbm->setSpeckleRange(32);
	sgbm->setDisp12MaxDiff(1);
	if (alg == STEREO_HH)
		sgbm->setMode(StereoSGBM::MODE_HH);
	else if (alg == STEREO_SGBM)
		sgbm->setMode(StereoSGBM::MODE_SGBM);
	else if (alg == STEREO_3WAY)
		sgbm->setMode(StereoSGBM::MODE_SGBM_3WAY);

	Mat disp, disp8;
	//Mat img1p, img2p, dispp;
	//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
	//copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);

	int64 t = getTickCount();
	if (alg == STEREO_BM)
		bm->compute(img1, img2, disp); //输入图像必须为灰度图
	else if (alg == STEREO_SGBM || alg == STEREO_HH || alg == STEREO_3WAY)
		sgbm->compute(img1, img2, disp);
	t = getTickCount() - t;

	//disp = dispp.colRange(numberOfDisparities, img1p.cols);
	if (alg != STEREO_VAR)
		disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities*16.)); //计算出的视差是CV_16S格式
	else
		disp.convertTo(disp8, CV_8U);

	if (!disparity_filename.empty())
		imwrite(disparity_filename, disp8);

	//printf("storing the point cloud...");
	fflush(stdout);
	Mat xyz;
	reprojectImageTo3D(disp, xyz, Q, true);
	xyz *= 16; //在实际求距离时，ReprojectTo3D出来的X / W, Y / W, Z / W都要乘以16(也就是W除以16)，才能得到正确的三维坐标信息。

	if (!point_cloud_filename.empty()) {
		saveXYZ(point_cloud_filename.c_str(), xyz);
	}

	VECXYZ.push_back(GetMinXYZ(xyz));

	if (((getTickCount() - _tick_count) * 1000 / getTickFrequency()) >= 300) {

		Vec3f _min_point = ProcessDistance();

		if (!no_display)
		{
			stringstream ss_min_point, ss_tick_count;
			ss_min_point << "distance: " << _min_point[2] << "m";
			ss_tick_count << "time: " << t * 1000 / getTickFrequency() << "ms";

			/*Mat color(disp.size(), CV_8UC3);
			GenerateFalseMap(disp8, color);//转成彩图

			line(color, Point(320 + _min_point[0], 0), Point(320 + _min_point[0], 480), Scalar(255, 150, 0), 1, 8);
			line(color, Point(0, 240 + _min_point[1]), Point(640, 240 + _min_point[1]), Scalar(255, 150, 0), 1, 8);

			putText(color, ss_min_point.str(), cv::Point(450, 440), cv::FONT_HERSHEY_DUPLEX, 0.5, cv::Scalar(0, 255, 0), 1);
			putText(color, ss_tick_count.str(), cv::Point(450, 460), cv::FONT_HERSHEY_DUPLEX, 0.5, cv::Scalar(0, 255, 0), 1);*/

			Mat color(Size(480, 300), CV_8UC3, cv::Scalar(0, 0, 0));
			putText(color, ss_min_point.str(), cv::Point(5, 80), cv::FONT_HERSHEY_DUPLEX, 1.3, cv::Scalar(0, 255, 0), 1);
			putText(color, ss_tick_count.str(), cv::Point(5, 150), cv::FONT_HERSHEY_DUPLEX, 1.3, cv::Scalar(0, 255, 0), 1);

			imshow("disparity", color);

			waitKey(1);
		}
		else {
			cout << t * 1000 / getTickFrequency() << "ms: " << _min_point[0] << " " << _min_point[1] << " " << _min_point[2] << "m" << endl;
		}

		_tick_count = getTickCount();
	}

	return 0;
}

int krx_ProcessIMG(int alg, int blocksize, int numberOfDisparities, bool no_display, float scale,
	string intrinsic_filename, string extrinsic_filename,
	string disparity_filename, string point_cloud_filename) {

	// 【1】从摄像头读入视频
	VideoCapture capture(0);

	if (!capture.isOpened()) { //判断能够打开摄像头
		cout << "can not open the camera" << endl;
		cin.get();
		exit(1);
	}

	capture.set(CV_CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));
	capture.set(CV_CAP_PROP_FPS, 60);//帧率 帧/秒
	capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);//宽度 
	capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);//高度

	//【2】循环显示每一帧
	int i = 1;
	while (i++)
	{
		Mat frame; //定义一个Mat变量，用于存储每一帧的图像

		capture >> frame;  //读取当前帧

		if (frame.empty())
		{
			cout << "--(!) No captured frame -- Break!" << endl;
			//break;                
		}
		else
		{
			cv::cvtColor(frame, frame, CV_BGR2GRAY); //彩色转换成灰度

			Rect left_rect(0, 0, FRAME_WIDTH / 2, FRAME_HEIGHT);   //创建一个Rect框，属于cv中的类，四个参数代表x,y,width,height
			Mat left_limage_cut = Mat(frame, left_rect);      //从img中按照rect进行切割，此时修改image_cut时image中对应部分也会修改，因此需要copy
			Mat left_edges = left_limage_cut.clone();   //clone函数创建新的图片

			Rect right_rect(FRAME_WIDTH / 2, 0, FRAME_WIDTH / 2, FRAME_HEIGHT);   //创建一个Rect框，属于cv中的类，四个参数代表x,y,width,height
			Mat right_rimage_cut = Mat(frame, right_rect);      //从img中按照rect进行切割，此时修改image_cut时image中对应部分也会修改，因此需要copy
			Mat right_edges = right_rimage_cut.clone();   //clone函数创建新的图片

			ProcessIMG(left_edges, right_edges, alg, blocksize, numberOfDisparities, no_display, scale, intrinsic_filename, extrinsic_filename, disparity_filename, point_cloud_filename);
		}
	}

	return 0;
}

int anc_ProcessIMG(int alg, int blocksize, int numberOfDisparities, bool no_display, float scale,
	string intrinsic_filename, string extrinsic_filename,
	string disparity_filename, string point_cloud_filename) {

	// 【1】从摄像头读入视频
	VideoCapture left_capture(0);
	VideoCapture right_capture(1);

	if (!left_capture.isOpened() || !right_capture.isOpened()) { //判断能够打开摄像头
		cout << "can not open the camera" << endl;
		cin.get();
		exit(1);
	}

	//【2】循环显示每一帧
	int i = 1;
	while (i++)
	{
		Mat left_frame, right_frame; //定义一个Mat变量，用于存储每一帧的图像

		left_capture >> left_frame;  //读取当前帧
		right_capture >> right_frame;

		if (left_frame.empty() || right_frame.empty())
		{
			cout << "--(!) No captured frame -- Break!" << endl;
			//break;                
		}
		else
		{
			cv::cvtColor(left_frame, left_frame, CV_BGR2GRAY); //彩色转换成灰度
			cv::cvtColor(right_frame, right_frame, CV_BGR2GRAY);

			ProcessIMG(left_frame, right_frame, alg, blocksize, numberOfDisparities, no_display, scale, intrinsic_filename, extrinsic_filename, disparity_filename, point_cloud_filename);
		}
	}

	return 0;
}

int main(int argc, char** argv)
{
	string img1_filename = "";
	string img2_filename = "";
	string intrinsic_filename = "";
	string extrinsic_filename = "";
	string disparity_filename = "";
	string point_cloud_filename = "";

	int alg = STEREO_SGBM;
	int blocksize, numberOfDisparities;
	bool no_display;
	float scale;

	cv::CommandLineParser parser(argc, argv,
		"{@arg1|../data/left_6.jpg|}{@arg2|../data/right_6.jpg|}{help h||}{algorithm|bm|}{max-disparity|256|}{blocksize|5|}{no-display||}{scale|1|}{i|intrinsics.yml|}{e|extrinsics.yml|}{o||}{p||}");
	if (parser.has("help"))
	{
		print_help();
		return 0;
	}
	img1_filename = parser.get<string>(0);
	img2_filename = parser.get<string>(1);
	if (parser.has("algorithm"))
	{
		string _alg = parser.get<string>("algorithm");
		alg = _alg == "bm" ? STEREO_BM :
			_alg == "sgbm" ? STEREO_SGBM :
			_alg == "hh" ? STEREO_HH :
			_alg == "var" ? STEREO_VAR :
			_alg == "sgbm3way" ? STEREO_3WAY : -1;
	}
	numberOfDisparities = parser.get<int>("max-disparity");
	blocksize = parser.get<int>("blocksize");
	scale = parser.get<float>("scale");
	no_display = parser.has("no-display");
	if (parser.has("i"))
		intrinsic_filename = parser.get<string>("i");
	if (parser.has("e"))
		extrinsic_filename = parser.get<string>("e");
	if (parser.has("o"))
		disparity_filename = parser.get<string>("o");
	if (parser.has("p"))
		point_cloud_filename = parser.get<string>("p");
	if (!parser.check())
	{
		parser.printErrors();
		return 1;
	}
	if (alg < 0)
	{
		printf("Command-line parameter error: Unknown stereo algorithm\n\n");
		print_help();
		return -1;
	}
	if (numberOfDisparities < 1 || numberOfDisparities % 16 != 0)
	{
		printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
		print_help();
		return -1;
	}
	if (scale < 0)
	{
		printf("Command-line parameter error: The scale factor (--scale=<...>) must be a positive floating-point number\n");
		return -1;
	}
	if (blocksize < 1 || blocksize % 2 != 1)
	{
		printf("Command-line parameter error: The block size (--blocksize=<...>) must be a positive odd number\n");
		return -1;
	}
	if (img1_filename.empty() || img2_filename.empty())
	{
		printf("Command-line parameter error: both left and right images must be specified\n");
		return -1;
	}
	if ((!intrinsic_filename.empty()) ^ (!extrinsic_filename.empty()))
	{
		printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
		return -1;
	}

	if (extrinsic_filename.empty() && !point_cloud_filename.empty())
	{
		printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
		return -1;
	}

	//int color_mode = alg == STEREO_BM ? 0 : -1;
	//Mat img1 = imread(img1_filename, color_mode);
	//Mat img2 = imread(img2_filename, color_mode);

	krx_ProcessIMG(alg, blocksize, numberOfDisparities, no_display, scale, intrinsic_filename, extrinsic_filename, disparity_filename, point_cloud_filename);
	//anc_ProcessIMG(alg, blocksize, numberOfDisparities, no_display, scale, intrinsic_filename, extrinsic_filename, disparity_filename, point_cloud_filename);

	return 0;
}
