/*
 *  stereo_match.cpp
 *  calibration
 *
 *  Created by Victor Eruhimov on 1/18/10.
 *  Copyright 2010 Argus Corp. All rights reserved.
 *
 */

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"

#include <stdio.h>
#include "../libcam.h"

using namespace cv;

// Print the demo's banner and command-line usage text to stdout.
void print_help()
{
	const char* banner =
		"\nDemo stereo matching converting L and R images into disparity and point clouds\n";
	const char* usage =
		"\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|var] [--blocksize=<block_size>]\n"
		"[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
		"[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n";
	fputs(banner, stdout);
	fputs(usage, stdout);
}

/**
 * Write a 3D point cloud to a plain-text file, one "x y z" triple per line.
 *
 * @param filename  Path of the output file (opened in text mode, truncated).
 * @param mat       CV_32FC3 matrix of 3D points, e.g. the output of
 *                  reprojectImageTo3D().
 *
 * Points whose z component equals max_z (the "invalid disparity" marker
 * produced by reprojectImageTo3D) or lies beyond it are skipped.
 */
void saveXYZ(const char* filename, const Mat& mat)
{
	const double max_z = 1.0e4;
	FILE* fp = fopen(filename, "wt");
	if(!fp)
	{
		// Bug fix: the original dereferenced a NULL stream when the
		// file could not be opened.
		printf("Failed to open file %s for writing\n", filename);
		return;
	}
	for(int y = 0; y < mat.rows; y++)
	{
		for(int x = 0; x < mat.cols; x++)
		{
			Vec3f point = mat.at<Vec3f>(y, x);
			// Skip invalid / implausibly distant points.
			if(fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
			fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
		}
	}
	fclose(fp);
}

/**
 * Live stereo-matching demo: grabs frames from two V4L cameras, rectifies
 * them with calibration data read from intrinsics.yml / extrinsics.yml,
 * computes a disparity map and displays left / right / disparity windows.
 *
 * Interactive keys while running:
 *   ESC - quit
 *   c   - continuous capture (default)
 *   f   - freeze: block on each frame until a key is pressed
 *   s   - save disparity / left / right frames as numbered JPEGs
 *
 * Returns 0 on normal termination, -1 if a calibration file is missing.
 */
int main(int argc, char** argv)
{
	// Capture geometry, shared by both cameras.
	int ww=640;
	int hh=480;
	int fps=20;

	const char *dev="/dev/video1";   // left camera device
	const char *rdev="/dev/video0";  // right camera device

	// 1) Instance a Camera object per device (see ../libcam.h)
	Camera c(dev, ww, hh, fps);
	Camera rc(rdev, ww, hh, fps);

	cvNamedWindow("l", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("r", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("disp", CV_WINDOW_AUTOSIZE);

	// BGR frame buffers the cameras are decoded into.
	IplImage *l=cvCreateImage(cvSize(ww, hh), IPL_DEPTH_8U, 3);
	IplImage *r=cvCreateImage(cvSize(ww, hh), IPL_DEPTH_8U, 3);
	// NOTE(review): removed the unused l_/r_ imageData aliases; r_ was
	// also a copy-paste bug (it pointed at l->imageData, not r's).

	// Option prefixes left over from the original sample's command-line
	// parser; parsing is currently disabled but the names are kept so
	// they still match the usage text in print_help().
	const char* algorithm_opt = "--algorithm=";
	const char* maxdisp_opt = "--max-disparity=";
	const char* blocksize_opt = "--blocksize=";
	const char* nodisplay_opt = "--no-display=";
	const char* scale_opt = "--scale=";

	const char* img1_filename = 0;
	const char* img2_filename = 0;
	const char* intrinsic_filename = "intrinsics.yml";
	const char* extrinsic_filename = "extrinsics.yml";
	const char* disparity_filename = "disp.jpg";
	const char* point_cloud_filename = 0;

	enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };
	int alg = STEREO_SGBM;
	int SADWindowSize = 0, numberOfDisparities = 0;
	bool no_display = false;
	float scale = 1.f;

	StereoBM bm;
	StereoSGBM sgbm;
	StereoVar var;

	int color_mode = alg == STEREO_BM ? 0 : -1;

	// Parameters for the (currently commented-out) Canny edge overlay.
	int N = 7;
	int aperature_size = N;
	double lowThresh = 20;
	double highThresh = 40;

	// Grab one frame up front so the image size is known before the
	// rectification setup below.
	c.Update();
	c.toIplImage(l);
	IplImage* gray = cvCreateImage( cvGetSize(l), IPL_DEPTH_8U, 1);
	IplImage* outl = cvCreateImage( cvGetSize(l), IPL_DEPTH_8U, 3);
	IplImage* outr = cvCreateImage( cvGetSize(l), IPL_DEPTH_8U, 3);

	// Camera matrices (M1/M2), distortion coefficients (D1/D2), and the
	// inter-camera rotation/translation (R/T) read from calibration files;
	// R1/P1/R2/P2 are filled in by stereoRectify.
	Mat M1, D1, M2, D2;
	Mat R, T, R1, P1, R2, P2;
	if( intrinsic_filename )
	{
		// reading intrinsic parameters
		FileStorage fs(intrinsic_filename, CV_STORAGE_READ);
		if(!fs.isOpened())
		{
			printf("Failed to open file %s\n", intrinsic_filename);
			return -1;
		}

		fs["M1"] >> M1;
		fs["D1"] >> D1;
		fs["M2"] >> M2;
		fs["D2"] >> D2;

		fs.open(extrinsic_filename, CV_STORAGE_READ);
		if(!fs.isOpened())
		{
			printf("Failed to open file %s\n", extrinsic_filename);
			return -1;
		}

		fs["R"] >> R;
		fs["T"] >> T;
	}

	Mat imgt = l;
	Size img_size = imgt.size();

	Rect roi1, roi2;
	Mat Q;
	Mat map11, map12, map21, map22;

	// Compute rectification transforms and remap tables once; they depend
	// only on the calibration and the fixed capture size.
	stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 0, img_size, &roi1, &roi2 );

	initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
	initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

	int imgNum = 0;        // suffix for snapshot files saved with 's'
	int continuous = 10;   // waitKey delay: 10 ms = free-running, 0 = freeze
	while(1){
	Mat img1 = l;//imread(img1_filename, color_mode);
	Mat img2 = r;//imread(img2_filename, color_mode);
		c.Update();
		rc.Update();

		// 3) Convert to OpenCV format  (default is YUYV, stored into c.data[] )
		c.toIplImage(l);
		rc.toIplImage(r);

/*		int canny_thres = 100;
		cvCvtColor(l, gray, CV_BGR2GRAY);
		cvCanny(gray, gray, canny_thres, canny_thres, 3);
		cvCvtColor( gray, outl, CV_GRAY2BGR);
		add(Mat(outl), Mat(l), img1);

		cvCvtColor(r, gray, CV_BGR2GRAY);
		cvCanny(gray, gray, canny_thres, canny_thres, 3);
		cvCvtColor( gray, outr, CV_GRAY2BGR);
		add(Mat(outr), Mat(r), img2);*/

		// Wrap the freshly captured frames as Mat headers (no copy).
		img1 = Mat(l);
		img2 = Mat(r);

		//cvShowImage("l", l);
		//cvShowImage("r", r);

	// Rectify both views so epipolar lines are horizontal.
	Mat img1r, img2r;
	remap(img1, img1r, map11, map12, INTER_LINEAR);
	remap(img2, img2r, map21, map22, INTER_LINEAR);

	img1 = img1r;
	img2 = img2r;

	// Default disparity range: width/8 rounded up to a multiple of 16,
	// as required by the block matchers.
	numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;

	// TODO(review): this matcher configuration is loop-invariant and
	// could be hoisted above the while loop.
	bm.state->roi1 = roi1;
	bm.state->roi2 = roi2;
	bm.state->preFilterCap = 31;
	bm.state->SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 9;
	bm.state->minDisparity = 0;
	bm.state->numberOfDisparities = numberOfDisparities;
	bm.state->textureThreshold = 10;
	bm.state->uniquenessRatio = 15;
	bm.state->speckleWindowSize = 100;
	bm.state->speckleRange = 32;
	bm.state->disp12MaxDiff = 1;

	sgbm.preFilterCap = 63;
	sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;

	int cn = img1.channels();

	// Standard SGBM smoothness penalties scaled by window area & channels.
	sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
	sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
	sgbm.minDisparity = 0;
	sgbm.numberOfDisparities = numberOfDisparities;
	sgbm.uniquenessRatio = 10;
	sgbm.speckleWindowSize = bm.state->speckleWindowSize;
	sgbm.speckleRange = bm.state->speckleRange;
	sgbm.disp12MaxDiff = 1;
	sgbm.fullDP = alg == STEREO_HH;

	var.levels = 3;									// ignored with USE_AUTO_PARAMS
	var.pyrScale = 0.5;								// ignored with USE_AUTO_PARAMS
	var.nIt = 25;
	var.minDisp = -numberOfDisparities;
	var.maxDisp = 0;
	var.poly_n = 3;
	var.poly_sigma = 0.0;
	var.fi = 15.0f;
	var.lambda = 0.03f;
	var.penalization = var.PENALIZATION_TICHONOV;	// ignored with USE_AUTO_PARAMS
	var.cycle = var.CYCLE_V;						// ignored with USE_AUTO_PARAMS
	var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;

	Mat disp, disp8;
	//Mat img1p, img2p, dispp;
	//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
	//copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);

	int64 t = getTickCount();
	if( alg == STEREO_BM )
		bm(img1, img2, disp);
	else if( alg == STEREO_VAR ) {
		var(img1, img2, disp);
	}
	else if( alg == STEREO_SGBM || alg == STEREO_HH )
		sgbm(img1, img2, disp);
	t = getTickCount() - t;
//	printf("Time elapsed: %fms\n", t*1000/getTickFrequency());

	//disp = dispp.colRange(numberOfDisparities, img1p.cols);
	// BM/SGBM produce fixed-point disparities with 4 fractional bits,
	// hence the /16 when rescaling to 8-bit for display.
	if( alg != STEREO_VAR )
		disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
	else
		disp.convertTo(disp8, CV_8U);
	if( !no_display )
	{
		imshow("l", img1);
		imshow("r", img2);
		imshow("disp", disp8);
		fflush(stdout);
		// Bug fix: renamed from 'c', which shadowed the left Camera object.
		int key = waitKey(continuous) & 255;
		if(key == 27)
			exit(0);
		if(key == 'c')
			continuous = 10;
		else if (key == 'f')
			continuous = 0;
		else if(key == 's') {
			// Snapshot the current disparity and both input frames.
			char fname[32];
			sprintf(fname, "dispImg%d.jpg", imgNum);
			imwrite(fname, disp8);

			sprintf(fname, "leftImg%d.jpg", imgNum);
			imwrite(fname, img1);

			sprintf(fname, "rightImg%d.jpg", imgNum++);
			imwrite(fname, img2);
		}
	}

/*	if(disparity_filename)
		imwrite(disparity_filename, disp8);

	if(point_cloud_filename)
	{
		printf("storing the point cloud...");
		fflush(stdout);
		Mat xyz;
		reprojectImageTo3D(disp, xyz, Q, true);
		saveXYZ(point_cloud_filename, xyz);
		printf("\n");
	}*/
	}
	// Unreachable with the current loop (exit(0) above); kept so cleanup
	// is correct if the loop is ever given a break condition.
	cvReleaseImage(&gray);
	cvReleaseImage(&outl);
	cvReleaseImage(&outr);
	cvReleaseImage(&l);
	cvReleaseImage(&r);
	return 0;
}
