/**
 * @file
 * @brief Checkerboard tracking and pose-estimation demo: grabs frames from a
 *        Basler camera (Pylon), detects chessboard corners, and estimates the
 *        camera pose from a RANSAC homography (Zhang-style estimation).
 * @author A. Huaman, Son Le
 */

#include <stdio.h>
#include <algorithm>
#include <iostream>

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"

#include "ObjectDetector.hpp"
#include "PoseEstimator.hpp"
#include "ZhangPoseEstimator.hpp"
#include "PylonGrabber.h"

using namespace cv;
using namespace std;

// SIFT Matcher vars
vector<DMatch> enh_good_matches;
vector<KeyPoint> keypoints_img, keypoints_ref;
Mat descriptor_img, descriptor_ref;
int vote;
Mat ref_img;
Mat matched_pnts_ref, matched_pnts_img;

// Optical flow & VS vars
vector<DMatch> matches;

// ######## working with Checkerboard STARTs ##########
//define chess-board variables
int iNumBoards = 10; //how many successful boards to collect
int iNumCornersHorizontal = 8;//7
int iNumCornersVertical = 6;//5
//get input of chess-board info from users
double disConersHori=18.75;
double disConersVer=18.75;

//setup more variables
int iNumSquares = iNumCornersHorizontal * iNumCornersVertical;
Size sBoardSize = Size(iNumCornersHorizontal, iNumCornersVertical);

vector<Point2f> corners;
// ######## working with Checkerboard ENDs ##########

int initChkBrdTracker(Mat& frame, vector<Point2f>& pnts_ref);

int callPoseEst(const vector<Point2f> pnts_ref, const vector<Point2f> pnts_img, PoseEstimator& pe);

int callPoseEst(const vector<Point2f> pnts_ref, const vector<Point2f> pnts_img, Mat& image, ZhangPoseEstimator& zpe);

int MatchOneIter(Mat& frame, vector<Point2f>& points, vector<Point2f>& pnts_ref);

int checkReprojectionError(Mat& image, const Mat objPnts, const Mat imgPnts, const Mat rvec, const Mat tvec, const Mat camMat, const Mat distCoeffs);

static void help()
{
	// print a welcome message, and the OpenCV version
	cout << "\nThis is a demo of Lukas-Kanade optical flow lkdemo(),\n"
		"Using OpenCV version %s\n" << CV_VERSION << "\n"
		<< endl;

	cout << "\nHot keys: \n"
		"\tESC - quit the program\n"
		"\tr - auto-initialize tracking\n"
		"\tc - delete all the points\n"
		"\tn - switch the \"night\" mode on/off\n"
		"To add/remove a feature point click it\n" << endl;
}

Point2f point;
bool addRemovePt = false;

static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
{
	if( event == CV_EVENT_LBUTTONDOWN )
	{
		point = Point2f((float)x,(float)y);
		//addRemovePt = true;   // disable mouse click
	}
}



int InitMatching(Mat& frame, vector<Point2f>& points)
{
	int ret = 1;

	Mat gray_image;

	cvtColor(frame, gray_image, CV_BGR2GRAY, 0);

	bool found = findChessboardCorners(frame,
				sBoardSize,
				points,
				CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

	if(found)
		cornerSubPix(gray_image,
					points,
					Size(11, 11),
					Size(-1, -1),
					TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
					);

	drawChessboardCorners(frame, sBoardSize, corners, found);

	//imshow("board", frame);
	//waitKey(0);

	return ret;
}


/**
 * @brief Main function
 * 
 * @param argc ...
 * @param argv ...
 * @return int
 */
int main( int argc, char** argv )
{
	int CamMode = 1;
	
	VideoCapture cap;	
	PylonGrabber pg; 
	if (CamMode) 
	{
	  //printf("Read wc mode\n");
	  //cap = VideoCapture(0);
		
	  	// PYLON CAMERA
		//PylonGrabber pg; 

	} else
	{
	  printf("Read Video mode\n");
	  cap = VideoCapture("vid.avi");
	}
	
	
	// enter file name if you want to run video i.e. exec "vid.avi"
	//if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
	//	cap.open(argc == 2 ? argv[1][0] - '0' : 0);
	//else if( argc == 2 )
	//	cap.open(argv[1]);

	//if( !cap.isOpened() )
	//{
	//	cout << "Could not initialize capturing...\n";
	//	getchar();

	//	return 0;
	//}

	help();

	namedWindow( "LK Demo", 1 );
	//setMouseCallback( "LK Demo", onMouse, 0 );

	Mat gray, prevGray, image;
	vector<Point2f> points[3];

	// read reference image
	//ref_img = imread("Picture 49.jpg");
	//ref_img = imread("Picture 3.jpg");
	//ref_img = imread("Picture 5.jpg");
	ref_img = imread("Picture 5.bmp");

	//char *filename = "data.log";
	//FileStorage fs( filename, FileStorage::WRITE );
	//string filename2("data2.log");
	//ofstream LoggerIn(filename2.c_str);
	//ofstream LoggerIn("data2.log");

	// error calculation vars
	vector<uchar> status;
	vector<float> err;

	vector<Point2f> ds;      // ds = s(k) - s(k-1); at every iteration
	vector<Point2f> error;   // error = s_star - s(k); at every iteration
	vector<Point2f> pnts_ref, pnts_ref_tmp; 
	Point2f p;
 
	// flag for optical flow
	int flag = 0; //OPTFLOW_USE_INITIAL_FLOW;
	
	int init_flag = 0;   // flag will raise after key 'r' is pressed
	
	Point2i pr, pi;
	Point2f prf, pif;
	
	// to run directly to init without pressing key 'r'
	//needToInit = 1;
	int nightMode = 0;
	int needToInit = 0;  // run video mode needs press key 'r'
	
	// ######## working with class STARTs ##########
	initChkBrdTracker(ref_img, pnts_ref);
	
	int debug = 0;
	// ######## working with class ENDs ##########

	// ######## working with PoseEstimator STARTs ##########
	PoseEstimator pe;
	ZhangPoseEstimator zpe("camera.yaml", "camera.yaml");
	//getchar();

	Mat H, M;

	Mat pnts_0, pnts_i, mask, H_RS;
	// ######## working with PoseEstimator ENDs ##########

	
	char c;
	Mat frame;
	//IplImage * imgframe=NULL;
	image = Mat::zeros(ref_img.rows, ref_img.cols, CV_8UC3);
	for(;;)
	{
		// cap >> frame;
		pg.grabFrame(frame);
		//frame = Mat(imgframe);

		if( frame.empty() )
		{
			printf("cannot read frame!\n");
			getchar();
			break;
		}

		//imshow("received image", frame);
		//waitKey(0);
		// frame.copyTo(image);
		frame.copyTo(gray);
		cvtColor(frame, image, CV_GRAY2BGR);
		cvtColor(frame, frame, CV_GRAY2BGR);
		
		//cvtColor(image, gray, CV_BGR2GRAY);  // it's already mono

		if(flag == 1)
		{
			MatchOneIter(frame, points[0], pnts_ref);		

			// with Post Processing combined in MatchOneIter, points[1] has been swapped to points[0]
			if (points[0].size() != pnts_ref.size())
			{
				printf("Error: PassToVS\n");
				getchar();
			}
		}

		imshow("LK Demo", frame);

		if (CamMode == 0)
		{
			// wait for enter at every step for video mode
			c = (char)waitKey(0);
		} else
		{
			// no wait for webcam mode
			c = (char)waitKey(1);
		}

		if( c == 27 )
			break;
		switch( c )
		{
		    case 'r':
				InitMatching(frame,points[0]);
				flag = 1;
			    break;
		    case 'g':
			    // call pose estimator
				//callPoseEst(pnts_ref, points[0], pe);
				callPoseEst(pnts_ref, points[0], frame, zpe);
			    break;
		    case 'n':
			    //nightMode = !nightMode;
			    break;
		    default:
			    ;
		}

	}

	//cvReleaseImage(&imgframe);

	//LoggerIn.close();

	return 0;
}


int HomDataAdapt(const vector<Point2f> pnts_ref, const vector<Point2f> pnts_img, Mat& pnts_0, Mat& pnts_i)
{
	int ret = 1;

	pnts_0 = Mat::zeros(1, pnts_ref.size(), CV_64FC2);
	pnts_i = Mat::zeros(1, pnts_ref.size(), CV_64FC2);   // RISK RISK RISK

	if (1)
	{
		for (int i=0; i<pnts_ref.size(); i++)
		{
			pnts_0.at<Vec2d>(i)[0] = pnts_ref[i].x;
			pnts_0.at<Vec2d>(i)[1] = pnts_ref[i].y;

			pnts_i.at<Vec2d>(i)[0] = pnts_img[i].x;
			pnts_i.at<Vec2d>(i)[1] = pnts_img[i].y;

		}
	} else
	{ 
		// RISK RISK RISK => give it a try
		pnts_0 = Mat(pnts_ref);
		pnts_i = Mat(pnts_img);
	}

	return ret;
}

int callPoseEst(const vector<Point2f> pnts_ref, const vector<Point2f> pnts_img, PoseEstimator& pe)
{
	int ret = 1;

	Mat pnts_0, pnts_i, mask, H_RS, M; 

	// call Homography PoseEstimator
	HomDataAdapt(pnts_ref, pnts_img, pnts_0, pnts_i);

	// log to file
	ofstream LoggerIn("ref.txt", ios_base::app);
	LoggerIn << pnts_0 << endl;

	ofstream LoggerIn1("img.txt", ios_base::app);
	LoggerIn1 << pnts_i << endl;

	H_RS = findHomography(pnts_0, pnts_i, mask, CV_FM_RANSAC, 5);

	// log to file
	ofstream LoggerIn2("H.txt", ios_base::app);
	LoggerIn2 << H_RS << endl;

	// call PoseEstimator
	pe.Estimate(H_RS, M);

	return ret;
}     


int callPoseEst(const vector<Point2f> pnts_ref, const vector<Point2f> pnts_img, Mat& image, ZhangPoseEstimator& zpe)
{
	int ret = 1;

	Mat pnts_0, pnts_i, mask, H_RS, M; 
	Mat rvec, tvec;

	// call Homography PoseEstimator
	HomDataAdapt(pnts_ref, pnts_img, pnts_0, pnts_i);

	// log to file
	ofstream LoggerIn("ref.txt", ios_base::app);
	LoggerIn << pnts_0 << endl;

	ofstream LoggerIn1("img.txt", ios_base::app);
	LoggerIn1 << pnts_i << endl;

	H_RS = findHomography(pnts_0, pnts_i, mask, CV_FM_RANSAC, 5);

	// log to file
	ofstream LoggerIn2("H.txt", ios_base::app);
	LoggerIn2 << H_RS << endl;

#define CHKBRD2   // Ban Mai Xanh Pattern

#ifdef PAT1 // PNC pattern on notebook
	double objpts[12] = {.0, .0, .0, 75.0, .0, .0, 75.0, 33.0, .0, .0, 33.0, .0};   // used for extrinsics est, objpts are in Cartes. Coord.
	double refpts[12] = {167.0, 405.0, 1.0, 313.0, 403.0, 1.0, 313.0, 465.0, 1.0, 167.0, 465.0, 1.0};   // used for extrinsics est, refpts are in Img. Coord., it's the projection of objpts
#endif

#ifdef CHKBRD // PNC pattern on notebook
	double objpts[12] = {.0, .0, .0, 66.0, .0, .0, 66.0, 52.5, .0, .0, 52.5, .0};   // used for extrinsics est, objpts are in Cartes. Coord.
	double refpts[12] = {197.0, 135.0, 1.0, 527.0, 128.0, 1.0, 529.0, 387.0, 1.0, 202.0, 385.0, 1.0};   // used for extrinsics est, refpts are in Img. Coord., it's the projection of objpts
#endif

#ifdef CHKBRD2 // checker board of Basler hi-res image
	double objpts[12] = {.0, .0, .0, 66.0, .0, .0, 66.0, 52.5, .0, .0, 52.5, .0};   // used for extrinsics est, objpts are in Cartes. Coord.
	double refpts[12] = {274.0, 220.0, 1.0, 1046.0, 234.0, 1.0, 1040.0, 838.0, 1.0, 266.0, 826.0, 1.0};   // used for extrinsics est, refpts are in Img. Coord., it's the projection of objpts
#endif

	Mat objPnts = Mat(1, 4, CV_64FC3, objpts);
	Mat refPnts = Mat(1, 4, CV_64FC3, refpts);
	Mat imgPnts = Mat::zeros(1, 4, CV_64FC2);

	Mat q0 = Mat::zeros(3, 1, CV_64FC1);
	Mat q1 = Mat::zeros(3, 1, CV_64FC1);
	float num; 
	Point2i p;
	for (int i=0; i<4; i++)
	{
		q0.at<double>(0) = refPnts.at<Vec3d>(i)[0];
		q0.at<double>(1) = refPnts.at<Vec3d>(i)[1];
		q0.at<double>(2) = refPnts.at<Vec3d>(i)[2];
		
		q1 = H_RS*q0;
		num = q1.at<double>(2);
		//cout << q1 << endl;
		q1 = q1/num;
		//cout << q1 << endl;
		
		imgPnts.at<Vec2d>(i)[0] = q1.at<double>(0);
		imgPnts.at<Vec2d>(i)[1] = q1.at<double>(1);

		p = Point2i(cvRound(refPnts.at<Vec3d>(i)[0]), 
		            cvRound(refPnts.at<Vec3d>(i)[1]));
		circle( image, p, 6, Scalar(0,255,255), -1, 8);

		p = Point2i(cvRound(imgPnts.at<Vec2d>(i)[0]), 
		            cvRound(imgPnts.at<Vec2d>(i)[1]));
		circle( image, p, 6, Scalar(255,0,255), -1, 8);
		//circle( image, Point2i(300,100), 6, Scalar(255,0,255), -1, 8);
	}

	// call PoseEstimator
	zpe.Estimate(objPnts, imgPnts, rvec, tvec);

	// get tracked pattern
	Point2i p0 = Point2i(cvRound(imgPnts.at<Vec2d>(0)[0]),  
		                 cvRound(imgPnts.at<Vec2d>(0)[1]));
	Point2i p1 = Point2i(cvRound(imgPnts.at<Vec2d>(2)[0]),  
		                 cvRound(imgPnts.at<Vec2d>(2)[1]));
	Mat roi(image, Rect(p0, p1));

	// log to output
	cout << rvec << endl;
	cout << tvec << endl;

	// log to file
	ofstream LoggerIn3("rvec.txt", ios_base::app);
	LoggerIn3 << rvec << endl;
	ofstream LoggerIn4("tvec.txt", ios_base::app);
	LoggerIn4 << tvec << endl;

	// check re-projection error
	Mat camMat, distCoeffs;
	zpe.getCamParams(camMat, distCoeffs);
	checkReprojectionError(image, objPnts, imgPnts, rvec, tvec, camMat, distCoeffs); // RISK => OK!

	// show separate frame
	imshow("projection",image);
	imshow("tracked pattern",roi);

	return ret;
}     



int checkReprojectionError(Mat& image, const Mat objPnts, const Mat imgPnts, const Mat rvec, const Mat tvec, const Mat camMat, const Mat distCoeffs)
{
	int ret = 1;

	Mat imgPntsB;

	projectPoints(objPnts, rvec, tvec, camMat, distCoeffs, imgPntsB, noArray(), 0 );

	//MatInfo(imgPntsB);

	double reproj_err = 0;
	double err=0;
	for (int i=0; i<4; i++)
	{
		//double a = cvRound(imgPntsB.at<Vec2d>(i)[0]);
		//double b = cvRound(imgPntsB.at<Vec2d>(i)[1]);

		Point2i pB = Point2i(cvRound(imgPntsB.at<Vec2d>(i)[0]), 
						    cvRound(imgPntsB.at<Vec2d>(i)[1]));
		circle(image, pB, 6, Scalar(0,0,0), -1, 8);

		Point2i p = Point2i(cvRound(imgPnts.at<Vec2d>(i)[0]), 
						    cvRound(imgPnts.at<Vec2d>(i)[1]));

		// calculate reprojection error
		err = sqrt((double)((pB.x - p.x)*(pB.x - p.x) + (pB.y - p.y)*(pB.y - p.y)));
		reproj_err = reproj_err + err;
	}

	cout << "projected points:" << endl;
	cout << imgPntsB << endl;
	cout << "Reprojection error:" << reproj_err << endl;
	// log to file
	ofstream LoggerIn3("reproj_err.txt", ios_base::app);
	LoggerIn3 << reproj_err << endl;

	return ret;
}



int initChkBrdTracker(Mat& frame, vector<Point2f>& pnts_ref)
{
	int ret = 1;

	Mat gray_image;

	cvtColor(frame, gray_image, CV_BGR2GRAY, 0);

	bool found = findChessboardCorners(frame,
				sBoardSize,
				pnts_ref,
				CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

	if(found)
		cornerSubPix(gray_image,
					pnts_ref,
					Size(11, 11),
					Size(-1, -1),
					TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
					);

	drawChessboardCorners(frame, sBoardSize, pnts_ref, found);

	//cout << pnts_ref << endl;

	//imshow("board", frame);
	//waitKey(0);

	return ret;
}


int MatchOneIter(Mat& frame, vector<Point2f>& points, vector<Point2f>& pnts_ref)
{
	int ret = 1;

	Mat gray_image;

	cvtColor(frame, gray_image, CV_BGR2GRAY, 0);

	bool found = findChessboardCorners(frame,
				sBoardSize,
				points,
				CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

	if(found)
		cornerSubPix(gray_image,
					points,
					Size(11, 11),
					Size(-1, -1),
					TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
					);

	drawChessboardCorners(frame, sBoardSize, points, found);

	//cout << points << endl;

	//imshow("board", frame);
	//waitKey(0);


	for (int i=0; i<points.size(); i++)
	  {	
		// draw all the matching lines between original and current frame
		Point2i pr = Point2i(cvRound(pnts_ref[i].x),
					  cvRound(pnts_ref[i].y));
		Point2i pi = Point2i(cvRound(points[i].x),
					cvRound(points[i].y));
				
		// the vector pointing from prev to current frame
		line( frame, pr, pi, CV_RGB(255,0,0),2 );
		circle( frame, pr, 3, Scalar(0,255,0), -1, 8);
	    
		//p.x = matched_pnts_ref.at<Vec2f>(i)[0] - points[1][i].x;
		//p.y = matched_pnts_ref.at<Vec2f>(i)[1] - points[1][i].y;

		//error.push_back(p);
	  }



	return ret; 
}
