//by Chen Feng (cforrest at umich.edu)
//standard include
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
//opencv
#include "opencv2/opencv.hpp"

#include "phonyferns.h"
#include "config.h"
using namespace std;
using namespace cv;

// Color constants for drawing on BGR images (OpenCV's default channel order
// for frames from VideoCapture / images converted with CV_GRAY2BGR).
// BUGFIX: cv::Scalar is (Blue, Green, Red) on such images, so red must be
// Scalar(0,0,255); the previous definitions drew CV_RED as blue and vice versa.
#define CV_RED		Scalar(0,0,255)
#define CV_GREEN	Scalar(0,255,0)
#define CV_BLUE		Scalar(255,0,0)
#define CV_WHITE	Scalar(255,255,255)
#define CV_BLACK	Scalar(0,0,0)
#define CV_GRAY		Scalar(128,128,128)

/*#############################################################################
                        Global Vars
#############################################################################*/
int patchSize = 32; // side length of the square keypoint patch (32*32 pixels)
//gaussian blur applied to the model image before training (see prepare())
int blurKSize = 3; // Gaussian kernel size; should be odd
double sigma = 0;  // 0 lets OpenCV derive sigma from the kernel size
//patch generator: random-warp ranges used to synthesize training views
double scaleMin = 0.5;
double scaleMax = 2;
double thetaMin = -CV_PI; // in-plane rotation range (radians)
double thetaMax = CV_PI;
double phiMin = -CV_PI;   // out-of-plane rotation range (radians)
double phiMax = CV_PI;
//dynamic threshold: target keypoint counts for the adaptive FAST detector
//int thresh=20, dt=1;
int train_setup_num=300, train_diffbound=20;   // training: aim for 300 +/- 20 keypoints
int detect_setup_num=500, detect_diffbound=50; // per-frame: aim for 500 +/- 50 keypoints
//train parameters
int ferns_num=80;         // number of ferns in the classifier
int tests_per_fern=9;     // binary tests per fern
int synth_view_num=10000; // synthesized views generated during training
//ROI of the model image; -1 in any field means "use the whole image"
int roiTop=-1,roiBottom=-1,roiLeft=-1,roiRight=-1;
//inlier threshold to be consider as recognition success
int	inlier_num_thresh = 20;
//debugging: when true, process() draws all keypoints and match candidates
bool debugging = true;
//detector and keypoint buffers shared between prepare() and process()
PhonyPlanarObjectDetector detector;
vector<KeyPoint> objKeypoints, imgKeypoints;
Mat object; // grayscale (optionally ROI-cropped, blurred) model image
//double Homo[9];
// camera intrinsic matrix, row-major 3x3 (used by the disabled pose code)
double K[9] = {
	9.1556072719327040e+02, 0., 3.1659567931197148e+02,
	0.,	9.2300384975219845e+02, 2.8310067999512370e+02,
	0., 0., 1.
};

/*#############################################################################
                        Global Functions
#############################################################################*/
void config()
{
	Config cfg("parameter.config");
	cfg.pInt("patchSize", patchSize);
	cfg.pInt("blurKSize", blurKSize);
	cfg.pDouble("sigma", sigma);
	cfg.pDouble("scaleMin", scaleMin);
	cfg.pDouble("scaleMax", scaleMax);
	cfg.pDouble("thetaMax", thetaMax);
	cfg.pDouble("thetaMin", thetaMin);
	cfg.pDouble("phiMax", phiMax);
	cfg.pDouble("phiMin", phiMin);
//	cfg.pInt("thresh", thresh);
//	cfg.pInt("dt", dt);
	cfg.pInt("train_setup_num", train_setup_num);
	cfg.pInt("train_diffbound", train_diffbound);
	cfg.pInt("detect_setup_num", detect_setup_num);
	cfg.pInt("detect_diffbound", detect_diffbound);
	cfg.pInt("ferns_num", ferns_num);
	cfg.pInt("tests_per_fern", tests_per_fern);
	cfg.pInt("synth_view_num", synth_view_num);
	cfg.pInt("roiTop", roiTop);
	cfg.pInt("roiBottom", roiBottom);
	cfg.pInt("roiLeft", roiLeft);
	cfg.pInt("roiRight", roiRight);
	cfg.pBool("debugging", debugging);
	cfg.pInt("inlier_num_thresh", inlier_num_thresh);
}

// Print command-line usage and the interactive key bindings to stdout.
void help(char** av) {
	cout << "[help] Usage: " << av[0]
	     << " <to-track image path> [video_device_number or video_file_path]"
	     << endl;
	cout << "\tIf training file : <to-track image path>.train not found, "
	        "will perform training first and then begin tracking.\n"
	        "video_device_number: default 0, i.e. webcam\n"
	        "q,Q,esc -- quit\n"
	        "space   -- save frame\n\n"
	     << endl;
}

// Detect keypoints in `mat` with a FAST detector whose threshold is adapted
// until the keypoint count falls inside the configured training window
// [train_setup_num - train_diffbound, train_setup_num + train_diffbound].
inline void getkeys(Mat& mat, vector<KeyPoint>& keys)
{
	//useAdaptiveFast();
	const int minKeys = train_setup_num - train_diffbound;
	const int maxKeys = train_setup_num + train_diffbound;
	Ptr<FeatureDetector> adaptiveFast(
		new DynamicAdaptedFeatureDetector(
			new FastAdjuster(20, true), // initial threshold 20, nonmax suppression on
			minKeys,
			maxKeys,
			10000) // max adjustment iterations
	);
	adaptiveFast->detect(mat, keys);
}

// Load a previously trained detector from `train_filename`; if loading
// fails, train a new ferns-based planar object detector on the global model
// image `object` (keypoints found via getkeys()) and save the result under
// the same filename. Progress is reported on stdout.
void getdetector(string train_filename)
{
	cout<<"[getdetector] Trying to load :\n"<<train_filename<<endl;
	if( detector.read_binary_detector(train_filename) )	{
		cout<<"[getdetector] Successfully loaded!"<<endl;
	}	else {
		cout<<"[getdetector] Failed to load training file: "<<endl;
		cout<<train_filename<<endl;
		cout<<"[getdetector] Begin Training..."<<endl;

		// Random-warp patch generator for synthesizing training views.
		// Constructed here rather than at function entry: it is only needed
		// when we actually train, not when a saved model loads successfully.
		PatchGenerator gen(0, //background Min
			256, //background Max
			5, //noiseRange
			true, //randomBlur
			scaleMin,
			scaleMax,
			thetaMin,
			thetaMax,
			phiMin,
			phiMax
		);

		cout<<"[getdetector] Step 1. Finding keypoints ..."<<endl;
		getkeys(object, objKeypoints);
		cout<<"\tfind "<<(int)objKeypoints.size()<<" points."<<endl;
		cout<<"[getdetector] Step 2. Training "
			"ferns-based planar object detector ..."<<endl;
		detector.setVerbose(true);

		detector.train(object, objKeypoints,
			patchSize,
			ferns_num, //number of Ferns
			tests_per_fern, //number of Features per Fern
			synth_view_num, //number of synthesized views
			gen);
		cout<<"Done.\n[getdetector] Step 3. "
			"Saving the model to :\n"<<train_filename<<endl;
		if( detector.write_binary_detector(train_filename) ) {
			cout<<"[getdetector] Successfully saved!"<<endl;
		} else {
			cout<<"[getdetector] failed to save!"<<endl;
		}
	}
}

// Load the model image named by argv[1], crop the optional configured ROI,
// blur it, and load/train the planar-object detector; finally show the model
// image with its trained keypoints in an "Object" window.
// Exits the program if the image cannot be loaded: continuing would let
// main() run the detection loop with an untrained detector.
void prepare(int argc, char** argv)
{
	string model_image_path(argv[1]);
	string train_filename = model_image_path + string(".train");

	Mat rawobject = imread( model_image_path, CV_LOAD_IMAGE_GRAYSCALE );
	if( !rawobject.data ) {
		cout<<"[main] Can not load :\n"<<model_image_path<<endl;
		help(argv);
		// BUGFIX: previously returned, and main() went on to open the video
		// device and run detection on an empty, untrained model.
		exit(-1);
	}

	if(roiTop==-1 || roiBottom==-1 || roiLeft==-1 || roiRight==-1) {
		object = rawobject; // no ROI configured: track the whole image
	} else {
		// NOTE(review): OpenCV throws if the ROI exceeds the image bounds;
		// assumes parameter.config holds a valid ROI -- confirm upstream.
		cv::Rect rect(roiLeft, roiTop, roiRight-roiLeft, roiBottom-roiTop);
		object = rawobject(rect);
	}

	// smooth the model image; sigma==0 lets OpenCV pick it from blurKSize
	GaussianBlur(object, object, Size(blurKSize, blurKSize), sigma, sigma);

	// The tracking parameters (ESM refinement setup, kept for reference)
//	int miter = 3,  mprec = 3;
//	int posx = 0, posy = 0;
//	int sizx = object.cols, sizy = object.rows;
//	if(!esm.init(object,posx,posy,sizx,sizy,miter,mprec)) {
//		exit(0);
//	}

	getdetector(train_filename);

	//show the model image with its trained keypoints marked in red
	cvNamedWindow("Object");
	objKeypoints = detector.getModelPoints();
	cout<<"obj key points num = "<<objKeypoints.size()<<endl;
	Mat objectColor;
	cvtColor(object, objectColor, CV_GRAY2BGR);
	for( int i = 0; i < (int)objKeypoints.size(); i++ ) {
		circle( objectColor, objKeypoints[i].pt, 2, CV_RED, -1 );
	}
	imshow( "Object", objectColor );
}

// Main detection loop: grab frames from `capture`, find keypoints with an
// adaptive FAST detector, match them against the trained ferns model, and
// draw debug markers plus the detected object outline. Per-frame timing and
// match statistics are printed to stdout.
// Keys: q/Q/esc quit (returns 0); SPACE saves the grayscale frame to "save.jpg".
int process(VideoCapture& capture)
{
	cv::Rect rect = detector.getModelROI();
	// 3D reference points for the (currently disabled) pose-drawing code
	// below: the four model corners plus elevated points for a vertical axis.
	// NOTE(review): crns[2][2]==rect.height while the other corners use z==0
	// -- looks like a typo; confirm before re-enabling the pose code.
	double crns[5][3] = {
		{rect.x, rect.y, 0},
		{rect.x+rect.width, rect.y, 0},
		{rect.x+rect.width, rect.y+rect.height, rect.height},
		{rect.x, rect.y+rect.height, 0},
		{rect.x, rect.y, rect.height}
	};
	Scalar colors[4] = { // edge colors for the disabled pose-drawing code
		CV_RED,
		CV_BLACK,
		CV_GREEN,
		CV_BLUE,
	};

	// Per-frame FAST detector: adapts its threshold toward
	// detect_setup_num +/- detect_diffbound keypoints, one step per frame.
	Ptr<FeatureDetector> dynFastDetector(
		new DynamicAdaptedFeatureDetector(
			new FastAdjuster(50,true),
			detect_setup_num-detect_diffbound,
			detect_setup_num+detect_diffbound,
			1)
	);

	string winname = "PhonyFerns | q or esc to quit";
	namedWindow(winname);

	Mat oframe, gframe;

	bool stop = false;
	for (;!stop;) {
		capture >> oframe;
		if (oframe.empty())
			continue;
		// BUGFIX: VideoCapture delivers BGR frames; the original CV_RGB2GRAY
		// swapped the red/blue weights in the grayscale conversion.
		cvtColor(oframe, gframe, CV_BGR2GRAY);

		/////////////////////per frame
		vector<Point2f> dst_corners; // projected model corners (when found)
		vector<Point2f> toPt;        // keypoints classified by the ferns
		vector<int> pairs;           // homography inliers, two indices per match
		Mat H;                       // model->frame homography

		double t = (double)getTickCount();
		dynFastDetector->detect(gframe, imgKeypoints);
		double tfast = (double)getTickCount() -t;
		cout<<"getkeys="<<( tfast*1000/getTickFrequency() )<<" ";

		// Recognition succeeds only with enough homography inliers
		// (pairs holds two indices per match, hence the *2).
		bool found = detector(
			gframe,
			imgKeypoints,
			toPt,
			H,
			dst_corners,
			&pairs)
			&& ((int)pairs.size()>=inlier_num_thresh*2);
		t = (double)getTickCount() - t;
		cout<<"fps="<<( 1000.0/( t*1000/getTickFrequency() ) )<<"\n";
		cout<<"homo/ferns/getkeys="<< pairs.size()/2 << "/" << toPt.size()
			<< "/" << imgKeypoints.size() << " : homo/ferns="
			<< (float)pairs.size()/2.0/toPt.size() << ", ferns/getkeys="
			<< (float)toPt.size()/(float)imgKeypoints.size() << "\n";

		if(debugging) {
			// red: all FAST keypoints in this frame
			for( int i = 0; i < (int)imgKeypoints.size(); ++i )	{
				circle(oframe, imgKeypoints[i].pt, 2, CV_RED, -1 );
			}
			// blue: ferns-classified match candidates
			for( int i= 0; i<(int)toPt.size(); ++i ) {
				circle(oframe, toPt[i], 2, CV_BLUE, 2);
			}
			// green: homography inliers (frame-side index is pairs[i+1])
			for( int i = 0; i < (int)pairs.size(); i += 2 )	{
				circle(oframe, imgKeypoints[pairs[i+1]].pt, 3, CV_GREEN, -1);
			}
		}
		if( found )	{

		//ESM refinement
//		for(int i=0; i<3; i++) {
//			for(int j=0; j<3; j++) {
//				esm.T.homog[i*3+j] = H.at<double>(i,j);
//			}
//		}
//		if(esm.run(gframe)) {
//			for(int i=0; i<3; i++) {
//				for(int j=0; j<3; j++) {
//					H.at<double>(i,j) = esm.T.homog[i*3+j]/esm.T.homog[8];
//				}
//			}
//		}
			// outline the detected object with its projected corners
			for( int i = 0; i < 4; ++i ) {
				Point r1 = dst_corners[i%4];
				Point r2 = dst_corners[(i+1)%4];
				line( oframe, r1, r2, CV_GREEN, 3 );
			}
//			//homo to P
//			double Homo[9];
//			std::copy(H.begin<double>(), H.end<double>(), Homo);
//			double R[9],T[3],P[12],Rf[9];
//			CameraHelper::RTfromKH(K,Homo,R,T);
//			double R0[9]={0,1,0,1,0,0,0,0,-1};
//			helper::mul(3,3,3,3,R,R0,Rf);
//			CameraHelper::compose(K,Rf,T,P,false);
//			double p[5][2];
//			for(int i=0; i<5; ++i) {
//				CameraHelper::project(P,crns[i],p[i]);
//			}
//			for(int i=0; i<4; ++i) {
//				Point r1(p[0][0],p[0][1]);
//				Point r2(p[i+1][0],p[i+1][1]);
//				line( oframe, r1, r2, colors[i], 3 );
//			}
//			std::cout<<"R=\n"<<helper::PrintMat<>(3,3,R)<<std::endl;
//			std::cout<<"T=\n"<<helper::PrintMat<>(1,3,T)<<std::endl;
//			std::cout<<"norm(T)="<<sqrt(T[0]*T[0]+T[1]*T[1]+T[2]*T[2])<<std::endl;
		}
		imshow(winname, oframe);
		/////////////////////

		int key = cvWaitKey(5);
		if (key >= 0) {
			switch(char(key)) {
			case 'q': case 'Q': case 27:
				stop = true; break;
			case ' ':
				cv::imwrite("save.jpg", gframe);
				cout<<"captured"<<endl;	break;
			}
		}
	}

	//post process

	return 0;
}

// Entry point: parse arguments, read the config file, load/train the
// detector, then run the detection loop on the requested video source.
int main(int argc, char** argv)
{
	if (argc < 2) {
		help(argv);
		return -1;
	}

	config();

	prepare(argc, argv);

	// Optional second argument selects the video source; default is camera 0.
	std::string source = (argc >= 3) ? std::string(argv[2]) : string("0");

	VideoCapture capture(source);
	if (!capture.isOpened()) {
		// Not openable as a file path -- retry as a numeric camera index.
		capture.open(atoi(source.c_str()));
	}
	if (!capture.isOpened()) {
		cerr << "[main] Failed to open a video device or video file!\n" << endl;
		help(argv);
		return -1;
	}

	if (capture.set(CV_CAP_PROP_FRAME_WIDTH, 640))
		cout << "[main] video width=640" << endl;
	if (capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480))
		cout << "[main] video height=480" << endl;

	return process(capture);
}
