#include <cstdio>
#include <cstdlib>
//#include <tchar.h>

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/video/video.hpp"
#include <opencv2/gpu/gpu.hpp>


#include "VideoRead.h"

#include "FeatureComputer.h"

#include "Classifier.h"

#include "Bagging.h"

#include "PostProcessing.h"

#include "graph.h"
// Extract one color channel of `frm` into `frm_gray`.
//
// `channel_code` selects a plane from one of three color spaces:
//   'R','G','B' -> RGB, 'L','a','b' -> CIE Lab, 'H','S','V' -> HSV (full range).
// An unrecognized code falls back to the 'R' plane, matching index 0.
void getChannel( Mat & frm, Mat & frm_gray , char channel_code)
{
	// Channel letters listed in conversion order: RGB, Lab, HSV.
	static const char letters[] = "RGBLabHSV";

	int id = 0;
	for(int i = 0; i < 9; i++)
	{
		if( letters[i] == channel_code )
		{
			id = i;
			break;
		}
	}

	// Pick the BGR -> target-space conversion for the chosen triplet.
	int code = CV_BGR2RGB;
	if( id >= 6 )      code = CV_BGR2HSV_FULL;
	else if( id >= 3 ) code = CV_BGR2Lab;

	Mat converted;
	cvtColor( frm, converted, code );

	vector<Mat> planes;
	cv::split( converted, planes );

	// id % 3 is the plane index inside the selected color space.
	planes[id % 3].copyTo( frm_gray );
}

// Command-line configuration hub for the application: parses argv-style
// switches, stores every runtime option with sensible defaults, and hands
// out configured video-reader descriptors plus the selected classifier.
class LcInterface
{
public:

	LcInterface();
	// Parses command-line switches (see input() for the switch table)
	// and then calls init() to fill in defaults.
	void input( int argc, char * const argv[]);

	// Returns the classifier created by setClassifier().
	LcClassifier*  getClassifier(){ return my_classifier;}

	// Reader descriptors configured for the training / prediction clip.
	LcVideoReadExt getTrainVideo();
	LcVideoReadExt getTestVideo();


	bool IF_TRAIN;  // -a: run the training pass
	bool IF_TEST;   // -b: run the prediction/evaluation pass

	// Training-clip frame window; -1 means "unset, use defaults".
	int train_f_start;
	int train_f_end;
	int train_f_rate;

	// Prediction-clip frame window (-s / -e / -r); -1 means "unset".
	int test_f_start;
	int test_f_end;
	int test_f_rate;

	string root;  // -f: dataset root directory

	string train_video_name;  // -t
	string test_video_name;   // -p

	string feature_code_string;  // -v: feature-extractor code letters

	string classifier_name;  // -c: currently only "rdt" is handled

	string addition_string;  // -z: suffix appended to report file names

	// Fills data_map with the known clips and their default frame windows.
	void buildDatasetmap();

	// clip name -> (default start frame, end frame, frame rate step)
	map< string , Vec3i> data_map;

	// Looks `name` up in data_map and fills any negative (unset) frame
	// parameters with the clip's defaults; returns false if unknown.
	bool setVideo( string name ,int & start, int & end , int & rate);
	// Instantiates my_classifier from classifier_name.
	void setClassifier();

	// Applies defaults after input() has consumed the command line.
	void init();
	

	LcClassifier* my_classifier;  // owned; created by setClassifier()

	bool IF_POSTPROCESSING;  // -d clears this (disables post-processing)
	bool IF_MRFSMOOTING;     // -g clears this (disables graph-cut smoothing)

	float edge_penalty_scale;  // -h: >0 enables the edge penalty on the mask
	float max_edge_weight;     // -i: >0 enables gradient-weighted MRF edges

	float half_lambda;  // -j: pairwise (smoothness) weight in the graph cut

	float bias_enhance;  // -k: additive bias applied to the unary mask

	char channel_code;  // -l: channel used for edge maps (see getChannel)
	
	int bound_setting;  // -n: border margin excluded from scoring
};

// Build a reader descriptor for the training clip from the stored settings.
LcVideoReadExt LcInterface::getTrainVideo()
{
	LcVideoReadExt reader;
	reader.video_name = train_video_name;
	reader.f_start = train_f_start;
	reader.f_end = train_f_end;
	reader.f_rate = train_f_rate;
	reader.t_win = 0;
	reader.root = root;
	return reader;
}

// Build a reader descriptor for the prediction clip from the stored settings.
LcVideoReadExt LcInterface::getTestVideo()
{
	LcVideoReadExt reader;
	reader.video_name = test_video_name;
	reader.f_start = test_f_start;
	reader.f_end = test_f_end;
	reader.f_rate = test_f_rate;
	reader.t_win = 0;
	reader.root = root;
	return reader;
}

void LcInterface::setClassifier()
{
	if(classifier_name == "rdt")
	{
		my_classifier = new LcRandomTrees;
	}
}
// Fill in defaults after the command line has been consumed:
// inherit test-frame settings for training when no test pass runs,
// default the feature code / classifier / clip names, and resolve the
// frame windows of both clips from the dataset map.
void LcInterface::init()
{
	// With no test pass, the training clip inherits any frame settings
	// the user supplied via -s / -e / -r.
	if(!IF_TEST)
	{
		if(train_f_start < 0) train_f_start = test_f_start;
		if(train_f_end   < 0) train_f_end   = test_f_end;
		if(train_f_rate  < 0) train_f_rate  = test_f_rate;
	}

	if(feature_code_string.empty()) feature_code_string = "l";

	if(classifier_name.empty()) classifier_name = "rdt";
	setClassifier();

	if(train_video_name.empty()) train_video_name = "GOPR2382_cab";
	setVideo( train_video_name, train_f_start, train_f_end, train_f_rate);

	if(test_video_name.empty()) test_video_name = "GOPR2384_cab";
	setVideo( test_video_name, test_f_start, test_f_end, test_f_rate);
}

// Default-construct the configuration: frame windows start at -1
// ("unset"; init() fills them from the dataset map), smoothing and
// post-processing are on, and all tunables get their neutral values.
LcInterface::LcInterface()
{
	train_f_start = -1;
	train_f_rate = -1;
	train_f_end = -1;

	test_f_start = -1;
	test_f_rate = -1;
	test_f_end = -1;

	IF_POSTPROCESSING = true;
	IF_MRFSMOOTING = true;

	edge_penalty_scale = 0.0f;	// 0 disables the edge penalty
	max_edge_weight = -1.0f;	// <=0 disables gradient-weighted edges

	half_lambda = 0.2f;

	bias_enhance = 0.0f;

	bound_setting = 0;
	root = "./../_FGS/";
	train_video_name = "";
	test_video_name = "";
	feature_code_string = "";
	classifier_name = "";

	IF_TEST = false;
	IF_TRAIN = false;

	addition_string = "";

	// Fix: this pointer was previously left uninitialized; if
	// setClassifier() did not match classifier_name, getClassifier()
	// would return an indeterminate pointer.
	my_classifier = NULL;

	buildDatasetmap();

	channel_code = 'L';
}

// Register the known clips with their default (start, end, rate) windows.
void LcInterface::buildDatasetmap()
{
	data_map.clear();
	data_map["GOPR2382_cab"] = Vec3i(150,5300,50);
	data_map["GOPR2384_cab"] = Vec3i(150,11200,25);
}

// Resolve the frame window for clip `name`: any negative (unset) value
// is replaced by the clip's default from data_map. Returns false (with a
// console warning) when the clip is not registered.
bool LcInterface::setVideo( string name ,int & start, int & end , int & rate)
{
	map<string , Vec3i>::iterator found = data_map.find( name );
	if( found == data_map.end() )
	{
		cout << "One of Video is not in default" << endl;
		return false;
	}

	const Vec3i & defaults = found->second;
	if( start < 0 ) start = defaults[0];
	if( end   < 0 ) end   = defaults[1];
	if( rate  < 0 ) rate  = defaults[2];
	return true;
}

void LcInterface::input( int argc, char * const argv[])
{
	for(int ind = 1;ind < argc;ind++)
	{
		/*** Parse switches ***/
		if (argv[ind][0] == '-') {
			switch (argv[ind][1]) {
				case 'f':
					root = string(argv[++ind]);
					break;
				case 't':
					train_video_name = string(argv[++ind]);
					break;
				case 'p':
					test_video_name = string(argv[++ind]);
					break;
				case 'v':
					feature_code_string =  string(argv[++ind]);
					break;
				case 'a':
					IF_TRAIN = true;
					break;
				case 'b':
					IF_TEST = true;
					break;
				case 's':
					//only for predict video setting , for trainning video it's always solid with frame
					// unless it's no testing model then the test frame setting will be use
					// as training parameter
					test_f_start =  atoi(argv[++ind]);
					break;
				case 'e':
					test_f_end = atoi(argv[++ind]);
					break;
				case 'r':
					test_f_rate = atoi(argv[++ind]);
					break;
				case 'c':
					classifier_name = string(argv[++ind]);
					break;
				case 'd':
					IF_POSTPROCESSING = false;
					break;
				case 'g':
					IF_MRFSMOOTING = false;
					break;
				case 'h':
					edge_penalty_scale = (float)atof(argv[++ind]);
					break;
				case 'i':
					max_edge_weight = (float)atof(argv[++ind]);
					break;
				case 'j':					
					half_lambda = (float)atof(argv[++ind]);
					break;
				case 'k':
					bias_enhance = (float)atof(argv[++ind]);
					break;
				case 'l':
					channel_code = argv[++ind][0];
					break;
				case 'n':
					bound_setting = atoi(argv[++ind]);
					break;
				case 'z':
					addition_string = string(argv[++ind]);
					break;
				default : printf("Unknown switch '%c'\n", argv[ind][1]);
				break;
			}
		}
	}

	init();
}


// Program entry point.
//
// Pipeline: parse the command line into `my_setting`, then
//   1) if -a: train a bagging classifier on the training clip and save it
//      under <root>/classifier_output/bagging/;
//   2) if -b: load (or reuse) the classifier and walk the test clip frame
//      by frame — predict a per-pixel likelihood mask, smooth it with a
//      graph-cut MRF, post-process, score against ground truth, and append
//      per-frame / cumulative results to files under ./report/.
int main(int argc, char * const argv[])
{


	LcInterface my_setting;
	// Canned argument lists kept around for local debugging runs.
	//char *const setting_string[] ={ "","-b","-v","l","-t","GOPR2384_cab","-s", "150", "-e", "5300", "-r","50","-p","GOPR2382_cab","-f","./../../_FGS/","-j","0.2"};
	//char *const setting_string[] ={ "","-b","-v","l","-t","GOPR2384_cab","-s", "20", "-e", "3100", "-r","20","-p","GOPR2393_cab","-f","./../../_FGS/"};
	//my_setting.input( 15 + 3 ,setting_string);

	my_setting.input( argc, argv);
	
	LcBagging bagging;

	// ---------------- Training pass (-a) ----------------
	if(my_setting.IF_TRAIN)
	{
		
		LcVideoReadExt my_reader = my_setting.getTrainVideo();
		bagging.setClassifier( my_setting.getClassifier());
		bagging.setFeatureCode( my_setting.feature_code_string.c_str() );
		bagging.setReader( &my_reader );
		bagging.veb = 1;

		// Bagging hyper-parameters come from a side-car text file:
		// first int = number of classifiers, second = per-classifier
		// instance cap.
		// NOTE(review): fopen/fscanf results are unchecked — a missing
		// bagging_config.txt will crash here; confirm intended.
		{
			FILE * fid = fopen("bagging_config.txt","r");
			int n_classifier; int instance_limit;
			fscanf(fid, "%d%d",&n_classifier, &instance_limit);
			bagging.n_classifier = n_classifier;
			bagging.instance_limit = instance_limit;
			fclose(fid);
		}

		bagging.train( my_reader);

		// Persist the trained ensemble next to the dataset.
		{
			string saving_name = my_reader.root + "classifier_output/bagging/";
			bagging.save(saving_name.c_str());
		}
	}

	// ---------------- Prediction / evaluation pass (-b) ----------------
	if(my_setting.IF_TEST)
	{
	
		//LcBagging bagging;
		
		LcVideoReadExt train_reader = my_setting.getTrainVideo();
		LcVideoReadExt test_reader = my_setting.getTestVideo();
		
		// If we did not just train in this run, load the saved ensemble.
		if(!my_setting.IF_TRAIN)
		{
		
			bagging.setClassifier( my_setting.getClassifier());
			bagging.setFeatureCode( my_setting.feature_code_string.c_str() );
			bagging.setReader( &train_reader );

			bagging.veb = 1;
			{
				string saving_name = test_reader.root + "classifier_output/bagging/";
				bagging.load(saving_name.c_str());
			}
		}

		vector< LcValidator > validator_array;
		//bagging.predict( test_reader, validator_array);

		test_reader.reset();


		// Particle-filter trackers (currently unused below because of the
		// unconditional `continue` in the frame loop).
		int obj_to_track = 3;

		vector<ContourPF> my_tracker;

		my_tracker.resize( obj_to_track );

		vector< int > if_tracker_work;

		for(int i = 0 ; i < obj_to_track; i++) if_tracker_work.push_back( 0 );

		vector< Mat > tracker_res; tracker_res.resize( obj_to_track );

		MultiTracker multi_tracker;


		VideoWriter avi;

		// Running confusion-matrix totals across all scored frames.
		LcValidator sum_validator(0,0,0,0);

		// Frame loop; 4e5 is just "large enough" — normally the loop ends
		// when the reader reports status -1 (see the break below).
		for(int i = 0 ; i < 4e5 ; i++ )
		{
			Mat frm,  gt;


			//int statu = test_reader.readNext( frm );

			// Advance test_f_rate frames, keeping only the last one read.
			// NOTE(review): if test_f_rate <= 0 the loop body never runs
			// and `statu` is read uninitialized below — confirm the rate
			// is always positive.
			int statu;
			for(int j = 0; j <my_setting.test_f_rate;j++)
				statu = test_reader.read( frm );
				
			{//just for saving the src image			
				{

					stringstream ss;

					ss <<  my_setting.root + "/src/frm_"+ my_setting.test_video_name + "_" <<  test_reader.getf() << ".jpg";

					cv::imwrite(ss.str().c_str(), frm);
				}
			}
			
			// NOTE(review): this unconditional `continue` skips the entire
			// pipeline below (prediction, MRF smoothing, scoring) — as
			// written, the loop only dumps source frames, and even the
			// statu==-1 break is unreachable. Looks like a temporary
			// frame-export mode; remove it to restore the full pipeline.
			continue;

			cout << "===work on frame " << test_reader.getf() << "===" << endl;

			// Save the frame again under ./likelihood/ (same raw frame;
			// presumably a placeholder for the likelihood image).
			{
				{

					stringstream ss;

					ss <<  "./likelihood/frm_"+ my_setting.test_video_name + "_on_" +
							my_setting.train_video_name + "_" + my_setting.feature_code_string +"_" << 
							test_reader.getf() << ".jpg";

					cv::imwrite(ss.str().c_str(), frm);
				}
			}
			
			
			// Reader signals end-of-sequence with -1.
			if(statu ==-1) break;

			//{
			//	Mat temp;
			//	for(int j=0;j<9;j++) if(test_reader.read( temp )==-1) break;
			//}

			//test_reader.getLabel( gt );

			// Per-pixel feature extraction followed by ensemble prediction.
			LcFeatureExtractor my_extractor;
			my_extractor.set_extractor( my_setting.feature_code_string );

			Mat desc, lab;

			// NOTE(review): explicit "Mat::Mat()" is non-standard C++
			// (old MSVC accepts it); the portable spelling is "lab = Mat();".
			lab = Mat::Mat();

			//my_extractor.work( frm, desc, gt, lab );
			my_extractor.work( frm, desc);

			Mat res;

			bagging.predict( desc, res, lab);

			Mat mask;

			// Reshape the per-row predictions back into an image mask.
			getLabelMask( res , mask, frm.size() , my_extractor.bound_setting);
			
			medianBlur(mask,mask,5);

			// 5 == CV_32F: convert to float and normalize to [0,1].
			mask.convertTo( mask,5);

			mask = mask* (1.0/255.0);			

			bool IF_BLUR = true;

			// Local copies of the smoothing configuration.
			bool IF_POSTPROCESSING = my_setting.IF_POSTPROCESSING;
			bool IF_MRFSMOOTING = my_setting.IF_MRFSMOOTING;

			bool IF_EDGE_PENALTY = false;	
			float edge_penalty_scale = my_setting.edge_penalty_scale;
			if(edge_penalty_scale>1e-3) IF_EDGE_PENALTY = true;

			float max_edge_weight = my_setting.max_edge_weight;
			bool IF_EDGE_WEIGHT = false;
			if(max_edge_weight > 0) IF_EDGE_WEIGHT = true;

			float half_lambda = my_setting.half_lambda;

			float bias_enhance = my_setting.bias_enhance;
			
			char channel_code = my_setting.channel_code;			

			// ---- Graph-cut (MRF) smoothing of the likelihood mask ----
			if(IF_MRFSMOOTING){
				if( IF_BLUR ) // if blur?
				{
					GaussianBlur(mask, mask, Size(9,9), 5,5);
				}
				
				// Image gradients used for edge penalties / edge weights.
				// 'Q' means "use all three color planes averaged"; any
				// other code picks a single channel via getChannel().
				Mat frm_x, frm_y;
				if(channel_code != 'Q' ){
					Mat frm_gray;
					//cv::cvtColor(frm, frm_gray, CV_RGB2GRAY);
					getChannel( frm, frm_gray , channel_code);

					GaussianBlur(frm_gray, frm_gray, Size(9,9), 3,3);
					
					Sobel( frm_gray , frm_x, CV_8U, 1, 0, 3);
					Sobel( frm_gray , frm_y, CV_8U, 0, 1, 3);

					// 5 == CV_32F
					frm_x.convertTo(frm_x, 5);
					frm_y.convertTo(frm_y, 5);
				}
				else
				{
					Mat frm_blur;
					GaussianBlur(frm, frm_blur, Size(9,9), 3,3);

					vector<Mat> frm_rgb;

					cv::split( frm_blur,frm_rgb);

					frm_x = Mat::zeros( frm.size(), 5);
					frm_y = Mat::zeros( frm.size(), 5);

					// Average the per-plane Sobel responses.
					for(int j = 0 ;j<(int) frm_rgb.size() ; j++)
					{
						Mat temp_x, temp_y;
						Sobel( frm_rgb[j] , temp_x, CV_8U, 1, 0, 3);
						Sobel( frm_rgb[j] , temp_y, CV_8U, 0, 1, 3);
						temp_x.convertTo(temp_x,5);
						temp_y.convertTo(temp_y,5);
						frm_x = frm_x + temp_x * (1.0/double( (int)  frm_rgb.size() ) );
						frm_y = frm_y + temp_y * (1.0/double( (int)  frm_rgb.size() ) );
					}
				}

				if( IF_EDGE_PENALTY ) // if edge penalty
				{

					// Subtract a scaled penalty at strong edges (only the
					// top ~5% of normalized gradient magnitude counts).
					Mat frm_edge = (abs(frm_x) + abs(frm_y))*(1.0/255.0/2.0);

					frm_edge = max(0, frm_edge - 0.95);

					//frm_edge = frm_edge - lc::reduceMat( frm_edge, CV_REDUCE_AVG );

					mask = mask - frm_edge * edge_penalty_scale;
				}

				//lc::colorshow("debug_pened_mask",mask);
				//cv::waitKey(1);

				float MAX_EDGE_CAP = 1000;

				int _rows = mask.rows;
				int _cols = mask.cols;
				int _sz = _rows * _cols;

				typedef Graph< float , float , float > GraphType;

				GraphType * _g = new GraphType( _sz, _sz * 4 );

				int id = 0;

				// Unary terms: one node per pixel; weight is the clamped
				// log-odds of the mask value (plus bias), offset by
				// MAX_EDGE_CAP on both t-links to keep weights positive.
				for(int y = 0; y < _rows ; y++)
					for(int x = 0 ; x< _cols ; x++ , id++)
					{
						_g->add_node();
					
						float value = mask.at<float>(y,x) + bias_enhance;

						if(value>0.99f) value = 0.99f;
						else if(value<0.01f) value = 0.01f;

						float weight = log((value+1e-2)/(1-value+1e-2));
						
						_g->add_tweights( id, weight + MAX_EDGE_CAP, MAX_EDGE_CAP );
					}

				// 4-neighborhood offsets: left, right, down, up.
				int dx[] = {-1,1,0,0};
				int dy[] = {0, 0,1,-1};
				

				// Pairwise terms over interior pixels only (border row/col
				// skipped so y2/x2 never leave the image).
				if(1)
				{
					for(int y = _rows-2 ; y>0;y--)
					{
						
						for(int x = _cols -2; x>0;x--)
						{
							int id1 = (y) * _cols  + x;
							for(int k = 0 ; k < 4; k++)
							{
								int x2 = x + dx[k];
								int y2 = y + dy[k];

								float edge_weight = 1.0;

								if(IF_EDGE_WEIGHT){
									// NOTE(review): both terms read the
									// gradient at (y2,x2); one was likely
									// meant to be (y,x) so the two edge
									// endpoints are averaged — confirm.
									if(k<2)
									{
										edge_weight = float(frm_x.at<float>(y2,x2) + frm_x.at<float>(y2,x2));
									}
									else
									{
										edge_weight = float(frm_y.at<float>(y2,x2) + frm_y.at<float>(y2,x2));
									}
									
									// Strong gradients -> small (or negative,
									// i.e. dropped) smoothing weight.
									edge_weight = float((max_edge_weight-edge_weight/2.0f)/max_edge_weight);
									
								}

								int id2 = ( y2 ) * _cols + x2;

								if(edge_weight>0)
								{
									if(id1==id2)
									{
										cout << "warning " << endl;
										system("pause");
									}
									_g->add_edge( id1, id2, half_lambda*edge_weight, half_lambda*edge_weight);
								}
							}							
						}

					}
				}

				float flow = _g->maxflow();	

				// Read back the min-cut segmentation as a binary mask.
				id = 0;
				for(int y = 0; y < _rows ; y++)
				{
					for(int x = 0 ; x< _cols ; x++ , id++)
					{
						if( _g->what_segment(id) == GraphType::SOURCE) mask.at<float>(y,x) = 1.0f;
						else mask.at<float>(y,x) = 0.0f;
					}				
					
				}

				delete _g;
			}	

			// Back to an 8-bit 0/255 mask.
			mask = mask * 255.0;

			mask.convertTo( mask, CV_8U);

			if(IF_POSTPROCESSING)
			{
				

				multi_tracker.preprocessing( mask );

				// Disabled dilation experiment kept for reference.
				if(0){
					Mat kernel = Mat::ones(3,3,5);
					   kernel.at<float>(0,0) = kernel.at<float>(2,0)
					=kernel.at<float>(0,2) = kernel.at<float>(2,2) = 1e-3f;
					cv::dilate( mask, mask, kernel,Point(0,0),1);
				}
			}

			// ---- Scoring against ground truth (only on status 0 frames) ----
			if(statu==0)
			{

				Mat posterior;

				if(0)
				{
					multi_tracker.getPosterior( posterior, mask.size());
				}
				else
				{
					mask.copyTo( posterior );				
				}

				//imshow("debug",posterior);
				//cv::waitKey(1);

				// Normalize posterior to float [0,1] if still 8-bit.
				if(posterior.type() == CV_8U)
				{
					posterior.convertTo( posterior, 5 );
					posterior = posterior* (1.0/255.0);
				}

				
								
				test_reader.getLabel( gt );

				gt.convertTo( gt, 5);
				gt = gt * (1.0/255.0);

				cv::resize(gt, gt, mask.size());

				// Score on the interior region, excluding a border of
				// `bound_setting` pixels on every side.
				LcValidator score;
				{
					int _bd = my_setting.bound_setting;
					
					int _rows = gt.rows;
					int _cols = gt.cols;
					
					Mat _po,_gt;
					posterior( Range( _bd, _rows-_bd), Range(_bd, _cols - _bd) ).copyTo(_po);
					gt( Range( _bd, _rows-_bd), Range(_bd, _cols - _bd) ).copyTo(_gt);
					score = LcValidator( _po , _gt );
				}
				score.display();
				
				// Append per-frame F1 to the frame report.
				// NOTE(review): fopen result unchecked (./report/ must
				// already exist) — confirm intended.
				{
					string report_name = "./report/frame_report_" + my_setting.train_video_name + "_on_"
						+ my_setting.test_video_name + "_v_" + my_setting.feature_code_string + 
						my_setting.addition_string.c_str() + ".txt";
					FILE * fout = fopen( report_name.c_str(),"a+");
					
					{
						fprintf(fout,"%f\t%d\t", score.getF1(),test_reader.getf());
					}
					
					fclose(fout);
					
				}

				sum_validator = sum_validator + score;
				//cv::waitKey(0);

				// Append running cumulative totals to the PFtrack report.
				{
					string report_name = "./report/PFtrack_" + my_setting.train_video_name + "_on_"
						+ my_setting.test_video_name + "_v_" + my_setting.feature_code_string + 
						my_setting.addition_string.c_str() + ".txt";
					FILE * fout = fopen( report_name.c_str(),"a+");
					
					{
						fprintf(fout,"%f\t%f\t%f\t%f\t%f\t%d\n", sum_validator.getF1(),
							sum_validator.tp,sum_validator.fp,sum_validator.fn,sum_validator.tn,test_reader.getf());
					}
					fclose(fout);
				}
			}
		}//end for video

		// Final summary.
		// NOTE(review): this reopens the same PFtrack_ file as the
		// per-frame loop above but with mode "w", truncating the per-frame
		// history so only this one summary line survives — confirm intended.
		{
			string report_name = "./report/PFtrack_" + my_setting.train_video_name + "_on_"
				+ my_setting.test_video_name + "_v_" + my_setting.feature_code_string + 
				my_setting.addition_string.c_str() + ".txt";
			FILE * fout = fopen( report_name.c_str(),"w");
			
			{
				fprintf(fout,"%f\t%f\t%f\t%f\t%f\n", sum_validator.getF1(),
					sum_validator.tp,sum_validator.fp,sum_validator.fn,sum_validator.tn);
			}
			fclose(fout);
		}
	}	

	//LcVideoReadExt my_reader;

	//{
	//	my_reader.root = "./../../_FGS/";
	//	my_reader.video_name = "GOPR2382_cab";
	//	my_reader.f_start = 550;
	//	my_reader.f_end = 700;
	//	my_reader.f_rate = 50;
	//	my_reader.t_win = 0;
	//	my_reader.veb = 1;
	//}

	//if(0)
	//{

	//	test.setFeatureCode("l");
	//	test.setReader( &my_reader );

	//	test.train( my_reader);
	//	test.save("./");

	//	LcFeatureExtractor my_extractor;

	//	my_extractor.set_extractor( "l" );

	//	test.predict( my_reader).display();

	//}
	//else
	//{
	//	
	//	test.setFeatureCode("l");
	//	test.setReader( &my_reader );
	//	test.setClassifier( new LcRandomTrees);

	//	test.load("./");

	//	vector< LcValidator > validator_array;
	//	test.predict( my_reader, validator_array);

	//	for(int i = 0 ; i < (int) validator_array.size() ; i++)
	//	{
	//		validator_array[i].display();
	//	}
	//}

	//

	return 0;
}