#include <iostream>
#include <fstream>
#include <boost/filesystem.hpp>
#include <boost/regex/v4/fileiter.hpp>

#include "vosmFacade.h"
#include "smf.h"
#include "../cvcommon/VO_FaceKeyPoint.h"

using namespace std;
using namespace cv;

/**
 * @brief  Fit a previously trained statistical model (ASM/AAM variant) to
 *         every image found in a test directory.
 * @param  traineddatadir  directory holding the trained model parameters
 * @param  testDir         directory of images to test against
 * @param  statMod         textual model selector: "ASM_PROFILEND", "ASM_LTC",
 *                         "AAM_BASIC", "AAM_CMUICIA" or "AAM_IAIA"
 * @param  staticOrNot     true  -> independent still images (face detection
 *                         on every frame); false -> image sequence (detect
 *                         once, then track until tracking is lost)
 * @param  record          true -> write fitting result images to disk
 * @return false on bad arguments or missing/empty test directory, true otherwise
 */
bool VOSM::smfitting( string traineddatadir, string testDir, 
	string statMod /*= "ASM_PROFILEND"*/, 
	bool staticOrNot /*= true*/, bool record /*= false*/ )
{
	unsigned int 			fittingmtd = VO_AXM::ASM_PROFILEND;
	vector<string> 			AllImgFiles4Testing;


	// Map the textual model name onto the VO_AXM fitting-method id;
	// unknown names are rejected up front.
	if(statMod.compare("ASM_PROFILEND") == 0)
		fittingmtd 		= VO_AXM::ASM_PROFILEND;
	else if(statMod.compare("ASM_LTC") == 0)
		fittingmtd 		= VO_AXM::ASM_LTC;
	else if(statMod.compare("AAM_BASIC") == 0)
		fittingmtd 		= VO_AXM::AAM_BASIC;
	else if(statMod.compare("AAM_CMUICIA") == 0)
		fittingmtd 		= VO_AXM::AAM_CMUICIA;
	else if(statMod.compare("AAM_IAIA") == 0)
		fittingmtd 		= VO_AXM::AAM_IAIA;
	else
	{
		cerr << "Wrong fitting type parameters!" << endl;
		return false;
	}


	// Collect the test images; bail out if the directory is missing or empty.
	if ( ! boost::filesystem::is_directory( testDir ) )
	{
		cerr << "image path does not exist!" << endl;
		return false;
	}
	AllImgFiles4Testing = VO_IO::ScanNSortImagesInDirectory ( testDir );
	if (AllImgFiles4Testing.size() == 0)
	{
		cerr << " No image loaded" << endl;
		return false;
	}


	// Instantiate the concrete fitter matching the requested method and load
	// its trained parameters.  Only the five method ids accepted above can
	// reach this switch, so fitting2dsm is always assigned.
	VO_Fitting2DSM* fitting2dsm = NULL;
	switch(fittingmtd)
	{
	case VO_AXM::AAM_BASIC:
	case VO_AXM::AAM_DIRECT:
		fitting2dsm = new VO_FittingAAMBasic();
		dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->VO_LoadParameters4Fitting(traineddatadir);
		break;
	case VO_AXM::CLM:
	case VO_AXM::AFM:
		fitting2dsm = new VO_FittingAFM();
		dynamic_cast<VO_FittingAFM*>(fitting2dsm)->VO_LoadParameters4Fitting(traineddatadir);
		break;
	case VO_AXM::AAM_IAIA:
	case VO_AXM::AAM_CMUICIA:
		fitting2dsm = new VO_FittingAAMInverseIA();
		dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->VO_LoadParameters4Fitting(traineddatadir);
		break;
	case VO_AXM::AAM_FAIA:
		fitting2dsm = new VO_FittingAAMForwardIA();
		dynamic_cast<VO_FittingAAMForwardIA*>(fitting2dsm)->VO_LoadParameters4Fitting(traineddatadir);
		break;
	case VO_AXM::ASM_LTC:
		fitting2dsm = new VO_FittingASMLTCs();
		dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)->VO_LoadParameters4Fitting(traineddatadir);
		break;
	case VO_AXM::ASM_PROFILEND:
		fitting2dsm = new VO_FittingASMNDProfiles();
		dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)->VO_LoadParameters4Fitting(traineddatadir);
		break;
	}


	// Per-image statistics buffers.
	// NOTE(review): doEvaluation is hard-wired to false.  If it were enabled
	// as-is, the evaluation code below would index oShapes (never populated)
	// and write into deviations/ptsErrorFreq, which are declared here but
	// never allocated to nbOfTestingSamples x nb -- both paths would fail.
	vector<Mat> oImages;
	vector<VO_Shape> oShapes;
	int nb = 20;
	bool doEvaluation = false;
	unsigned int nbOfTestingSamples = AllImgFiles4Testing.size();
	Mat_<float> nbOfIterations = Mat_<float>::zeros(1, nbOfTestingSamples);
	Mat_<float> deviations;
	Mat_<float> ptsErrorFreq;
	Mat_<float> times = Mat_<float>::zeros(1, nbOfTestingSamples);

	// Face detector used to seed the fitter with eye/mouth key points.
	// NOTE(review): the cascade paths are hard-coded absolute Windows paths
	// to an OpenCV 2.3.1 install; loadFDModel() builds the same set from a
	// configurable root directory -- consider reusing it here.
	CFaceDetectionAlgs fd;
	Point2f ptLeftEyeCenter, ptRightEyeCenter, ptMouthCenter;
	fd.SetConfiguration(
		"D:/Program Files/OpenCV231/data/lbpcascades/lbpcascade_frontalface.xml", 
		"D:/Program Files/OpenCV231/data/haarcascades/haarcascade_profileface.xml",
		"D:/Program Files/OpenCV231/data/haarcascades/haarcascade_mcs_lefteye.xml",
		"D:/Program Files/OpenCV231/data/haarcascades/haarcascade_mcs_righteye.xml",
		"D:/Program Files/OpenCV231/data/haarcascades/haarcascade_mcs_nose.xml",
		"D:/Program Files/OpenCV231/data/haarcascades/haarcascade_mcs_mouth.xml",
		VO_AdditiveStrongerClassifier::BOOSTING,
		CFaceDetectionAlgs::FRONTAL );

	Mat iImage, resizedImage, drawImage, fittedImage;
	VO_Shape fittingShape;
	unsigned int detectionTimes = 0;

	// For static images from stadard face databases
	// (Detection only, no tracking) + ASM/AAM
	if(staticOrNot)
	{
		detectionTimes = 0;
		for(unsigned int i = 0; i < AllImgFiles4Testing.size(); i++)
		{
			iImage = imread(AllImgFiles4Testing[i]);
			// Explained by JIA Pei. You can use cv::resize() to ensure before fitting starts,
			// every image to be tested is of a standard size, say (320, 240)
			iImage.copyTo(resizedImage);
			// cv::resize(iImage, resizedImage, Size(320, 240) );
			iImage.copyTo(fittedImage);
			// Derive an output-file prefix from the image file name
			// (basename without directory or extension).
			size_t found1 = AllImgFiles4Testing[i].find_last_of("/\\");
			size_t found2 = AllImgFiles4Testing[i].find_last_of(".");
			string prefix = AllImgFiles4Testing[i].substr(found1+1, found2-1-found1);

			// Static mode: run face detection on every single image.
			detectionTimes++;
			fd.FullFaceDetection( 	resizedImage,
				NULL,
				true,
				true,
				true,
				true,
				1.0,
				Size(80,80),
				Size( min(resizedImage.rows,resizedImage.cols), min(resizedImage.rows,resizedImage.cols) ) ); // Size(240,240)

			if( fd.IsFaceDetected() )
			{
				fd.CalcFaceKeyPoints();
				// Map key points from the detection image back to the
				// original resolution (both are identical here since the
				// resize above is commented out, making the scales 1.0).
				double tmpScaleX = (double)iImage.cols/(double)resizedImage.cols;
				double tmpScaleY = (double)iImage.rows/(double)resizedImage.rows;
				Rect rect = fd.GetDetectedFaceWindow();
				ptLeftEyeCenter = fd.GetDetectedFaceKeyPoint(VO_KeyPoint::LEFTEYECENTER);
				ptRightEyeCenter = fd.GetDetectedFaceKeyPoint(VO_KeyPoint::RIGHTEYECENTER);
				ptMouthCenter = fd.GetDetectedFaceKeyPoint(VO_KeyPoint::MOUTHCENTER);
				ptLeftEyeCenter.x *= tmpScaleX;
				ptLeftEyeCenter.y *= tmpScaleY;
				ptRightEyeCenter.x *= tmpScaleX;
				ptRightEyeCenter.y *= tmpScaleY;
				ptMouthCenter.x *= tmpScaleX;
				ptMouthCenter.y *= tmpScaleY;

				// Explained by JIA Pei, you can save to see the detection results.
				//				iImage.copyTo(drawImage);
				//				cv::rectangle(drawImage, Point(ptLeftEyeCenter.x-1, ptLeftEyeCenter.y-1),
				//									 Point(ptLeftEyeCenter.x+1, ptLeftEyeCenter.y+1),
				//								colors[5], 2, 8, 0);
				//				cv::rectangle(drawImage, Point(ptRightEyeCenter.x-1, ptRightEyeCenter.y-1),
				//									 Point(ptRightEyeCenter.x+1, ptRightEyeCenter.y+1),
				//								colors[6], 2, 8, 0);
				//				cv::rectangle(drawImage, Point(ptMouthCenter.x-1, ptMouthCenter.y-1),
				//									 Point(ptMouthCenter.x+1, ptMouthCenter.y+1),
				//								colors[7], 2, 8, 0);
				//				imwrite("drawImage.jpg", drawImage);
				//				imwrite("resizedImage.jpg", resizedImage);
				// Run the actual fitting, seeded by the detected key points.
				fitting2dsm->VO_StartFitting(	iImage,
					oImages,
					fittingmtd,
					ptLeftEyeCenter,
					ptRightEyeCenter,
					ptMouthCenter,
					VO_Fitting2DSM::EPOCH, // at most, how many iterations will be carried out
					4,
					record );
				nbOfIterations(0,i) = (float)(fitting2dsm->GetNbOfIterations());
				fittingShape = fitting2dsm->VO_GetFittedShape();
				times(0,i) = fitting2dsm->GetFittingTime();
				//				cout << nbOfIterations(0,i) << endl;
			}

			if(record)
			{
				// Explained by JIA Pei. For static images, we can save all intermediate images of the fitting process.
				SaveSequentialImagesInFolder(oImages, prefix);
				string fn = prefix+".jpg";
				if(oImages.size() > 0)
				{
					fittedImage = oImages.back();
					imwrite(fn.c_str(), fittedImage);
					oImages.clear();
				}
			}

			// For evaluation
			// NOTE(review): dead code while doEvaluation == false; see the
			// buffer declarations above before enabling it.
			if(doEvaluation)
			{
				vector<float> ptErrorFreq;
				float deviation = 0.0f;
				vector<unsigned int> unsatisfiedPtList;
				unsatisfiedPtList.clear();
				CRecognitionAlgs::CalcShapeFittingEffect(	oShapes[i],
					fittingShape,
					deviation,
					ptErrorFreq,
					nb);
				deviations(0,i) = deviation;
				for(unsigned int j = 0; j < nb; j++)
					ptsErrorFreq(i, j) = ptErrorFreq[j];
				CRecognitionAlgs::SaveShapeRecogResults(	"./",
					prefix,
					deviation,
					ptErrorFreq);
			}
		}

		// Summary statistics for the static run.
		cout << "detection times = " << detectionTimes << endl;
		float avgIter = cv::mean(nbOfIterations).val[0];
		cout << "Average Interation Times = " << avgIter << endl;
		float avgTime = cv::mean(times).val[0];
		cout << "Averaget Detection time (in ms) = " << avgTime << endl;
		Scalar avgDev, stdDev;
		if(doEvaluation)
		{
			cv::meanStdDev(deviations, avgDev, stdDev);
			cout << "Average Deviation of Errors = " << avgDev.val[0] << " " 
				<< "Standard Deviation of Errors = " << stdDev.val[0] << endl << endl;
			vector<float> avgErrorFreq(nb, 0.0f);
			for(int j = 0; j < nb; j++)
			{
				Mat_<float> col = ptsErrorFreq.col(j);
				avgErrorFreq[j] = cv::mean(col).val[0];
				cout << avgErrorFreq[j] << "percentage of points are in " << j << "pixels" << endl;
			}
		}
	}
	// For dynamic image sequences
	// (Detection or Tracking) + ASM/AAM
	else
	{
		CTrackingAlgs*	trackAlg = new CTrackingAlgs();
		bool isTracked = false;
		detectionTimes = 0;
		for(unsigned int i = 0; i < AllImgFiles4Testing.size(); i++)
		{
			iImage = imread(AllImgFiles4Testing[i]);
			// Explained by JIA Pei. You can use cv::resize() to ensure before fitting starts,
			// every image to be tested is of a standard size, say (320, 240)
			// iImage.copyTo(resizedImage);	// 
			cv::resize(iImage, resizedImage, Size(320, 240) );
			iImage.copyTo(fittedImage);
			size_t found1 = AllImgFiles4Testing[i].find_last_of("/\\");
			size_t found2 = AllImgFiles4Testing[i].find_last_of(".");
			string prefix = AllImgFiles4Testing[i].substr(found1+1, found2-1-found1);

			// Dynamic mode: only (re-)detect while the face is not tracked;
			// otherwise keep fitting from the previous frame's shape.
			if(!isTracked)
			{
				detectionTimes++;
				fd.FullFaceDetection( 	resizedImage,
					NULL,
					true,
					true,
					true,
					true,
					1.0,
					Size(80,80),
					Size( min(resizedImage.rows,resizedImage.cols), min(resizedImage.rows,resizedImage.cols) ) ); // Size(240,240)
				if( fd.IsFaceDetected() )
				{
					fd.CalcFaceKeyPoints();
					// Scale key points from the 320x240 detection image back
					// to original image coordinates.
					double tmpScaleX = (double)iImage.cols/(double)resizedImage.cols;
					double tmpScaleY = (double)iImage.rows/(double)resizedImage.rows;
					Rect rect = fd.GetDetectedFaceWindow();
					ptLeftEyeCenter = fd.GetDetectedFaceKeyPoint(VO_KeyPoint::LEFTEYECENTER);
					ptRightEyeCenter = fd.GetDetectedFaceKeyPoint(VO_KeyPoint::RIGHTEYECENTER);
					ptMouthCenter = fd.GetDetectedFaceKeyPoint(VO_KeyPoint::MOUTHCENTER);
					ptLeftEyeCenter.x *= tmpScaleX;
					ptLeftEyeCenter.y *= tmpScaleY;
					ptRightEyeCenter.x *= tmpScaleX;
					ptRightEyeCenter.y *= tmpScaleY;
					ptMouthCenter.x *= tmpScaleX;
					ptMouthCenter.y *= tmpScaleY;

					// Explained by JIA Pei, you can save to see the detection results.
					//					resizedImage.copyTo(drawImage);
					//					fd.VO_DrawDetection(drawImage, true, true, true, true, true);
					//					imwrite("drawImage.jpg", drawImage);
					//					imwrite("resizedImage.jpg", resizedImage);
					//					imwrite("iImage.jpg", iImage);
					fitting2dsm->SetInputImage(iImage);


					// First frame after (re-)detection: initialize the shape
					// from the model's aligned mean shape, warp it onto the
					// detected eye/mouth positions, clamp it inside the
					// image, then run one fitting pass.
					switch(fittingmtd)
					{
					case VO_AXM::AAM_BASIC:
						{
							fittingShape.clone(dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetAlignedMeanShape() );
							fittingShape.Affine2D(
								VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
								dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetFaceParts(),
								fittingShape,
								ptLeftEyeCenter,
								ptRightEyeCenter,
								ptMouthCenter) );
							fittingShape.ConstrainShapeInImage(iImage);

							dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)
								->VO_BasicAAMFitting(iImage,
								fittingShape,
								fittedImage,
								VO_Fitting2DSM::EPOCH );
						}
						break;
					case VO_AXM::AAM_DIRECT:
						{
							fittingShape.clone(dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetAlignedMeanShape() );
							fittingShape.Affine2D(
								VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
								dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetFaceParts(),
								fittingShape,
								ptLeftEyeCenter,
								ptRightEyeCenter,
								ptMouthCenter) );
							fittingShape.ConstrainShapeInImage(iImage);

							dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)
								->VO_DirectAAMFitting(iImage,
								fittingShape,
								fittedImage,
								VO_Fitting2DSM::EPOCH );
						}
						break;
					case VO_AXM::CLM:
					case VO_AXM::AFM:
						break;
					case VO_AXM::AAM_IAIA:
						{
							fittingShape.clone(dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetAlignedMeanShape() );
							fittingShape.Affine2D(
								VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
								dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetFaceParts(),
								fittingShape,
								ptLeftEyeCenter,
								ptRightEyeCenter,
								ptMouthCenter) );
							fittingShape.ConstrainShapeInImage(iImage);

							dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)
								->VO_IAIAAAMFitting(iImage,
								fittingShape,
								fittedImage,
								VO_Fitting2DSM::EPOCH );
						}
						break;
					case VO_AXM::AAM_CMUICIA:
						{
							fittingShape.clone(dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetAlignedMeanShape() );
							fittingShape.Affine2D(
								VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
								dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetFaceParts(),
								fittingShape,
								ptLeftEyeCenter,
								ptRightEyeCenter,
								ptMouthCenter) );
							fittingShape.ConstrainShapeInImage(iImage);

							dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)
								->VO_ICIAAAMFitting(iImage,
								fittingShape,
								fittedImage,
								VO_Fitting2DSM::EPOCH );
						}
						break;
					case VO_AXM::AAM_FAIA:
						break;
					case VO_AXM::ASM_LTC:
						{
							fittingShape.clone(dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)->m_VOASMLTC->GetAlignedMeanShape() );
							fittingShape.Affine2D(
								VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
								dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)->m_VOASMLTC->GetFaceParts(),
								fittingShape,
								ptLeftEyeCenter,
								ptRightEyeCenter,
								ptMouthCenter) );
							fittingShape.ConstrainShapeInImage(iImage);

							dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)
								->VO_ASMLTCFitting(	iImage,
								fittingShape,
								fittedImage,
								VO_Features::DIRECT,
								VO_Fitting2DSM::EPOCH,
								3);	// change this 2 to 1 for 1D profile ASM
						}
						break;
					case VO_AXM::ASM_PROFILEND:	// default, 2D Profile ASM
						{
							fittingShape.clone(dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)->m_VOASMNDProfile->GetAlignedMeanShape() );
							fittingShape.Affine2D(
								VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
								dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)->m_VOASMNDProfile->GetFaceParts(),
								fittingShape,
								ptLeftEyeCenter,
								ptRightEyeCenter,
								ptMouthCenter)
								);
							fittingShape.ConstrainShapeInImage(iImage);

							dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)
								->VO_ASMNDProfileFitting(	iImage,
								fittingShape,
								fittedImage,
								VO_Fitting2DSM::EPOCH,
								4,
								2);	// change this 2 to 1 for 1D profile ASM
						}
						break;
					}
					// Whenever the face is re-detected, initialize the tracker and set isTracked = true;
					Rect rect1 =	fittingShape.GetShapeBoundRect();
					trackAlg->UpdateTracker(iImage, rect1);
					isTracked =  true;
				}
			}
			else
			{
				// Tracked frame: continue fitting from the previous frame's
				// fittingShape without re-running face detection.
				switch(fittingmtd)
				{
				case VO_AXM::AAM_BASIC:
					{
						dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)
							->VO_BasicAAMFitting(iImage,
							fittingShape,
							fittedImage,
							VO_Fitting2DSM::EPOCH);	
					}
					break;
				case VO_AXM::AAM_DIRECT:
					{
						dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)
							->VO_DirectAAMFitting(iImage,
							fittingShape,
							fittedImage,
							VO_Fitting2DSM::EPOCH);	
					}
					break;
				case VO_AXM::CLM:
				case VO_AXM::AFM:
					break;
				case VO_AXM::AAM_IAIA:
					{
						dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)
							->VO_IAIAAAMFitting(iImage,
							fittingShape,
							fittedImage,
							VO_Fitting2DSM::EPOCH);						
					}
					break;
				case VO_AXM::AAM_CMUICIA:
					{
						dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)
							->VO_ICIAAAMFitting(iImage,
							fittingShape,
							fittedImage,
							VO_Fitting2DSM::EPOCH);	
					}
					break;
				case VO_AXM::AAM_FAIA:
					break;
				case VO_AXM::ASM_LTC:
					{
						dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)
							->VO_ASMLTCFitting(	iImage,
							fittingShape,
							fittedImage,
							VO_Features::DIRECT,
							VO_Fitting2DSM::EPOCH,
							3);
					}
					break;
				case VO_AXM::ASM_PROFILEND:
					{
						dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)
							->VO_ASMNDProfileFitting(	iImage,
							fittingShape,
							fittedImage,
							VO_Fitting2DSM::EPOCH,
							4,
							2);	// change this 2 to 1 for 1D profile ASM
					}
					break;
				default:
					return false;
				}
				// Explained by JIA Pei. For every consequent image, whose previous image is regarded as tracked, 
				// we have to double-check whether current image is still a tracked one.
				//				isTracked = true;
				isTracked = CRecognitionAlgs::EvaluateFaceTrackedByProbabilityImage(
					trackAlg,
					iImage,
					fittingShape,
					Size(80,80),
					Size( min(iImage.rows,iImage.cols), min(iImage.rows,iImage.cols) ) );
			}

			nbOfIterations(0,i) = (float)(fitting2dsm->GetNbOfIterations());
			fittingShape = fitting2dsm->VO_GetFittedShape();
			times(0,i) = fitting2dsm->GetFittingTime();


			if(record)
			{
				string fn = prefix+".jpg";
				imwrite(fn.c_str(), fittedImage);
			}

			// For evaluation
			// NOTE(review): dead code while doEvaluation == false; see the
			// buffer declarations above before enabling it.
			if(doEvaluation)
			{
				vector<float> ptErrorFreq;
				float deviation = 0.0f;
				vector<unsigned int> unsatisfiedPtList;
				unsatisfiedPtList.clear();
				CRecognitionAlgs::CalcShapeFittingEffect(	oShapes[i],
					fittingShape,
					deviation,
					ptErrorFreq,
					nb);
				deviations(0,i) = deviation;
				for(unsigned int j = 0; j < nb; j++)
					ptsErrorFreq(i, j) = ptErrorFreq[j];
				CRecognitionAlgs::SaveShapeRecogResults(	"./",
					prefix,
					deviation,
					ptErrorFreq);
			}
		}

		// Summary statistics for the sequence run.
		// NOTE(review): unlike the static branch, the meanStdDev/error-
		// frequency block below is NOT guarded by doEvaluation and operates
		// on the empty deviations/ptsErrorFreq matrices.
		cout << "detection times = " << detectionTimes << endl;
		float avgIter = cv::mean(nbOfIterations).val[0];
		cout << avgIter << endl;
		float avgTime = cv::mean(times).val[0];
		cout << avgTime << endl;
		Scalar avgDev, stdDev;
		cv::meanStdDev(deviations, avgDev, stdDev);
		cout << avgDev.val[0] << " " << stdDev.val[0] << endl << endl;
		vector<float> avgErrorFreq(nb, 0.0f);
		for(int j = 0; j < nb; j++)
		{
			Mat_<float> col = ptsErrorFreq.col(j);
			avgErrorFreq[j] = cv::mean(col).val[0];
			cout << j << " " << avgErrorFreq[j] << endl;
		}

		delete trackAlg;
	}

	delete fitting2dsm;

	return true;
}

/**
 * @brief  Train a statistical model from a directory of images plus their
 *         landmark annotations, and save the result to outputDir.
 * @param  imgDir             directory of training images
 * @param  annoDir            directory of landmark annotation files
 * @param  shapeinfoFileName  shape-info description file
 * @param  annoFormat         annotation database format tag ("PUT", "IMM",
 *                            "AGING", "BIOID", "XM2VTS", "FRANCK", "EMOUNT",
 *                            "JIAPEI"); anything else returns false
 * @param  outputDir          where the trained model files are written
 * @param  channels           number of image channels used for training
 * @param  statMod            model type tag ("SM", "TM", "AM", "IA", "FM",
 *                            "SMLTC", "SMNDPROFILE")
 * @param  levels             number of pyramid levels
 * @param  percentage         truncated-percentage parameter passed to the builders
 * @return false on missing directories or unknown annoFormat, true otherwise
 */
bool VOSM::smbuilding( string imgDir, string annoDir, string shapeinfoFileName, 
	string annoFormat, string outputDir /*= "./"*/, 
	int channels /*= 3*/, string statMod /*= "SMNDPROFILE"*/, 
	int levels /*= 4*/, double percentage /*= 0.95*/ )
{
	vector<string> 			AllImgFiles4Training;
	vector<string> 			AllLandmarkFiles4Training;
	unsigned int 			database = CAnnotationDBIO::EMOUNT;
	unsigned int			type = ASMNDPROFILE;

	// Collect training images and annotations; both directories must exist.
	if ( !boost::filesystem::is_directory(imgDir) )
	{
		cerr << "image path does not exist!" << endl;
		return false;
	}
	AllImgFiles4Training = VO_IO::ScanNSortImagesInDirectory ( imgDir );


	if ( ! boost::filesystem::is_directory( annoDir ) )
	{
		cerr << "landmark path does not exist!" << endl;
		return false;
	}
	AllLandmarkFiles4Training = VO_IO::ScanNSortAnnotationInDirectory ( annoDir );


	// Map the annotation-format tag to the CAnnotationDBIO database id.
	// NOTE(review): an unknown tag fails silently (no cerr message), unlike
	// the directory checks above.
	if(annoFormat.compare("PUT") == 0)
		database	= CAnnotationDBIO::PUT;
	else if(annoFormat.compare("IMM") == 0)
		database 	= CAnnotationDBIO::IMM;
	else if(annoFormat.compare("AGING") == 0)
		database 	= CAnnotationDBIO::AGING;
	else if(annoFormat.compare("BIOID") == 0)
		database 	= CAnnotationDBIO::BIOID;
	else if(annoFormat.compare("XM2VTS") == 0)
		database 	= CAnnotationDBIO::XM2VTS;
	else if(annoFormat.compare("FRANCK") == 0)
		database 	= CAnnotationDBIO::FRANCK;
	else if(annoFormat.compare("EMOUNT") == 0)
		database 	= CAnnotationDBIO::EMOUNT;
	else if(annoFormat.compare("JIAPEI") == 0)
		database 	= CAnnotationDBIO::JIAPEI;
	else
		return false;


	// Map the model-type tag to the internal type id.
	// NOTE(review): unlike smfitting()/loadASMModel(), an unrecognized
	// statMod is NOT rejected here -- it silently falls through to the
	// ASMNDPROFILE default set above.  Confirm this is intended.
	if(statMod.compare("SM") == 0)
		type 		= SHAPEMODEL;
	else if(statMod.compare("TM") == 0)
		type 		= TEXTUREMODEL;
	else if(statMod.compare("AM") == 0)
		type 		= APPEARANCEMODEL;
	else if(statMod.compare("IA") == 0)
		type 		= INVERSEIMAGEALIGNMENT;
	else if(statMod.compare("FM") == 0)
		type 		= AFM ;
	else if(statMod.compare("SMLTC") == 0)
		type 		= ASMLTC;
	else if(statMod.compare("SMNDPROFILE") == 0)
		type 		= ASMNDPROFILE;


	// Build and save the requested model.  Each case constructs the model
	// object on the stack, trains it, and serializes it into outputDir.
	switch(type)
	{
	case SHAPEMODEL:
		{
			VO_ShapeModel shapeModel;
			shapeModel.VO_BuildShapeModel(	AllLandmarkFiles4Training,
				shapeinfoFileName,
				database,
				percentage,
				false);
			shapeModel.VO_Save(outputDir);
		}
		break;
	case TEXTUREMODEL:
		{
			VO_TextureModel textureModel;
			textureModel.VO_BuildTextureModel(	AllLandmarkFiles4Training,
				AllImgFiles4Training,
				shapeinfoFileName, 
				database,
				channels,
				VO_Features::DIRECT,
				percentage,
				percentage,
				false );
			textureModel.VO_Save(outputDir);
		}
		break;
	case APPEARANCEMODEL:
		{
			VO_AAMBasic aamBasicModel;
			aamBasicModel.VO_BuildAppearanceModel(	AllLandmarkFiles4Training,
				AllImgFiles4Training,
				shapeinfoFileName, 
				database,
				channels,
				levels,
				VO_Features::DIRECT,
				percentage,
				percentage,
				percentage,
				false );
			aamBasicModel.VO_Save(outputDir);
		}
		break;
	case INVERSEIMAGEALIGNMENT:
		{
			VO_AAMInverseIA aamInverseIAModel;
			aamInverseIAModel.VO_BuildAAMICIA(	AllLandmarkFiles4Training,
				AllImgFiles4Training,
				shapeinfoFileName,
				database,
				channels,
				levels,
				VO_Features::DIRECT,
				percentage,
				percentage,
				false );
			aamInverseIAModel.VO_Save(outputDir);
		}
		break;
	case AFM:
		{
			VO_AFM featureModel;
			featureModel.VO_BuildFeatureModel(	AllLandmarkFiles4Training,
				AllImgFiles4Training,
				shapeinfoFileName,
				database,
				channels,
				levels,
				VO_Features::DIRECT,
				percentage,
				false,
				VO_DiscreteWavelet::HAAR,
				Size(16, 16) );
			featureModel.VO_Save(outputDir);
		}
		break;
	case ASMLTC:
		{
			VO_ASMLTCs asmLTCModel;
			asmLTCModel.VO_BuildASMLTCs(AllLandmarkFiles4Training,
				AllImgFiles4Training,
				shapeinfoFileName,
				database,
				channels,
				levels,
				VO_Features::DIRECT,
				percentage,
				false,
				VO_Features::DIRECT,
				Size(16, 16) );
			asmLTCModel.VO_Save(outputDir);
		}
		break;
	case ASMNDPROFILE:
		{
			VO_ASMNDProfiles asmNDProfilesModel;

			asmNDProfilesModel.VO_BuildASMNDProfiles(	AllLandmarkFiles4Training,
				AllImgFiles4Training,
				shapeinfoFileName, 
				database,
				channels,
				levels,
				2,
				8,
				VO_Features::DIRECT,
				percentage,
				false);
//			fstream testFile;
// 			testFile.open("text.txt", ios::out);
// 			testFile << "helloworld" << endl;
// 			testFile.close();
			cout << "build asm NDProfiles success" << endl;

			asmNDProfilesModel.VO_Save(outputDir);
		}
		break;
	default:
		return false;
	}

	return true;
}

bool VOSM::smbuidlingSimple( string db )
{
	// Convenience wrapper around smbuilding(): derives the standard
	// sub-directory layout from a single database root directory.
	// NOTE(review): the annotation-format argument receives the raw db
	// path -- smbuilding() expects a tag such as "IMM"; confirm callers
	// pass a format string here rather than a directory.
	const string imgDir    = db + "/training";
	const string annoDir   = db + "/landmark";
	const string shapeInfo = db + "/shapeinfo.txt";
	const string outDir    = db + "/result";
	return smbuilding(imgDir, annoDir, shapeInfo, db, outDir);
}

/**
 * @brief  Load a trained fitting model of the requested kind from modelDir
 *         into the facade's fitting2dsm member and mark the ASM as loaded.
 * @return false for an unknown statMod tag, true on success
 */
bool VOSM::loadASMModel( string modelDir, /*optional */ string statMod /*= "ASM_PROFILEND"*/ )
{
	// Resolve the textual model name to the VO_AXM fitting-method id;
	// unknown names are rejected before anything is allocated.
	unsigned int method = VO_AXM::ASM_PROFILEND;
	if(statMod == "ASM_PROFILEND")
		method = VO_AXM::ASM_PROFILEND;
	else if(statMod == "ASM_LTC")
		method = VO_AXM::ASM_LTC;
	else if(statMod == "AAM_BASIC")
		method = VO_AXM::AAM_BASIC;
	else if(statMod == "AAM_CMUICIA")
		method = VO_AXM::AAM_CMUICIA;
	else if(statMod == "AAM_IAIA")
		method = VO_AXM::AAM_IAIA;
	else
	{
		cerr << "Wrong fitting type parameters!" << endl;
		return false;
	}

	// Instantiate the matching concrete fitter and load its trained
	// parameters.  A typed local pointer is used so no dynamic_cast back
	// from the base pointer is needed.
	switch(method)
	{
	case VO_AXM::ASM_PROFILEND:
		{
			VO_FittingASMNDProfiles* p = new VO_FittingASMNDProfiles();
			fitting2dsm = p;
			p->VO_LoadParameters4Fitting(modelDir);
		}
		break;
	case VO_AXM::ASM_LTC:
		{
			VO_FittingASMLTCs* p = new VO_FittingASMLTCs();
			fitting2dsm = p;
			p->VO_LoadParameters4Fitting(modelDir);
		}
		break;
	case VO_AXM::AAM_BASIC:
	case VO_AXM::AAM_DIRECT:
		{
			VO_FittingAAMBasic* p = new VO_FittingAAMBasic();
			fitting2dsm = p;
			p->VO_LoadParameters4Fitting(modelDir);
		}
		break;
	case VO_AXM::AAM_IAIA:
	case VO_AXM::AAM_CMUICIA:
		{
			VO_FittingAAMInverseIA* p = new VO_FittingAAMInverseIA();
			fitting2dsm = p;
			p->VO_LoadParameters4Fitting(modelDir);
		}
		break;
	case VO_AXM::AAM_FAIA:
		{
			VO_FittingAAMForwardIA* p = new VO_FittingAAMForwardIA();
			fitting2dsm = p;
			p->VO_LoadParameters4Fitting(modelDir);
		}
		break;
	case VO_AXM::CLM:
	case VO_AXM::AFM:
		{
			VO_FittingAFM* p = new VO_FittingAFM();
			fitting2dsm = p;
			p->VO_LoadParameters4Fitting(modelDir);
		}
		break;
	default:
		return false;
	}
	isLoadASM = true;
	return true;
}

/**
 * @brief  Create the face-detection algorithms object and point it at the
 *         standard OpenCV cascade files found under opencvDir.
 * @return always true
 */
bool VOSM::loadFDModel( string opencvDir /* = D:/program files/opencv231/ */ )
{
	// Common cascade directories under the OpenCV install root.
	const string lbpDir  = opencvDir + "data/lbpcascades/";
	const string haarDir = opencvDir + "data/haarcascades/";
	fd = new CFaceDetectionAlgs;
	fd->SetConfiguration(
		lbpDir  + "lbpcascade_frontalface.xml",
		haarDir + "haarcascade_profileface.xml",
		haarDir + "haarcascade_mcs_lefteye.xml",
		haarDir + "haarcascade_mcs_righteye.xml",
		haarDir + "haarcascade_mcs_nose.xml",
		haarDir + "haarcascade_mcs_mouth.xml",
		VO_AdditiveStrongerClassifier::BOOSTING,
		CFaceDetectionAlgs::FRONTAL );
	isLoadFD = true;
	return true;
}

/**
 * @brief  Detect a face and its key points in img, lazily loading the
 *         detector if necessary, and fill faceComp with the results mapped
 *         back to the original image resolution.
 * @return false when no face is detected, true otherwise
 */
bool VOSM::faceDectection( Mat img, VOSMFaceComp& faceComp, Rect* ROI)
{
	if (!isLoadFD) loadFDModel();

	// Detection always runs on a fixed 320x240 copy of the input.
	Mat shrunk;
	resize(img, shrunk, Size(320, 240) );

	fd->FullFaceDetection( shrunk, ROI, true, true, true, true, 1.0, Size(80,80),
		Size( min(shrunk.rows, shrunk.cols), min(shrunk.rows, shrunk.cols) ) ); // Size(240,240)
	if( !fd->IsFaceDetected() )
		return false;

	fd->CalcFaceKeyPoints();
	// Scale factors from the 320x240 detection image back to img.
	const double sx = (double)img.cols/(double)shrunk.cols;
	const double sy = (double)img.rows/(double)shrunk.rows;
	Rect face = fd->GetDetectedFaceWindow();
	Point2f eyeL  = fd->GetDetectedFaceKeyPoint(VO_KeyPoint::LEFTEYECENTER);
	Point2f eyeR  = fd->GetDetectedFaceKeyPoint(VO_KeyPoint::RIGHTEYECENTER);
	Point2f mouth = fd->GetDetectedFaceKeyPoint(VO_KeyPoint::MOUTHCENTER);
	// Map the face window and all key points into image coordinates.
	face.x      *= sx;
	face.width  *= sx;
	face.y      *= sy;
	face.height *= sy;
	eyeL.x  *= sx;
	eyeL.y  *= sy;
	eyeR.x  *= sx;
	eyeR.y  *= sy;
	mouth.x *= sx;
	mouth.y *= sy;
	faceComp.face     = face;
	faceComp.leftEye  = eyeL;
	faceComp.rightEye = eyeR;
	faceComp.mouth    = mouth;
	return true;
}

/**
 * @brief  Fit the loaded model to img, seeded by the detected face key
 *         points in faceComp, and return the fitted landmark points in pl.
 * @param  img       input image (also used for the in-place fitted image)
 * @param  faceComp  detected face components (eye centers, mouth center)
 * @param  pl        output landmark point list (cleared, then filled)
 * @param  statMod   textual model selector, same tags as loadASMModel()
 * @return false if no model is loaded or statMod is unknown, true otherwise
 *
 * NOTE(review): each case dynamic_casts fitting2dsm to the class implied by
 * statMod.  If statMod does not match the model actually loaded via
 * loadASMModel(), the cast yields NULL and the subsequent dereference
 * crashes -- confirm callers always pass the same tag to both functions.
 */
bool VOSM::fitASM(Mat img, TVOSMFaceComp faceComp, vector<Point2f>& pl,
		/*optional */ string statMod /*= "ASM_PROFILEND"*/ )
{
	if (!isLoadASM) {
		cerr << "need load asm model." << endl;
		return false;
	}
	fitting2dsm->SetInputImage(img);

	// Map the textual model name onto the VO_AXM fitting-method id.
	unsigned int fittingmtd = VO_AXM::ASM_PROFILEND;
	if(statMod.compare("ASM_PROFILEND") == 0)
		fittingmtd 		= VO_AXM::ASM_PROFILEND;
	else if(statMod.compare("ASM_LTC") == 0)
		fittingmtd 		= VO_AXM::ASM_LTC;
	else if(statMod.compare("AAM_BASIC") == 0)
		fittingmtd 		= VO_AXM::AAM_BASIC;
	else if(statMod.compare("AAM_CMUICIA") == 0)
		fittingmtd 		= VO_AXM::AAM_CMUICIA;
	else if(statMod.compare("AAM_IAIA") == 0)
		fittingmtd 		= VO_AXM::AAM_IAIA;
	else {
		cerr << "Wrong fitting type parameters!" << endl;
		return false;
	}

	VO_Shape fittingShape;
	// Note: Mat fittedImage(img) shares pixel data with img, so the fitting
	// routines draw directly into the caller's image.
	Mat fittedImage(img);

	// For each method: start from the model's aligned mean shape, warp it
	// onto the detected key points, clamp it inside the image, then run the
	// method-specific fitting pass.
	switch(fittingmtd)
	{
	case VO_AXM::AAM_BASIC:
		{
			fittingShape.clone(dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetAlignedMeanShape() );
			fittingShape.Affine2D(
				VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
				dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetFaceParts(),
				fittingShape,
				faceComp.leftEye,
				faceComp.rightEye,
				faceComp.mouth) );
			fittingShape.ConstrainShapeInImage(img);

			dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)
				->VO_BasicAAMFitting(img,
				fittingShape,
				fittedImage,
				VO_Fitting2DSM::EPOCH );
		}
		break;
	case VO_AXM::AAM_DIRECT:
		{
			fittingShape.clone(dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetAlignedMeanShape() );
			fittingShape.Affine2D(
				VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
				dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)->m_VOAAMBasic->GetFaceParts(),
				fittingShape,
				faceComp.leftEye,
				faceComp.rightEye,
				faceComp.mouth) );
			fittingShape.ConstrainShapeInImage(img);

			dynamic_cast<VO_FittingAAMBasic*>(fitting2dsm)
				->VO_DirectAAMFitting(img,
				fittingShape,
				fittedImage,
				VO_Fitting2DSM::EPOCH );
		}
		break;
	case VO_AXM::CLM:
	case VO_AXM::AFM:
		break;
	case VO_AXM::AAM_IAIA:
		{
			fittingShape.clone(dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetAlignedMeanShape() );
			fittingShape.Affine2D(
				VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
				dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetFaceParts(),
				fittingShape,
				faceComp.leftEye,
				faceComp.rightEye,
				faceComp.mouth) );
			fittingShape.ConstrainShapeInImage(img);

			dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)
				->VO_IAIAAAMFitting(img,
				fittingShape,
				fittedImage,
				VO_Fitting2DSM::EPOCH );
		}
		break;
	case VO_AXM::AAM_CMUICIA:
		{
			fittingShape.clone(dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetAlignedMeanShape() );
			fittingShape.Affine2D(
				VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
				dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)->m_VOAAMInverseIA->GetFaceParts(),
				fittingShape,
				faceComp.leftEye,
				faceComp.rightEye,
				faceComp.mouth) );
			fittingShape.ConstrainShapeInImage(img);

			dynamic_cast<VO_FittingAAMInverseIA*>(fitting2dsm)
				->VO_ICIAAAMFitting(img,
				fittingShape,
				fittedImage,
				VO_Fitting2DSM::EPOCH );
		}
		break;
	case VO_AXM::AAM_FAIA:
		break;
	case VO_AXM::ASM_LTC:
		{
			fittingShape.clone(dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)->m_VOASMLTC->GetAlignedMeanShape() );
			fittingShape.Affine2D(
				VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
				dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)->m_VOASMLTC->GetFaceParts(),
				fittingShape,
				faceComp.leftEye,
				faceComp.rightEye,
				faceComp.mouth) );
			fittingShape.ConstrainShapeInImage(img);

			dynamic_cast<VO_FittingASMLTCs*>(fitting2dsm)
				->VO_ASMLTCFitting(	img,
				fittingShape,
				fittedImage,
				VO_Features::DIRECT,
				VO_Fitting2DSM::EPOCH,
				3);	// change this 2 to 1 for 1D profile ASM
		}
		break;
	case VO_AXM::ASM_PROFILEND:	// default, 2D Profile ASM
		{
			fittingShape.clone(dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)->m_VOASMNDProfile->GetAlignedMeanShape() );
			fittingShape.Affine2D(
				VO_Fitting2DSM::VO_FirstEstimationBySingleWarp(
				dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)->m_VOASMNDProfile->GetFaceParts(),
				fittingShape,
				faceComp.leftEye,
				faceComp.rightEye,
				faceComp.mouth)
				);
			fittingShape.ConstrainShapeInImage(img);
		
			dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm)
				->VO_ASMNDProfileFitting(	img,
				fittingShape,
				fittedImage,
				VO_Fitting2DSM::EPOCH,
				4,
				2);	// change this 2 to 1 for 1D profile ASM

		}
		break;
	}

	// NOTE(review): rect1 is computed but never used -- dead code unless
	// GetShapeBoundRect() has side effects (it reads as a pure getter).
	Rect rect1 = fittingShape.GetShapeBoundRect();
	// Export the fitted shape as a flat point list.
	// NOTE(review): assumes ToPointList() yields a single-row Mat whose
	// elements are Point2f -- confirm against VO_Shape's implementation.
	Mat plist = fittingShape.ToPointList();
	pl.clear();
	for (int i = 0; i < plist.cols; ++i)
		pl.push_back(plist.at<Point2f>(0,i));
	return true;
}

/**
 * @brief  Draw a fitted ASM shape on img: one line per model edge plus a
 *         filled circle at each edge endpoint.
 * @param  img    image to draw on (header passed by value, pixels shared)
 * @param  plist  fitted landmark points, indexed by the model's edge list
 * @return false if no ASM model is loaded or the loaded model is not an
 *         ND-profile ASM, true otherwise
 */
bool VOSM::drawASM( Mat img, const vector<Point2f>& plist )
{
	if (!isLoadASM) return false;

	VO_FittingASMNDProfiles* specifiedP = dynamic_cast<VO_FittingASMNDProfiles*>(fitting2dsm);
	// dynamic_cast yields NULL when a different model kind (AAM/LTC) was
	// loaded; the original code dereferenced it unconditionally and crashed.
	if (specifiedP == NULL) return false;

	// Bind by const reference to avoid copying the edge vector.
	const vector<VO_Edge>& edges = specifiedP->m_VOASMNDProfile->GetEdge();
	for (size_t i = 0; i < edges.size(); ++i) {
		const size_t i1 = edges[i].GetIndex1();
		const size_t i2 = edges[i].GetIndex2();
		// Skip edges referencing points outside the supplied point list
		// instead of reading out of bounds.
		if (i1 >= plist.size() || i2 >= plist.size()) continue;
		Point2f iorg = cvPointFrom32f( plist[i1] );
		Point2f idst = cvPointFrom32f( plist[i2] );
		cv::line( img, iorg, idst, colors[8], 1, 0, 0 );
		cv::circle( img, iorg, 2, colors[0], -1, 8, 0 );
		cv::circle( img, idst, 2, colors[0], -1, 8, 0 );
	}
	return true;
}
