#include <string>
#include <vector>
#include <windows.h>
#include <tchar.h>
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <iomanip>
#include "opencv.hpp"
#include "CTruthDataPoint.h"
#include "DataPointManager.h"
#include "DataPoint.h"
#include "FaceDetectorModule.h"
#include "ImgTxtWriter.h"

using namespace cv;
using namespace std;
// Detect faces on the input at 0/90/180/270 degrees; *output receives the annotated best rotation.
vector<TFaceList> RotateToMostFaces( IplImage * input, CFaceDetectorModule& detector, IplImage ** output);
// Track one face through a frame sequence with a Kalman filter and write an AVI; caller owns the returned manager.
CDataPointManager* MakeFaceTrackingVideo( CDataPointManager& inputDataMgr, string output_base, CFaceDetectorModule& detector );
// Drive MakeFaceTrackingVideo over the three canned ..\Data\videoN\ sequences.
void SetupFaceTrackingVideos(CFaceDetectorModule& detector);
// Rotate src by angle degrees into dst (dst must be pre-allocated with post-rotation dimensions).
void ApplyRotation( IplImage* src, IplImage * dst, double angle );
// Return the face whose center is nearest to (center_x, center_y).
SFaceObject GetClosestFace( int center_x, int center_y, TFaceList faces);
// Euclidean distance (truncated to int) from a face's center to (center_x, center_y).
int DistanceFrom( SFaceObject face, int center_x, int center_y);
// Shift/clamp a rect so it lies fully inside an img_w x img_h image.
CvRect ClampToImage( int img_w, int img_h, CvRect input );
// True when rectangles a and b intersect.
bool AreOverlapping( CvRect a, CvRect b );
// True when every detected face overlaps every other (duplicates of one real face).
bool IsSingleFace( TFaceList faces );

int main(int argc, char* argv[])
{
	// Process a directory of sequential still images: for each one, find the
	// rotation (0/90/180/270) that yields the most detected faces, annotate
	// that image, and save it as ResultNNN.jpg.
	int imgCount = 200;
	string path=".\\..\\proc\\";
	string baseName="image";
	string ext=".jpg";
	// Load a bunch of sequential images from file
	CvMemStorage* storage = cvCreateMemStorage(0);

	CDataPointManager * dataMgr = new CDataPointManager(baseName,path,ext,imgCount);

	CFaceDetectorModule faceDetector;
	vector<string> detectors;
	vector<string> detectorNames;
	string detectorPath=".\\";
	// Cascade files are resolved relative to the working directory.
	detectors.push_back("haarcascade_frontalface_default.xml");
	detectorNames.push_back("DEFAULT");
	detectors.push_back("haarcascade_profileface.xml");
	detectorNames.push_back("PROFILE");
	detectors.push_back("haarcascade_frontalface_alt.xml");
	detectorNames.push_back("ALT");
	detectors.push_back("haarcascade_frontalface_alt2.xml");
	detectorNames.push_back("ALT2");
	// BUGFIX: the PROFILE cascade was pushed twice, so the same detector ran
	// twice per frame for no benefit.

	if(!faceDetector.InitializeHaarDetectors(detectorPath,detectors,detectorNames))
	{
		cout << "FAILED TO LOAD DETECTOR CASCADES" << endl;
		exit(1);
	}

	faceDetector.SetScale(2); // downsample during detection for speed
//	SetupFaceTrackingVideos(faceDetector);
	bool quit=false;
	string wndwName = "Input Image";
	for( int i=0; i < dataMgr->GetSize() && !quit ; i++ )
	{
		if( dataMgr->GetDataPoint(i)->LoadSourceImage() )
		{
			IplImage * result = NULL;
			cout << "Doing Image " << i << " of " << dataMgr->GetSize() << endl;
			RotateToMostFaces( dataMgr->GetDataPoint(i)->GetImage(), faceDetector, &result);
			stringstream out;
			out << "Result"  << setw(3) << setfill('0') << i+1 << ".jpg";
			if( NULL != result ) // BUGFIX: guard against a NULL result image
			{
				cvSaveImage(out.str().c_str(), result );
				cvReleaseImage(&result);
			}
		}
		dataMgr->GetDataPoint(i)->FreeImage(); 
	}  
	cvDestroyWindow(wndwName.c_str());
	delete dataMgr;                 // delete on NULL is safe; explicit check not needed
	cvReleaseMemStorage(&storage);  // BUGFIX: storage was allocated but never released
	return 0;
}
/*******************************************************************/
vector<TFaceList> RotateToMostFaces( IplImage * input, CFaceDetectorModule& detector, IplImage ** output)
{
	// Run the detector on the input at all four right-angle rotations, keep
	// the orientation with the most detections, annotate that image (face
	// boxes, rotation label, photo-type label) and hand it back via *output.
	// Returns the detected face list for every orientation; the caller owns
	// *output and must release it.
	vector<TFaceList> perRotation;
	const int w = input->width;
	const int h = input->height;
	// 90/270 degree rotations swap width and height.
	IplImage* rotated[4];
	rotated[0] = cvCreateImage(cvSize(w,h),input->depth,input->nChannels);
	rotated[1] = cvCreateImage(cvSize(h,w),input->depth,input->nChannels);
	rotated[2] = cvCreateImage(cvSize(w,h),input->depth,input->nChannels);
	rotated[3] = cvCreateImage(cvSize(h,w),input->depth,input->nChannels);
	for( int r=0; r<4; r++ )
	{
		ApplyRotation( input, rotated[r], 90.0*r );
	}

	// Pick the orientation with the highest face count (first wins on ties).
	int best = 0;
	int bestFaces = 0;
	for( int r=0; r<4; r++ )
	{
		TFaceList found = detector.DetectFaces(rotated[r]);
		const int nFound = static_cast<int>(found.size());
		cout << "rotation: " << r << " faces: " << nFound <<endl;
		if( nFound > bestFaces )
		{
			bestFaces = nFound;
			best = r;
		}
		perRotation.push_back(found);
	}

	*output = rotated[best];
	detector.DrawFaces(*output, perRotation[best] );

	CImgTxtWriter textWriter;
	static const char* kRotationLabel[4] = {
		"Rotation: 0 degrees",
		"Rotation: 90 degrees",
		"Rotation: 180 degrees",
		"Rotation: 270 degrees" };
	cout << "Using " << 90*best << " rotation " << endl;
	*output = textWriter.WriteText(kRotationLabel[best], *output, "BLUE", cvPoint(10,15) );

	// Classify the frame by how many distinct faces survived.
	if( perRotation[best].size() == 0 )
	{
		cout << "No faces " << endl;
		*output = textWriter.WriteText("No faces detected!", *output, "GREEN", cvPoint(10,30) );
	}
	else if( IsSingleFace( perRotation[best] ))
	{
		cout << "Individual photo " << endl;
		*output = textWriter.WriteText("Individual Photo", *output, "GREEN", cvPoint(10,30) );
	}
	else
	{
		cout << "Group photo " << endl;
		*output = textWriter.WriteText("Group Photo", *output, "GREEN", cvPoint(10,30) );
	}

	// Free every rotation except the one handed to the caller.
	for( int r=0; r<4; r++ )
	{
		if( r != best )
		{
			cvReleaseImage(&rotated[r]);
		}
	}

	return perRotation;
}
/*******************************************************************/
void ApplyRotation( IplImage* src, IplImage * dst, double angle )
{
	// Rotate src by `angle` degrees into dst.  The caller must allocate dst
	// with the correct post-rotation dimensions (width/height swapped for
	// 90 and 270).
	//
	// The rotation center is chosen so the rotated image lands exactly
	// inside dst:
	//  -  90: rotating about ((w-1)/2,(w-1)/2) maps a WxH image into HxW.
	//  - 270: rotating about ((h-1)/2,(h-1)/2) maps a WxH image into HxW.
	//  - 180: rotate about the true image center so it stays in place.
	//  -   0: identity; the center is irrelevant.
	CvPoint2D32f center = cvPoint2D32f( 0.f, 0.f );
	if( angle==90 )
	{
		float c = (float)((src->width)-1) / 2;
		center = cvPoint2D32f( c, c );
	}
	else if( angle==270 )
	{
		float c = (float)((src->height)-1) / 2;
		center = cvPoint2D32f( c, c );
	}
	else if( angle==180 )
	{
		// BUGFIX: the original rotated 180 degrees about (0,0), which maps
		// the whole image outside dst and produces a black frame.
		center = cvPoint2D32f( (float)((src->width)-1) / 2,
		                       (float)((src->height)-1) / 2 );
	}
	CvMat* rot_mat = cvCreateMat(2,3,CV_32FC1);
	// cv2DRotationMatrix fills rot_mat in place; reassigning the pointer
	// from its return value was redundant.
	cv2DRotationMatrix( center, angle, 1.00, rot_mat );
	cvWarpAffine( src, dst, rot_mat );
	cvReleaseMat(&rot_mat);
}
/*******************************************************************/
void SetupFaceTrackingVideos(CFaceDetectorModule& detector)
{
	// Build the three face-tracking demo videos from their PNG frame dumps.
	// The trailing ctor argument (8) is the zero-padded frame-number width.
	// BUGFIX: MakeFaceTrackingVideo returns a heap-allocated manager that
	// the caller owns; the original dropped all three pointers (leak).
	int count = 172;
	string path=".\\..\\Data\\video1\\";
	string baseName="";
	string ext=".png";
	string output="Video1.avi";
	CDataPointManager inputDataMgr1(baseName,path,ext,count,8);
	delete MakeFaceTrackingVideo(inputDataMgr1, output, detector );
	count = 156;
	path=".\\..\\Data\\video2\\";
	baseName="";
	ext=".png";
	output="Video2.avi";
	CDataPointManager inputDataMgr2(baseName,path,ext,count,8);
	delete MakeFaceTrackingVideo(inputDataMgr2, output, detector );
	count = 233;
	path=".\\..\\Data\\video3\\";
	baseName="";
	ext=".png";
	output="Video3.avi";
	CDataPointManager inputDataMgr3(baseName,path,ext,count,8);
	delete MakeFaceTrackingVideo(inputDataMgr3, output, detector );
}
/*******************************************************************/
CDataPointManager* MakeFaceTrackingVideo( CDataPointManager& inputDataMgr, string output_base, CFaceDetectorModule& detector )
{
	// Track a single face through an image sequence using a constant-velocity
	// Kalman filter, and write an AVI showing each frame with the tracked
	// face crop pasted (boxed in red) into the bottom-right corner.
	// Returns a (currently unpopulated) manager the caller owns and must delete.
	CDataPointManager* outDataMgr = new CDataPointManager();

	// Our system is simple: 4 state variables (x,y position and x,y velocity)
	// and a 2-element position measurement.
	CvKalman* kalman = cvCreateKalman( 4, 2, 0 );
	CvMat* x_k = cvCreateMat( 4, 1, CV_32FC1 ); /* [x,y,vx,vy] our state vector*/
	CvMat* process_noise = cvCreateMat( 2, 1, CV_32FC1 );
	CvMat* z_k = cvCreateMat( 2, 1, CV_32FC1 ); /* [x,y] our mesurement vector */
	CvMat* w_k = cvCreateMat( 4, 1, CV_32FC1 ); /* Process Noise */
	cvZero(z_k);

	float F[] = {1, 0, 1, 0, // THE TRANSITION MATRIX (constant-velocity model)
	             0, 1, 0, 1,
	             0, 0, 1, 0,
	             0, 0, 0, 1 };
	memcpy(kalman->transition_matrix->data.fl, F, sizeof(F));

	// BUGFIX: for a 4-state / 2-measurement filter the measurement matrix is
	// 2x4 (8 floats).  The original declared a 4x4 array and memcpy'd 64
	// bytes into the 32-byte buffer, overflowing it.
	float H[] = { 1, 0, 0, 0,   // THE MEASUREMENT MATRIX
	              0, 1, 0, 0 };
	memcpy(kalman->measurement_matrix->data.fl, H, sizeof(H));
	cvSetIdentity(kalman->process_noise_cov, cvRealScalar(10));
	cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(1000));
	cvSetIdentity(kalman->error_cov_post, cvRealScalar(1000));

	cvKalmanCorrect(kalman,z_k); // prime the filter with a zero measurement
	// Now we are ready to go.
	double x_update=0.00, y_update=0.00;
	int fixed_w = 0, fixed_h = 0, xp =0, yp=0;
	TFaceList faces;
	int j=0;
	// Frame 0 supplies the output video dimensions.
	inputDataMgr.GetDataPoint(0)->LoadSourceImage();
	int input_w = inputDataMgr.GetDataPoint(0)->GetImage()->width;
	int input_h = inputDataMgr.GetDataPoint(0)->GetImage()->height;
	inputDataMgr.GetDataPoint(0)->FreeSourceImage(); 
	CvVideoWriter* writer = cvCreateVideoWriter(output_base.c_str(),0,10, cvSize(input_w,input_h));

	// Scan forward to the first frame with at least one detection.
	// BUGFIX: bounded by the sequence size; the original looped past the end
	// (and then indexed faces[0]) when no frame contained a face.
	while( faces.empty() && j < inputDataMgr.GetSize() )
	{
		inputDataMgr.GetDataPoint(j)->LoadSourceImage(); 
		faces = detector.DetectFaces(inputDataMgr.GetDataPoint(j)->GetImage());
		inputDataMgr.GetDataPoint(j)->FreeSourceImage(); 
		j++;
	}
	if( faces.empty() )
	{
		// Nothing to track anywhere in the sequence: clean up and bail out.
		cvReleaseVideoWriter(&writer);
		cvReleaseMat(&x_k);
		cvReleaseMat(&process_noise);
		cvReleaseMat(&z_k);
		cvReleaseMat(&w_k);
		cvReleaseKalman(&kalman);
		return(outDataMgr);
	}
	// Crop window is 4/5 of the first detected face.
	fixed_w = 4*faces[0].m_face.width/5;
	fixed_h = 4*faces[0].m_face.height/5;

	// NOTE(review): x and y are deliberately swapped here and in the
	// measurement updates below (xp carries the y-coordinate and vice
	// versa); preserved as-is -- confirm against the filter layout before
	// "fixing".
	yp = faces[0].m_face.x+(faces[0].m_face.width/2);
	xp = faces[0].m_face.y+(faces[0].m_face.height/2);
	cvSet1D(z_k, 0, cvScalar(xp));
	cvSet1D(z_k, 1, cvScalar(yp));
	cvKalmanCorrect(kalman,z_k);
	int last_x = xp;
	int last_y = yp;
	// Jumps larger than a quarter of the mean crop dimension are treated as
	// false detections.
	int threshold = (fixed_w+fixed_h)/8;

	for( int i=1; i < inputDataMgr.GetSize(); ++i )
	{
		const CvMat* prediction = cvKalmanPredict(kalman, 0);
		int kxp = static_cast<int>(cvGet2D(prediction,1,0).val[0]);
		int kyp = static_cast<int>(cvGet2D(prediction,0,0).val[0]);
		if( inputDataMgr.GetDataPoint(i)->LoadSourceImage() )
		{
			cout << "Generating Video Image: " << i << " of " << inputDataMgr.GetSize() << endl;
			faces = detector.DetectFaces(inputDataMgr.GetDataPoint(i)->GetImage());

			if( static_cast<int>(faces.size()) > 0 )
			{
				SFaceObject face;
				if(static_cast<int>(faces.size()) > 1 )
				{
					// Multiple candidates: stick with the one nearest the
					// last tracked position.
					face = GetClosestFace( xp, yp, faces );
					xp = face.m_face.x+(face.m_face.width/2);
					yp = face.m_face.y+(face.m_face.height/2);
				}
				else
				{
					xp = faces[0].m_face.x+(faces[0].m_face.width/2);
					yp = faces[0].m_face.y+(faces[0].m_face.height/2);
					face = faces[0];
				}
				cout << "=============================================" << endl;
				cout << "TRUTH : (" << xp << "," << yp << " ) " << endl; 

				// If the face is making a crazy jump then use the Kalman
				// prediction (once the filter has had a few frames to warm up).
				int dist = DistanceFrom( face, last_x, last_y); 
				if( dist  > threshold && i > 5 )
				{
					x_update = static_cast<double>(xp);
					y_update = static_cast<double>(yp);
					xp = kxp;
					yp = kyp;
					cout << "BIG JUMP! USING(" << xp << " , " << yp << " )" << endl;
				}
				else // otherwise do the update
				{
					x_update = static_cast<double>(xp);
					y_update = static_cast<double>(yp);
				}
				cvSet1D(z_k, 0, cvScalar(y_update));
				cvSet1D(z_k, 1, cvScalar(x_update));
				cvKalmanCorrect(kalman, z_k);

				cout << "KALMAN: (" << kxp << "," << kyp << " ) " << endl; 
			}
			else
			{
				// No detection: coast on the prediction.
				xp = kxp;
				yp = kyp;
				cout << "DID NOT FIND FACE" << "USING(" << xp << " , " << yp << " )" << endl;
			}
			last_x = xp;
			last_y = yp;
			cout << "LAST  : (" << last_x << " , " << last_y << " )" << endl;
			// Convert the tracked center into the crop's top-left corner.
			xp = xp-(fixed_w/2);
			yp = yp-(fixed_h/2);

			IplImage * tempImg = cvCreateImage(cvSize(input_w,input_h), inputDataMgr.GetDataPoint(i)->GetImage()->depth,inputDataMgr.GetDataPoint(i)->GetImage()->nChannels);
			cvZero(tempImg); 
			cvCopy(inputDataMgr.GetDataPoint(i)->GetImage(),tempImg);
			CvRect r = cvRect(xp,yp,fixed_w, fixed_h);
			r = ClampToImage(input_w,input_h,r); 

			// Paste the tracked crop into the bottom-right corner via ROIs,
			// then box it in red (drawn while the ROI is still active).
			cvSetImageROI(inputDataMgr.GetDataPoint(i)->GetImage(),r);
			cvSetImageROI(tempImg,cvRect(input_w-fixed_w,input_h-fixed_h,fixed_w,fixed_h));
			cvCopy(inputDataMgr.GetDataPoint(i)->GetImage(),tempImg);
			cvRectangle(tempImg,cvPoint(input_w-5,input_h-5), cvPoint(input_w-fixed_w-5, input_h-fixed_h-5), CV_RGB(255,0,0),5);
			cvResetImageROI(tempImg); 
			cvResetImageROI(inputDataMgr.GetDataPoint(i)->GetImage());
			cvWriteFrame(writer, tempImg);
			cvReleaseImage(&tempImg); 
			inputDataMgr.GetDataPoint(i)->FreeSourceImage();
		}
	}
	cvReleaseVideoWriter(&writer);
	cvReleaseMat(&x_k); 
	cvReleaseMat(&process_noise); 
	cvReleaseMat(&z_k);
	cvReleaseMat(&w_k); 
	cvReleaseKalman(&kalman); // BUGFIX: the filter itself was leaked
	return(outDataMgr);
}
/*******************************************************************/
SFaceObject GetClosestFace( int center_x, int center_y, TFaceList faces )
{
	// Return the face whose center is nearest to (center_x, center_y).
	// Ties go to the earliest face in the list; an empty list yields
	// faces[0] (unchecked), matching the original behavior.
	int best = 0;
	int min_dist = INT_MAX;
	for( int idx = 0; idx < static_cast<int>(faces.size()); ++idx )
	{
		const int dist = DistanceFrom( faces[idx], center_x, center_y );
		if( dist < min_dist )
		{
			min_dist = dist;
			best = idx;
		}
	}

	return( faces[best] );
}
/*******************************************************************/
int DistanceFrom( SFaceObject face, int center_x, int center_y)
{
	// Euclidean distance (truncated to int) from the face's center to
	// (center_x, center_y).
	// BUGFIX: the squared distance is accumulated in double; the original
	// summed the squares in int, which overflows (UB) for large coordinates.
	int cx = (face.m_face.x+(face.m_face.width/2));
	int cy = (face.m_face.y+(face.m_face.height/2));
	double dx = static_cast<double>(cx-center_x);
	double dy = static_cast<double>(cy-center_y);
	return static_cast<int>(sqrt( (dx*dx)+(dy*dy) ));
}
/*******************************************************************/
CvRect ClampToImage( int img_w, int img_h, CvRect input )
{
	// Shift (and if necessary shrink) the rect so it lies fully inside an
	// img_w x img_h image, preserving its size whenever it fits.
	// BUGFIX: the original computed the right/bottom edges BEFORE clamping a
	// negative origin, so a rect that was both left of the image and too
	// wide was shifted back to a negative x.  It also never handled a rect
	// larger than the image.
	if( input.width > img_w )
	{
		input.width = img_w;
	}
	if( input.height > img_h )
	{
		input.height = img_h;
	}
	if( input.x < 0 )
	{
		input.x = 0;
	}
	if( input.y < 0 )
	{
		input.y = 0;
	}
	if( input.x + input.width > img_w )
	{
		input.x = img_w - input.width;  // >= 0 because width <= img_w
	}
	if( input.y + input.height > img_h )
	{
		input.y = img_h - input.height; // >= 0 because height <= img_h
	}
	return input;
}
/*******************************************************************/
bool IsSingleFace( TFaceList faces )
{
	// True when every detected face overlaps every other -- i.e. the
	// detections are all duplicates of one real face.  Zero or one entry is
	// trivially a single face.  A pair counts as overlapping if either
	// rectangle contains a corner of the other (both argument orders are
	// checked because AreOverlapping's test is one-directional).
	// PERF: iterate each unordered pair once; the original visited every
	// ordered pair, testing each pair twice with a symmetric check.
	TFaceListIter first;
	TFaceListIter second;
	for( first = faces.begin(); first != faces.end(); ++first)
	{
		second = first;
		for( ++second; second != faces.end(); ++second )
		{
			if(!AreOverlapping(first->m_face, second->m_face )
				&& !AreOverlapping(second->m_face, first->m_face ) )
			{
				return false;
			}
		}
	}
	return true;
}
/*******************************************************************/
bool AreOverlapping( CvRect a, CvRect b )
{
	// True when rectangles a and b intersect (touching edges count, matching
	// the original's inclusive comparisons).
	// BUGFIX: the original only tested whether a corner of `a` fell inside
	// `b`.  Even with callers checking both argument orders, that misses
	// cross-shaped intersections where neither rectangle contains a corner
	// of the other.  The standard interval-overlap test below is symmetric
	// and covers every case.
	return a.x <= b.x + b.width  && b.x <= a.x + a.width
	    && a.y <= b.y + b.height && b.y <= a.y + a.height;
}
/*******************************************************************/