
#include "gazeTracker.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

extern CvFont              myFont;
extern CvRect			   *faceRec;
extern CvRect			   *lEyeRec;
extern CvRect			   *rEyeRec;
extern float			   *lPupilRec;
extern float			   *rPupilRec;

extern CvSeq			   *lPupils;
extern CvSeq			   *rPupils;

extern CvCapture *capture;
extern IplImage *gray_image;

int getFaceBox, leftEyeX, leftEyeY, rightEyeX, rightEyeY;


// the transformation matrices
// used to convert eye points into screen points
CvArr *screenTransformLeft;
CvArr *screenTransformRight;


// Points that represent the eyes positions
// when looking at the four corners of the screen
EyesPoint topLeft;
EyesPoint topRight;
EyesPoint bottomLeft;
EyesPoint bottomRight;

int debug;
int color;
FILE *output;

#define screen_width 1680
#define screen_height 1050

// Map an eye-space point into screen coordinates using the given 3x3
// projective transform, clamping the result to the screen rectangle.
void transformToScreen(CvPoint2D32f eyePoint, CvPoint2D32f *screenPoint, CvArr *transformMatrix) 
{
	// Lift the eye point into homogeneous coordinates.
	CvMat *src = cvCreateMat(3, 1, CV_32FC1);
	CvMat *dst = cvCreateMat(3, 1, CV_32FC1);
	cvmSet(src, 0, 0, eyePoint.x);
	cvmSet(src, 1, 0, eyePoint.y);
	cvmSet(src, 2, 0, 1.0);

	// Apply the projective transform.
	cvMatMul(transformMatrix, src, dst);

	// De-homogenize: divide through by the w component.
	double w = cvmGet(dst, 2, 0);
	double sx = cvmGet(dst, 0, 0) / w;
	double sy = cvmGet(dst, 1, 0) / w;

	// Clamp to the physical screen bounds.
	screenPoint->x = (sx < 0) ? 0 : ((sx > screen_width)  ? screen_width  : sx);
	screenPoint->y = (sy < 0) ? 0 : ((sy > screen_height) ? screen_height : sy);

	cvReleaseMat(&dst);
	cvReleaseMat(&src);
}

// Draw the two eye gaze points (left red, right green) and their average
// (blue) onto 'image'. When 'transform' is non-zero the points are first
// mapped through the calibrated eye-to-screen transforms.
void drawGaze(IplImage *image, CvPoint2D32f l, CvPoint2D32f r, int transform = 0) {
	CvPoint2D32f left = l;
	CvPoint2D32f right = r;
	if (transform) {
		transformToScreen(l, &left, screenTransformLeft);
		transformToScreen(r, &right, screenTransformRight);
	}

	cvCircle(image, cvPointFrom32f(left), 5, CV_RGB(255, 0, 0), -1);
	cvCircle(image, cvPointFrom32f(right), 5, CV_RGB(0, 255, 0), -1);

	// Midpoint of the two eyes, truncated to whole pixels.
	int midx = (left.x + right.x) / 2;
	int midy = (left.y + right.y) / 2;
	cvCircle(image, cvPoint(midx, midy), 5, CV_RGB(0, 0, 255), -1);
}

// Get the average eye position over 'length' seconds
// Get the average eye position over 'length' seconds.
//
// Frames are pulled from the global 'capture'; the 'image' parameter is
// only used as a scratch frame handle (its incoming value is discarded).
// On return *leye and *reye hold the mean pupil positions sampled while
// the loop ran; if no frame was sampled (length <= 0) they are left (0,0).
void avgEyePos(IplImage *image, CvPoint2D32f *leye, CvPoint2D32f *reye, int length /*s*/)
{
	leye->x = 0;
	leye->y = 0;
	reye->x = 0;
	reye->y = 0;
	time_t t = time(NULL);
	int count = 0;
	while (difftime(time(NULL), t) < length) {
		// get the next frame from the camera
		image = cvQueryFrame(capture);

		// get the current eye positions
		CvPoint2D32f l, r;
		if (color)
			getEyesPos_color(image, &l, &r);
		else
			getEyesPos(image, &l, &r);

		// add them to the running total
		leye->x += l.x;
		leye->y += l.y;
		reye->x += r.x;
		reye->y += r.y;
		count++;

		if (debug) {
			// draw debugging information: eye search regions and pupil hits
			cvRectangle(image, cvPoint(lEyeRec->x, lEyeRec->y), cvPoint((lEyeRec->x + lEyeRec->width), (lEyeRec->y + lEyeRec->height)), CV_RGB(0,255,0), 1, 8, 0);
			cvRectangle(image, cvPoint(rEyeRec->x, rEyeRec->y), cvPoint((rEyeRec->x + rEyeRec->width), (rEyeRec->y + rEyeRec->height)), CV_RGB(0,255,0), 1, 8, 0);
			cvCircle(image, cvPoint(l.x, l.y), 3, CV_RGB(255,0,0), 1);
			cvCircle(image, cvPoint(r.x, r.y), 3, CV_RGB(255,0,0), 1);
			cvShowImage("win", image);
		}
	}

	// BUG FIX: guard against dividing by zero when no samples were taken
	// (e.g. a non-positive 'length').
	if (count == 0)
		return;

	// average the accumulated values
	leye->x /= count;
	leye->y /= count;
	reye->x /= count;
	reye->y /= count;
}

// This function gets the average eye positions at each of the four corners
void train(IplImage *face)
{
	IplImage *image = cvCreateImage(cvSize (screen_width, screen_height), IPL_DEPTH_8U, 1);
	
	#define dot_size 30
	#define dot_time 3 // time in seconds to show the dot

	// get top left corner
	// black the screen
	cvRectangle(image, cvPoint(0, 0), cvPoint(image->width, image->height), cvScalar(0), -1);
	//draw the dot
	cvCircle(image, cvPoint(0, 0), dot_size, cvScalar(255), -1);
	// show the image
	cvShowImage("win", image);
	// wait one second, this is to allow the user time to move there eyes to look at the dot
	cvWaitKey(1000);
	// get the average eye position while they are looking at the dot
	avgEyePos(image, &topLeft.leye, &topLeft.reye, dot_time);
	// print the cordinates
	//printf("Top Left\t(%f,%f)\t(%f,%f)\n",topLeft.leye.x, topLeft.leye.y, topLeft.reye.x, topLeft.leye.y);

	// get top right corner
	cvRectangle(image, cvPoint(0, 0), cvPoint(image->width, image->height), cvScalar(0), -1);
	cvCircle(image, cvPoint(screen_width, 0), dot_size, cvScalar(255), -1);
	cvShowImage("win", image);
	cvWaitKey(1000); // wait one second
	avgEyePos(image, &topRight.leye, &topRight.reye, dot_time);
	//printf("Top Right\t(%f,%f)\t(%f,%f)\n",topRight.leye.x, topRight.leye.y, topRight.reye.x, topRight.leye.y);

	// get bottom right corner
	cvRectangle(image, cvPoint(0, 0), cvPoint(image->width, image->height), cvScalar(0), -1);
	cvCircle(image, cvPoint(screen_width, screen_height), dot_size, cvScalar(255), -1);
	cvShowImage("win", image);
	cvWaitKey(1000); // wait one second
	avgEyePos(image, &bottomRight.leye, &bottomRight.reye, dot_time);
	//printf("Bottom Right\t(%f,%f)\t(%f,%f)\n",bottomRight.leye.x, bottomRight.leye.y, bottomRight.reye.x, bottomRight.leye.y);

	// get bottom left corner
	cvRectangle(image, cvPoint(0, 0), cvPoint(image->width, image->height), cvScalar(0), -1);
	cvCircle(image, cvPoint(0, screen_height), dot_size, cvScalar(255), -1);
	cvShowImage("win", image);
	cvWaitKey(1000); // wait one second
	avgEyePos(image, &bottomLeft.leye, &bottomLeft.reye, dot_time);
	//printf("Bottom Left\t(%f,%f)\t(%f,%f)\n",bottomLeft.leye.x, bottomLeft.leye.y, bottomLeft.reye.x, bottomLeft.leye.y);
	

	// now we need to create a transformation matrix to take a point inside these 4 points
	// and transform it into screen cordinates
	float imageCordsLeft[] = {topLeft.leye.x, topLeft.leye.y,
									  topRight.leye.x, topRight.leye.y,
									  bottomRight.leye.x, bottomRight.leye.y,
									  bottomLeft.leye.x, bottomLeft.leye.y};
	screenTransformLeft = createScreenTransform(imageCordsLeft);

	float imageCordsRight[] = {topLeft.reye.x, topLeft.reye.y,
										topRight.reye.x, topRight.reye.y, 
									 	bottomRight.reye.x, bottomRight.reye.y,
										bottomLeft.reye.x, bottomLeft.reye.y};
	screenTransformRight = createScreenTransform(imageCordsRight);
}

// this using the function described here
// http://alumni.media.mit.edu/~cwren/interpolator/
// Build the 3x3 projective transform mapping eye-space points to screen
// coordinates, given the eye positions measured at the four screen corners.
//
// imageCords holds four (x, y) pairs ordered top-left, top-right,
// bottom-right, bottom-left. The caller owns the returned matrix and must
// release it with cvReleaseMat.
//
// Uses the 8-parameter projective interpolator described here:
// http://alumni.media.mit.edu/~cwren/interpolator/
CvArr *createScreenTransform(float *imageCords)
{
	// The screen corner coordinates, in the same order as imageCords.
	float screenCord[] = { 0.0, 0.0,
	                       (float)screen_width, 0.0,
	                       (float)screen_width, (float)screen_height,
	                       0.0, (float)screen_height };

	// Right-hand side B is the screen coordinates stacked into a column.
	// FIX: previously these eight values were duplicated by hand alongside
	// screenCord; filling B from the array keeps the two from drifting.
	CvMat *B = cvCreateMat(8, 1, CV_32FC1);
	for (int i = 0; i < 8; i++)
		cvmSet(B, i, 0, screenCord[i]);

	// Design matrix A: two rows per point correspondence.
	CvMat *A = cvCreateMat(8, 8, CV_32FC1);
	for (int i = 0; i < 4; i++) {
		float x  = imageCords[2*i];
		float y  = imageCords[2*i+1];
		float sx = screenCord[2*i];
		float sy = screenCord[2*i+1];

		// x-equation row: [x y 1 0 0 0 -sx*x -sx*y]
		cvmSet(A, 2*i, 0, x);
		cvmSet(A, 2*i, 1, y);
		cvmSet(A, 2*i, 2, 1.0);
		cvmSet(A, 2*i, 3, 0.0);
		cvmSet(A, 2*i, 4, 0.0);
		cvmSet(A, 2*i, 5, 0.0);
		cvmSet(A, 2*i, 6, -1*sx*x);
		cvmSet(A, 2*i, 7, -1*sx*y);

		// y-equation row: [0 0 0 x y 1 -sy*x -sy*y]
		cvmSet(A, 2*i+1, 0, 0.0);
		cvmSet(A, 2*i+1, 1, 0.0);
		cvmSet(A, 2*i+1, 2, 0.0);
		cvmSet(A, 2*i+1, 3, x);
		cvmSet(A, 2*i+1, 4, y);
		cvmSet(A, 2*i+1, 5, 1.0);
		cvmSet(A, 2*i+1, 6, -1*sy*x);
		cvmSet(A, 2*i+1, 7, -1*sy*y);
	}

	// Solve A*l = B via the pseudo-inverse: l = (A'A)^-1 A' B.
	CvMat *ATrans = cvCreateMat(8, 8, CV_32FC1);
	cvTranspose(A, ATrans);

	CvMat *temp = cvCreateMat(8, 8, CV_32FC1);
	cvMatMul(ATrans, A, temp);
	CvMat *tempInv = cvCreateMat(8, 8, CV_32FC1);
	cvInvert(temp, tempInv);
	cvMatMul(tempInv, ATrans, temp);

	CvMat *l = cvCreateMat(8, 1, CV_32FC1);
	cvMatMul(temp, B, l);

	// Pack the eight solved parameters into the 3x3 transform, row-major,
	// with the fixed scale entry M[2][2] = 1.
	CvMat *M = cvCreateMat(3, 3, CV_32FC1);
	for (int i = 0; i < 8; i++)
		cvmSet(M, i / 3, i % 3, cvmGet(l, i, 0));
	cvmSet(M, 2, 2, 1.0);

	// Do some house cleaning
	cvReleaseMat(&A);
	cvReleaseMat(&B);
	cvReleaseMat(&ATrans);
	cvReleaseMat(&temp);
	cvReleaseMat(&tempInv);
	cvReleaseMat(&l);

	// return the transformation matrix; caller releases it
	return (CvArr *)M;
}

// Trace the calibration quadrilaterals (one per eye) onto 'image' in red,
// connecting the four measured corner eye positions.
void drawEyeBounds (IplImage *image)
{
	CvPoint2D32f rightEye[4] = { topLeft.reye, topRight.reye, bottomRight.reye, bottomLeft.reye };
	CvPoint2D32f leftEye[4]  = { topLeft.leye, topRight.leye, bottomRight.leye, bottomLeft.leye };

	for (int i = 0; i < 4; i++) {
		int next = (i + 1) % 4;
		cvLine(image, cvPointFrom32f(rightEye[i]), cvPointFrom32f(rightEye[next]), CV_RGB(255,0,0));
		cvLine(image, cvPointFrom32f(leftEye[i]),  cvPointFrom32f(leftEye[next]),  CV_RGB(255,0,0));
	}
}

// Accuracy test: display num_targets random dots, track the gaze at each
// for two seconds, and log per-target pixel errors (x, y, and Euclidean
// distance, for left eye / right eye / average) to 'output' as CSV,
// followed by a final row of overall averages.
void test()
{
	#define num_targets 10
	srand((unsigned)time(NULL));

	IplImage *image;
	IplImage *image2 = cvCreateImage(cvSize(screen_width, screen_height), IPL_DEPTH_8U, 3);

	fprintf(output, "lx, ly, ld, rx, ry, rd, ax, ay, ad\n");

	// Running totals across all targets.
	float lx=0, ly=0, ld=0, rx=0, ry=0, rd=0, ax=0, ay=0, ad=0; 

	for (int i = 0; i < num_targets; i++) {
		int target_x = rand() % screen_width;
		int target_y = rand() % screen_height;

		float lxError = 0, lyError = 0, lError = 0;
		float rxError = 0, ryError = 0, rError = 0;
		float avgxError = 0, avgyError = 0, avgError = 0;

		// Shrink the dot into place over one second to draw the eye to it.
		for (int j = 10; j > 0; j--) {
			cvRectangle(image2, cvPoint(0, 0), cvPoint(screen_width, screen_height), CV_RGB(0,0,0), -1);
			cvCircle(image2, cvPoint(target_x, target_y), 10*j, CV_RGB(255,0,0), -1);
			cvShowImage("win", image2);
			cvWaitKey(10); // one tenth of a second per step
		}

		// Sample the gaze for two seconds and accumulate errors.
		time_t t = time(NULL);
		int count = 0;
		while (difftime(time(NULL), t) < 2) {
			cvRectangle(image2, cvPoint(0, 0), cvPoint(screen_width, screen_height), CV_RGB(0,0,0), -1);
			cvCircle(image2, cvPoint(target_x, target_y), 10, CV_RGB(255,0,0), -1);
			image = cvQueryFrame(capture);

			// get the current eye positions
			CvPoint2D32f l, r, screenl, screenr;  
			if (color)
				getEyesPos_color(image, &l, &r);
			else
				getEyesPos(image, &l, &r);
			count++;

			transformToScreen(l, &screenl, screenTransformLeft);
			transformToScreen(r, &screenr, screenTransformRight);

			// BUG FIX: abs() is integer absolute value, which truncated the
			// fractional part of these float errors; use fabsf() instead.
			lxError += fabsf(screenl.x - target_x);
			lyError += fabsf(screenl.y - target_y);
			lError += sqrt(pow(screenl.x - target_x,2) + pow(screenl.y - target_y,2));

			rxError += fabsf(screenr.x - target_x);
			ryError += fabsf(screenr.y - target_y);
			rError += sqrt(pow(screenr.x - target_x,2) + pow(screenr.y - target_y,2));

			avgxError += fabsf(((screenr.x + screenl.x)/2) - target_x);
			avgyError += fabsf(((screenr.y + screenl.y)/2) - target_y);
			avgError += sqrt(pow(((screenr.x + screenl.x)/2) - target_x,2) + pow(((screenr.y + screenl.y)/2) - target_y,2));

			if (debug)
				drawGaze(image2, screenl, screenr);	

			cvShowImage("win", image2);
			if (cvWaitKey(1) != -1)
				break;
		}

		// Average over the samples for this target. (count >= 1 here: the
		// loop body always completes its first iteration before any break.)
		lxError /= count;
		lyError /= count;
		lError /= count;

		rxError /= count;
		ryError /= count;
		rError /= count;

		avgxError /= count;
		avgyError /= count;
		avgError /= count;

		// Fold this target into the overall totals.
		lx += lxError;
		ly += lyError;
		ld += lError;
		rx += rxError;
		ry += ryError;
		rd += rError;
		ax += avgxError;
		ay += avgyError;
		ad += avgError;

		fprintf(output, "%f,%f,%f,%f,%f,%f,%f,%f,%f\n", lxError, lyError, lError, rxError, ryError, rError, avgxError, avgyError, avgError);
	}

	// Final row: mean error across all targets.
	fprintf(output, "%f,%f,%f,%f,%f,%f,%f,%f,%f\n", lx/num_targets, ly/num_targets, ld/num_targets, rx/num_targets, ry/num_targets, rd/num_targets, ax/num_targets, ay/num_targets, ad/num_targets);

	cvReleaseImage(&image2);
}

int main(int argc, char *argv[])
{
	// set up the shitz
	if (setUpFaceFinder()) {
		printf("Failed to set up properly\n");
		return 0;
	}

	debug = 0;
	color = 1;
	if (argc > 1)
		output = fopen(argv[1], "w");
	else 
		output = stdout;
	if (argc > 2)
		color = atoi(argv[2]);
	if (argc > 3)
		debug = atoi(argv[3]);

	// width and height of the image from the camera. Will be assigned later
	int width = 0;
	int height = 0;

	// more set up
	getFaceBox = 1;
	IplImage *image;
	if (color)
		cvSetMouseCallback("win", mouseCallBack_color, &image);
	else
		cvSetMouseCallback("win", mouseCallBack, &image);

	
	while (getFaceBox) {
		// get the image from the camera
		image = cvQueryFrame(capture);
	
		// if this is the first frame, then create the image we will use to process
		if (!width && !height) {
			width = image->width;
			height = image->height;
			if (width && height)
				gray_image = cvCreateImage(cvSize (width, height), IPL_DEPTH_8U, 1);
		} else {
			// process the image
			cvCvtColor (image, gray_image, CV_BGR2GRAY);
		
			// get and draw the face rec, and the eye bounding regions

			if (getFaceRec(gray_image) ) {
				cvRectangle(image, cvPoint(faceRec->x, faceRec->y), cvPoint((faceRec->x + faceRec->width), (faceRec->y + faceRec->height)), CV_RGB(0,255,0), 1, 8, 0);

				cvSetImageROI(image, cvRect(faceRec->x, faceRec->y, faceRec->width*.5, faceRec->height*.5));
				cvRectangle(image, cvPoint(lEyeRec->x, lEyeRec->y), cvPoint((lEyeRec->x + lEyeRec->width), (lEyeRec->y + lEyeRec->height)), CV_RGB(0,255,0), 1, 8, 0);
				cvResetImageROI(image);
				cvSetImageROI(image, cvRect(faceRec->x +  faceRec->width*.5, faceRec->y, faceRec->width*.5, faceRec->height*.5));
				cvRectangle(image, cvPoint(rEyeRec->x, rEyeRec->y), cvPoint((rEyeRec->x + rEyeRec->width), (rEyeRec->y + rEyeRec->height)), CV_RGB(0,255,0), 1, 8, 0);

			}
			cvResetImageROI(image);
			//cvReleaseImage(&gray_image);
			cvShowImage("win", image);
			//printf("%d\n", numFaces);
		}
		
		
		int key = cvWaitKey(100);
		if (key != -1)
			break;
	}

	// assigine the bounding recs
	rEyeRec->x += faceRec->x + faceRec->width*.5;
	rEyeRec->y += faceRec->y;
	lEyeRec->x += faceRec->x;
	lEyeRec->y += faceRec->y;

	// train the system
	train(image);

	test();
	// now we should be able to track the users gaze...
	IplImage *image2 = cvCreateImage(cvSize (screen_width, screen_height), IPL_DEPTH_8U, 3);
	while (1) {
		// get the image
		image = cvQueryFrame(capture);

		// get the eye positions
		CvPoint2D32f l, r, screenl, screenr;  
		if (color)
			getEyesPos_color(image, &l, &r);
		else
			getEyesPos(image, &l, &r);
		transformToScreen(l, &screenl, screenTransformLeft);
		transformToScreen(r, &screenr, screenTransformRight);
		cvRectangle(image2, cvPoint(0, 0), cvPoint(screen_width, screen_height), CV_RGB(0,0,0), -1);
		drawEyeBounds(image);
		drawGaze(image2, screenl, screenr);	

		cvShowImage("win", image2);
		if (cvWaitKey(1) != -1)
			break;
	}

	return 0;
}

