
/*
	CDetector - face and hand tracking detector built on the OpenCV 1.x C API.
	Detects a face with a Haar cascade, samples its skin color, then tracks a
	hand-sized blob of moving skin-colored pixels and drives the Windows
	mouse cursor from its position.
*/
#include <iostream>
#include <string>
#include <math.h>
#include "cv.h"
#include "highgui.h"
#include "CDetector.h"

CDetector::CDetector()
{
	// Set up the detector: load the Haar cascade, open camera 0 and
	// allocate all working images at the camera's frame size.
	mState = STATE_NOFACE;
	storage = 0;
	cascade = 0;
	avgH = 0;
	avgS = 0;
	avgV = 0;

	// Structure for getting video from camera or avi
	capture = 0;

	// Null EVERY image pointer up front so the destructor can safely call
	// cvReleaseImage() even if construction bails out early below.
	// (Previously the snail_* pointers were left uninitialized on the
	// early-return paths, so the destructor released garbage pointers.)
	frame = 0;
	frame_copy = 0;
	old_frame = 0;
	end_result = 0;
	hsv_image = 0;
	hsv_mask = 0;
	snail_result = 0;
	snail_temp = 0;
	snail_temp2 = 0;
	snail_old = 0;
	snail_gray = 0;

	// Path of the frontal-face Haar cascade (single assignment; the old
	// code set cascade_name twice and declared a dead local input_name).
	cascade_name = "./haarcascades/haarcascade_frontalface_alt.xml";

	// Load the HaarClassifierCascade
	cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name.c_str(), 0, 0, 0 );

	// Check whether the cascade has loaded successfully. Else report an error and quit
	if( !cascade )
	{
		fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
		return;
	}

	// Allocate the memory storage used by cvHaarDetectObjects
	storage = cvCreateMemStorage(0);

	// Grab frames from the default camera (device 0)
	capture = cvCaptureFromCAM( 0 );
	if( !capture )
	{
		fprintf( stderr, "ERROR: Could not open camera 0\n" );
		return;
	}

	// Query one frame so we know the capture size before allocating buffers.
	// (The old code passed cvQueryFrame's result straight to cvGetSize and
	// crashed if the camera produced nothing.)
	IplImage* probe = cvQueryFrame( capture );
	if( !probe )
	{
		fprintf( stderr, "ERROR: Could not read a frame from the camera\n" );
		return;
	}
	CvSize sz = cvGetSize( probe );

	// Allocate the working frames with the same size as the camera frame
	frame_copy = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	end_result = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	old_frame  = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	hsv_image  = cvCreateImage( sz, 8, 3 );
	hsv_mask   = cvCreateImage( sz, 8, 1 );

	snail_result = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	snail_temp   = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	snail_temp2  = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	snail_old    = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
	snail_gray   = cvCreateImage( sz, 8, 1 );

	// Hand-tracking state
	handSpeed.x = 0;
	handSpeed.y = 0;
	mouseIsDown = false;
	for (int i=0; i<CLICKFRAMES; i++)
		clickSpeeds[i] = 0;
}

CDetector::~CDetector()
{
	// Release the working images. cvReleaseImage is a no-op on null
	// pointers, so this is safe even if the constructor bailed out early.
	// Note: `frame` is owned by the capture device and must NOT be released.
	cvReleaseImage( &frame_copy );
	cvReleaseImage( &old_frame );
	cvReleaseImage( &end_result );
	cvReleaseImage( &hsv_image );
	cvReleaseImage( &hsv_mask );
	cvReleaseImage( &snail_result );
	cvReleaseImage( &snail_temp );
	cvReleaseImage( &snail_temp2 );
	cvReleaseImage( &snail_old );   // was leaked before
	cvReleaseImage( &snail_gray );

	// Release the detector resources (previously leaked)
	if( storage )
		cvReleaseMemStorage( &storage );
	if( cascade )
		cvReleaseHaarClassifierCascade( &cascade );

	// And finally the capture device itself
	cvReleaseCapture( &capture );
}

void CDetector::createWindows()
{
	// Open the three display windows used by showCam() / showMask().
	const char* windowNames[] = { "cam", "mask", "snail" };
	for (int i = 0; i < 3; i++)
		cvNamedWindow( windowNames[i], 1 );
}

void CDetector::destroyWindows()
{
	// Close the windows opened by createWindows().
	const char* windowNames[] = { "cam", "mask", "snail" };
	for (int i = 0; i < 3; i++)
		cvDestroyWindow( windowNames[i] );
}

void CDetector::showMask()
{
	// Debug view: the binary skin/movement mask in "mask",
	// and the (masked) HSV working image in "snail".
	cvShowImage( "mask",  hsv_mask );
	cvShowImage( "snail", hsv_image );
}

void CDetector::showCam()
{
	// Display the annotated copy of the current camera frame.
	cvShowImage( "cam", frame_copy );
}


bool CDetector::grabNewFrame()
{
	// Pull the next frame from the capture device into `frame`.
	// Returns false when either grabbing or decoding fails.
	if( !cvGrabFrame( capture ) )
		return false;
	frame = cvRetrieveFrame( capture );
	return frame != 0;
}

void CDetector::fixFrame()
{
	// Normalize the camera frame's orientation in place.
	// A bottom-left origin needs a vertical flip; a top-left origin needs
	// nothing. (The old code did cvCopy(frame, frame, 0) in the top-left
	// branch — a self-copy no-op despite its comment — so dropping that
	// branch preserves behavior.)
	if( frame->origin != IPL_ORIGIN_TL )
		cvFlip( frame, frame, 0 );
	// Mirror horizontally so the view behaves like a mirror for the user.
	cvFlip( frame, frame, 1 );
}

void CDetector::copyFrame()
{
	// The frame is fully flipped/oriented at this point. Snapshot it into
	// frame_copy (used for drawing overlays) and derive the HSV version
	// used for skin-color masking.
	cvCopy( frame, frame_copy, 0 );
	cvCvtColor( frame, hsv_image, CV_BGR2HSV );
}

/*
void CDetector::invertImage( IplImage* img )
{

}
*/

void CDetector::produceMask( IplImage* img )
{
	// Build hsv_mask: a binary image of skin-colored pixels restricted to
	// regions that recently moved, with the detected face blanked out.
	// @param img  image to color-threshold (the caller passes the HSV image)

	// First produce the snail-trail image: the per-pixel absolute
	// difference between the current and previous frame captures movement.
	cvAbsDiff(frame,old_frame,snail_temp);

	// Remove single-pixel noise from the movement image
	cvErode(snail_temp,snail_temp,0,1);
	cvDilate(snail_temp,snail_temp,0,1);

	// Accumulate this frame's movement into the snail trail
	cvAdd(snail_temp,snail_result,snail_result);

	// Fade the whole snail trail a little every frame. This must be an
	// IN-PLACE scale: the old code wrote "fadeMat = fadeMat*0.97", which
	// rebinds fadeMat to a freshly allocated matrix and leaves
	// snail_result's pixels untouched — the fade never happened.
	cv::Mat fadeMat(snail_result);   // header sharing snail_result's data
	fadeMat *= 0.97;

	cvNot(snail_result,snail_temp);     // invert the snail trail
	cvSub(frame,snail_temp,snail_temp); // keep only the moving parts of FRAME

	// NOTE(review): frame is BGR, so CV_BGR2GRAY would be the matching
	// conversion; CV_RGB2GRAY only swaps the channel weights. Kept as-is
	// so the tuned threshold below keeps its calibration — confirm.
	cvCvtColor(snail_temp,snail_gray,CV_RGB2GRAY);
	cvThreshold(snail_gray,snail_gray,20,255,CV_THRESH_BINARY);

	// Erode away specks, then dilate generously to merge movement blobs
	cvErode(snail_gray,snail_gray,0,1);
	cvDilate(snail_gray,snail_gray,0,10);

	// Copy the moving pixels into hsv_image wherever the inverted
	// movement mask is set
	cvNot(snail_gray,snail_gray);
	cvCopy(snail_temp,hsv_image,snail_gray);

	// Calculate which colors (upper and lower bound) we should look for,
	// centered on the dominant face color found by extractFaceColor()
	int Hinterval = 25;
	int Sinterval = 25;
	int Vinterval = 25;
	int Hlow  = boundsCheck(avgH-Hinterval,255);
	int Hhigh = boundsCheck(avgH+Hinterval,255);
	int Slow  = boundsCheck(avgS-Sinterval,255);
	int Shigh = boundsCheck(avgS+Sinterval,255);
	int Vlow  = boundsCheck(avgV-Vinterval,255);
	int Vhigh = boundsCheck(avgV+Vinterval,255);
	CvScalar  lower;
	CvScalar  upper;

	if (HSVMODE)
	{
		// fixed, hand-tuned skin-color bounds
		lower = cvScalar(0, 60, 70, 0);
		upper = cvScalar(50, 160, 255, 0);
	}
	else
	{
		// bounds derived from the sampled face color
		lower = cvScalar(Hlow, Slow, Vlow, 0);
		upper = cvScalar(Hhigh, Shigh, Vhigh, 0);
	}

	// Produce the bounded image (binary mask) and clean it up
	cvInRangeS (img, lower, upper, hsv_mask);
	cvErode(hsv_mask,hsv_mask,0,1);
	cvDilate(hsv_mask,hsv_mask,0,3);

	// And mask out the face rectangle so the face isn't tracked as a hand
	cvRectangle(hsv_mask, cvPoint(faceRect.x*0.9,faceRect.y*0.9), cvPoint(faceRect.x+faceRect.width*1.1,faceRect.y+faceRect.height*1.1), cvScalar(0,0,0,0), CV_FILLED);
}

double CDetector::sumPixels(cv::Mat *matrix, cv::Rect *roi)
{
	// Sum the channel-0 pixel values inside the region of interest.
	// @param matrix  image to sample
	// @param roi     region of interest; assumed to lie inside the matrix
	// @return sum of channel-0 values over the region
	// (Removed an unused local `leftroi` from the old code.)
	cv::Mat cropped = (*matrix)(*roi);
	return cv::sum(cropped).val[0];
}

double CDetector::sumWhitePixels(cv::Mat *matrix, cv::Rect *roi)
{
	// Count the non-zero pixels inside the region of interest.
	// Assumes a single-channel 8-bit matrix (e.g. hsv_mask): rows are
	// addressed via cropped.step, columns one byte apart.
	// @param matrix  mask image to scan
	// @param roi     region of interest; assumed to lie inside the matrix
	// @return number of pixels with value != 0
	// (Behavior-identical rewrite: the old loop used `iy` as the column
	// index and `ix` as the row index, which read backwards; it also
	// carried an unused local `leftroi`.)
	cv::Mat cropped = (*matrix)(*roi);

	long numPixels = 0;
	for (int row = 0; row < roi->height; row++)
	{
		const uchar* line = cropped.data + cropped.step*row;
		for (int col = 0; col < roi->width; col++)
		{
			if (line[col] != 0)
				numPixels++;
		}
	}
	return numPixels;
}

// File-local helper: index of the most populated bin in a 256-bin histogram.
// Returns 0 when every bin is empty (matches the old inline loops, whose
// running record started at 0).
static int dominantBin(const long bins[256])
{
	int best = 0;
	long record = 0;
	for (int i = 0; i < 256; i++)
	{
		if (bins[i] > record)
		{
			record = bins[i];
			best = i;
		}
	}
	return best;
}

void CDetector::extractFaceColor(IplImage* src)
{
	// Sample the central 70% of faceRect in `src` (the caller passes the
	// HSV image) and store the most common hue/saturation/value in
	// avgH/avgS/avgV. produceMask() builds its color bounds around these.
	// @param src  image to sample; faceRect must lie inside it
	int x = (int)(faceRect.x+faceRect.width*0.15);
	int y = (int)(faceRect.y+faceRect.height*0.15);
	int sizex = (int)(faceRect.width*0.7);
	int sizey = (int)(faceRect.height*0.7);

	CvScalar s;

	// per-channel histograms, zero-initialized
	long hues[256] = {0};
	long sats[256] = {0};
	long vals[256] = {0};

	// for all pixels in the central face square
	for (int iy=y; iy < y+sizey; iy++)
		for (int ix=x; ix < x+sizex; ix++)
		{
			// extract the color at (ix,iy)
			s = cvGet2D(src,iy,ix);
			// disregard very low channel values (shadow / noise)
			if (s.val[0] < 20) continue;
			if (s.val[1] < 20) continue;
			if (s.val[2] < 20) continue;
			// add it to the per-channel counters
			hues[(int)s.val[0]]++;
			sats[(int)s.val[1]]++;
			vals[(int)s.val[2]]++;
		}

	// the dominant bin of each channel becomes the "average" face color
	// (the old code repeated the same mode-finding loop three times)
	avgH = dominantBin(hues);
	avgS = dominantBin(sats);
	avgV = dominantBin(vals);

	// draw debug rectangle of which face square we just worked on
//	cvRectangle( src, cvPoint(x,y), cvPoint(x+sizex,y+sizey), CV_RGB(255, 0, 0), 2, 8, 0 );
}

// Detect faces in the image and store the one closest to the image center
// in faceRect; updates mState to STATE_GOTFACE / STATE_NOFACE accordingly.
void CDetector::findFace( IplImage* img )
{
	// Nothing we can do if the cascade failed to load in the constructor
	if( !cascade )
		return;

	// Clear the memory storage which was used before
	cvClearMemStorage( storage );

	// Detect all faces (minimum 40x40) and store them in a sequence
	CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
	                                    1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
	                                    cvSize(40, 40) );

	int numFaces = faces ? faces->total : 0;
	printf("numFaces = %d\n", numFaces);

	// No face: fall back to a tiny dummy rectangle and flag the state
	if( numFaces == 0 )
	{
		faceRect.x = 0;
		faceRect.y = 0;
		faceRect.width = 10;
		faceRect.height = 10;
		mState = STATE_NOFACE;
		return;
	}

	// Pick the face whose center is closest to the image center.
	// (Removed the old unused `scale` local.)
	int recordID = 0;
	double record = 10000;   // farther than any point in the frame
	int cenx = img->width/2;
	int ceny = img->height/2;
	for( int i = 0; i < numFaces; i++ )
	{
		CvRect* cand = (CvRect*)cvGetSeqElem( faces, i );
		int fx = cand->x + cand->width/2;
		int fy = cand->y + cand->height/2;
		int diffx = abs(cenx-fx);
		int diffy = abs(ceny-fy);
		double dist = sqrt((double)(diffx*diffx + diffy*diffy));
		if (dist < record)
		{
			record = dist;
			recordID = i;
		}
	}

	// we found a face!! woohoo!
	mState = STATE_GOTFACE;

	// set faceRect to the winning face
	CvRect* r = (CvRect*)cvGetSeqElem( faces, recordID );
	faceRect.x = r->x;
	faceRect.y = r->y;
	faceRect.width = r->width;
	faceRect.height = r->height;
}

void CDetector::drawFaceFrame( IplImage* img )
{
	// Outline the current faceRect in red on the given image.
	CvPoint topLeft = cvPoint( faceRect.x, faceRect.y );
	CvPoint bottomRight = cvPoint( faceRect.x + faceRect.width,
	                               faceRect.y + faceRect.height );
	cvRectangle( img, topLeft, bottomRight, CV_RGB(255,0,0), 3, 8, 0 );
}

void CDetector::searchForHand( IplImage* img )
{
	// Look for a hand inside a search box placed to the right of the face.
	// If enough of the mask pixels in that box are white, seed handRect
	// from the box, shrink-fit it, and switch to STATE_MAYBEHAND.
	// @param img  binary mask image (hsv_mask) to scan

	// Search box: offset to the right of the face, roughly hand-sized
	CvRect r;
	r.x = (int)(faceRect.x + faceRect.width*1.5);
	r.y = (int)(faceRect.y + faceRect.height*0.25);
	r.width = (int)(faceRect.width*2.4);
	r.height = (int)(faceRect.height*1.5);

	// Draw the search box on the display frame for debugging
	cvRectangle( frame_copy, cvPoint(r.x,r.y), cvPoint(r.x+r.width,r.y+r.height), CV_RGB(255,0,0), 2, 8, 0 );

	// Count the white pixels inside the (clipped) search box
	CvScalar s;
	long numPixels = 0;
	long totalArea = r.height*r.width;
	for (int iy=r.y; iy < r.y+r.height; iy++)
		for (int ix=r.x; ix < r.x+r.width; ix++)
		{
			// skip anything outside the image bounds
			if (ix >= img->width) continue;
			if (iy >= img->height) continue;
			if (ix < 0) continue;
			if (iy < 0) continue;
			s = cvGet2D(img,iy,ix); // pixel value at (ix,iy)
			if (s.val[0] != 0)
				numPixels++;
		}

	double ratio = (double)numPixels / (double)totalArea;
	cout << "white hand ratio = " << ratio << endl;

	// Enough white pixels -> treat this as a hand candidate
	if (ratio > 0.04)
	{
		mState = STATE_MAYBEHAND;
		frameCounter = 0;
		handSpeed.x = 0;
		handSpeed.y = 0;
		handRect.x = r.x;
		handRect.y = r.y;
		handRect.width = r.width;
		handRect.height = r.height;

		// Run several refinement passes in a row so the rectangle can
		// shrink down to the actual hand size (was 9 copy-pasted calls;
		// the old comment incorrectly said 5)
		for (int pass = 0; pass < 9; pass++)
			updateHandRect(img);

		// now store the size as the natural hand size
		normalHandSize.width = handRect.width;
		normalHandSize.height = handRect.height;

		// if joystick mode, start the cursor at the screen center
		if (JOYSTICK)
		{
			new_mouseX = 0.5;
			new_mouseY = 0.5;
		}
	}
}

void CDetector::moveMouse()
{
	// Translate the tracked hand position into Windows cursor movement.
	// JOYSTICK mode nudges the cursor while the hand sits outside a
	// central dead zone; absolute mode maps the hand position (smoothed)
	// directly to screen coordinates.
	if (JOYSTICK)
	{
		// hand position relative to the face, scaled to roughly [0,1]
		double mx = handRect.x - faceRect.x - faceRect.width*1.5;
		double my = handRect.y - faceRect.y + faceRect.height*1.0;
		mx = mx / 160.0;
		my = my / 200.0;

		// Joystick movement: only nudge when outside the 0.4..0.6 dead zone
		if ( (mx < 0.4) || (mx > 0.6) || (my < 0.4) || (my > 0.6))
		{
			new_mouseX += (mx-0.5)*0.1;
			new_mouseY += (my-0.5)*0.1;
		}

		// NOTE(review): MOUSEEVENTF_ABSOLUTE nominally spans 0..65535;
		// 64000 leaves a small margin at the bottom/right edge — confirm
		// this is intentional.
		mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE,(DWORD)(new_mouseX*64000),(DWORD)(new_mouseY*64000),0,0);
	}
	else
	{
		// calc new cursor position from the bottom of the hand rect
		new_mouseX = handRect.x - faceRect.x - faceRect.width*1.5;
		new_mouseY = handRect.y + handRect.height - faceRect.y + faceRect.height*0.0;
		new_mouseX = new_mouseX / 120.0;
		new_mouseY = new_mouseY / 120.0;

		// smooth: move 50% of the way towards the old position
		new_mouseX = (new_mouseX+old_mouseX)/2;
		new_mouseY = (new_mouseY+old_mouseY)/2;

		// Only emit a movement event when the jump is large enough.
		// fabs (not abs): the plain abs may bind to the int overload and
		// truncate these sub-1.0 distances to zero.
		double diffx = fabs(new_mouseX-old_mouseX);
		double diffy = fabs(new_mouseY-old_mouseY);
		double dist = sqrt(diffx*diffx + diffy*diffy);
		if (dist > 0.02)
		{
			mouse_event(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE,(DWORD)(new_mouseX*64000),(DWORD)(new_mouseY*64000),0,0);
			old_mouseX = new_mouseX;
			old_mouseY = new_mouseY;
		}
	}
}

void CDetector::checkForClick(cv::Rect *old)
{
	// Click detection is currently DISABLED: both strategies below are
	// commented out, so this function is a no-op and `old` is unused.
	// Strategy 1 tracked the hand-height change ratio over the last
	// CLICKFRAMES frames; strategy 2 compared the current hand height
	// against normalHandSize. Both mapped a shrinking hand (clench) to
	// mouse-down and a re-grown hand to mouse-up via mouse_event().
	/*
	// calc and store the new change
	double nowChange = (double)handRect.height / (double)old->height;
	for (int i=0; i<CLICKFRAMES-1; i++)
		clickSpeeds[i] = clickSpeeds[i+1];
	clickSpeeds[CLICKFRAMES-1] = nowChange;

	// calc the sum of what have happened during the last frames
	double sum = 0;
	for (int i=0; i<CLICKFRAMES; i++)
		sum += clickSpeeds[i];

	// if the sum is beyond the threshold then trigger mouseclick
	if (sum > (double)CLICKFRAMES*1.2)
	{
		if (mouseIsDown)
		{
			mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0);
			mouseIsDown = false;
		}
		cout << "CLICK RELEASE!" << endl;
	}
	if (sum < (double)CLICKFRAMES*0.85)
	{
		if (!mouseIsDown)
		{
			mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0);
			mouseIsDown = true;
		}
		cout << "CLICK !" << endl;
	}
	*/

	/*
	if (handRect.height > (double)normalHandSize.height*0.95)
	{
		if (mouseIsDown)
		{
			mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0);
			mouseIsDown = false;
			cout << "CLICK RELEASE!" << endl;
		}
	}
	if (handRect.height < (double)normalHandSize.height*0.8)
	{
		if (!mouseIsDown)
		{
			mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0);
			mouseIsDown = true;
			cout << "CLICK !" << endl;
		}
	}
	*/

}

void CDetector::searchForWave( IplImage* img )
{
	// Track the hand from the previous frame: predict its new position
	// with the stored speed, refine the rectangle against the mask, then
	// update the speed estimate.
	// @param img  binary mask image (hsv_mask) passed on to updateHandRect
	cv::Rect oldHand(handRect);

	// add speed of movement from the old frame (predicted position,
	// clamped to the 640x480 frame)
	handRect.x = boundsCheck(handRect.x + handSpeed.x, 640);
	handRect.y = boundsCheck(handRect.y + handSpeed.y, 480);

	// Run several grow/shrink refinement passes so the rectangle can
	// converge on the hand (was 5 copy-pasted calls; the old comment
	// incorrectly said 3)
	for (int pass = 0; pass < 5; pass++)
		updateHandRect(img);

	// Update the hand speed from the center displacement, clamped to
	// +/-20 pixels per frame
	handSpeed.x = (handRect.x + handRect.width/2) - (oldHand.x + oldHand.width/2);
	handSpeed.y = (handRect.y + handRect.height/2) - (oldHand.y + oldHand.height/2);
	if (handSpeed.x > 20) handSpeed.x = 20;
	if (handSpeed.y > 20) handSpeed.y = 20;
	if (handSpeed.x < -20) handSpeed.x = -20;
	if (handSpeed.y < -20) handSpeed.y = -20;

	// evaluate the last frames for a click gesture (currently a no-op,
	// see checkForClick)
	checkForClick(&oldHand);
}

// Grow/shrink handRect so it hugs the white blob in hsv_mask.
// One call makes at most one pixelSteps-sized adjustment per edge:
//  - a strip just OUTSIDE an edge that is mostly white (> limitRatio)
//    grows the rect outward on that side;
//  - a strip just INSIDE an edge that is mostly black (< limitRatio)
//    shrinks the rect on that side.
// Coordinates are clamped to a 640x480 frame and side lengths to 350 px.
// If the rect becomes implausibly small, or much larger than the face,
// tracking is dropped by setting mState = STATE_NOFACE.
// Note: `img` itself is never read here — the mask comes from hsv_mask.
void CDetector::updateHandRect( IplImage* img )
{
/*
	frameCounter++;
	if (frameCounter >= 60)	
	{
		mState = STATE_NOFACE;
	}
*/

	cv::Rect r;               // scratch: the edge strip currently tested
	int pixelSteps = 3;       // strip thickness == grow/shrink step (px)
	double limitRatio = 0.4;  // white-pixel ratio deciding grow vs shrink
	cv::Mat temp;
	temp = hsv_mask;          // header copy only: shares hsv_mask's pixels
	double theSum = 0;        // NOTE(review): never used below

	// if rect is unreasonably small, then kill it
	if (handRect.height < pixelSteps*2) 
		mState = STATE_NOFACE;
	if (handRect.width < pixelSteps*2) 
		mState = STATE_NOFACE;

	// if rect is unreasonably large (vs. the face), then kill it
	if (handRect.height > faceRect.height*3) 
		mState = STATE_NOFACE;
	if (handRect.width > faceRect.width*3)
		mState = STATE_NOFACE;

	// if handRect is within faceRect, then kill it
//	if (handRect.x < faceRect.x + faceRect.width)
//		mState = STATE_NOFACE;


	//Top area
	//Add: strip just above the rect; mostly white -> grow upward
	r.x = handRect.x;
	r.y = handRect.y - pixelSteps;
	r.width = handRect.width;
	r.height = pixelSteps;
	boundsCheck(&r,640-r.width,480-r.height);  // clamp strip into the frame
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) > limitRatio)
	{
		handRect.height = boundsCheck(handRect.height + pixelSteps,350);
		handRect.y = boundsCheck(handRect.y - pixelSteps, 480-handRect.height);
	}
	//Bottom area
	//Add: strip just below the rect; mostly white -> grow downward
	r.x = handRect.x;
	r.y = handRect.y + handRect.height;
	r.width = handRect.width;
	r.height = pixelSteps;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) > limitRatio)
	{
		handRect.height = boundsCheck(handRect.height + pixelSteps, 350);
	}
	//Right area
	//Add: strip just right of the rect; mostly white -> grow rightward
	r.x = handRect.x + handRect.width;
	r.y = handRect.y;
	r.width = pixelSteps;
	r.height = handRect.height;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) > limitRatio)
	{
		handRect.width = boundsCheck(handRect.width + pixelSteps, 350);
		handRect.x = boundsCheck(handRect.x, 640-handRect.width);
	}
	//Left area
	//Add: strip just left of the rect; mostly white -> grow leftward
	r.x = handRect.x - pixelSteps;
	r.y = handRect.y;
	r.width = pixelSteps;
	r.height = handRect.height;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) > limitRatio)
	{
		handRect.width = boundsCheck(handRect.width + pixelSteps, 350);
		handRect.x = boundsCheck(handRect.x - pixelSteps, 640-handRect.width);
	}
	//Top area
	//Subtract: strip just inside the top edge; mostly black -> shrink from the top
	r.x = handRect.x;
	r.y = handRect.y;
	r.width = handRect.width;
	r.height = pixelSteps;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) < limitRatio)
	{
		handRect.height = boundsCheck(handRect.height - pixelSteps, 350);
		handRect.y = boundsCheck(handRect.y + pixelSteps, 480-handRect.height);
	}
	//Bottom area
	//Subtract: strip just inside the bottom edge; mostly black -> shrink from the bottom
	r.x = handRect.x;
	r.y = handRect.y + handRect.height - pixelSteps;
	r.width = handRect.width;
	r.height = pixelSteps;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) < limitRatio)
	{
		handRect.height = boundsCheck(handRect.height - pixelSteps, 350);
	}
	//Right area
	//Subtract: strip just inside the right edge; mostly black -> shrink from the right
	r.x = handRect.x + handRect.width - pixelSteps;
	r.y = handRect.y;
	r.width = pixelSteps;
	r.height = handRect.height;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) < limitRatio)
	{
		handRect.width = boundsCheck(handRect.width - pixelSteps, 350);
	}
	//Left area
	//Subtract: strip just inside the left edge; mostly black -> shrink from the left
	r.x = handRect.x;
	r.y = handRect.y;
	r.width = pixelSteps;
	r.height = handRect.height;
	boundsCheck(&r,640-r.width,480-r.height);
	if((sumWhitePixels(&temp, &r)/((double)r.width*r.height) ) < limitRatio)
	{
		handRect.width = boundsCheck(handRect.width - pixelSteps, 350);
		handRect.x = boundsCheck(handRect.x + pixelSteps, 640-handRect.width);
	}



	// cout << "wavesearch" << endl;


}

int CDetector::boundsCheck(int val, int max)
{
	// Clamp val into the range [0, max] (lower bound wins if both apply).
	if (val < 0)
		return 0;
	return (val > max) ? max : val;
}

void CDetector::boundsCheck(cv::Rect *re, int maxx, int maxy)
{
	// Clamp the rectangle's top-left corner into [0,maxx] x [0,maxy].
	// Width and height are left untouched.
	const int clampedX = boundsCheck(re->x, maxx);
	const int clampedY = boundsCheck(re->y, maxy);
	re->x = clampedX;
	re->y = clampedY;
}