// OpenCVTesting.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"

// Include header files
#include <opencv\cv.h>
#include <opencv\highgui.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <time.h>
#include <ctype.h>


using namespace std; 

// Create memory for calculations
// (growable OpenCV storages backing the Haar detection result sequences;
// both are cleared at the start of every detect_and_draw call)
static CvMemStorage* storage = 0;
static CvMemStorage* storage2 = 0;

// Create a new Haar classifier
// cascade: primary detector, loaded from cascade_name (upper body by default)
static CvHaarClassifierCascade* cascade = 0;

// cascade2: secondary detector, loaded from cascade_name_2 (frontal face)
static CvHaarClassifierCascade* cascade2 = 0;


// Function prototype for detecting and drawing an object from an image
void detect_and_draw( IplImage* image );

// Create a string that contains the cascade name
// (may be overridden at runtime via the --cascade=<path> command-line option)
const char* cascade_name =
	"haarcascade_upperbody.xml"; 
  //  "haarcascade_frontalface_alt.xml"; 
 //   "haarcascade_profileface.xml";

const char* cascade_name_2 = "haarcascade_frontalface_alt.xml";




// ========================= motion detection =================================
// Globals shared between main() (which allocates them once per run) and
// DetectMotion() (which uses them every frame).

//IplImage * currentImage;
IplImage * diffGray;        // 8U 1ch: thresholded |frame - background| motion mask
IplImage * difference;      // 8U 3ch: raw absolute difference frame vs. background
IplImage * movingAverage;   // 32F 3ch: running-average background model

IplImage * activityHeatmap; // 32F 1ch: long-term running average of motion rectangles

IplImage * temp;            // display image: averaged background with motion boxes ("motion" window)
CvPoint2D32f* cornersA;     // NOTE(review): appears unused in this file — confirm before removing
CvPoint2D32f* cornersB;     // NOTE(review): appears unused in this file — confirm before removing
bool first = true ;         // true until the background model has been seeded with the first frame
int * cBoundPoints;         // NOTE(review): appears unused in this file — confirm before removing
int numContours;            // set to 0 when the last frame had no motion contours

// ============================================================================


// ================================= face detection ==========================

const int AVG_FRAMES=40;    // length of the face-history ring buffer used for temporal averaging
const int FACE_MIN_SIZE=30;		// minimum face size 
const int RADIUS = 60;      // max corner distance (px) to count two rects as the same face across frames
int avg_ctr = 0;            // current write slot in the faces_hist ring buffer
int frame_count = 0;        // total frames processed so far
// Per-frame detections, indexed by avg_ctr.  Sized AVG_FRAMES+1 although
// only slots [0, AVG_FRAMES) are ever indexed.
std::vector<CvRect> faces_hist[AVG_FRAMES+1]; 
std::vector<CvRect> last_faces;  // faces tracked/averaged on the previous frame

// ===========================================================================



// Detect motion in the current frame against a running-average background
// model, draw bounding boxes around moving regions, and fold this frame's
// activity into the long-term heatmap.
//
// currentImage - 8U 3-channel camera frame (not modified)
//
// Relies on globals allocated in main() before the first call:
// movingAverage, activityHeatmap, diffGray, difference.  On the very first
// call it only seeds the background model (and the global `temp` display
// image) and returns.
void DetectMotion(IplImage * currentImage)
{
	if (first)
	{
		// Seed the background model and the display image with frame #1.
		temp = cvCloneImage(currentImage);
		cvConvertScale(currentImage, movingAverage, 1.0, 0.0);
		first = false;
		return;
	}

	CvSize imgSize = cvSize(currentImage->width, currentImage->height);

	// Binary mask holding this frame's motion rectangles (value 1 inside),
	// averaged into activityHeatmap below.
	IplImage * paintActivity = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
	cvSet(paintActivity, cvScalar(0,0,0));

	// Update the background model and compute |frame - background|.
	cvRunningAvg(currentImage, movingAverage, 0.02, 0);
	cvConvertScale(movingAverage, temp, 1.0, 0.0);
	cvAbsDiff(currentImage, temp, difference);
	cvCvtColor(difference, diffGray, CV_RGB2GRAY);

	// Denoise, threshold, then dilate/erode so each mover forms one blob.
	cvSmooth(diffGray, diffGray, CV_GAUSSIAN, 3, 3);
	cvThreshold(diffGray, diffGray, 70, 255, CV_THRESH_BINARY);
	cvDilate(diffGray, diffGray, 0, 18);
	cvErode(diffGray, diffGray, 0, 10);

	// cvFindContours modifies its input image, so work on a copy.
	IplImage * grayCtr = cvCloneImage(diffGray);

	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSeq * contour = 0;

	cvFindContours( grayCtr, storage, &contour, sizeof(CvContour),
	                CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

	if (contour == 0)
	{
		// No motion this frame.
		numContours = 0;
	}
	else
	{
#ifdef DEBUG
		printf("Number of boundingRectPoints: %d \n", contour->total * 4);
#endif
		for ( ; contour != 0; contour = contour->h_next )
		{
			// Bounding rectangle around this moving blob.
			CvRect bndRect = cvBoundingRect(contour, 0);

			CvPoint pt1 = cvPoint(bndRect.x, bndRect.y);
			CvPoint pt2 = cvPoint(bndRect.x + bndRect.width,
			                      bndRect.y + bndRect.height);

			// Outline on the display image; filled box into the activity mask.
			cvRectangle( temp, pt1, pt2, CV_RGB(0,255,255), 3, 8, 0 );
			cvRectangle( paintActivity, pt1, pt2, CV_RGB(1,1,1), CV_FILLED, 8, 0 );

#ifdef DEBUG
			printf("x1 %d y1 %d x2 %d y2 %d\n", pt1.x, pt1.y, pt2.x, pt2.y);
#endif
		}
	}

	// Fold this frame's activity mask into the long-term heatmap.
	cvRunningAvg(paintActivity, activityHeatmap, 0.02, 0);

	cvShowImage("motion", temp);
	//cvShowImage("motionHistory", activityHeatmap); 

	// Release per-frame resources.
	// BUG FIX: paintActivity was previously never released (leaked once per
	// frame), and the contour storage was freed with cvFree(), which does
	// not release the storage's internal memory blocks — use
	// cvReleaseMemStorage() instead.
	cvReleaseImage(&paintActivity);
	cvReleaseImage(&grayCtr);
	cvReleaseMemStorage(&storage);
}









int  main( int argc, char** argv )

{
	printf ("===================== OpenCV Cascade Tester ===============================\n");
	// Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame =0 , *frame_copy =0 , *frame_copy2 = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }
    else
    {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n \n using default!\n" );
        //return -1;
        /*input_name = argc > 1 ? argv[1] : 0;*/
		input_name = cascade_name; 
    }

	printf("Loading Cascade: %s\n", input_name);
    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( input_name, 0, 0, 0 );
	cascade2 = (CvHaarClassifierCascade*)cvLoad( cascade_name_2, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report and error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
    
    // Allocate the memory storage
    storage = cvCreateMemStorage(0);
	storage2 = cvCreateMemStorage(0);
     
    // Find whether to detect the object from file or from camera.
    //if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
    //    capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    //else
    //    capture = cvCaptureFromAVI( input_name ); 

	capture = cvCaptureFromCAM(0); 

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );
	cvNamedWindow("motion", 1); 
	cvNamedWindow("motionHistory", 1); 

    // Find if the capture is loaded successfully or not.

    // If loaded succesfully, then:
    if( capture )
    {
        // Capture from the camera.
        for(;;)
        {
			
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;
            
            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );
			
			  if( !frame_copy2 )
                frame_copy2 = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

			int width = frame->width;
			int height = frame->height;

			CvSize imgSize =  cvSize(width,height);

			if (movingAverage == NULL )
			{
				movingAverage = cvCreateImage(imgSize, IPL_DEPTH_32F, 3);
				activityHeatmap = cvCreateImage(imgSize, IPL_DEPTH_32F, 1);
				cvSet(activityHeatmap, cvScalar(0,0,0));
				diffGray = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
				difference = cvCreateImage(imgSize, 8, 3);
			}

            // Check the origin of image. If top left, copy the image frame to frame_copy. 
            if( frame->origin == IPL_ORIGIN_TL )
			{
                cvCopy( frame, frame_copy, 0 );
				cvCopy( frame, frame_copy2, 0 );

			}
            // Else flip and copy the image
            else
			{
                cvFlip( frame, frame_copy, 0 );
				cvCopy( frame, frame_copy, 0 );
				cvCopy( frame, frame_copy2, 0 );
			}
            
            // Call the function to detect and draw the face
           

			// motion detect
			DetectMotion(frame_copy2);

			detect_and_draw( frame_copy );


            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 10 ) >= 0 )
                break;

			frame_count++; 
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
		cvReleaseImage( &frame_copy2 );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded succesfully, then:
    else
    {
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );

        // If Image is loaded succesfully, then:
        if( image )
        {
            // Detect and draw the face
            detect_and_draw( image );

            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded succesfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
						//	detect_and_draw( image );
                        
                        // Wait for the user input, and release the memory
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }
    
    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");
	
	cvWaitKey(0);
    // return 0 to indicate successfull execution of the program
    return 0;
}

bool Decide_Presence(std::vector<CvRect> faces, IplImage * activity)
{
	printf("======== Average Activity of detected Face Region ========\n");
	
	bool presence = false; 

	for (int i = 0; i < faces.size(); i++)
	{

		CvRect face = faces[i]; 

		if (face.width >= FACE_MIN_SIZE && face.height >= FACE_MIN_SIZE)
		{
			cvSetImageROI(activity, face); 
			CvScalar average = cvAvg(activity); 
			printf("Region\t%d\tAverage\t%f\n", i, average.val[0]); 

			if (average.val[0] > 0.05) 
			{
				presence = true; 
			}
		}

	}

	cvResetImageROI(activity);

	return presence; 

}

// Function to detect and draw any faces that is present in an image.
//
// Runs both Haar cascades on img, records this frame's face detections in
// the faces_hist ring buffer, tracks each previously seen face back through
// the history, draws the temporally averaged face boxes, and combines the
// result with the motion heatmap to decide (and display) presence.
//
// Uses the globals: cascade/cascade2, storage/storage2, faces_hist,
// last_faces, avg_ctr, frame_count, activityHeatmap.
void detect_and_draw( IplImage* img )
{
    int scale = 1;

    // Create two points to represent the face locations
    CvPoint pt1, pt2;
    int i;

    // Clear the memory storage which was used before
    cvClearMemStorage( storage );
    cvClearMemStorage( storage2 );

    // Heatmap visualisation (motion heatmap replicated to 3 channels, with
    // face boxes and the presence banner drawn on top).
    // BUG FIX: activityHeatmap is NULL on the still-image fallback path
    // (DetectMotion never ran), and cvMerge would crash — guard it.
    // Also removed a local `temp` image that was allocated and released but
    // never used (and shadowed the global `temp`).
    IplImage * motionFaceComb = 0;
    if (activityHeatmap)
    {
        motionFaceComb = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_32F, 3);
        cvMerge(activityHeatmap, activityHeatmap, activityHeatmap, NULL, motionFaceComb);
    }

    // Find whether the cascades are loaded, to find the faces. If yes, then:
    if( cascade && cascade2)
    {
        // Upper-body detections (only drawn when DETECT_UPPER_BODY is defined).
        CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );

        // Frontal-face detections — these feed the temporal tracking below.
        CvSeq* faces2 = cvHaarDetectObjects( img, cascade2, storage2,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );

        // Record this frame's detections in the current ring-buffer slot.
        faces_hist[avg_ctr].clear();
        for( i = 0; i < (faces2 ? faces2->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces2, i );
            faces_hist[avg_ctr].push_back(*r);
        }

        printf("Detected %d faces this frame \n", (int)faces_hist[avg_ctr].size());

        // Once the ring buffer has filled, track each known face back
        // through the history and draw its time-averaged rectangle.
        if (frame_count > AVG_FRAMES) 
        {
            // Bootstrap tracking from the current frame when no faces were
            // carried over from the previous call.
            if (last_faces.size() == 0)
            {
                for (size_t n = 0; n < faces_hist[avg_ctr].size(); n++)
                {
                    last_faces.push_back(faces_hist[avg_ctr][n]);
                }
            }

            int last_faces_size = (int)last_faces.size();

            // Work on a copy; last_faces is rebuilt with the tracked boxes.
            std::vector<CvRect> trackedFaces(last_faces); 
            last_faces.clear();

            for (int f = 0; f < last_faces_size; f++)
            {
                CvRect r = trackedFaces[f]; 

                // All history rectangles matched to this face (seed with
                // the face itself).
                std::vector<CvRect> correspondences;
                correspondences.push_back(r); 

                // Top-left / bottom-right corners of the tracked face.
                CvPoint c1, c2; 
                c1.x = r.x;
                c1.y = r.y; 
                c2.x = r.x + r.width;
                c2.y = r.y + r.height; 

                bool foundCorresp = false; 

                // Walk backwards through the frame history.
                for (int j = 1; j < AVG_FRAMES; j++)
                {
                    foundCorresp = false; 

                    // BUG FIX: the slot was computed as
                    // abs((avg_ctr-j) % AVG_FRAMES), which maps negative
                    // offsets to the wrong slot (e.g. avg_ctr=2, j=5 gave
                    // slot 3 instead of 37).  Wrap properly instead;
                    // j < AVG_FRAMES guarantees the sum is non-negative.
                    int prev_index = (avg_ctr - j + AVG_FRAMES) % AVG_FRAMES;
#ifdef DEBUG
                    printf ("[j] %d prev_index %d", j, prev_index); 
#endif
                    for (size_t k = 0; k < faces_hist[prev_index].size(); k++)
                    {
#ifdef DEBUG
                        printf ("\t[k] %d\n", (int)k); 
#endif
                        CvRect cr = faces_hist[prev_index][k]; 
                        CvPoint corr1, corr2;
                        corr1.x = cr.x;
                        corr1.y = cr.y; 
                        corr2.x = cr.x + cr.width; 
                        corr2.y = cr.y + cr.height;

                        // Distance between corresponding corners.
                        float d1 = sqrtf((float)((c1.x-corr1.x)*(c1.x-corr1.x) + (c1.y-corr1.y)*(c1.y-corr1.y)));
                        float d2 = sqrtf((float)((c2.x-corr2.x)*(c2.x-corr2.x) + (c2.y-corr2.y)*(c2.y-corr2.y)));

                        // Both corners within RADIUS → same face in that
                        // frame.  (The original recomputed both sqrtfs
                        // here instead of reusing d1/d2.)
                        if (d1 <= RADIUS && d2 <= RADIUS)
                        {
                            foundCorresp = true; 
                            correspondences.push_back(cr); 
                        }
                    }

                    // BUG FIX: the original wrote `if (foundCorresp = false)`
                    // — an assignment, not a comparison — so tracking never
                    // stopped at the first frame without a match.
                    if (!foundCorresp)
                        break; 
                }

                // Average the matched rectangles across frames and draw.
                if (correspondences.size() > 1)
                {
                    CvPoint avg1, avg2;
                    avg1.x = avg1.y = avg2.x = avg2.y = 0; 
                    int size = (int)correspondences.size(); 
                    for (int n = 0; n < size; n++)
                    {
                        CvRect cr = correspondences[n]; 
                        avg1.x += cr.x;
                        avg1.y += cr.y; 
                        avg2.x += cr.x + cr.width;
                        avg2.y += cr.y + cr.height; 
                    }

                    avg1.x /= size; 
                    avg1.y /= size;
                    avg2.x /= size;
                    avg2.y /= size; 

                    // Draw the averaged rectangle in the input image and heatmap.
                    cvRectangle( img, avg1, avg2, CV_RGB(0,0,255), 3, 8, 0 );
                    if (motionFaceComb)
                        cvRectangle( motionFaceComb, avg1, avg2, CV_RGB(0,0,255), 3, 8, 0 );

                    // Carry the averaged face forward for the next frame.
                    CvRect face;
                    face.x = avg1.x;
                    face.y = avg1.y;
                    face.width = avg2.x - avg1.x;
                    face.height = avg2.y - avg1.y;
                    last_faces.push_back(face);
                }
            }
        }

#ifdef DETECT_UPPER_BODY
        if (faces2->total > 0)
        {
        }
        else
        {
            // Fall back to upper-body detections when no face was found.
            for( i = 0; i < (faces ? faces->total : 0); i++ )
            {
                CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

                // Find the dimensions of the body, and scale it if necessary
                pt1.x = r->x*scale;
                pt2.x = (r->x+r->width)*scale;
                pt1.y = r->y*scale;
                pt2.y = (r->y+r->height)*scale;

                // Draw the rectangle in the input image
                cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
            }
        }	
#endif	
        // Advance the ring-buffer index.
        // BUG FIX: `avg_ctr = ++avg_ctr % AVG_FRAMES` modifies avg_ctr
        // twice without a sequence point (undefined behaviour pre-C++11).
        avg_ctr = (avg_ctr + 1) % AVG_FRAMES;
    }

    // Decide presence from motion activity inside the tracked face regions
    // (no heatmap → no motion information → no presence).
    bool presence = activityHeatmap ? Decide_Presence(last_faces, activityHeatmap) : false;

    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 2, CV_AA);
    if (motionFaceComb)
    {
        if (presence)
        {
            cvPutText(motionFaceComb, "PRESENCE", cvPoint(380, 450), &font, cvScalar(255, 0, 0, 0));
        }
        else
        {
            cvPutText(motionFaceComb, "NO PRESENCE", cvPoint(380, 450), &font, cvScalar(0, 255, 0, 0));
        }
        cvShowImage("motionHistory", motionFaceComb);
    }

    // Drop the tracked faces when presence was not confirmed (matches the
    // original's behaviour in the no-presence branch).
    if (!presence)
        last_faces.clear();

    cvShowImage( "result", img );

    if (motionFaceComb)
        cvReleaseImage(&motionFaceComb);
}




