/**
 * Author: Daniel Becker
 * Date:   August 6, 2013
 *
 * This OpenCV based program connects to a MJPEG camera stream and performs
 * motion detection. Motion detections are used as bounding box for
 * pedestrian detection based on Haar features.
 *
 * ToDo: Perform a tracking of detected pedestrians using cvblob
 * ToDo: Identify tracked pedestrians utilizing smart phone sensor modalities
 * 		 (WiFi positioning and PDR)
 * ToDo: Completely switch to C++ interface (don't use mix of C and C++)
 */

//#if 0

#include <iostream>
#include <iomanip>

#include <sstream>
#include <string>

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <iostream>
#include <list>
#include <vector>

#include <stdio.h>
#include <unistd.h>
#include <curl/curl.h>

using namespace std;
using namespace cv;

#if (defined(_WIN32) || defined(__WIN32__) || defined(__TOS_WIN__) || defined(__WINDOWS__) || (defined(__APPLE__) & defined(__MACH__)))
#include <cv.h>
#include <highgui.h>
#else
#include <opencv/cv.h>
#include <opencv/highgui.h>
#endif

#include <cvblob.h>
using namespace cvb;



/**
 * Global variables
 */
// Haar cascade for lower-body (feet/legs) detection; loaded in main()
String g_face_cascade_name = "haar/haarcascade_lowerbody.xml";
CascadeClassifier g_haar_cascade;
RNG rng(12345);
// 10x10 placeholders; overwritten with the latest feet crop / grabcut
// visualization by extractFeetPos() and displayed elsewhere
Mat g_lastDetectedFeet(10, 10, CV_8UC3);
Mat g_lastDetectedFeet2(10, 10, CV_8UC3);

// Global Images (FIXME Better move these into main, but there's some problems with visibility as function parameters)
// Shared scratch images for motionDetection(); allocated/cloned on first use
IplImage* g_greyImage;
IplImage* g_colourImage;
IplImage* g_movingAverage;
IplImage* g_difference;
IplImage* g_temp;

// Map draw offset (in pixel)
int g_offsX = 30;
int g_offsY = 15;
int g_scalingFact = 20;   // pixels per meter when drawing the floor map

// configuration variables
bool posServerActive = false;
int g_binaryThres = 30;  // Change for different sensitivity of movement detection
const char g_posServerURL[] = "http://143.248.55.81:8080/VisionLocalization/CoordinateGetter";
//const char g_posServerURL[] = "http://192.168.0.171:8080/PositioningServer/QueryPosition";
//const char g_posServerURL[] = "http://143.248.56.151/db/daniel.php?id=1&x1=2&y1W=3";
const char g_camURL [] = "http://192.168.0.17:8080/video";


/**
 * MHI variables (state for update_mhi(); see that function)
 */
// various tracking parameters (in seconds)
const double MHI_DURATION = 1;
const double MAX_TIME_DELTA = 0.5;
const double MIN_TIME_DELTA = 0.05;
// number of cyclic frame buffer used for motion detection
// (should, probably, depend on FPS)
const int N = 4;

// ring image buffer
IplImage **buf = 0;
int last = 0;   // index of the most recent frame in buf

// temporary images
IplImage *mhi = 0; // MHI
IplImage *orient = 0; // orientation
IplImage *mask = 0; // valid orientation mask
IplImage *segmask = 0; // motion segmentation map
CvMemStorage* storage = 0; // temporary storage

// Near/far field threshold image height
// (fraction of the image height splitting near field from far field)
const double g_nearFarFieldThres = 0.55;

/**
 * FUNCTIONS
 */

/**
 * Motion detection using MHI (Motion History Image)
 *
 * Keeps a ring buffer of the last N grayscale frames (global 'buf') and a
 * floating-point motion history image (global 'mhi'); both are (re)allocated
 * lazily whenever the frame size changes. The thresholded difference of the
 * two most recent frames (the "silhouette") updates the MHI, which is then
 * segmented into independent motion components.
 *
 * parameters:
 * img - input video frame
 * dst - resultant motion picture (blue channel visualizes the scaled MHI)
 * diff_threshold - binarization threshold for the frame difference
 *
 * returns: bounding rectangles of the motion components that pass the size
 *          filters (not too small, not covering nearly the whole frame)
 */
std::vector<Rect> update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
	std::vector<Rect> motionROIs;
    // NOTE(review): clock() measures CPU time, not wall-clock time; on a
    // loaded system the MHI timestamps may drift — confirm this is intended.
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;     // only used by the disabled drawing code at the bottom
    double magnitude;   // only used by the disabled drawing code at the bottom
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    // silh aliases the oldest buffer slot; it is overwritten in place
    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to 32f image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );
    //cvCopy( mhi, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    // lazily create the (global) segmentation storage, reuse it afterwards
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 40 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;

            //printf("Motion area: x=%d y=%d w=%d h=%d n=%d\n", comp_rect.x, comp_rect.y, comp_rect.width, comp_rect.height, i);
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );


        // calculate orientation (currently unused except for this adjustment)
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        // (less than 2% of the component area changed)
        if( count < comp_rect.width*comp_rect.height * 0.02 )
            continue;

        // Skip if contour is too large (95 of image area)
        if( (comp_rect.width > img->width*0.95) || (comp_rect.height > img->height*0.95)  )
        	continue;

		// Skip contour if it is too small
		if ( (comp_rect.width*comp_rect.height) < 1000 )
			continue;

        // draw a rectangle showing motion ROI
        CvPoint pt1, pt2;
        pt1.x = comp_rect.x;
        pt1.y = comp_rect.y;
        pt2.x = comp_rect.x + comp_rect.width;
        pt2.y = comp_rect.y + comp_rect.height;

		// Add relevant contour to vector
        Rect bndRect2(pt1, pt2);
		//Rect bndRect2(bX, bY, bW, bH);
		motionROIs.push_back(bndRect2);

 /*
        cvRectangle(dst, pt1, pt2, color, 1);

        // draw a clock with arrow indicating the direction
        //center = cvPoint( (comp_rect.x + comp_rect.width/2),
        //                  (comp_rect.y + comp_rect.height/2) );
        //cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        //cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
        //        cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
*/
    }
    return motionROIs;
}


/**
 * Simple running-average based motion detection on the global images.
 *
 * Compares the current frame (g_colourImage) against an exponentially
 * weighted moving average (g_movingAverage), thresholds the absolute
 * difference, cleans the mask up with dilate/erode and extracts the outer
 * contours of the changed regions.
 *
 * first - true on the very first frame: initializes the scratch images and
 *         the moving average instead of updating it
 *
 * Returns the head of the contour list; valid only until the next call
 * (the backing storage is cleared and reused).
 */
CvSeq* motionDetection(bool first)
{
    // Reuse one storage across calls. The original code created a fresh
    // CvMemStorage on every invocation and never released it, leaking
    // memory on each processed frame.
    static CvMemStorage* contourStorage = 0;

    if( first )
    {
        g_difference = cvCloneImage(g_colourImage);
        g_temp = cvCloneImage(g_colourImage);
        cvConvertScale(g_colourImage, g_movingAverage, 1.0, 0.0);
    }
    else
    {
        // Blend the current frame into the running average (2% weight)
        cvRunningAvg(g_colourImage, g_movingAverage, 0.020, NULL);
    }

    // Perform motion detection considering changes between average reference and current images
    cvConvertScale(g_movingAverage, g_temp, 1.0, 0.0);
    cvAbsDiff(g_colourImage, g_temp, g_difference);
    cvCvtColor(g_difference, g_greyImage, CV_RGB2GRAY);

    // Binarize the difference and close gaps in the motion blobs
    cvThreshold(g_greyImage, g_greyImage, g_binaryThres, 255, CV_THRESH_BINARY);
    cvDilate(g_greyImage, g_greyImage, 0, 18);
    cvErode(g_greyImage, g_greyImage, 0, 10);

    // Create the storage once, afterwards just recycle it
    if( !contourStorage )
        contourStorage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(contourStorage); // invalidates last call's contours

    CvSeq* contour;

    // Obtain a list of corner points describing the outer contours
    cvFindContours( g_greyImage, contourStorage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

    return contour;
}


/**
 * Detect pedestrian's feet using Haar detector
 *
 * For every motion bounding box, slightly enlarges the box, runs the global
 * lower-body Haar cascade inside it and maps the hits back to full-image
 * coordinates.
 *
 * frame  - input BGR frame
 * motion - motion ROIs (from MHI or running-average detection)
 *
 * Returns rectangle of all detected pedestrian feet (image coordinates)
 */
std::vector<Rect> detectPedestrians_Haar( Mat frame, std::vector<Rect> motion )
{
  int imgWidth = frame.cols;
  int imgHeight = frame.rows;

  Mat frame_gray;

  std::vector<Rect> pedestrians;
  std::vector<Rect> out;   // per-ROI hits; overwritten by each detectMultiScale call

  // Preprocess images
  cvtColor( frame, frame_gray, CV_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );

  // Iterate through all motion bounding boxes
  for(unsigned int i = 0; i < motion.size(); i++ )
  {
	Rect motionROI = motion[i];

	// Increase motion ROI by 1/7 (~14%) in width and height
	int wPlus = (int)motionROI.width/7;
	int hPlus = (int)motionROI.height/7;

	// Extend motion ROI (haar classifier needs to "breath"),
	// clamped so the ROI stays inside the image
	motionROI.x = max(motionROI.x-wPlus, 0);
	motionROI.y = max(motionROI.y-hPlus, 0);
	motionROI.width = min(motionROI.width+wPlus*2, imgWidth-motionROI.x-1);
	motionROI.height = min(motionROI.height+hPlus*2, imgHeight-motionROI.y-1);

    // Extract region of interest from big image
    Mat ROI = frame_gray( motionROI );

    // In each motion bounding box, detect pedestrians
    // (scaleFactor 1.1, minNeighbors 3, detections between 10x10 and 80x80 px)
    //g_haar_cascade.detectMultiScale( ROI, out, 1.0, 2, 0, Size(18, 18), Size(100, 100) );
    g_haar_cascade.detectMultiScale( ROI, out, 1.1, 3, 0 |CV_HAAR_SCALE_IMAGE, Size(10, 10), Size(80, 80) );

    // Correct rect coordinates (relative to image not ROI)
    for(unsigned int j = 0; j < out.size(); j++ )
     {
       //CvPoint pt1, pt2;
    	out[j].x += motionROI.x;
    	out[j].y += motionROI.y;
    	out[j].height += out[j].height*0.15;  // increase height by 15%
     }

    //imshow( "greyImage", ROI );

    // Copy result
    pedestrians.insert( pedestrians.end(), out.begin(), out.end() );
  }

  return pedestrians;
 }


/**
 * Detect pedestrian's feet using HOG detector
 *
 * For every motion bounding box, enlarges the box, runs the default OpenCV
 * people HOG detector inside it, maps hits back to image coordinates and
 * finally removes detections fully contained in another detection.
 *
 * frame  - input BGR frame
 * motion - motion ROIs
 *
 * Returns rectangle of all detected pedestrian contours (image coordinates)
 */
std::vector<Rect> detectPedestrians_HOG( Mat frame, std::vector<Rect> motion )
{
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

    int imgWidth = frame.cols;
    int imgHeight = frame.rows;

    vector<Rect> found, found_filtered;
    vector<Rect> out;   // per-ROI hits; overwritten by each detectMultiScale call
    //double t = (double)getTickCount();


	/**
	 * ITERATE THROUGH MOTION ROIs
	 */
	// Iterate through all motion bounding boxes
	for(unsigned int i = 0; i < motion.size(); i++ )
	{
		Rect motionROI = motion[i];

		// Increase motion ROI by 20% (width) and 20% (height)
		int wPlus = (int)motionROI.width/5;
		int hPlus = (int)motionROI.height/5;

		// Extend motion ROI (haar classifier needs to "breath"),
		// clamped so the ROI stays inside the image
		motionROI.x = max(motionROI.x-wPlus, 0);
		motionROI.y = max(motionROI.y-hPlus, 0);
		motionROI.width = min(motionROI.width+wPlus*2, imgWidth-motionROI.x-1);
		motionROI.height = min(motionROI.height+hPlus*2, imgHeight-motionROI.y-1);

	    // Extract region of interest from big image
	    Mat ROI = frame( motionROI );

	    // In each motion bounding box, detect pedestrians
	    //g_haar_cascade.detectMultiScale( ROI, out, 1.0, 2, 0, Size(18, 18), Size(100, 100) );
	    //g_haar_cascade.detectMultiScale( ROI, out, 1.1, 3, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30), Size(120, 120) );

		// run the detector with default parameters. to get a higher hit-rate
		// (and more false alarms, respectively), decrease the hitThreshold and
		// groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
		// Arguments: hitThreshold=0.8, winStride=8x8, padding=32x32,
		//            scale=1.05, finalThreshold=1.5
		hog.detectMultiScale(ROI, out, 0.8, Size(8,8), Size(32,32), 1.05, 1.5);

	    // Correct rect coordinates (relative to image not ROI)
	    for(unsigned int j = 0; j < out.size(); j++ )
	     {
	       //CvPoint pt1, pt2;
	    	out[j].x += motionROI.x;
	    	out[j].y += motionROI.y;
	     }

	    // Copy result
	    found.insert( found.end(), out.begin(), out.end() );
	}


	/**
	 * POST PROCESSING
	 * Keep only detections that are NOT entirely contained inside another
	 * detection ((r & found[j]) == r means r lies completely within found[j]).
	 */

	//t = (double)getTickCount() - t;
	//printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
	size_t i, j;
	for( i = 0; i < found.size(); i++ )
	{
		Rect r = found[i];
		for( j = 0; j < found.size(); j++ )
			if( j != i && (r & found[j]) == r)
				break;
		if( j == found.size() )
			found_filtered.push_back(r);
	}
/*
	for( i = 0; i < found_filtered.size(); i++ )
	{
		Rect r = found_filtered[i];
		// the HOG detector returns slightly larger rectangles than the real objects.
		// so we slightly shrink the rectangles to get a nicer output.
		r.x += cvRound(r.width*0.1);
		r.width = cvRound(r.width*0.8);
		r.y += cvRound(r.height*0.07);
		r.height = cvRound(r.height*0.8);
		rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
	}
*/
	return found_filtered;
}


/**
 * Combined near/far-field pedestrian detection (not yet implemented).
 *
 * Intended design (see getNearFieldROIs / getFarFieldROIs,
 * detectPedestrians_HOG and detectPedestrians_Haar):
 *  - split motion ROIs by size/position into near and far field
 *  - use HOG for near-field and Haar for far-field detection, merge results
 *
 * frame  - input BGR frame
 * motion - motion ROIs
 *
 * Returns the merged detections; currently always an empty vector.
 *
 * NOTE: the original version had no return statement at all, which is
 * undefined behavior for a non-void function; until the combination logic
 * is implemented an empty list is returned explicitly.
 */
std::vector<Rect> detectPedestrians( Mat frame, std::vector<Rect> motion )
{
	(void)frame;   // unused until implemented
	(void)motion;  // unused until implemented

	// TODO: filter motion ROIs into near/far field, run HOG (near field)
	// and Haar (far field), then merge and return the detections.
	return std::vector<Rect>();
}


/**
 * From the given vector of motion ROIs, extracts the ROIs which are in the near field.
 *
 * A ROI belongs to the near field when:
 * - its area is at least 7000 pixels, and
 * - its lower border lies at or below g_nearFarFieldThres of the image height
 */
std::vector<Rect> getNearFieldROIs( Mat img, std::vector<Rect> motion )
{
	std::vector<Rect> result;
	const double splitLine = img.rows * g_nearFarFieldThres;

	for( std::vector<Rect>::const_iterator it = motion.begin(); it != motion.end(); ++it )
	{
		const Rect& roi = *it;

		const bool bigEnough   = (roi.width * roi.height) >= 7000;
		const bool inLowerPart = (roi.y + roi.height) >= splitLine;

		if( bigEnough && inLowerPart )
			result.push_back(roi);
	}

	return result;
}


/**
 * From the given vector of motion ROIs, extracts the ROIs which are in the
 * far field, i.e. whose lower border lies at or above g_nearFarFieldThres
 * of the image height. (No size filter is applied here; the commented-out
 * area check of the original is intentionally kept disabled.)
 */
std::vector<Rect> getFarFieldROIs( Mat img, std::vector<Rect> motion )
{
	std::vector<Rect> result;
	const double splitLine = img.rows * g_nearFarFieldThres;

	for( std::vector<Rect>::const_iterator it = motion.begin(); it != motion.end(); ++it )
	{
		// Keep only ROIs whose lower border does not cross the split line
		if( (it->y + it->height) <= splitLine )
			result.push_back(*it);
	}

	return result;
}




/**
 * Returns root point in given grabcut result Matrix
 *
 * Root point is the position where the feet of a person connect to the floor
 * representing the position of this person.
 *
 * Starting near the crop center, walks downward through the foreground mask
 * (non-zero pixels); whenever the boundary is hit, probes wStep pixels left
 * and right to follow the lowest reachable foreground point.
 *
 * gcResult - 8-bit single-channel mask (non-zero = foreground)
 *
 * Returns the root point, or (-1/-1) if grabcut is assumed to have failed
 * (start pixel already background) or no boundary was reached.
 *
 * Note: If grabcut failed, this also will yield an unsatisfactory result
 */
Point2i findRootPoint(Mat gcResult)
{
	int width = gcResult.cols;
	int height = gcResult.rows;

	int startHeight = (int)(height*0.42);
	int startWidth = (int)(width*0.5);

	// Default point (-1/-1) - error
	Point2i rootPoint;
	rootPoint.x = -1;
	rootPoint.y = -1;

	// If first point already zero, GC is assumed to have failed
	// -> return (-1/-1) - error
	if( gcResult.at<uint8_t>(startHeight,startWidth) == 0 ) {
		return rootPoint;
	}

	// Starting near image center, we go down until the image value turns zero
	// (GC object detection boundary)
	int w = startWidth;
	for(int i=startHeight;i<height;i++) {

		int val = gcResult.at<uint8_t>(i,w);
		//printf("%u",val);

		// We reached edge, try left and right if we can go further down
		if( 0 == val )
		{
			rootPoint.x = w;
			rootPoint.y = i;


			// Check left and right.
			// FIX: clamp the right probe to the last valid column (width-1);
			// the original used min(width, ...) which reads one pixel past
			// the right border (out-of-bounds access).
			int wStep = 5;
			int wLeft = max(0, w-wStep);
			int wRight = min(width-1, w+wStep);
			int leftVal = gcResult.at<uint8_t>(i,wLeft);
			int rightVal = gcResult.at<uint8_t>(i,wRight);

			if( 0 != leftVal ) // max appears to be further left
			{
				// FIX: step to the clamped column; "w -= wStep" could
				// drive w negative near the left border
				w = wLeft;
			}

			else if( 0 != rightVal ) // max appears to be further right
			{
				w = wRight; // clamped, cannot run past the right border
			}

			else	// we are at (local) max, finish and return result
			{
				printf("Found RP at x=%d y=%d x/w=%d percent\n", rootPoint.x, rootPoint.y, rootPoint.x*100/width);
				break;
			}

		}
	}
	return rootPoint;
}

/**
 * Given a list of ROIs defining the lower body position in a given image,
 * the exact point coordinates of the detected feet are returned.
 *
 * For each pedestrian ROI, the crop is segmented with GrabCut (initialized
 * by a rectangle slightly inside the crop), the likely-foreground mask is
 * extracted and findRootPoint() locates the lowest foreground pixel, which
 * is then transformed back into full-image coordinates.
 *
 * Side effects: stores the last raw crop in g_lastDetectedFeet and the last
 * grabcut visualization in g_lastDetectedFeet2 (for debug display).
 *
 * frame       - input BGR frame
 * pedestrians - lower-body detections (image coordinates)
 *
 * Returns the feet root points (image coordinates); ROIs where no root
 * point was found are skipped.
 */
std::vector<Point2i> extractFeetPos( Mat frame, std::vector<Rect> pedestrians )
{
	int imgWidth = frame.cols;
	int imgHeight = frame.rows;

	Mat frame_gray;

	std::vector<Point2i> feetCoords;

	// Iterate through all motion bounding boxes
	for(unsigned int i = 0; i < pedestrians.size(); i++ )
	{
		Rect pedROI = pedestrians[i];

//		// Increase motion ROI by 10% (width) and 14% (height)
//		int wPlus = (int)pedROI.width/10;
//		int hPlus = (int)pedROI.height/7;

		// Prevent out of bound errors
		pedROI.x = max(pedROI.x, 0);
		pedROI.y = max(pedROI.y, 0);
		pedROI.width = min(pedROI.width, imgWidth-pedROI.x-2);
		pedROI.height = min(pedROI.height, imgHeight-pedROI.y-2);

		// Extract region of interest from big image
		Mat pedLow = frame( pedROI );

		pedLow.copyTo(g_lastDetectedFeet);

		// Apply grab cut on pedLow and find lowest central intersection point
		// (feet root point in pixel x/y)
		Mat pedLowGC;

		// Foreground init rectangle: 4 px inset left/right, 2 px at bottom.
		// NOTE(review): assumes the crop is at least ~9 px wide and 3 px
		// tall, otherwise brect degenerates — confirm upstream sizes.
		Size s = pedLow.size();
	    //Rect brect(wPlus,0,s.width-2*wPlus,s.height-5);
		Rect brect(4,0,s.width-8,s.height-2);


	    Mat result; // segmentation result (4 possible values)
	    Mat bgModel, fgModel; // the models (internally used)

	    // GrabCut segmentation (single iteration for speed)
	    grabCut( 	 pedLow,    // input image
	    			 result,   // segmentation result
	                 brect,// rectangle containing foreground
	                 bgModel,fgModel, // models
	                 1,        // number of iterations
	                 cv::GC_INIT_WITH_RECT); // use rectangle

	    // Get the pixels marked as likely foreground
	    // (result becomes a 0/255 mask after the compare)
	    compare(result,GC_PR_FGD,result,CMP_EQ);

	    // Generate output image
	    Mat foreground(pedLow.size(),CV_8UC3,Scalar(255,255,255));
	    pedLow.copyTo(foreground,result); // bg pixels not copied

	    // Find root point in grabcut result
	    Point2i rootP = findRootPoint(result);

	    // print result
	    //std::cout << "Grabcut result matrix = " << std::endl << " "  << result << std::endl << std::endl;

	    // Draw foreground region rect and root point
	    cv::rectangle(foreground, brect, Scalar(100, 100, 200), 1, 1, 0);
	    cv::circle(foreground, rootP, 2, Scalar(100, 100, 200), 1, 8, 0);

	    foreground.copyTo(g_lastDetectedFeet2);

	    //If valid then transform root point back to absolute image coordinates and append to result vector
	    if( rootP.x != -1 && rootP.y != -1 ) {
	    	rootP.x += pedestrians[i].x;
	    	rootP.y += pedestrians[i].y;
	    	feetCoords.push_back(rootP);
	    }
	}
	return feetCoords;
 }


/**
 * Draw feet detections on map.
 *
 * feet  - first feet crop; currently NOT drawn (that code path was disabled
 *         in the original), the parameter is kept for caller compatibility
 * feet2 - grabcut visualization crop, pasted into the top-left corner
 * image - destination image; must be large enough to hold feet2 at
 *         offset (1,1), otherwise image(roi) throws
 *
 * Cleanup: removed the dead locals of the original (a rect and a corner
 * point computed from 'feet' that were immediately discarded).
 */
void drawFeet( Mat feet, Mat feet2, Mat image )
{
	(void)feet; // retained for callers; drawing of the first crop is disabled

	Point2d p1 = Point(1, 1);

	// Paste the grabcut visualization and frame it in red
	Rect roi2( p1, feet2.size() );
	Point2d p2 = Point(p1.x+roi2.width, p1.y+roi2.height);
	feet2.copyTo( image(roi2) );
	rectangle(image, p1, p2, CV_RGB(255,0,0), 1);
}

/**
 * Draw x/y image points of feet detections into image
 *
 * Each detected foot position is marked with a small circle.
 */
void drawFeetPoints( std::vector<Point2i> feetPos, Mat image )
{
	for( std::vector<Point2i>::const_iterator it = feetPos.begin(); it != feetPos.end(); ++it )
	{
		cv::circle(image, *it, 2, Scalar(100, 100, 200), 1, 8, 0);
	}
}


/**
 * Returns homography matrix H based on correlated points
 * between object and image plane.
 *
 * The point pairs were obtained by manual measurement: object-plane
 * coordinates are in meters (floor plane relative to the camera), image
 * plane coordinates in pixels.
 *
 * Fixes vs. original: the table header printf was missing its newline
 * (header ran into the first data row) and used %2d for an unsigned
 * loop index (now %2u).
 */
Mat calibrate( void )
{
  //-- Step 1: Insert key points in both image plane and object plane (obtained by manual measurement)
  std::vector<Point2f> objectPlane;
  std::vector<Point2f> imagePlane;

  // Input object plane measurements (in m)
  objectPlane.push_back(Point2f(5,-1.1));
  objectPlane.push_back(Point2f(6,-1.1));
  objectPlane.push_back(Point2f(7,-1.1));
  objectPlane.push_back(Point2f(8,-1.1));
  objectPlane.push_back(Point2f(9,-1.1));
  objectPlane.push_back(Point2f(10,-1.1));
  objectPlane.push_back(Point2f(11,-1.1));
  objectPlane.push_back(Point2f(12,-1.1));
  objectPlane.push_back(Point2f(13,-1.1));
  objectPlane.push_back(Point2f(14,-1.1));
  objectPlane.push_back(Point2f(15,-1.1));
  objectPlane.push_back(Point2f(5,-2.55));
  objectPlane.push_back(Point2f(11,-2.55));

  // Input image plane measurements (in Pixels), same order as above
  imagePlane.push_back(Point2f(509,448));
  imagePlane.push_back(Point2f(487,393));
  imagePlane.push_back(Point2f(467,350));
  imagePlane.push_back(Point2f(457,317));
  imagePlane.push_back(Point2f(449,293));
  imagePlane.push_back(Point2f(441,273));
  imagePlane.push_back(Point2f(437,256));
  imagePlane.push_back(Point2f(429,243));
  imagePlane.push_back(Point2f(423,230));
  imagePlane.push_back(Point2f(419,220));
  imagePlane.push_back(Point2f(415,212));
  imagePlane.push_back(Point2f(749,420));
  imagePlane.push_back(Point2f(557,251));

  // Print correlated object and image plane measurements
  printf("Object plane x/y (meters) - image plane x/y (pixels)\n");
  for(unsigned int i=0; i<objectPlane.size(); i++) {
	  printf("%2u: %.2f/%.2f - %.0f/%.0f\n", i, objectPlane.at(i).x, objectPlane.at(i).y, imagePlane.at(i).x, imagePlane.at(i).y);
  }
  printf("\n");

  //-- Step 2: Calculate homography matrix using OpenCV functions
  // (method 0 = regular least-squares using all points)
  //Mat H = findHomography( objectPlane, imagePlane, CV_LMEDS);
  //Mat H = findHomography( objectPlane, imagePlane, CV_RANSAC);
  Mat H = findHomography( objectPlane, imagePlane, 0 );
  //std::cout << "H = "<< std::endl << " "  << H << std::endl << std::endl;
/*
  //-- Step 3: Apply Hinv to transform pixel from image plane to object plane
  // H transformation:
  Mat pixelCoord = Mat::ones(3,1,CV_64FC1);
  Mat realCoord = Mat::ones(3,1,CV_64FC1);

  pixelCoord.at<cv::Vec2d>(0,0)[0] = 478; // img x
  pixelCoord.at<cv::Vec2d>(1,0)[0] = 271; // img y

  std::cout << "pixelCoord = "<< std::endl << " "  << pixelCoord << std::endl << std::endl;

  // Calculate real from pixel coordinates
  Mat Hinv = H.inv();
  realCoord = Hinv * pixelCoord;

  // normalize and print result
  double scale = 1 / realCoord.at<cv::Vec2d>(2,0)[0];
  realCoord = realCoord*scale;
  std::cout << "realCoord = " << std::endl << " "  << realCoord << std::endl << std::endl;
*/
  return H;
}

/**
 * Returns image coordinate matrix (in pixel) for a given
 * homography matrix H and object coordinate matrix (in meters).
 *
 * H      - 3x3 CV_64FC1 homography (object plane -> image plane)
 * realXY - object-plane coordinate in meters
 *
 * Fix: the matrices are single-channel doubles (CV_64FC1), so elements are
 * accessed with at<double>; the original at<cv::Vec2d> access used a
 * mismatching element type, which trips CV_DbgAssert in debug builds.
 */
Point2i object2ImagePlane(Mat H, Point2d realXY) {

	Mat imgCoord = Mat::ones(3,1,CV_64FC1);
	Mat objCoord = Mat::ones(3,1,CV_64FC1);

	objCoord.at<double>(0,0) = realXY.x;  // real x
	objCoord.at<double>(1,0) = realXY.y;  // real y
	// objCoord(2,0) stays 1 (homogeneous coordinate)

	imgCoord = H * objCoord;

	// Normalize the homogeneous coordinate
	double scale = 1 / imgCoord.at<double>(2,0);
	imgCoord = imgCoord*scale;

	Point2i imgXY;
	imgXY.x = (int)imgCoord.at<double>(0,0);
	imgXY.y = (int)imgCoord.at<double>(1,0);

	return imgXY;
}

/**
 * Returns object coordinate matrix (in meters) for a given
 * INVERSED homography matrix Hinv and image coordinate matrix (in pixel).
 *
 * Hinv    - 3x3 CV_64FC1 inverse homography (image plane -> object plane)
 * imageXY - pixel coordinate
 *
 * Fix: element access via at<double> (the matrices are CV_64FC1); the
 * original at<cv::Vec2d> access used a mismatching element type, which
 * trips CV_DbgAssert in debug builds.
 */
Point2d image2ObjectPlane(Mat Hinv, Point2i imageXY) {

	Mat imgCoord = Mat::ones(3,1,CV_64FC1);
	Mat objCoord = Mat::ones(3,1,CV_64FC1);

	imgCoord.at<double>(0,0) = imageXY.x;  // img x
	imgCoord.at<double>(1,0) = imageXY.y;  // img y
	// imgCoord(2,0) stays 1 (homogeneous coordinate)

	objCoord = Hinv * imgCoord;

	// Normalize the homogeneous coordinate
	double scale = 1 / objCoord.at<double>(2,0);
	objCoord = objCoord*scale;

	Point2d realXY;
	realXY.x = objCoord.at<double>(0,0);
	realXY.y = objCoord.at<double>(1,0);

	return realXY;
}

/**
 * Test homography transformation in both directions.
 *
 * Transforms a fixed test pixel to the object plane and back; if the
 * calibration is consistent, the printed input and output pixel coordinates
 * match (up to integer truncation).
 *
 * Cleanup: removed the two unused 3x1 matrices the original allocated.
 */
void testHomographyTransform(void) {

	Mat H = calibrate();
	Mat Hinv = H.inv();

	// Set test pixel
	Point2i imageXY;
	imageXY.x = 478;
	imageXY.y = 271;

	std::cout << "Initial imgCoord = " << std::endl << " "  << imageXY << std::endl << std::endl;

	// Transform back and forth
	Point2d realXY = image2ObjectPlane(Hinv, imageXY);
	imageXY = object2ImagePlane(H, realXY);

	std::cout << "Transformed objCoord = " << std::endl << " "  << realXY << std::endl << std::endl;
	std::cout << "Transformed imgCoord = " << std::endl << " "  << imageXY << std::endl << std::endl;
}

/**
 * Draw floor map
 *
 * All coordinates are in meters, converted to pixels via g_scalingFact and
 * shifted by the (g_offsX, g_offsY) map origin. The camera sits at the map
 * origin; the measurements match the calibration points in calibrate().
 */
void drawFloorMap(IplImage* floorMap) {

	//cvResizeWindow("floorMap", 300, 300);

    // Enclosing rectangle
	//cvRectangle(floorMap, cvPoint(g_offsX,g_offsY), cvPoint(g_offsX+30*g_scalingFact,g_offsY+30*g_scalingFact), CV_RGB(200,200,200), 2);

	// Lines marking floor (corridor walls; the gap between x=8m and x=10.5m
	// is a side opening, drawn below)
	cvLine(floorMap, cvPoint(g_offsX,g_offsY), cvPoint(g_offsX+35*g_scalingFact,g_offsY), CV_RGB(0,0,200), 1);

	cvLine(floorMap, cvPoint(g_offsX,g_offsY+2.55*g_scalingFact), cvPoint(g_offsX+8*g_scalingFact,g_offsY+2.55*g_scalingFact), CV_RGB(0,0,200), 1);
	cvLine(floorMap, cvPoint(g_offsX+10.5*g_scalingFact,g_offsY+2.55*g_scalingFact), cvPoint(g_offsX+35*g_scalingFact,g_offsY+2.55*g_scalingFact), CV_RGB(0,0,200), 1);

	// Side opening between x=8m and x=10.5m
	cvLine(floorMap, cvPoint(g_offsX+8*g_scalingFact,g_offsY+2.55*g_scalingFact), cvPoint(g_offsX+8*g_scalingFact,g_offsY+5*g_scalingFact), CV_RGB(0,0,200), 1);
	cvLine(floorMap, cvPoint(g_offsX+10.5*g_scalingFact,g_offsY+2.55*g_scalingFact), cvPoint(g_offsX+10.5*g_scalingFact,g_offsY+5*g_scalingFact), CV_RGB(0,0,200), 1);

	// Draw camera location
	cvCircle(floorMap, cvPoint(g_offsX,g_offsY), 8, CV_RGB(0,255,0), 2);

	// Camera view area (field-of-view boundary rays)
	cvLine(floorMap, cvPoint(g_offsX,g_offsY), cvPoint(g_offsX+7*g_scalingFact,g_offsY-0.9*g_scalingFact), CV_RGB(0,255,0), 1);
	cvLine(floorMap, cvPoint(g_offsX,g_offsY), cvPoint(g_offsX+7*g_scalingFact,g_offsY+4.2*g_scalingFact), CV_RGB(0,255,0), 1);
}

/**
 * Draw position on the map, x/y being coordinates relative to camera in meters
 * (i.e. camera displacement)
 */
void drawPosition(IplImage* floorMap, double x_m, double y_m) {

	// Convert metric coordinates to map pixels; the +0.5 rounds to nearest.
	// The map's y axis points down on screen, hence the sign flip.
	const int px = (int)(x_m * g_scalingFact + 0.5);
	const int py = (int)(-y_m * g_scalingFact + 0.5);

	cvCircle(floorMap, cvPoint(g_offsX + px, g_offsY + py), 1, CV_RGB(255,0,0), 2);
}

/**
 * Sets all pixels to zero
 */
void clearImage(IplImage* floorMap) {
	// cvZero is equivalent to cvSet(img, cvScalar(0,0,0)): all channels
	// of every pixel are cleared
	cvZero(floorMap);
}

/**
 * Filters the given list of motion contours (removes too small ones and
 * shrinks ones adjacent to the image border) and returns vector of
 * rectangles representing the filtered contours.
 *
 * contour - head of the contour list produced by cvFindContours
 * imgSize - size of the source image (for border handling)
 *
 * Cleanup vs. original: removed the unused locals (count, motionROIs2,
 * checked) and made borderThres a signed int — the original unsigned
 * declaration caused signed/unsigned comparisons against the (always
 * non-negative) CvRect fields.
 */
std::vector<Rect> cleanupContours(CvSeq* contour, CvSize imgSize) {

	std::vector<Rect> motionROIs;
	CvRect bndRect = cvRect(0,0,0,0);

	for( ; contour != 0; contour = contour->h_next )
	{
		bndRect = cvBoundingRect(contour, 0);

		// Bounding box shrunk by one pixel on each dimension
		int bX = bndRect.x-1;
		int bY = bndRect.y-1;
		int bW = bndRect.width-1;
		int bH = bndRect.height-1;
		//printf("%d %d %d %d\n",bX,bY,bW,bH);

		// Skip contour if it is too small
		if ( (bndRect.width*bndRect.height) < 1000 ) //< 10000 )
			continue;

		// Skip contour if in left (irrelevant) part of camera view
//		if( bX+bW < 350 )
//			continue;

		// Shrink (!) contour if adjacent to image border
		const int borderThres = 5; // max. pixel distance from img border
		if ( bndRect.x < borderThres )
			bX = borderThres;

		if( bndRect.y < borderThres )
			bY = borderThres;

		if ( bndRect.x+bndRect.width > (imgSize.width-borderThres) )
			bW -= borderThres;

		if ( bndRect.y+bndRect.height > (imgSize.height-borderThres)  )
			bH -= borderThres;


		// Add relevant contour to vector
		motionROIs.push_back(Rect(bX, bY, bW, bH));
	}

	// TODO Merge boxes which are at same x position close to each other
	// (the experimental intersection code of the original was dead and
	// has been dropped; see version history)

	return motionROIs;
}

/**
 * Draws list of rectangles into image
 *
 * img   - destination image
 * ROIs  - rectangles to draw (image coordinates)
 * color - outline color
 */
void drawRectangles(IplImage* img, std::vector<Rect> ROIs, CvScalar color)
{
	for( std::vector<Rect>::const_iterator it = ROIs.begin(); it != ROIs.end(); ++it )
	{
		const CvPoint topLeft     = cvPoint(it->x, it->y);
		const CvPoint bottomRight = cvPoint(it->x + it->width, it->y + it->height);

		// One-pixel outline per ROI
		cvRectangle(img, topLeft, bottomRight, color, 1);
	}
}

/**
 * Draws position detections in image plane on given image
 *
 * Each detected position is marked with a small filled-looking circle.
 */
void drawPositions(IplImage* img, std::vector<Point2i> points, CvScalar color)
{
	for( std::vector<Point2i>::const_iterator it = points.begin(); it != points.end(); ++it )
	{
		cvCircle(img, *it, 2, color, 2, 8, 0);
	}
}


/**
 * FIXME This function currently yields compile errors: std::ostringstream
 * is not copyable, so it cannot be returned by value. Return os.str() as a
 * std::string instead, then re-enable the code below.
 */
/*
ostringstream generateResults(std::vector<Point2i> feetPos, Mat Hinv)
{
	ostringstream os;
	os << "<?xml version=\"1.0\" encoding=\"UTF-8\"?><root>";
	 //os << "You: " << 2 << " CPU: " << 5 << endl;

	 // Iterate through all detected points
    for(unsigned int j = 0; j < feetPos.size(); j++ )
    {
    	// Calculate center point of face
       //Point center( faces[i].x + pedestrians[j].x + pedestrians[j].width*0.5, faces[i].y + pedestrians[j].y + pedestrians[j].height*0.5 );

    	Point2i imageXY = feetPos[j];

   	// Get pixel coordinate of detected person
//    	Point2i imageXY;
//    	imageXY.x = r.x + r.width/2; 	// img x
//    	imageXY.y = r.y + r.height;		// img y

		// Transform to floor coordinate
		Point2d realXY = image2ObjectPlane(Hinv, imageXY);

		// Draw position to in-screen map
		drawPosition(g_colourImage, realXY.x, realXY.y);

		// Append to XML string with all positions
		os << "<coordinate id=\"" << j << "\" x=\"" << realXY.x << "\" y=\"" << realXY.y << "\"/>";
     }
     os << "</root>";

     return os;
}
*/


/**
 * One-shot motion detection: extracts the raw change contours for the
 * current frame (see motionDetection) and returns the cleaned-up set of
 * motion bounding boxes (see cleanupContours).
 *
 * first   - true on the very first frame (initializes the running average)
 * imgSize - size of the source image
 */
std::vector<Rect> simpleMotionDetect(bool first, CvSize imgSize)
{
	return cleanupContours(motionDetection(first), imgSize);
}


/**
 * Main function
 */
//#if 0
int main()
{
	bool connectedToPosServer = false;

	// Perform tests
	testHomographyTransform();

	// Perform calibrations for homography transform
	Mat H = calibrate();
	Mat Hinv = H.inv();

	//Load the cascades
	if( !g_haar_cascade.load( g_face_cascade_name ) ){ printf("--(!)Error loading Haar cascade file\n"); return -1; };

   // Open Video stream
    //CvCapture *input = cvCaptureFromFile(g_camURL);  				// Remote MJPG server
    //CvCapture *input = cvCaptureFromCAM(0);						// Integrated webcam
    CvCapture* input = cvCreateFileCapture("passageway1-c0.avi");   // Local video file
	//CvCapture* input = cvCreateFileCapture("LANE1.mp4");

    if (input==NULL)
           printf("Error opening video stream\n");
        else
           printf("Video stream successfully opened\n");


    // Video properties
    int width = (int)cvGetCaptureProperty(input,CV_CAP_PROP_FRAME_WIDTH);
    int height = (int)cvGetCaptureProperty(input,CV_CAP_PROP_FRAME_HEIGHT);
    double fps = cvGetCaptureProperty(input, CV_CAP_PROP_FPS);
    int frame_count = (int)cvGetCaptureProperty(input,  CV_CAP_PROP_FRAME_COUNT);

    printf("Video Size = %d x %d\n",width,height);
    printf("FPS = %f\nTotal Frames = %d\n",fps,frame_count);

    CvSize imgSize = cvSize(width, height);

    printf("ImgSize w/h= %d/%d\n",imgSize.width,imgSize.height);

	// Prepare windows
	cvNamedWindow("colourImage", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("greyImage", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("greyImage2", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("movingAverage", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("difference", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("motionHistory", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("floorMap", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("colourPlusMap", CV_WINDOW_AUTOSIZE);
	//cvResizeWindow("colourPlusMap",imgSizeDouble.width, imgSizeDouble.height);


	// Fixed framerate assumed
	//double fps = 20; //cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);

//	IplImage* greyImage = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
//	IplImage* colourImage = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
//	IplImage* movingAverage = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
//	IplImage* difference = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
//	IplImage* temp = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
	g_greyImage = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
	g_colourImage = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
	g_movingAverage = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);
	g_difference = cvCreateImage( imgSize, IPL_DEPTH_8U, 3);
	//g_difference = cvCreateImage( cvSize(g_greyImage->width,g_greyImage->height), 8, 3 );
    //cvZero( g_difference );
    //g_difference->origin = g_greyImage->origin;

	g_temp = cvCreateImage( imgSize, IPL_DEPTH_32F, 3);


   // Combine two images into one
   //IplImage* colourPlusMap = cvCreateImage( imgSizeDouble, IPL_DEPTH_8U, 3);

   // Init CURL to transmit results
   CURL *curl;
   CURLcode res;
   char postStr[200];

   if(posServerActive) {
	   /* init curl */
	   curl_global_init(CURL_GLOBAL_ALL);

	   /* get a curl handle */
	   curl = curl_easy_init();

	   if (!curl) {
		    connectedToPosServer = false;
			fprintf(stderr, "curl_easy_init() failed\n");
			//return(1);
	   } else {

		   // Transmit test data results
			curl_easy_setopt(curl, CURLOPT_URL, g_posServerURL);
			sprintf(postStr,"test=%d", 0);
			curl_easy_setopt(curl, CURLOPT_POSTFIELDS, postStr);
			res = curl_easy_perform(curl);

			if(res != CURLE_OK) {
				printf("curl connection to server failed: %s\n", curl_easy_strerror(res));
				connectedToPosServer = false;
			} else {
				printf("curl connection to server successfully established\n");
				connectedToPosServer = true;
			}
	   }
   }

   CvFont font;
   char tmpStr[65];
   bool first = true;

   // Capture video to output file
   CvVideoWriter *writer1 = cvCreateVideoWriter( "out_col.avi", CV_FOURCC('P','I','M','1'), fps, imgSize ); // MPEG1 codec
   //CvVideoWriter *writer1 = cvCreateVideoWriter( "out_col.avi", CV_FOURCC('D','I','V','X'), fps, imgSize ); // H263 codec

   //CvVideoWriter *writer2 = cvCreateVideoWriter( "out_bin.avi", CV_FOURCC('P','I','M','1'), fps, imgSize ); // MPEG1 codec
   //CvVideoWriter *writer3 = cvCreateVideoWriter( "out_diff.avi", CV_FOURCC('P','I','M','1'), fps, imgSize ); // MPEG1 codec


  /** TODO
   * PROCESSING LOOP
   */
  bool quit = false;
  while ( !quit ) //&& cvGrabFrame(input) )
  {
	 g_colourImage = cvQueryFrame(input);
	 //g_colourImage = cvRetrieveFrame(input);

	 //drawFloorMap(g_colourImage);

     if( !g_colourImage )
         break;


     // MOTION DETECTION
     //CvSeq* contour = motionDetection(first, colourImage, greyImage, movingAverage, difference, temp);
     int mhiThres = 20;  // lower thres for more motion detections (and false detections)
     std::vector<Rect> motionROIs = update_mhi(g_colourImage, g_difference, mhiThres);
     //std::vector<Rect> motionROIs = simpleMotionDetect(first, imgSize);
     first = false;

     // PEDESTRIAN DETECTION
     std::vector<Rect> pedestrians;
     //std::vector<Rect> pedestrians = detectPedestrians(g_colourImage, motionROIs);
     std::vector<Rect> motionROIs_NF = getNearFieldROIs(g_colourImage, motionROIs);
     std::vector<Rect> pedestrians_HOG = detectPedestrians_HOG(g_colourImage, motionROIs_NF); // HOG

     std::vector<Rect> motionROIs_FF = getFarFieldROIs(g_colourImage, motionROIs);
     std::vector<Rect> pedestrians_Haar = detectPedestrians_Haar(g_colourImage, motionROIs_FF); // Haar

     // Implement grab cut algorithm to cut relevant part from detected feet
     std::vector<Point2i> feetPos_HOG = extractFeetPos(g_colourImage, pedestrians_HOG);
     std::vector<Point2i> feetPos_Haar = extractFeetPos(g_colourImage, pedestrians_Haar);


     // Iterate through all feet detections
     // TODO Integrate extracted feet pos
     //ostringstream os = generateResults(feetPos, Hinv);

/*
	  // Transmit results if available and connection ok
	  if(connectedToPosServer && pedestrians.size()>0) {

			String xmlString = os.str();

			curl_easy_setopt(curl, CURLOPT_URL, g_posServerURL);
			curl_easy_setopt(curl, CURLOPT_POST, 1);
			curl_easy_setopt(curl, CURLOPT_POSTFIELDS, xmlString.c_str());
			curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, xmlString.length());
			struct curl_slist *slist = curl_slist_append(NULL, "Content-Type: text/xml; charset=utf-8");
			curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
			curl_easy_perform(curl);

		    // Specify the POST data
		    //sprintf(postStr,"coordinates=%str", xmlString);
		    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, xmlString.c_str());

		    // Perform the request, res will get the return code
		    res = curl_easy_perform(curl);

		    // Check for errors
		    if(res != CURLE_OK)
		      fprintf(stderr, "curl_easy_perform() failed: %s\n",
		              curl_easy_strerror(res));

	 }
*/
     // Add text
/*
     cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);
     sprintf(tmpStr,"%d",motionROIs.size());
     cvPutText(g_colourImage, tmpStr, cvPoint(imgSize.width-50, imgSize.height-50), &font, CV_RGB(0,0,255));
     sprintf(tmpStr,"%d",pedestrians.size());
     cvPutText(g_colourImage, tmpStr, cvPoint(imgSize.width-50, imgSize.height-80), &font, CV_RGB(255,0,0));
*/

     // Draw bounding boxes
     drawRectangles(g_colourImage, motionROIs_FF, CV_RGB(255,0,255));
     drawRectangles(g_colourImage, motionROIs_NF, CV_RGB(0,0,255));
     drawRectangles(g_colourImage, pedestrians_HOG, CV_RGB(255,0,0));
     drawRectangles(g_colourImage, pedestrians_Haar, CV_RGB(255,100,0));
     //drawPositions(g_colourImage, feetPos, CV_RGB(255,0,0));

     // Near/far threshold
     cvLine(g_colourImage, cvPoint(0,g_colourImage->height*g_nearFarFieldThres), cvPoint(g_colourImage->width,g_colourImage->height*g_nearFarFieldThres), CV_RGB(222,222,222), 1);


     // Draw last feet detection
     drawFeet(g_lastDetectedFeet, g_lastDetectedFeet2, g_colourImage);
     drawFeetPoints(feetPos_Haar, g_colourImage);
     drawFeetPoints(feetPos_HOG, g_colourImage);



	//cvSetImageROI( colourImage, cvRect( 20, 140, g_lastDetectedFeet->width, g_lastDetectedFeet->height ) );
	//cvCopy(g_lastDetectedFeet, colourImage);


     // Display results
     cvShowImage("colourImage", g_colourImage);
     //cvShowImage( "greyImage", greyImage );
     //cvShowImage("movingAverage", g_movingAverage);
     cvShowImage("difference", g_difference);
     //cvShowImage("motionHistory", motionHistory);
     //cvShowImage( "ForegroundCodeBook", ImaskCodeBook);
     //cvShowImage("floorMap", floorMap);
     //cvShowImage("colourPlusMap", colourPlusMap);


     cvWriteFrame( writer1, g_colourImage );
     //cvWaitKey(10);
     //cvWriteFrame( writer1, g_colourImage );
     //cvWriteFrame( writer2, greyImage );
     //cvWriteFrame( writer3, difference );


     // Timeout in ms before fetching next image

     cvWaitKey(20);


     // Write to output video file
     //cvWriteFrame(outputMovie, colourImage);


    // React on user key press
    char k = cvWaitKey(10)&0xff;
    switch (k)
    {
      case 27:
      case 'q':
      case 'Q':
        quit = true;
        break;
      case 's':
      case 'S':
    	/*
        for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it)
        {
          std::stringstream filename;
          filename << "redobject_blob_" << std::setw(5) << std::setfill('0') << blobNumber << ".png";
          cvSaveImageBlob(filename.str().c_str(), img, it->second);
          blobNumber++;
          std::cout << filename.str() << " saved!" << std::endl;
        }
        */
        break;
    }

    // Optionally: timing statistics
    /*
      while (cvWaitKey(10)!=atoi("q")){

          double t1=(double)cvGetTickCount();

          IplImage *img = cvQueryFrame(input);

          double t2=(double)cvGetTickCount();

          printf("time: %gms  fps: %.2g\n",(t2-t1)/(cvGetTickFrequency()*1000.), 1000./((t2-t1)/(cvGetTickFrequency()*1000.)));

          cvShowImage("img",img);
      }
      cvReleaseCapture(&camera);
    */
  }

  // Cleanup HTTP connections
  curl_easy_cleanup(curl);
  curl_global_cleanup();

  // Terminate and clean up properly
  cvReleaseVideoWriter( &writer1 );
  //cvReleaseVideoWriter( &writer2 );
  //cvReleaseVideoWriter( &writer3 );

  cvReleaseImage(&g_colourImage);
  cvReleaseImage(&g_greyImage);
  cvReleaseImage(&g_movingAverage);
  cvReleaseImage(&g_temp);
  cvReleaseImage(&g_difference);
  cvDestroyWindow("colourImage");

  cvReleaseCapture(&input);
  //frameNumber++;


  return 0;
}
//#endif


#if 0
/**
 * Alternative stand-alone entry point (compiled out via #if 0, kept for
 * reference): a minimal motion-history-image demo around update_mhi().
 * Opens the local test video, feeds every frame to update_mhi() and shows
 * the resulting motion image until a key is pressed or the stream ends.
 */
int main(int argc, char** argv)
{
    IplImage* motion = 0;  // motion visualisation buffer, lazily allocated below
    //CvCapture* capture = 0;

    //help();
/*
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );
*/
    CvCapture* capture = cvCreateFileCapture("passageway1-c0.avi");   // Local video file

    //IplImage* frame = NULL;

    if (capture==NULL)
           printf("Error opening video stream\n");
        else
           printf("Video stream successfully opened\n");

    if( capture )
    {
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image = cvQueryFrame( capture );
            if( !image )
                break;

            // Allocate the 8-bit 3-channel motion image once the frame size
            // is known (i.e. on the first frame).
            if( !motion )
            {
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin;  // match vertical orientation of source frames
            }

            // Threshold 20 — same value as the MHI threshold in the active main()
            update_mhi( image, motion, 20 );
            cvShowImage( "Motion", motion );

            // Any key press terminates the demo loop
            if( cvWaitKey(10) >= 0 )
                break;
        }
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }

    return 0;
}
#endif


