/*	--Sparse Optical Flow Demo Program--
 *	Written by David Stavens (dstavens@robotics.stanford.edu)
 *
 *	Adapted by Brent Bannister, University of Bristol, 2013 (brent.bannister22@gmail.com)
 *
 */
#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <stdio.h>
#include <math.h>
#include <fstream>
#include <opencv2/legacy/legacy.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"

#include "OpticalFlow.h"
#include "BackgroundSegmentation.h"
#include "SurfFeatures.h"

/* NOTE(review): file-scope using-directives are kept for compatibility with the
 * rest of this translation unit, though they are generally discouraged. */
using namespace cv;
using namespace std;

/* Pi to full double precision; used below for the 45-degree arrow-head angles. */
static const double pi = 3.14159265358979323846;

/* Constructs the optical-flow tracker.
 * "rl" is a non-owning pointer to the shared resource library; the caller
 * retains ownership and must keep it alive for this object's lifetime.
 * The feature-point vectors (goodStart/goodEnd/badStart/badEnd) are
 * default-constructed empty; the original code listed them here as bare
 * expression statements ("goodStart;" etc.), which were no-ops with no
 * effect and have been removed. */
OpticalFlow::OpticalFlow(ResourceLibrary* rl)
{
	resources = rl;
}

/* Destructor: nothing to release. "resources" is a non-owning pointer (see
 * the constructor) and the point vectors clean themselves up. */
OpticalFlow::~OpticalFlow(void)
{
}

/* Squares an integer, returning the result as a double.
 * The operand is widened to double BEFORE the multiply: the original
 * "a * a" was evaluated in int, so |a| > 46340 overflowed 32-bit int
 * arithmetic (undefined behaviour) even though the return type is double. */
inline static double square(int a)
{
	return static_cast<double>(a) * a;
}

/* Lazily allocates *img with the given size/depth/channels.
 * If *img is already non-NULL the function returns immediately and assumes
 * the existing image matches the requested parameters (not re-checked).
 * On allocation failure it prints to stderr and terminates the process.
 * NOTE: in the original, the cvCreateImage line was indented as if guarded
 * by the "if" above it but actually executed unconditionally after the early
 * return — the misleading indentation is fixed here; behaviour is unchanged. */
inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels)
{
	if ( *img != NULL )
		return;

	*img = cvCreateImage( size, depth, channels );
	if ( *img == NULL )
	{
		fprintf(stderr, "Error: Couldn't allocate image \n");
		exit(-1);
	}
}

/* Returns the median of "values".
 * The vector is taken by value on purpose: sorting mutates the local copy,
 * leaving the caller's data untouched.
 * An empty input previously indexed out of bounds — with size == 0 the even
 * branch computed values[size/2 - 1], i.e. values[SIZE_MAX] (undefined
 * behaviour). An empty vector now yields 0.0. */
double calcMedian(std::vector<double> values)
{
	const size_t size = values.size();
	if (size == 0)
	{
		return 0.0;	/* guard: no samples — define the median as 0.0 */
	}

	std::sort(values.begin(), values.end());

	if (size % 2 == 0)
	{
		/* Even count: average the two middle elements. */
		return (values[size / 2 - 1] + values[size / 2]) / 2;
	}
	/* Odd count: the single middle element. */
	return values[size / 2];
}

/* Runs sparse (pyramidal Lucas-Kanade) optical flow over the hard-coded input
 * video "images/obj_cam_move.MOV", classifies each tracked feature as
 * consistent ("good") or outlying ("bad") against the median flow length and
 * angle, and visualises the flow as arrows in several HighGUI windows.
 *
 * "bgSeg" supplies the background-segmented frames used as the canvas for the
 * "FG" window (bgSeg->images is indexed here by current_frame + 1).
 * The good/bad start/end member vectors are refilled on every iteration.
 *
 * Returns -1 on any open/read failure. NOTE(review): the while(true) loop has
 * no normal exit — the frame index is clamped at both ends — so the function
 * only ever returns via an error path, and the closing brace is unreachable
 * (hence no trailing return statement). The user steps frames interactively:
 * 'b'/'B' goes back one frame, any other key advances. */
int OpticalFlow::calcOpticalFlow(BgSegmenter* bgSeg)
{
	/* Creates video capture object to store and decode the video that is input */
	CvCapture *input_video = cvCaptureFromFile("images/obj_cam_move.MOV");
	if (input_video == NULL)
	{
		fprintf(stderr, "Error: Can't open video.\n");
		return -1;
	}
	/* Get the first frame of the video so we can extract key info such as size etc*/
	cvQueryFrame( input_video );
	/* Get size based on first frame */
	CvSize frame_size;
	frame_size.height = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
	frame_size.width = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );
	/* Get total number of frames in video.
	 * Seeking to AVI ratio 1.0 (the end) and then reading POS_FRAMES is the
	 * classic workaround for CV_CAP_PROP_FRAME_COUNT being unreliable with
	 * some codecs (see the commented-out alternative below). */
	long number_of_frames;
	//long number_of_frames2;
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
	number_of_frames = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
	//number_of_frames2 = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_COUNT );
	/* Return to the beginning */
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );

	/* Windows for visualisation */
	cvNamedWindow("Current Frame", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Next Frame", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("FG", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Test", CV_WINDOW_AUTOSIZE);

	long current_frame = 0;

	IplImage *foreground = NULL;
	IplImage *foregroundArrows = NULL;
	//BgSegmenter* bgSeg = new BgSegmenter();

	while(true)
	{
		/* Reset the per-frame classification results from the previous pass. */
		goodStart.clear();
		goodEnd.clear();
		badStart.clear();
		badEnd.clear();
		/* static: these buffers persist across loop iterations so that
		 * allocateOnDemand() below allocates each of them only once, on the
		 * first pass, and they are reused for every subsequent frame pair. */
		static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;
		
		cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

		/* Get next frame */
		frame = cvQueryFrame( input_video );
		/* Create Mat image1 for Surf detection.
		 * NOTE(review): constructed BEFORE the NULL check below — if the read
		 * failed this wraps a NULL frame; consider moving it after the check. */
		Mat image1(frame);
		/* Check frame loaded correctly */
		if (frame == NULL)
		{
			fprintf(stderr, "Error reading current frame from file \n");
			return -1;
		}
		/* Single-channel (grayscale) copy of frame 1 for the trackers. */
		allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame1_1C);
		/* Make a copy of the image to draw onto */
		allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1);
		/* Get the next frame for comparison */
		frame = cvQueryFrame( input_video );
		/* Create Mat image2 for Surf detection.
		 * NOTE(review): same ordering issue as image1 above — wraps "frame"
		 * before the NULL check. */
		Mat image2(frame);
		if (frame == NULL)
		{
			fprintf(stderr, "Error reading next frame from file\n");
			return -1;
		}
		allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame2_1C);
		/* Shi and Tomasi Feature Tracking! */
		/* Prepare storage that is needed */
		allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
		allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );
		/* Array to hold detected features */
		CvPoint2D32f frame1_features[500];
		/* Preparation: BEFORE the function call this variable is the array size
		* (or the maximum number of features to find).  AFTER the function call
		* this variable is the number of features actually found.
		*/
		int number_of_features;
		number_of_features = 500;
		/* 
		* "frame1_1C" is the input image.
		* "eig_image" and "temp_image" are just workspace for the algorithm.
		* The first ".01" specifies the minimum quality of the features (based on the eigenvalues).
		* The second ".01" specifies the minimum Euclidean distance between features.
		* "NULL" means use the entire input image.  You could point to a part of the image.
		* WHEN THE ALGORITHM RETURNS:
		* "frame1_features" will contain the feature points.
		* "number_of_features" will be set to a value <= 500 indicating the number of feature points found.
		*/
		cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01, .01, NULL);


		/*
		SurfFeatures* surf = new SurfFeatures();
		surf->findFeatures(image1,image2,0);
		*/

			
		/* Prepare for Lucas Kanade Algorithm */
		/* This array will contain the locations of the points from frame 1 in frame 2. */
		CvPoint2D32f frame2_features[500];
		/* The i-th element of this array will be non-zero if and only if the i-th feature of
		* frame 1 was found in frame 2.
		*/
		char optical_flow_found_feature[500];
		/* The i-th element of this array is the error in the optical flow for the i-th feature
		* of frame1 as found in frame 2.  If the i-th feature was not found (see the array above)
		* I think the i-th entry in this array is undefined.
		*/
		float optical_flow_feature_error[500];
		/* This is the window size to use to avoid the aperture problem (see slide "Optical Flow: Overview"). */
		CvSize optical_flow_window = cvSize(3,3);
		/* This termination criteria tells the algorithm to stop when it has either done 20 iterations or when
		* epsilon is better than .3.  You can play with these parameters for speed vs. accuracy but these values
		* work pretty well in many situations.
		*/
		CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
		/* This is some workspace for the algorithm.
		* (The algorithm actually carves the image into pyramids of different resolutions.)
		*/
		allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
		allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
		/* Actually run Pyramidal Lucas Kanade Optical Flow!!
		* "frame1_1C" is the first frame with the known features.
		* "frame2_1C" is the second frame where we want to find the first frame's features.
		* "pyramid1" and "pyramid2" are workspace for the algorithm.
		* "frame1_features" are the features from the first frame.
		* "frame2_features" is the (outputted) locations of those features in the second frame.
		* "number_of_features" is the number of features in the frame1_features array.
		* "optical_flow_window" is the size of the window to use to avoid the aperture problem.
		* "5" is the maximum number of pyramids to use.  0 would be just one level.
		* "optical_flow_found_feature" is as described above (non-zero iff feature found by the flow).
		* "optical_flow_feature_error" is as described above (error in the flow for this feature).
		* "optical_flow_termination_criteria" is as described above (how long the algorithm should look).
		* "0" means disable enhancements.  (For example, the second aray isn't preinitialized with guesses.)
		*/
		cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, number_of_features, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );
   
		/* Draw visual representation of the flow */
		vector<double> lineLengths;
		vector<double> lineAngles;
		double runningLength = 0;
		double runningAngle = 0;
		double line_length, line_angle;
		double medianLength, medianAngle;

		/* Calculate average length and direction of flow to identify outliers.
		 * NOTE(review): this pass includes features the flow did NOT find
		 * (optical_flow_found_feature is only consulted in the drawing loop
		 * below), so unmatched features contribute to the medians — confirm
		 * whether that is intended. */
		for (int n = 0; n < number_of_features;n++)
		{
			CvPoint p1,q1;
			p1.x = (int) frame1_features[n].x;
			p1.y = (int) frame1_features[n].y;
			q1.x = (int) frame2_features[n].x;
			q1.y = (int) frame2_features[n].y;
			line_angle = atan2( (double) p1.y - q1.y, (double) p1.x - q1.x );
			runningAngle += line_angle;
			lineAngles.push_back(line_angle);	 
			line_length = sqrt( square(p1.y - q1.y) + square(p1.x - q1.x) );
			runningLength += line_length;
			lineLengths.push_back(line_length);
		}
		medianAngle = calcMedian(lineAngles);
		medianLength = calcMedian(lineLengths);
		//foreground = bgSeg->subtractOpticalFlow(frame,foreground);
		/* Grab the matching background-segmented frame and convert it to a
		 * 3-channel image so coloured arrows can be drawn onto it. */
		IplImage* curfr = &bgSeg->images.at(current_frame+1);
		Mat colour_convert(curfr);
		cv::cvtColor(colour_convert, colour_convert, CV_GRAY2RGB);
		/* NOTE(review): cvCloneImage allocates a fresh image every iteration
		 * and the previous clone is never cvReleaseImage'd — this leaks one
		 * frame-sized image per loop pass. */
		foregroundArrows=cvCloneImage(&(IplImage)colour_convert);
		for(int i = 0; i < number_of_features; i++)
		{
			/* Skip feature if no match found */
			if ( optical_flow_found_feature[i] == 0 )  continue;
			int line_thickness;     
			line_thickness = 1;
			CvScalar line_color;    
			line_color = CV_RGB(0,255,0);
			CvPoint p,q;
			p.x = (int) frame1_features[i].x;
			p.y = (int) frame1_features[i].y;
			q.x = (int) frame2_features[i].x;
			q.y = (int) frame2_features[i].y;
			double angle;   
			double hypotenuse;
			angle = atan2( (double) p.y - q.y, (double) p.x - q.x );	  
			hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );
			/* Outlier test: a vector is "bad" (drawn red, stored in badStart/
			 * badEnd) if it is more than a third longer than the median length
			 * or deviates from the median angle by more than 0.3 radians. */
			if (hypotenuse > (medianLength + (medianLength/3)) || angle > (medianAngle+0.3) || angle < (medianAngle-0.3))
			{
				line_color = CV_RGB(255,0,0);
				badStart.push_back(p);
				badEnd.push_back(q);
			}
			else
			{
				line_color = CV_RGB(0,255,0);
				goodStart.push_back(p);
				goodEnd.push_back(q);
			}
			/* Here we lengthen the arrow by a factor of three. */
			q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
			q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
			/* Now we draw the main line of the arrow. */
			/* "frame1" is the frame to draw on.
			*  "p" is the point where the line begins.
			*  "q" is the point where the line stops.
			*  "CV_AA" means antialiased drawing.
			*  "0" means no fractional bits in the center cooridinate or radius.
			*/
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			cvLine( foregroundArrows, p, q, line_color, line_thickness, CV_AA, 0 );
			/* Now draw the tips of the arrow.  I do some scaling so that the
			* tips look proportional to the main line of the arrow.
			*/   
			p.x = (int) (q.x + 9 * cos(angle + pi / 4));
			p.y = (int) (q.y + 9 * sin(angle + pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			cvLine( foregroundArrows, p, q, line_color, line_thickness, CV_AA, 0 );
			p.x = (int) (q.x + 9 * cos(angle - pi / 4));
			p.y = (int) (q.y + 9 * sin(angle - pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			cvLine( foregroundArrows, p, q, line_color, line_thickness, CV_AA, 0 );
		}

		/* Display output image with arrows */
		cvShowImage("Optical Flow", frame1);
		cvShowImage("FG", foregroundArrows);

		/* Remove Optical Flow from FGBG segmentation */
		//bgSeg->removeOpticalFlow(current_frame);

		/* Interactive stepping: block until a key is pressed; 'b'/'B' steps
		 * one frame back, any other key steps forward. The index is clamped
		 * to [0, number_of_frames - 2], so this loop never exits normally. */
		int key_pressed;
		key_pressed = cvWaitKey(0);
		if (key_pressed == 'b' || key_pressed == 'B')  current_frame--;
		else           current_frame++;
		if (current_frame < 0)            current_frame = 0;
		if (current_frame >= number_of_frames - 1)  current_frame = number_of_frames - 2;
		/*if (current_frame < 0) current_frame = 0;
		if (current_frame >= number_of_frames - 1)  break;
		current_frame++;*/
		
	}
}
