/* --Sparse Optical Flow Demo Program--
* Written by David Stavens (david.stavens@ai.stanford.edu)
*/
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <math.h>

/* Pi, used below to rotate the arrow-head segments by +/- 10 degrees (pi/18 rad). */
static const double pi = 3.14159265358979323846;

/* Return a*a as a double.
 * The operand is widened to double BEFORE multiplying: the original computed
 * a*a in int arithmetic, which is signed-overflow UB for |a| > ~46340.  Pixel
 * displacements here stay small, but the widened form is safe for any int. */
inline static double square(int a)
{
	return (double) a * (double) a;
}

/* Lazily allocate an image to reduce clutter in the vision code proper.
 * If *img is already non-NULL it is left exactly as-is — even when its size,
 * depth, or channel count differ from the request.  On allocation failure the
 * program is terminated. */
inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
	if ( *img == NULL )
	{
		*img = cvCreateImage( size, depth, channels );
		if ( *img == NULL )
		{
			fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
			exit(-1);
		}
	}
}

/* Sparse (pyramidal Lucas-Kanade) optical-flow demo.
 * For each pair of consecutive frames: track up to 550 "good" features from
 * frame 1 into frame 2, draw the flow vectors as arrows on frame 1, show the
 * annotated image, and print a per-frame "energy" (sum of squared flow
 * magnitudes).  The second frame of each pair is also written to an output AVI.
 * Keys: 'b'/'B' step back one frame; ESC/'q' quit; anything else steps forward.
 * Returns 0 on user quit, -1 on open/decode failure.
 */
int main(void)
{
	int exit_code = 0;

	/* Create an object that decodes the input video stream. */
	CvCapture *input_video = cvCaptureFromFile(
		//"E:\\Desktop\\Video\\PETS\\Fight_Chase.avi"
		//"E:\\Desktop\\Video\\SIAT_Video\\Crowdsplit.avi"
		"E:\\Desktop\\Video\\SIAT_Video\\Crowdwalk125fps_cut.avi"
		//"E:\\Desktop\\Video\\optical_flow_input.avi"
		);
	if (input_video == NULL)
	{
		fprintf(stderr, "Error: Can't open video.\n");
		return -1;
	}

	CvSize frame_size;
	frame_size.height =
		(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
	frame_size.width =
		(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );

	CvVideoWriter *videoWriter = cvCreateVideoWriter("E:\\Desktop\\abnormal.avi", CV_FOURCC('X', 'V', 'I', 'D'), 25, frame_size, 1);

	/* Determine the number of frames in the AVI by seeking to the end,
	 * reading the position, and seeking back to the start. */
	long number_of_frames;
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
	/* FIX: cast to long, not int — the original truncated through (int). */
	number_of_frames = (long) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );

	cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

	long current_frame = 0;
	while(true)
	{
		/* static: these buffers are allocated once (via allocateOnDemand)
		 * and reused on every iteration of the loop. */
		static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;

		cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			/* Why did we get a NULL frame?  We shouldn't be at the end. */
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			exit_code = -1;
			break;	/* FIX: break (not return) so the cleanup below runs. */
		}

		/* Grayscale copy of frame 1 for the trackers. */
		allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP);

		/* Color copy of frame 1: this is the image we draw the arrows on. */
		allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1, CV_CVTIMG_FLIP);

		/* Get the second frame of video.  Same principles as the first. */
		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			exit_code = -1;
			break;
		}
		/* FIX: write the frame only AFTER the NULL check — the original
		 * passed a possibly-NULL frame to cvWriteFrame at end-of-video. */
		cvWriteFrame(videoWriter, frame);

		allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP);

		/* Scratch buffers required internally by cvGoodFeaturesToTrack. */
		allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
		allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

		/* This array will contain the features found in frame 1. */
		CvPoint2D32f frame1_features[550];
		/* In: capacity of the array.  Out: number of features actually found. */
		int number_of_features = 550;

		cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01,5.0/* .01*/, NULL);
		/* Refine the corner positions to sub-pixel accuracy. */
		cvFindCornerSubPix( frame1_1C,frame1_features,number_of_features,cvSize(3,3),cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

		CvPoint2D32f frame2_features[550];
		char optical_flow_found_feature[550];	/* per feature: was it tracked? */
		float optical_flow_feature_error[550];	/* per feature: match error */

		/* This is the window size to use to avoid the aperture problem. */
		CvSize optical_flow_window = cvSize(3,3);

		CvTermCriteria optical_flow_termination_criteria
			= cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

		allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
		allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

		cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, number_of_features, 
			optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );

		/* Draw the flow field and accumulate the motion "energy". */
		float fenergy = 0.0f;
		for(int i = 0; i < number_of_features; i++)
		{
			/* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
			if ( optical_flow_found_feature[i] == 0 )	continue;

			int line_thickness = 1;
			CvScalar line_color = CV_RGB(255,0,0);

			CvPoint p,q;
			p.x = (int) frame1_features[i].x;
			p.y = (int) frame1_features[i].y;
			q.x = (int) frame2_features[i].x;
			q.y = (int) frame2_features[i].y;

			double angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
			double hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

			fenergy += hypotenuse * hypotenuse;

			/* Lengthen the arrow (x9) so small motions are still visible. */
			q.x = (int) (p.x - 9 * hypotenuse * cos(angle));
			q.y = (int) (p.y - 9 * hypotenuse * sin(angle));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );

			/* Draw the tips of the arrow, scaled so they look proportional
			 * to the main line of the arrow. */
			p.x = (int) (q.x + 6 * cos(angle + pi / 18));
			p.y = (int) (q.y + 6 * sin(angle + pi / 18));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			p.x = (int) (q.x + 6 * cos(angle - pi / 18));
			p.y = (int) (q.y + 6 * sin(angle - pi / 18));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
		}

		/* FIX: current_frame is long — %ld, not %d (format mismatch is UB). */
		printf("Energy @ Frame %ld: %f\n", current_frame, fenergy);

		/* Display the image we drew on in the window created above. */
		cvShowImage("Optical Flow", frame1);

		/* Wait up to 100 ms for a key press (returns -1 on timeout). */
		int key_pressed = cvWaitKey(100);

		/* FIX: provide a way out of the loop — in the original, the cleanup
		 * code after the loop was unreachable. */
		if (key_pressed == 27 || key_pressed == 'q' || key_pressed == 'Q')
			break;

		/* "b"/"B" goes back one frame; any other key (or timeout) advances. */
		if (key_pressed == 'b' || key_pressed == 'B')	
			current_frame--;
		else											
			current_frame++;
		/* Don't run past the front/end of the AVI. */
		if (current_frame < 0)						current_frame = 0;
		if (current_frame >= number_of_frames - 1)	current_frame = number_of_frames - 2;
	}

	/* FIX: release the capture and destroy the window too, not only the
	 * video writer. */
	cvReleaseVideoWriter(&videoWriter);
	cvReleaseCapture(&input_video);
	cvDestroyWindow("Optical Flow");

	return exit_code;
}
