#ifndef OPTIC_H
#define OPTIC_H

#include "opencv/cv.h"
#include "opencv/highgui.h"

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <sys/time.h>

using namespace std;

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define FLOW_POINTS 200
#define ROIradius 240
// NSECTORS MUST BE EVEN, and FLOW_POINTS / NSECTORS must be an integer!
#define NSECTORS 20

// Optic: wraps an OpenCV (C-API) camera capture and computes sparse optic
// flow over a fixed radial grid of feature points, decomposing the measured
// flow into Z (forward) and X (sideways) translation estimates.  It also
// supports tracking a red ball via HSV thresholding (trackBall).
class Optic {
	private:
		clock_t frame1time, frame2time;       // wall-clock timestamps (in clock_t ticks) of the two most recent frames
		CvFont font;                          // font used when annotating scribble_frame
		CvCapture* input_video;               // camera capture handle (owned; released in destructor)
		IplImage 	*frame,		              // last grabbed COLOR frame, CANNOT DRAW ON THIS
					    *frame1_1C,	            // second to last grabbed frame in B&W
					    *frame2_1C,	            // last grabbed frame in B&W
					    *pyramid1, *pyramid2,   // pyramids for Lucas-Kanade
					    *lastpyramid,		        // save the last pyramid so we don't have to recalc it
					    *scribble_frame;	      // used to "scribble" X's and lines on, to send to display

    // Detect a red ball
    CvScalar hsv_min;   // H values will wrap around from 0 to 360
    CvScalar hsv_max;

    IplImage *hsv_frame;      // capture converted to HSV (owned; released in destructor)
    IplImage *thresholded;    // binary mask of in-range (red) pixels (owned; released in destructor)

    // Default capture size - 640x480
    CvSize size;

		CvPoint2D32f frame1_features[FLOW_POINTS];	// Optic flow features in frame 1

		// The following are vectors for each point in the optic flow features array that tell us what direction and
		// magnitude for positive flow of 1 unit when translating in Z (forward) and X (right)
		CvPoint2D32f Z_trans_field[FLOW_POINTS],		// Z translation flow field
					       X_trans_field[FLOW_POINTS];		// X translation flow field

		int number_of_features;		            // Number of features to track

		double Z_translation, X_translation;	// Last tracked Z and X translation (set by calculate_flows)

		bool view_enabled;                    // when true, draw debug graphics onto scribble_frame

		static const int hres = 640;
		static const int vres = 480;
		// Distance (in standard deviations) from the mean within which data is
		// kept.  NOTE(review): this was written as "= 2.0" but the member is an
		// int, so the literal was silently truncated to 2; the value is
		// unchanged here — if fractional radii are wanted this must become a
		// double (which, pre-C++11, cannot be initialized in-class).
		static const int stddevradius = 2;
		static const int centerx = hres/2;    // adjust to camera
		static const int centery = vres/2;

		// Populates frame1_features with a radial grid of points: `sectors`
		// points per ring, numfeatures/sectors rings, rings spaced evenly out
		// to maxradius, with alternate rings rotated half a sector so points
		// interleave.  Also fills the per-point Z/X translation flow-field
		// templates used to decompose measured flow.
		// Needs to be defined here because it's called from the constructor.
		inline void buildfeatures(int centerx, int centery, int sectors, int numfeatures, int maxradius)
		{
			// Radial spacing between rings; exact when FLOW_POINTS/NSECTORS
			// divides maxradius*sectors (true for the defaults: 230*20/200 = 23).
			int radinterval = maxradius * sectors / numfeatures;
			int radius;
			for (int i = 1; i <= (int)(numfeatures/sectors); i++)	// for each "ring" i
			{
				radius = i * radinterval;
				for (int j = 1; j <= sectors; j++)		// for each sector j
				{
					const int n = (i-1) * sectors + (j-1);
					// (i%2)/2 offsets odd rings by half a sector angle.
					frame1_features[n].x = centerx + radius * cos(2.0 * M_PI * ((double)j + (double)(i%2) / 2) / (double)sectors);
					frame1_features[n].y = centery + radius * sin(2.0 * M_PI * ((double)j + (double)(i%2) / 2) / (double)sectors);

					// Forward (Z) motion produces radial expansion proportional
					// to distance from the image center.
					// ASSUMES NO DISTORTION i.e. FISHEYE
					Z_trans_field[n].x = (double)(frame1_features[n].x - centerx) * 4 / hres;
					Z_trans_field[n].y = (double)(frame1_features[n].y - centery) * 4 / vres;
					// Rightward (X) motion produces uniform leftward image flow.
					X_trans_field[n].x = -1;
					X_trans_field[n].y = 0;
				}
			}
		}

		// Allocates an OpenCV image if *img is still NULL; exits the process
		// on allocation failure.  Safe to call repeatedly (idempotent).
		inline void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels)
		{
			if ( *img != NULL )
				return;
			*img = cvCreateImage( size, depth, channels );
			if ( *img == NULL ) {
				ROS_INFO("Optic Flow Error: Couldn't allocate image. Out of memory?\n");
				exit(1);
			}
		}

		// Wall-clock time expressed in clock_t ticks (gettimeofday-based, so
		// it measures real time, not CPU time like clock()).
		inline clock_t realclock()
		{
			timeval tv;
			gettimeofday(&tv, 0);
			// BUGFIX: the microsecond component must be scaled to ticks by
			// CLOCKS_PER_SEC/1e6; the old code added raw usec, which was only
			// correct on platforms where CLOCKS_PER_SEC == 1000000.  Computed
			// in double to reduce overflow risk before the final cast.
			return (clock_t)((double)tv.tv_sec * CLOCKS_PER_SEC
			                 + (double)tv.tv_usec * ((double)CLOCKS_PER_SEC / 1000000.0));
		}

		// Grabs a frame, runs pyramidal Lucas-Kanade over frame1_features and
		// updates Z_translation / X_translation (defined in the .cpp).
		void calculate_flows();

		// following are used by calculate_flows()
		void draw_x( IplImage* img, CvPoint pt, int radius, int weight, CvScalar color);
		void draw_x2( IplImage* img, CvPoint center, int radius, int weight, int sectors, CvScalar color);

		// overloads for extra speed (avoids pow() for a simple square)
		inline static double square(double a)
		{
			return a * a;
		}
		inline static int square(int a)
		{
			return a * a;
		}

	public:
		// Opens camera 0, grabs an initial frame, and allocates every working
		// image up front.  Exits the process if the camera cannot be opened or
		// a frame cannot be grabbed.  `scribble` enables debug drawing.
		Optic(bool scribble = true) : frame(NULL), frame1_1C(NULL), frame2_1C(NULL), pyramid1(NULL),
                                  pyramid2(NULL), lastpyramid(NULL), scribble_frame(NULL), hsv_frame(NULL),
                                  thresholded(NULL), number_of_features(FLOW_POINTS),
                                  Z_translation(0), X_translation(0), view_enabled(scribble)
		{
			// Red wraps around the hue axis; this picks the upper band.
			// NOTE(review): these scalars appear to be (H, S, V, 0) with H on a
			// 0-360-style scale — confirm against how trackBall thresholds.
			hsv_min = cvScalar(150, 84, 130, 0);
			hsv_max = cvScalar(358, 256, 255, 0);

			// Default capture size - 640x480
			size = cvSize(640,480);

			cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX, 0.2,0.2,0,1);
			input_video = cvCaptureFromCAM( 0 );		// For externally connected camera
			if (input_video == NULL)						// Make sure data can be read from video source
			{
				ROS_INFO("Optic Flow Error: Can't open video.\n");
				exit(1);
			}

			frame = cvQueryFrame( input_video );			// Get first frame
			if (frame == NULL)
			{
				ROS_INFO("Optic Flow Error: Can't grab frame\n");
				exit(1);
			}
			frame2time = realclock();
			// allocate everything first, need to get frames, and INITIAL frame:
			allocateOnDemand( &frame1_1C, cvGetSize(frame), IPL_DEPTH_8U, 1 );
			allocateOnDemand( &frame2_1C,  cvGetSize(frame), IPL_DEPTH_8U, 1 );
			allocateOnDemand( &scribble_frame, cvGetSize(frame), IPL_DEPTH_8U, 3);
			allocateOnDemand( &hsv_frame,  cvGetSize(frame), IPL_DEPTH_8U, 3 );
			allocateOnDemand( &thresholded, cvGetSize(frame), IPL_DEPTH_8U, 1);
			cvConvertImage(frame, frame2_1C);
			cvConvertImage(frame, scribble_frame);
			// allocate pyramids for Lucas-Kanade, only needs to be done once
			allocateOnDemand( &pyramid1, cvGetSize(frame1_1C), IPL_DEPTH_8U, 1 );
			allocateOnDemand( &pyramid2, cvGetSize(frame1_1C), IPL_DEPTH_8U, 1 );
			buildfeatures(centerx, centery, NSECTORS, FLOW_POINTS, ROIradius - 10);
		}

		// Releases every image this class allocated, then the capture.
		// (`frame` itself is NOT released: cvQueryFrame returns a buffer owned
		// by the capture, which cvReleaseCapture frees.)
		~Optic() {
			if (frame1_1C)
				cvReleaseImage(&frame1_1C);
			if (frame2_1C)
				cvReleaseImage(&frame2_1C);
			if (scribble_frame)
				cvReleaseImage(&scribble_frame);
			if (pyramid1)
				cvReleaseImage(&pyramid1);
			if (pyramid2)
				cvReleaseImage(&pyramid2);
			// BUGFIX: hsv_frame and thresholded are allocated in the
			// constructor but were previously never released (leak).
			if (hsv_frame)
				cvReleaseImage(&hsv_frame);
			if (thresholded)
				cvReleaseImage(&thresholded);
			// NOTE(review): lastpyramid is deliberately not released here —
			// it looks like it aliases pyramid1/pyramid2 (set in
			// calculate_flows(), not visible in this header), so releasing it
			// would double-free.  Confirm against the .cpp before changing.
			if (input_video)
				cvReleaseCapture(&input_video);
		}

		// Runs one flow/ball-tracking step and returns the latest estimates
		// through the reference parameters (defined in the .cpp).
		void getflows(double &translationX, double &translationZ,
                  double &ballX, double &ballY, double &ballR, int &ballDetect);
		void trackBall(IplImage* frame, double &ballX, double &ballY, double &ballR, int &ballDetect);

} ;

#endif
