#include <iostream>
using std::cout;
using std::endl;
#include <list>
using std::list;
#include <queue>
using std::queue;
#include <vector>
using std::vector;

#include <algorithm>
using std::max;
using std::min;


#include "PlateLocalization.h"
#include "CarHandler.h"
#include <math.h>


#include <cv.h>
//#include <cxcore.h>
#include <highgui.h>

#define SIZE_THRESHOLD_HEIGHT 30 //in image size %              
#define SIZE_THRESHOLD_WIDTH 30 //in image size %
#define VELOCITY_THRESHOLD 0.5
#define CAMERA_DEVICE -1
//#define TEST_AVI "data\\ofir_enter.avi"
#define TEST_AVI "data\\ofir_enter.avi"
#define LK_WINDOW 13
#define MEDIAN_WINDOW 25
#define NO_MOVEMENT_CONST 1
#define FRAME_SKIP_THRES 4

/*
Assumptions - the car is in the middle of the camera's field of view


*/

// will find out when a car is arriving and will get images of the car
static void waitForCar(CarHandler::t_imageList& imgList , CvCapture* camCapture );
static bool checkForCarMovement( IplImage* previousImageCapture , IplImage* currentImageCapture );
static inline double getVelocity( IplImage* velImageX, IplImage* velImageY, int xPixel, int yPixel );
static bool isThereBigObject( IplImage *image, int widthThreshold = 0, int heightThreshold = 0 , int colorThresholdLow = 0, int colorThresholdHigh = 255  ); //invalidates velocity images , gets grayscale image
static bool getStartingPoint( IplImage *image, CvPoint& startingPoint , int colorThresholdLow = 0, int colorThresholdHigh = 255, bool reset = false ); //goes over image and always get a new starting point in threshold limits , gets grayscale image , should be reset between images
static IplImage* medianFilter( IplImage *source , int windowSize );


// Front-camera thread entry point. `param` is an already-opened CvCapture*.
// Loops forever: waits for a car to drive into view and stop, then hands the
// last captured frame to the plate localizer.
void main_front(void *param){
	cout << "front thread" << endl;
	CvCapture* camCapture = (CvCapture*)param;
	//CvCapture* camCapture = cvCaptureFromFile( TEST_AVI );
	//CvCapture* camCapture = cvCaptureFromCAM( CAMERA_DEVICE );
	while (true) {
		CarHandler::t_imageList imgList;
		waitForCar(imgList,camCapture); // waiting for a car to arrive.
		// when we get here, we got a car.
		// NOTE(review): on end-of-stream waitForCar releases camCapture and
		// returns (possibly with an EMPTY imgList); this loop then calls
		// imgList.back() on an empty list and reuses the dangling capture
		// pointer on the next iteration - confirm and guard.
		// NOTE(review): the cloned frames stored in imgList are never
		// released here, so every detected car leaks its images - confirm
		// ownership with FindLicenseNumber / CarHandler before freeing.

		IplImage *tmp = imgList.back();
		PlateLocalization::Instance()->FindLicenseNumber(tmp);
		//CarHandler::instance()->notifyArrival(imgList); // gives images of the car to  
		// the proccesing unit that 
		// opens its own thread and returns
	}
	// NOTE(review): unreachable - the while(true) above never breaks.
	cvReleaseCapture(&camCapture);
}

// Blocks until a car enters the frame and then stops moving.
// On success, imgList receives two cloned frames: the first frame where
// movement was detected and the frame where the car was judged stationary.
// Processes every (FRAME_SKIP_THRES+1)-th frame to reduce CPU load.
// NOTE: frames returned by cvQueryFrame are owned by the capture and must
// not be released directly - that is why cvCloneImage is used throughout.
// NOTE(review): on end-of-stream this function releases camCapture itself,
// but the caller (main_front) keeps using the pointer afterwards - confirm.
static void waitForCar(CarHandler::t_imageList& imgList, CvCapture* camCapture ){
	bool foundCar = false;
	bool carStopped = false;
	int timesNoObject = 0;     // consecutive frame pairs with no detected movement

	IplImage *previousImageCapture = NULL, *currentImageCapture = NULL;
	

	currentImageCapture = cvQueryFrame( camCapture );

	cvNamedWindow("Video",0);	
	cvNamedWindow("Thres",0);
	cvNamedWindow("Pic1",0);	
	cvNamedWindow("Pic2",0);

	//gets first frame
	int frameCount = 0;
	// Phase 1: wait until movement (an arriving car) is detected.
	while(!foundCar){
		++frameCount;
		if( frameCount <= FRAME_SKIP_THRES ){ 
			// skip this frame; cvQueryFrame overwrites the capture's
			// internal buffer that currentImageCapture points at
			cvQueryFrame( camCapture );
			continue;
		} 
		frameCount = 0;
		cvShowImage("Video", currentImageCapture);
		// keep a private clone of the last shown frame before grabbing the next one
		if( previousImageCapture != NULL ) cvReleaseImage(&previousImageCapture);
		previousImageCapture = cvCloneImage(currentImageCapture);
		currentImageCapture = cvQueryFrame( camCapture );
		if( currentImageCapture == NULL ){
			// stream ended before any car arrived - clean up and bail out
			cvReleaseImage(&previousImageCapture);
			cvReleaseCapture(&camCapture);
			return;
		}
		cvWaitKey(1);
		foundCar = checkForCarMovement( previousImageCapture , currentImageCapture );
	}
	// first snapshot: the frame where the car was first seen moving
	imgList.push_back(cvCloneImage(currentImageCapture));
	cvShowImage("Pic1", currentImageCapture);
	// Phase 2: wait until movement ceases for more than NO_MOVEMENT_CONST
	// consecutive processed frame pairs (i.e. the car has stopped).
	while(!carStopped){
		++frameCount;
		if( frameCount <= FRAME_SKIP_THRES ){ 
			cvQueryFrame( camCapture );
			continue;
		} 
		frameCount = 0;
		cvShowImage("Video", currentImageCapture);
		if( previousImageCapture != NULL ) cvReleaseImage(&previousImageCapture);
		previousImageCapture = cvCloneImage(currentImageCapture);
		currentImageCapture = cvQueryFrame( camCapture );
		if( currentImageCapture == NULL ){
			// stream ended while the car was still in view - use the last
			// good frame as the "stopped" snapshot
			imgList.push_back(cvCloneImage(previousImageCapture));
			cvShowImage("Pic2", previousImageCapture);
			cvWaitKey(1);
			cvReleaseImage(&previousImageCapture);
			cvReleaseCapture(&camCapture);
			return;
		}
		cvWaitKey(1);
		if(!checkForCarMovement( previousImageCapture , currentImageCapture )) ++timesNoObject;
		else timesNoObject = 0;
		if( timesNoObject > NO_MOVEMENT_CONST ) carStopped = true;
	}
	// second snapshot: the frame where the car is judged stationary
	imgList.push_back(cvCloneImage(currentImageCapture));
	cvShowImage("Pic2", currentImageCapture);
	cvReleaseImage(&previousImageCapture);
	return;
}


// Computes dense Lucas-Kanade optical flow between two consecutive frames,
// thresholds the per-pixel speed into a binary motion mask, median-filters
// the mask to remove speckle, and reports whether a sufficiently large
// moving object is present (bounding box >= SIZE_THRESHOLD_* percent of
// the frame). Both frames must be the same size; neither is modified.
// Returns true when a big moving object is detected.
static bool checkForCarMovement( IplImage* previousImageCapture ,IplImage* currentImageCapture ){
	IplImage *grayPrevious, *grayCurrent, *velocityX, *velocityY;

	IplImage *thresImage = cvCreateImage(cvGetSize(previousImageCapture), IPL_DEPTH_8U , 1);

	grayPrevious = cvCreateImage(cvGetSize(previousImageCapture), IPL_DEPTH_8U , 1);
	grayCurrent = cvCreateImage(cvGetSize(currentImageCapture), IPL_DEPTH_8U , 1);
	// NOTE(review): frames from cvQueryFrame are normally BGR, so CV_BGR2GRAY
	// may be the strictly correct code; the R/B weight swap is harmless for
	// motion thresholding, so the original constant is kept - confirm.
	cvCvtColor(previousImageCapture , grayPrevious, CV_RGB2GRAY);
	cvCvtColor(currentImageCapture , grayCurrent, CV_RGB2GRAY);

	velocityX = cvCreateImage(cvSize(previousImageCapture->width, previousImageCapture->height), IPL_DEPTH_32F, 1);
	velocityY = cvCreateImage(cvSize(previousImageCapture->width, previousImageCapture->height), IPL_DEPTH_32F, 1);

	cvCalcOpticalFlowLK( grayPrevious, grayCurrent, cvSize(LK_WINDOW, LK_WINDOW), velocityX, velocityY );

	// Build the binary motion mask. Rows in the outer loop so the images are
	// walked in memory order (row-major) - same result as a column-major
	// walk, but cache friendly.
	for( int j=0 ; j < velocityX->height ; ++j ){
		uchar* thresRow = (uchar*)thresImage->imageData + thresImage->widthStep*j;
		for( int i=0 ; i < velocityX->width ; ++i ){
			thresRow[i] = ( getVelocity(velocityX,velocityY,i,j) < VELOCITY_THRESHOLD ) ? 0 : 255;
		}
	}

	IplImage *thresImageFiltered = medianFilter( thresImage , MEDIAN_WINDOW );

	cvShowImage("Thres", thresImageFiltered);

	// Only fully-white (255) pixels count as motion after the median filter.
	bool result = isThereBigObject( thresImageFiltered, SIZE_THRESHOLD_WIDTH, SIZE_THRESHOLD_HEIGHT, 255 );

	cvReleaseImage(&thresImage);
	cvReleaseImage(&thresImageFiltered);

	cvReleaseImage(&grayPrevious);
	cvReleaseImage(&grayCurrent);
	cvReleaseImage(&velocityX);
	cvReleaseImage(&velocityY);

	return result;
}


// Returns the optical-flow speed, i.e. the Euclidean norm sqrt(vx^2 + vy^2),
// at pixel (xPixel, yPixel). The velocity images must be single-channel
// IPL_DEPTH_32F, as produced by cvCalcOpticalFlowLK; widthStep is in bytes,
// hence the byte-pointer arithmetic before the float cast.
static inline double getVelocity( IplImage* velImageX, IplImage* velImageY, int xPixel, int yPixel ){
	float velX = ((float*)(velImageX->imageData + velImageX->widthStep*yPixel))[xPixel];
	float velY = ((float*)(velImageY->imageData + velImageY->widthStep*yPixel))[xPixel];

	// plain multiplication instead of pow(x,2): same value, much cheaper
	return sqrt( (double)velX*velX + (double)velY*velY );
}


// Scans a grayscale image for 4-connected components whose pixel values lie
// in [colorThresholdLow, colorThresholdHigh] and returns true as soon as one
// component's bounding box is at least widthThreshold / heightThreshold
// percent of the image dimensions.
// DESTRUCTIVE: every pixel of a visited component is zeroed (this is how
// getStartingPoint's scan skips already-consumed components).
static bool isThereBigObject( IplImage *image, int widthThreshold , int heightThreshold  , int colorThresholdLow , int colorThresholdHigh ){ //invalidates velocity images , gets grayscale image

	CvPoint startingPoint;

	// One flag per pixel so the BFS expands each pixel at most once. The
	// original relied solely on zeroing visited pixels, which never
	// terminates when colorThresholdLow == 0 (a zeroed pixel is still in
	// range and gets re-expanded forever).
	vector<char> visited( image->width * image->height, 0 );

	getStartingPoint(image,startingPoint,colorThresholdLow,colorThresholdHigh,true); //reseting the scan cursor

	while( getStartingPoint(image,startingPoint,colorThresholdLow,colorThresholdHigh) ){
		queue<CvPoint> qPoints;
		qPoints.push(startingPoint);
		// bounding box; note the inverted-y convention below:
		// topLeft holds (min x, MAX y) and bottomRight holds (max x, MIN y),
		// so topLeft.y - bottomRight.y is the positive height.
		CvPoint topLeft = startingPoint, bottomRight = startingPoint;
		while (!qPoints.empty()) {
			CvPoint currentPoint(qPoints.front()); 
			qPoints.pop();

			char& seen = visited[currentPoint.y * image->width + currentPoint.x];
			if( seen ) continue;
			seen = 1;

			if( ((uchar*)image->imageData + image->widthStep*currentPoint.y)[currentPoint.x] < colorThresholdLow ||
				((uchar*)image->imageData + image->widthStep*currentPoint.y)[currentPoint.x] > colorThresholdHigh ) continue;

			// consume the pixel so getStartingPoint's cursor skips it later
			((uchar*)image->imageData + image->widthStep*currentPoint.y)[currentPoint.x] = 0;

			// grow the bounding box:
			if (currentPoint.x < topLeft.x)
				topLeft.x = currentPoint.x;
			if (currentPoint.y > topLeft.y)
				topLeft.y = currentPoint.y;
			if (currentPoint.x > bottomRight.x)
				bottomRight.x = currentPoint.x;
			if (currentPoint.y < bottomRight.y)
				bottomRight.y = currentPoint.y;

			// enqueue the 4-neighbours (BFS flood fill):
			if (currentPoint.x > 0)
				qPoints.push( cvPoint(currentPoint.x - 1, currentPoint.y) );
			if (currentPoint.x + 1 < image->width)
				qPoints.push( cvPoint(currentPoint.x + 1, currentPoint.y) );
			if (currentPoint.y > 0)
				qPoints.push( cvPoint(currentPoint.x, currentPoint.y - 1) );
			if (currentPoint.y + 1 < image->height)
				qPoints.push( cvPoint(currentPoint.x, currentPoint.y + 1) );
		}
		CvSize objectSize = cvSize( bottomRight.x - topLeft.x, topLeft.y - bottomRight.y );
		// compare sizes as integer percentages of the full image
		if( (objectSize.width*100/image->width) >= widthThreshold && (objectSize.height*100/image->height) >= heightThreshold ) return true;
	}

	return false;
}


// Incrementally scans a grayscale image (column by column, resuming where the
// previous call stopped) for the next pixel whose value lies inside
// [colorThresholdLow, colorThresholdHigh]. Writes that pixel into
// startingPoint and returns true; returns false when the whole image has
// been scanned. Call once with reset == true (always returns false) before
// scanning a new image - the cursor is static, so this is NOT thread-safe.
static bool getStartingPoint( IplImage *image, CvPoint& startingPoint , int colorThresholdLow , int colorThresholdHigh , bool reset ){ //goes over image and always get a new starting point in threshold limits , gets grayscale image , should be reset between images

	// persistent scan cursor, shared across calls
	static int currentX;
	static int currentY;

	if(reset){
		currentX = currentY = 0;
		return false;
	}

	for( ; currentX < image->width ; ++currentX ){
		for( ; currentY < image->height ; ++currentY ){
			uchar pixel = ((uchar*)image->imageData + image->widthStep*currentY)[currentX];
			if( pixel >= colorThresholdLow && pixel <= colorThresholdHigh ){
				startingPoint.x = currentX;
				startingPoint.y = currentY;
				// Advance past the reported pixel so the same point is never
				// returned twice. The original re-tested it on the next call,
				// which loops forever when colorThresholdLow == 0 (the caller
				// zeroes the pixel, but 0 is still inside the range).
				++currentY;
				return true;
			}
		}
		currentY = 0;
	}
	return false;
}

/*
static IplImage* medianFilter( IplImage *source , int windowSize ){

IplImage *outputImage = cvCreateImage(cvGetSize(source), IPL_DEPTH_8U , 1);
windowSize = ((windowSize/2)*2) + 1 ; //fixing if window size isnt odd


for( int i=0 ; i < source->width ; ++i ){
for( int j=0 ; j < source->height ; ++j ){
vector<int> pixelValues;
for( int k = -(windowSize-1)/2 ; k <= (windowSize-1)/2 ; ++k ){
for( int l = -(windowSize-1)/2 ; l <= (windowSize-1)/2 ; ++l ){
int x = min(max(0,i+k), source->width - 1); 
int y = min(max(0,j+l), source->width - 1); 
pixelValues.push_back( ((source->imageData + source->widthStep*y))[x] );
}
}
//nth_element( pixelValues.begin() , pixelValues.begin() + (pixelValues.size()/2) , pixelValues.end() );
int median = *(pixelValues.begin() + (pixelValues.size()/2));
((outputImage->imageData + source->widthStep*j))[i] = median;
}
}
return outputImage;
}*/

// Applies a square median filter to `source` and returns the result as a
// freshly allocated image of the same size/depth/channels. Even window
// sizes are rounded up to the next odd value, as cvSmooth's CV_MEDIAN mode
// requires an odd aperture. The caller owns (and must release) the result.
static IplImage* medianFilter( IplImage *source , int windowSize ){ 

	// cvSmooth needs an odd aperture; bump even sizes up by one
	const int aperture = (windowSize % 2 == 0) ? windowSize + 1 : windowSize;

	IplImage *filtered = cvCreateImage(cvGetSize(source), source->depth, source->nChannels);
	cvSmooth( source, filtered, CV_MEDIAN, aperture );
	return filtered;
}