#include "HandTracker.hpp"
#include "HiVisualiser.hpp"

#include <iostream>
#include "HIGeom-inl.hpp"

//To activate visualisation: define VISUALISE

using namespace cv;
using namespace std;
using namespace hi;

// Constructs the tracker state; feature-selection parameters and rejection
// thresholds are hard-coded here (see //FULLTEST notes — values still being tuned).
HandTracker::HandTracker( float meanshiftWeightFactor, float shortDistIgnoreThreshold, Rect trackingRect,
						 Ptr< Segmenter > segmenter, float measurementNoiseCovScalar )
: meanshiftWeightFactor(meanshiftWeightFactor), shortDistIgnoreThreshold( shortDistIgnoreThreshold ),
trackingRect( trackingRect ), segmenter( segmenter ), measurementNoiseCovScalar( measurementNoiseCovScalar ) {
	
	// -- goodFeaturesToTrack parameters -- //
	//FIX : Test these values //FULLTEST
	maxCorners = 200;        // upper bound on tracked corners
	minCornersToTrack = 100; // below this count, track() re-runs corner selection
	qualityLevel = 0.01;     // (stray duplicate semicolon removed)
	minDistance = 3;
	
	//SET THRESHOLDS FOR MEASUREMENTS //FULLTEST
	
	// Corners whose KLT tracking error exceeds this are rejected in phase one.
	cornerReuseErrorThreshold = 2000;
	
	//FULLTEST ?
	//FIX: Should be scaled according to size of hand? Or at least size of image.
	// Compared against squared per-frame acceleration (pixels^2).
	accelerationIgnoreThreshold = 4000.0;//VERY GOOD?: 2500.0; //NOTE: To ignore extreme measurements.
}

// Factory: builds a HandTracker and seeds its corner set and Kalman filter
// from the initial frame. Returns an owning pointer as the abstract interface.
AbstractHandTracker* HandTracker::init(
											  float meanshiftWeightFactor, float processNoiseCovScalar,
											  float measurementNoiseCovScalar, float shortDistIgnoreThreshold,
											  Rect trackingRect, Mat& initImage, Ptr< Segmenter > segmenter ) {
	
	HandTracker* tracker = new HandTracker( meanshiftWeightFactor, shortDistIgnoreThreshold,
										   trackingRect, segmenter, measurementNoiseCovScalar );
	tracker->initCornersAndKF( processNoiseCovScalar, initImage );
	
	return tracker;
}


// Convenience overload: fixes the measurement noise covariance scalar to 1,
// so the given ratio acts directly as the process noise covariance scalar.
AbstractHandTracker* HandTracker::init(
									   float meanshiftWeightFactor, float processToMeasurementNoiseRatio, float shortDistIgnoreThreshold,
									   Rect trackingRect, Mat& initImage, Ptr< Segmenter > segmenter ) {
	
	const float measurementNoiseCov = 1;
	return init( meanshiftWeightFactor, processToMeasurementNoiseRatio, measurementNoiseCov,
				shortDistIgnoreThreshold, trackingRect, initImage, segmenter );
}

// One-time setup called from the init() factories: prepares the two grayscale
// frame buffers, selects the initial corner set, and initialises the 2D
// position Kalman filter centred on the tracking rect.
void HandTracker::initCornersAndKF( float processNoiseCovScalar, cv::Mat& initImage ) {
	// imgPtr1/imgPtr2 alias members imgA/imgB; track() swaps them each frame.
	imgPtr1 = &imgA;
	imgPtr2 = &imgB;
	
	// Grayscale copy of the first frame lands in imgB (= *imgPtr2), which
	// addMoreCorners() below reads from.
	cvtColor( initImage, imgB, CV_BGR2GRAY );
	
	Mat handImg( initImage, trackingRect ); //get hand image for segmentation
	segmenter->segment( handImg ); //segment
	addMoreCorners( corners ); //adds initial corners to track (needs segmenter and imgPtr2)
	
	positionKF.init( 2, 2 ); //Two "dynamParams": (x,y). Two "measureParams", also (x,y).
	
	// Identity transition and measurement matrices: the state is simply the
	// measured (x,y) position, with no velocity model.
	setIdentity( positionKF.transitionMatrix );
	setIdentity( positionKF.measurementMatrix );
	
	setIdentity( positionKF.processNoiseCov, Scalar::all(processNoiseCovScalar) );
	setIdentity( positionKF.measurementNoiseCov, Scalar::all(measurementNoiseCovScalar) );
	setIdentity( positionKF.errorCovPost, Scalar::all(1) );
	
	// Seed both pre- and post-states at the centre of the tracking rect.
	positionKF.statePre = (Mat_<float>(2, 1) << (trackingRect.x + trackingRect.width/2), (trackingRect.y + trackingRect.height/2));
	positionKF.statePost = (Mat_<float>(2, 1) << (trackingRect.x + trackingRect.width/2), (trackingRect.y + trackingRect.height/2));
}

// Tracks without a forbidden region: forwards a placeholder rect that the
// three-argument overload is told (via the flag) to ignore.
Rect HandTracker::track( cv::Mat& nextImage ) {
	Rect placeholder( -1, -1, -1, -1 );
	return track( nextImage, placeholder, false );
}

// Tracks while excluding forbiddenRect from segmentation: delegates with the
// forbidden-rect flag enabled.
Rect HandTracker::track( cv::Mat& nextImage, cv::Rect& forbiddenRect ) {
	const bool useForbiddenRect = true;
	return track( nextImage, forbiddenRect, useForbiddenRect );
}

// Main per-frame tracking step:
//   1. KLT-tracks the current corner set into the new frame,
//   2. phase one: rejects corners by error/acceleration/short-distance and
//      updates the Kalman filter + tracking rect,
//   3. re-segments the hand at the new rect position,
//   4. phase two: keeps only corners inside the rect / on the new contour and
//      updates the filter + rect again,
//   5. tops up the corner set if too few survive.
// Returns the updated tracking rect. If useForbiddenRect is false,
// forbiddenRect is ignored (the two-argument overloads set this up).
Rect HandTracker::track( cv::Mat& nextImage, cv::Rect& forbiddenRect, bool useForbiddenRect ) {
	// -- swap pointers to prepare for next image -- //
	// *imgPtr1 becomes the previous frame; *imgPtr2 will hold the new one.
	Mat* imgPtrTmp = imgPtr1;
	imgPtr1 = imgPtr2;
	imgPtr2 = imgPtrTmp;
	
	//  -- convert image to GRAY -- //
	cvtColor( nextImage, *imgPtr2, CV_BGR2GRAY );
	
	// -- track features -- //
	vector<cv::Point2f> nextCorners;
	vector<uchar> cornersFound;   // per-corner success flag from LK
	vector<float> cornerErrors;   // per-corner tracking error from LK
	
	calcOpticalFlowPyrLK( *imgPtr1, *imgPtr2, corners, nextCorners, cornersFound, cornerErrors );

	//FIX test / tweak, use derivLambda? how many levels? etc.
	
#ifdef VISUALISE
	//VISUALISATION : KLT tracking lines
	{		
		Mat imgToDraw = HiVisualiser::windowMatrixMap["handtracker"];
		
		Scalar green( 0, 255, 0 );
		for( int j=0; j<nextCorners.size(); j++ ) {
			if (cornersFound[j] == 0) { //TODO also discard if error is very high?
				//corner not found
				continue;
			}
			
			// Draw the motion of each successfully tracked corner.
			Point p0(
					 cvRound( corners[j].x ),
					 cvRound( corners[j].y )
					 );
			Point p1(
					 cvRound( nextCorners[j].x ),
					 cvRound( nextCorners[j].y )
					 );
			line( imgToDraw, p0, p1, green );
		}
	}
#endif
	
	// -- phase one update and rejection -- //
		//rejects corners based on acceleration and short dist thresholds
		//updates Kalman Filter and tracking rect based on accepted points 
	vector<cv::Point2f> phaseOneAcceptedCorners;
	Size frameSize = nextImage.size();
	phaseOneUpdateAndRejection( frameSize, corners, nextCorners, cornersFound, cornerErrors, phaseOneAcceptedCorners );
	
	// -- phase two update and rejection -- //
		//rejects corners based on new tracking rect location and on new segmentation
		//updates Kalman Filter and tracking rect based on accepted points
	
	//segment
	//segmentation uses BGR
	//TODO switch to HSV? : //Mat handImg( *imgPtr2, trackingRect ); 
	//Requires refactoring segmenter slightly. Which may also mean refactoring GestureClassifiers slightly 
	// NOTE: trackingRect may have moved in phase one, so this crops the NEW location.
	Mat handImg( nextImage, trackingRect ); //get hand image for segmentation
	
	if ( useForbiddenRect ) {
		segmenter->segment( handImg, forbiddenRect );
	} else {
		segmenter->segment( handImg );
	}

#ifdef VISUALISE
	//VISUALISATION : segmentation mask
	{
		if (segmenter->didSegmentation()) {
			Mat imgToDraw = HiVisualiser::windowMatrixMap["handtracker"];
			//mask for LK features
			Mat maskInBGR;
			
			cvtColor( segmenter->getContourImage(), maskInBGR, CV_GRAY2BGR );
			
			// Blend the mask (30%) into the tracking-rect region of the debug image.
			Mat imgToDraw_handRect( imgToDraw, trackingRect );
			
			imgToDraw_handRect = imgToDraw_handRect + ( 0.3 * maskInBGR );
		}		
	}
#endif
	
	
	phaseTwoUpdateAndRejection( frameSize, phaseOneAcceptedCorners, corners );
		//Clears vector "corners", then
		//fills phase two accepted corners into vector
	
	if ( corners.size() < minCornersToTrack ) {	
		// -- corner (a.k.a. feature) selection -- //
		//if too few corners are left, then select more
		
		//repeat segmentation because of recentering
		// (phase two may have moved trackingRect again since the segmentation above)
		Mat handImg2( nextImage, trackingRect ); //get hand image for segmentation
		if ( useForbiddenRect ) {
			segmenter->segment( handImg2, forbiddenRect );
		} else {
			segmenter->segment( handImg2 );
		}
		
		addMoreCorners( corners ); //gets img from imgPtr2
	}
		
	return trackingRect;
}

// Finds up to (maxCorners - corners.size()) new features inside the tracking
// rect of the current grayscale frame (*imgPtr2) and appends them to corners
// in full-image coordinates. When the segmenter produced a contour mask, the
// search is restricted to the segmented foreground.
void HandTracker::addMoreCorners( vector<cv::Point2f>& corners ) {
	// Cast before subtracting: "maxCorners - corners.size()" would be an
	// unsigned subtraction, wrapping to a huge value whenever
	// corners.size() > maxCorners, instead of going negative.
	int featureFindCount = maxCorners - (int) corners.size();
	if ( featureFindCount <= 0 ) {
		return; //already tracking enough corners
	}
	
	vector<cv::Point2f> newCorners;
	
	//NOTE: goodFeaturesToTrack uses grayscale, which we have already computed in imgPtr2
	Mat grayHandImg( *imgPtr2, trackingRect );
	
	if (segmenter->didSegmentation()) {
		// Contour image acts as a mask: only search on the segmented hand.
		goodFeaturesToTrack( grayHandImg, newCorners, featureFindCount, qualityLevel, minDistance, segmenter->getContourImage());
	} else {
		goodFeaturesToTrack( grayHandImg, newCorners, featureFindCount, qualityLevel, minDistance );
	}
	
	//Change frame of reference for corners to be the full image
	for( size_t i = 0; i < newCorners.size(); i++ ) {
		corners.push_back( Point2f( newCorners[i].x + trackingRect.x, newCorners[i].y + trackingRect.y ) );
	}
}

// Phase two of per-frame corner rejection. Keeps only the phase-one-accepted
// corners that lie inside the (already recentred) tracking rect and — when a
// segmentation exists — on the hand foreground mask. The mean of the kept
// corners is fed to the Kalman filter as one aggregate measurement, weighted
// by how many corners survived, and the tracking rect is recentred on the
// filtered state. outputCorners is cleared and refilled with the survivors.
void HandTracker::phaseTwoUpdateAndRejection( cv::Size& frameSize, vector<cv::Point2f>& inputCorners, vector<cv::Point2f>& outputCorners ) {
	outputCorners.clear();
	
	// Input corners are those accepted in phase one
	// Of these, we keep only those that lie on the newly computed hand contour
	
	// Running sums for the mean position of the accepted corners.
	float reusedCornersSumX = 0;
	float reusedCornersSumY = 0;
	
	for( int i=0; i<inputCorners.size(); i++ ) {
		Point corner(
					 cvRound( inputCorners[i].x ),
					 cvRound( inputCorners[i].y )
					 );
		
		// Accept if inside the tracking rect AND (no segmentation available
		// OR the corner falls on foreground in rect-local coordinates).
		if ( trackingRect.x <= corner.x && corner.x < trackingRect.x + trackingRect.width &&
			 trackingRect.y <= corner.y && corner.y < trackingRect.y + trackingRect.height
			 &&
			 ( !segmenter->didSegmentation() ||
			  segmenter->inForeground( corner.x  - trackingRect.x, corner.y  - trackingRect.y )
			 )
			//NOTE: if segmentation not done (contour too small), then the mask is not used.
			) { 
			
			reusedCornersSumX += inputCorners[i].x;
			reusedCornersSumY += inputCorners[i].y;
			
			outputCorners.push_back( inputCorners[i] );
		}
	} 
	
	if (outputCorners.size() > 0) {
		// Mean position of all accepted corners as a single (x,y) measurement.
		Mat measurement = ( Mat_<float>(2, 1)
						   << ( reusedCornersSumX / (float) (outputCorners.size()) ),
						   ( reusedCornersSumY / (float) (outputCorners.size()) ) );
		
		
		// Fewer surviving corners => larger noise factor => this measurement
		// is trusted less. meanshiftWeightFactor scales the overall trust.
		float meanshiftNoiseFactor = (float) minCornersToTrack / (float) (outputCorners.size()) / meanshiftWeightFactor; //TODO what factor? Test this
		
		positionKF.measurementNoiseCov *= meanshiftNoiseFactor; //Modifies precision according to number of corners tracked! 
		
		positionKF.correct( measurement );
		positionKF.predict();
		
		setIdentity( positionKF.measurementNoiseCov, Scalar::all(measurementNoiseCovScalar) ); //Reset precision so it's correct for later individual measurements
		
		// Recentre the tracking rect on the corrected filter state.
		trackingRect.x = cvRound( positionKF.statePost.at<float>(0) ) - trackingRect.width/2;
		trackingRect.y = cvRound( positionKF.statePost.at<float>(1) ) - trackingRect.height/2;
		
		//forcing rectangle to stay inside frame
		forceInside( frameSize, trackingRect );
	}
	
#ifdef VISUALISE
	//VISUALISATION : point acceptance markers
	{		
		Mat imgToDraw = HiVisualiser::windowMatrixMap["handtracker"];
		Scalar green( 0, 255, 0 );
		
		for( int j=0; j<outputCorners.size(); j++ ) {		
			Point corner(
						 cvRound( outputCorners[j].x ),
						 cvRound( outputCorners[j].y )
						 );
			
			circle( imgToDraw, corner, 3, green, 1 );
		}
	}
#endif
}

// Phase one of per-frame corner rejection. Each LK-tracked corner is rejected
// if its tracking error is too high, its apparent acceleration (relative to
// the rect's previous displacement) is too high, or its displacement is too
// small (all thresholds are in squared pixels, set in the constructor). Each
// ACCEPTED corner's displacement is applied to the filter's previous state
// and fed to the Kalman filter as an individual measurement; the tracking
// rect is then recentred on the filtered state.
void HandTracker::phaseOneUpdateAndRejection( cv::Size& frameSize, vector<cv::Point2f>& corners, vector<cv::Point2f>& nextCorners,
											 vector<uchar>& cornersFound, vector<float>& cornerErrors, vector<cv::Point2f>& phaseOneAcceptedCorners ) {

	// -- determine displacement of tracking rect and reject bad corners -- //
	
	//calculate speed and acceleration
	// prevDX/prevDY: the rect's displacement over the previous frame, used as
	// the expected per-corner displacement below.
	// NOTE(review): prevX/prevY appear to be members not set before the first
	// call — first-frame values may be arbitrary; verify initialisation.
	float prevDX = trackingRect.x - prevX;
	float prevDY = trackingRect.y - prevY;
	
	prevX = trackingRect.x;
	prevY = trackingRect.y;
	
	// Filter state before this frame's corrections; each accepted corner's
	// displacement is added to THIS baseline (not the evolving state).
	double startX = positionKF.statePost.at<float>(0);
	double startY = positionKF.statePost.at<float>(1);
	
	for( int j=0; j<corners.size(); j++ ) {
		if (cornersFound[j] == 0) {
			// LK lost this corner; skip it.
			continue;
		}
		
		float cornerDX = nextCorners[j].x  - corners[j].x;
		float cornerDY = nextCorners[j].y - corners[j].y;
		
		float cornerDisplacementSquared = cornerDX*cornerDX + cornerDY*cornerDY; 
		
		// "Acceleration": deviation of this corner's displacement from the
		// rect's previous displacement.
		float cornerD2X = cornerDX - prevDX;
		float cornerD2Y = cornerDY - prevDY;
		
		float cornerAccelerationSquared = cornerD2X*cornerD2X + cornerD2Y*cornerD2Y;
		
		if ( cornerErrors[j] < cornerReuseErrorThreshold &&
			 cornerAccelerationSquared < accelerationIgnoreThreshold &&
			 cornerDisplacementSquared > shortDistIgnoreThreshold //NOTE: Opposite inequality to the others
			) {

			phaseOneAcceptedCorners.push_back(nextCorners[j]);
			
			// Measurement = pre-frame filter position shifted by this
			// corner's displacement; one correct/predict cycle per corner.
			Mat positionMeasurement = (Mat_<float>(2, 1) << (startX + cornerDX), (startY + cornerDY) );
			positionKF.correct( positionMeasurement );
			positionKF.predict(); //TODO optimise by merging updates while adjusting uncertainty?
		}
		
#ifdef VISUALISE
		//VISUALISATION : point rejection / acceptance markers
		// red thick circle = high error, red rectangle = high acceleration,
		// red thin circle = too-short displacement, yellow circle = accepted.
		{		
			Mat imgToDraw = HiVisualiser::windowMatrixMap["handtracker"];
			Scalar red( 0, 0, 255 );
			Scalar yellow( 0, 255, 255 );
			
			Point corner(
						 cvRound( nextCorners[j].x ),
						 cvRound( nextCorners[j].y )
						 );
			Point tlCorner( corner.x - 2, corner.y - 2 ); //WARNING: could crash near edges
			Point brCorner( corner.x + 2, corner.y + 2 );
			
			if ( cornerErrors[j] > cornerReuseErrorThreshold ) {//unexpected
				circle( imgToDraw, corner, 3, red, 3 );
			} else if ( cornerAccelerationSquared > accelerationIgnoreThreshold ) {
				rectangle( imgToDraw, brCorner, tlCorner, red, 1 );
			} else if ( cornerDisplacementSquared < shortDistIgnoreThreshold ) {
				circle( imgToDraw, corner, 3, red, 1 );
			} else { //accepted
				circle( imgToDraw, corner, 3, yellow, 1 );
			}
		}
#endif
		
		
	}
	
	if ( phaseOneAcceptedCorners.size() > 0 ) {
		
		// Recentre the tracking rect on the corrected filter state.
		trackingRect.x = cvRound( positionKF.statePost.at<float>(0) ) - trackingRect.width/2;
		trackingRect.y = cvRound( positionKF.statePost.at<float>(1) ) - trackingRect.height/2;
		
		//forcing rectangle to stay inside frame
		forceInside( frameSize, trackingRect );
	}
	
}

