#include "HandTrackerLKHSVCD.hpp"

#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

// Constructor: stores the tracking configuration and initialises the
// feature-selection parameters and outlier thresholds to their tuned defaults.
HandTrackerLKHSVCD::HandTrackerLKHSVCD( bool useHSV, float meanshiftWeightFactor, float shortDistIgnoreThreshold, Rect trackingRect, Ptr< GeometricGestureClassifier > gClassifier, float measurementNoiseCovScalar )
: useHSV(useHSV), meanshiftWeightFactor(meanshiftWeightFactor), shortDistIgnoreThreshold( shortDistIgnoreThreshold ), trackingRect( trackingRect ), gClassifier( gClassifier ), measurementNoiseCovScalar( measurementNoiseCovScalar ) {
	
	// Parameters for goodFeaturesToTrack.
	//FIX : Test these values //FULLTEST
	maxCorners = 200;        // upper bound on corners requested per frame
	minCornersToTrack = 100; // below this many reused corners, fresh features are searched for
	qualityLevel = 0.01;     // minimal accepted corner quality (fraction of best corner); was "0.01;;" -- stray empty statement removed
	minDistance = 3;         // minimal distance between returned corners (pixels)
	
	//SET THRESHOLDS FOR MEASUREMENTS
	
	// LK tracking error above which a corner is not kept for reuse next frame.
	cornerReuseErrorThreshold = 2000;
	
	//FIXNOW: Should be scaled according to size of hand? Or at least size of image.
	// Squared per-frame acceleration above which a corner measurement is dropped as an outlier.
	accelerationIgnoreThreshold = 4000.0;//VERY GOOD?: 2500.0; //NOTE: To ignore extreme measurements.
}

// Factory: builds a tracker, seeds its "current frame" buffer from initImage
// and configures a constant-position Kalman filter centred on trackingRect.
// Ownership of the returned tracker passes to the caller.
AbstractHandTracker* HandTrackerLKHSVCD::init( bool	useHSV,
											  float meanshiftWeightFactor, float processNoiseCovScalar,
											  float measurementNoiseCovScalar, float shortDistIgnoreThreshold,
											  Rect trackingRect, Mat& initImage, Ptr< GeometricGestureClassifier > gClassifier ) {

	HandTrackerLKHSVCD* tracker = new HandTrackerLKHSVCD( useHSV, meanshiftWeightFactor, shortDistIgnoreThreshold, trackingRect, gClassifier, measurementNoiseCovScalar  );
	
	// Double-buffered frames: imgPtr1 will be the previous frame, imgPtr2 the current one.
	tracker->imgPtr1 = &(tracker->imgA);
	tracker->imgPtr2 = &(tracker->imgB);
	
	// Seed the "current" buffer with the initial frame in the chosen working colour space.
	const int colourConversion = useHSV ? CV_BGR2HSV : CV_BGR2GRAY;
	cvtColor( initImage, tracker->imgB, colourConversion );
	
	// Kalman filter over position only: two dynamic params (x,y), two measured params (x,y).
	tracker->positionKF.init( 2, 2 );
	
	setIdentity( tracker->positionKF.transitionMatrix );
	setIdentity( tracker->positionKF.measurementMatrix );
	
	//FIXNOW REVISE VALUES
	setIdentity( tracker->positionKF.processNoiseCov, Scalar::all(processNoiseCovScalar) );
	setIdentity( tracker->positionKF.measurementNoiseCov, Scalar::all(measurementNoiseCovScalar) );
	setIdentity( tracker->positionKF.errorCovPost, Scalar::all(1) );
	
	// Start the filter state at the centre of the initial tracking rectangle
	// (integer division, matching Rect's integer coordinates).
	const int centreX = trackingRect.x + trackingRect.width/2;
	const int centreY = trackingRect.y + trackingRect.height/2;
	tracker->positionKF.statePre  = (Mat_<float>(2, 1) << centreX, centreY);
	tracker->positionKF.statePost = (Mat_<float>(2, 1) << centreX, centreY);
	
	// -- visualisation -- //
	namedWindow( "trackingTest", 1 );
	
	return tracker;
}


// One tracking step over nextImage (BGR). Pipeline:
//  1. convert the frame to the working colour space and crop the feature ROI,
//  2. run the geometric classifier to obtain a hand-contour mask,
//  3. keep last frame's good corners that still lie on the contour ("reused"
//     corners) and feed their mean position to the Kalman filter,
//  4. if too few corners survive, top up with fresh goodFeaturesToTrack corners,
//  5. track all corners with pyramidal Lucas-Kanade and feed each non-outlier
//     displacement to the Kalman filter,
//  6. recentre trackingRect on the filtered position.
// Returns the updated tracking rectangle (also stored in this->trackingRect).
Rect HandTrackerLKHSVCD::track(cv::Mat& nextImage ) {
	
	//VISUALISATION //TODO RAT : for visualisation only!
	Mat imgToDraw;
	nextImage.copyTo( imgToDraw );
	
	Rect oldTrackingRect(trackingRect);
	//VISUALISATION -- draw older tracking rects //TODO RAT
	rectangle( imgToDraw, Point( oldTrackingRect.x, oldTrackingRect.y ), Point( oldTrackingRect.x + oldTrackingRect.width, oldTrackingRect.y + oldTrackingRect.height ),
			  Scalar( 0, 0, 255) ); //NOTE: oldTrackingRect is used later for more visualisation
	
	//FIXED: was uninitialised -- it is read in the visualisation loop below even
	//when no fresh features are searched for (i.e. enough corners were reused).
	int countOfReusedCorners = 0;
	
	
	// -- swap pointers to prepare for next image -- //
	// After the swap, imgPtr1 is the previous frame and imgPtr2 receives the current one.
	Mat* imgPtrTmp = imgPtr1;
	imgPtr1 = imgPtr2;
	imgPtr2 = imgPtrTmp;
	
	Mat featureImg;
	
	if ( useHSV ) {
		//  -- convert image to HSV -- //
		cvtColor( nextImage, *imgPtr2, CV_BGR2HSV );
		
		// -- set ROI for feature selection -- //
		Mat imgGrey; //TODO goodFeaturesToTrack only runs on grey? 
		cvtColor( nextImage, imgGrey, CV_BGR2GRAY );
		featureImg = Mat( imgGrey, trackingRect );
		
	} else {
		//  -- convert image to GRAY -- //
		cvtColor( nextImage, *imgPtr2, CV_BGR2GRAY );
		
		// -- set ROI for feature selection -- //
		featureImg = Mat( *imgPtr2, trackingRect );
	}
	
// -- run classifier -- //
	
	// get hand image for classifier
	Mat handImg( nextImage, trackingRect );
	
	bool classified = gClassifier->classify( handImg );
	
	// Contour mask in trackingRect-local coordinates (non-zero on the hand contour).
	Mat contourImage = gClassifier->getContourImage();
	
// -- Decide on corners to reuse -- //
	
	//NOTE: also generate meanshift image
	
	float reusedCornersSumX = 0;
	float reusedCornersSumY = 0;
	
	// "Step 1 of reuse" 
	// These are points we previously tracked with error below the threshold cornerReuseErrorThreshold.
	// Of these, we keep only those that lie on the newly computed hand contour.
	
	if (classified) { //TODO only on this condition, yeah?
		for( int i=0; i<potentiallyReusedCorners.size(); i++ ) {
			Point corner(
						 cvRound( potentiallyReusedCorners[i].x ),
						 cvRound( potentiallyReusedCorners[i].y )
						 );

			if ( trackingRect.x <= corner.x && corner.x < trackingRect.x + trackingRect.width &&
				 trackingRect.y <= corner.y && corner.y < trackingRect.y + trackingRect.height
				&&
				contourImage.at<uchar>( corner.y  - trackingRect.y, corner.x  - trackingRect.x ) != 0
				) { //WARNING: since this is matrix index notation, rows go before columns, and hence y before x.
				
				reusedCornersSumX += potentiallyReusedCorners[i].x;
				reusedCornersSumY += potentiallyReusedCorners[i].y;
				
				reusedCorners.push_back( potentiallyReusedCorners[i] );
				
				circle(imgToDraw, corner, 1, Scalar(255, 0, 0), 2);
			}
		} 
	}//TODO update to take advantage of memory continuity?
	potentiallyReusedCorners.clear(); //done with these for now. Prepare for new corners

// -- position update from reused corners -- //
	// Use the mean of the reused corners as one Kalman measurement, with its
	// noise scaled up when few corners survived.
	if ( reusedCorners.size() > 0 ) { //TODO RAT
		
		
		//VISUALISATION //TODO RAT
		circle(imgToDraw, Point( cvRound( reusedCornersSumX / (float) (reusedCorners.size()) ), cvRound ( reusedCornersSumY / (float) (reusedCorners.size()) ) ), 
										 
										 1, Scalar(0, 0, 255), 2);
		
		//FIXNOW does that make sense?
		Mat measurement = ( Mat_<float>(2, 1)
						   << ( reusedCornersSumX / (float) (reusedCorners.size()) ),
						      ( reusedCornersSumY / (float) (reusedCorners.size()) ) );
		
		
		float meanshiftNoiseFactor = (float) minCornersToTrack / (float) (reusedCorners.size()) / meanshiftWeightFactor; //FIXNOW what factor? Test this
		
		positionKF.measurementNoiseCov *= meanshiftNoiseFactor; //Modify precision according to number of corners tracked! 
		//FIX semantics wrong? Would want precision to depend on number of corners still in view after meanshift
		
		positionKF.correct( measurement );
		positionKF.predict(); //FIXNOWs is that necessary each time?
		
		setIdentity( positionKF.measurementNoiseCov, Scalar::all(measurementNoiseCovScalar) ); //Reset precision so it's correct for later individual measurements
		
		trackingRect.x = cvRound( positionKF.statePost.at<float>(0) ) - trackingRect.width/2;
		trackingRect.y = cvRound( positionKF.statePost.at<float>(1) ) - trackingRect.height/2;
		
		//forcing rectangle to stay inside frame  //TODO is this necessary?
		forceInside( nextImage.size(), trackingRect );
	}
	
// -- select features (corners) to track -- //
	corners.clear(); //Prepare for new corners.
	if ( reusedCorners.size() < minCornersToTrack ) {
		int featureFindCount = maxCorners - reusedCorners.size();
		
		// When the classifier succeeded, restrict the search to the contour mask.
		if (classified) {
			goodFeaturesToTrack( featureImg, corners, featureFindCount, qualityLevel, minDistance, gClassifier->getContourImage());
		} else {
			goodFeaturesToTrack( featureImg, corners, featureFindCount, qualityLevel, minDistance );
		}
		//FIXNOW what to do on track fail? Give up?
		//FIX dilate mask?
		
		//Change frame of reference for corners to be the full image
		for( int i=0; i<corners.size(); i++) {
			corners[i].x += trackingRect.x;
			corners[i].y += trackingRect.y;
		} //TODO update to take advantage of memory continuity?
	}
	
	//FIXED: record the reused-corner count unconditionally (it was only set when
	//fresh features were searched for, leaving it uninitialised otherwise).
	countOfReusedCorners = (int) reusedCorners.size(); //VISUALISATION //TODO RAT : for visualisation only!
	
	//TODO RAT
	// Put the reused corners at the front of `corners`, followed by the fresh ones.
	corners.swap(reusedCorners);
	for( int i=0; i<reusedCorners.size(); i++) {
		corners.push_back( reusedCorners[i] );
	} //Now we've put the reused corners at the front
	
	reusedCorners.clear();//done with these for now. Prepare for new corners
	
// -- track features -- //
	
	calcOpticalFlowPyrLK( *imgPtr1, *imgPtr2, corners, nextCorners, cornersFound, cornerErrors );
	//FIX test / tweak, use derivLambda? how many levels? etc.
	
// -- determine displacement of tracking rect -- //
	//TODO: Explain the strange fact that past frame tracking really finishes just above here.
	
	//calculate speed and acceleration
	float prevDX = trackingRect.x - prevX;
	float prevDY = trackingRect.y - prevY;
	
	prevX = trackingRect.x;
	prevY = trackingRect.y;
	
	int trackedCornerCount = 0;

	//FIXED: replaced non-standard variable-length array with std::vector.
	//Entries default to false; only entries for found corners are read below.
	vector<bool> extremeMeasurementIgnore( corners.size(), false );
	
	double startX = positionKF.statePost.at<float>(0);
	double startY = positionKF.statePost.at<float>(1);

	
	for( int j=0; j<corners.size(); j++ ) {
		if (cornersFound[j] == 0) { //TODO also discard if error is high?
			//cout << "corner " << j << " NOT FOUND" << endl;
			//corner not found
			continue;
		}
		

		float cornerDX = nextCorners[j].x  - corners[j].x;
		float cornerDY = nextCorners[j].y - corners[j].y;
		
		float cornerDisplacementSquared = cornerDX*cornerDX + cornerDY*cornerDY; 
		
		float cornerD2X = cornerDX - prevDX;
		float cornerD2Y = cornerDY - prevDY;
		
		float cornerAccelerationSquared = cornerD2X*cornerD2X + cornerD2Y*cornerD2Y;
		
		// Ignore outliers: high LK error, implausible acceleration, or movement
		// too small to be informative.
		if ( cornerReuseErrorThreshold < cornerErrors[j] || //FIX TEST?
			 accelerationIgnoreThreshold < cornerAccelerationSquared ||
			cornerDisplacementSquared < shortDistIgnoreThreshold ) { //NOTE: opposite way round to the other checks
			extremeMeasurementIgnore[j] = true;
		} else {
			extremeMeasurementIgnore[j] = false;
			
			if ( cornerErrors[j] < cornerReuseErrorThreshold ) {
				potentiallyReusedCorners.push_back(nextCorners[j]);
			}
			
			// Feed this corner's implied hand position into the Kalman filter.
			Mat positionMeasurement = (Mat_<float>(2, 1) << (startX+cornerDX), (startY + cornerDY) );
			positionKF.correct( positionMeasurement );
			positionKF.predict(); //FIXNOWs is that necessary each time?
			
			trackedCornerCount++;
			
		} //FIXNOW: only ignore if some points do move?
	}
	
	if ( trackedCornerCount > 0 ) {
		
		trackingRect.x = cvRound( positionKF.statePost.at<float>(0) ) - trackingRect.width/2;
		trackingRect.y = cvRound( positionKF.statePost.at<float>(1) ) - trackingRect.height/2;
		
		//forcing rectangle to stay inside frame
		forceInside( nextImage.size(), trackingRect );
	}
	
	// -- visualisation -- //  //VISUALISATION
	{
		
		//Tracking rect
		rectangle( imgToDraw, Point( trackingRect.x, trackingRect.y ), Point( trackingRect.x + trackingRect.width, trackingRect.y + trackingRect.height ),
				  Scalar(0,255,0) );
		
		if (classified) {
			//mask for LK features
			Mat maskInBGR;
			
			cvtColor( gClassifier->getContourImage(), maskInBGR, CV_GRAY2BGR );
			
			Mat imgToDraw_handRect( imgToDraw, oldTrackingRect );
			
			imgToDraw_handRect = imgToDraw_handRect + ( 0.3 * maskInBGR );
		}
		
		
		
		for( int j=0; j<corners.size(); j++ ) {
			if (cornersFound[j] == 0) { //TODO also discard if error is very high?
				//cout << "corner " << j << " REUSED NOT FOUND" << endl;
				//corner not found
				continue;
			}
			
			Scalar colour;
			
			// red = ignored outlier, blue = reused corner, green = fresh corner
			if (extremeMeasurementIgnore[j]) {
				colour = Scalar( 0, 0, 255 ); // Scalar( 0, 0, 0 );
				
			} else if (j < countOfReusedCorners) {
				//colour = Scalar( 55 * ( 600 - cornerErrors[j] ) / 600, 200 + 55 * ( 600 - cornerErrors[j] ) / 600, 55 * ( 600 - cornerErrors[j] ) / 600);
				colour = Scalar( 255, 0, 0 ); //colour = Scalar( 200 + 55 * ( 600 - cornerErrors[j] ) / 600, 55 * ( 600 - cornerErrors[j] ) / 600, 55 * ( 600 - cornerErrors[j] ) / 600);
			} else {
				colour = Scalar( 0, 255, 0 );
			}
			
			Point p0(
					 cvRound( corners[j].x ),
					 cvRound( corners[j].y )
					 );
			Point p1(
					 cvRound( nextCorners[j].x ),
					 cvRound( nextCorners[j].y )
					 );
			line( imgToDraw, p0, p1, colour ); //draw by thickness?
		}
		
		imshow( "trackingTest", imgToDraw );
	}
	
	return trackingRect;
}
								   
								   
								   
// Clamp innerRect so it lies entirely inside outerFrame, axis by axis.
// Assumes innerRect is no larger than outerFrame; if it is larger, the
// second (upper-bound) clamp wins and the rect is pushed to a negative offset.
void HandTrackerLKHSVCD::forceInside( const Size& outerFrame, Rect& innerRect ) {
	const int maxX = outerFrame.width  - innerRect.width;
	const int maxY = outerFrame.height - innerRect.height;
	
	if ( innerRect.x < 0 )    innerRect.x = 0;
	if ( innerRect.x > maxX ) innerRect.x = maxX;
	
	if ( innerRect.y < 0 )    innerRect.y = 0;
	if ( innerRect.y > maxY ) innerRect.y = maxY;
}



// based on code from camshift.cpp
//TODO reuse code from handLocaliser? -- make static

Point2f HandTrackerLKHSVCD::initialLocationGuess( const Mat& probImage ) {
	// Returns the centroid (centre of mass) of a single-channel probability
	// image, computed from its zeroth and first image moments via the legacy
	// C moments API (pattern taken from OpenCV's camshift.cpp).
	// NOTE(review): if probImage is all zeros, m00 == 0 and inv_sqrt_m00 stays
	// 0, so (0, 0) is returned -- presumably acceptable; confirm with callers.
	
	//FIX investigate correct Matrix conversion
	
	// Legacy C-API header view onto the same pixel data (no copy).
	CvMat _probImage = probImage;
	
	CvMoments moments;
	CvMat  stub, *mat = (CvMat*) &_probImage;
	//stub is used as a tmp buffer just for the copying of imgProb header info to mat
	
	moments.m00 = moments.m10 = moments.m01 = 0;
	
	mat = cvGetMat( mat, &stub );

	//TODO RAT?
	// A centroid is only meaningful for a single-channel probability map.
	if( CV_MAT_CN( mat->type ) > 1 )
		CV_Error(CV_BadNumChannels, "Too many channels in input image to initialLocationGuess function in handLocaliser. Expected 1 channel only.");

	double inv_m00;
	
	cvMoments( mat, &moments );
	
	// cvMoments fills inv_sqrt_m00 = 1/sqrt(m00); squaring gives 1/m00.
	inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;
	
	// Centroid: (m10/m00, m01/m00).
	return Point2f( moments.m10 * inv_m00, moments.m01 * inv_m00 );
}



