#include "HandTrackerLKHSVCD.hpp"

#include <iostream>

using namespace cv;
using namespace std;

// Constructs the tracker with its configuration only; the heavy set-up
// (Kalman filter, image buffers, display windows) is done in init().
HandTrackerLKHSVCD::HandTrackerLKHSVCD( bool useHSV, Rect trackingRect, Ptr< GeometricGestureClassifier > gClassifier )
	: useHSV( useHSV ),
	  trackingRect( trackingRect ),
	  gClassifier( gClassifier ) {
}

// Factory: builds and fully initialises a HandTrackerLKHSVCD.
// - useHSV: track in HSV colour space instead of greyscale.
// - processNoiseCovScalar / measurementNoiseCovScalar: Kalman filter tuning.
// - trackingRect: initial hand window, also seeds the filter state.
// - initImage: first (BGR) frame; converted and stored as the "previous" frame.
// - gClassifier: geometric gesture classifier used by track().
// Returns a heap-allocated tracker; the caller takes ownership.
AbstractHandTracker* HandTrackerLKHSVCD::init( bool	useHSV, double processNoiseCovScalar, double measurementNoiseCovScalar,
											  Rect trackingRect, Mat& initImage, Ptr< GeometricGestureClassifier > gClassifier ) {
	
	HandTrackerLKHSVCD* ht = new HandTrackerLKHSVCD( useHSV, trackingRect, gClassifier  );
	
	// Double-buffer pointers: track() swaps these every frame so *imgPtr1 is
	// the previous frame and *imgPtr2 receives the new one.
	ht->imgPtr1 = &(ht->imgA);//TODO could maybe move this to constructor?
	ht->imgPtr2 = &(ht->imgB);
	
	// Seed the "previous frame" buffer (imgB) from the initial image, in the
	// same colour space track() will use.
	if ( useHSV ) {
		cvtColor( initImage, ht->imgB, CV_BGR2HSV );
	} else {
		cvtColor( initImage, ht->imgB, CV_BGR2GRAY );
	}
	
	// -- Kalman filter set-up -- //
	ht->KF.init( 2, 2 ); //Two "dynamParams": (x,y). Two "measureParams", also (x,y)	
	
	setIdentity( ht->KF.transitionMatrix );  // constant-position model
	setIdentity( ht->KF.measurementMatrix ); // state is measured directly
	//FIXNOW REVISE VALUES
	setIdentity( ht->KF.processNoiseCov, Scalar::all(processNoiseCovScalar) ); //FIXNOW ?  //1e-1 good?
	setIdentity( ht->KF.measurementNoiseCov, Scalar::all(measurementNoiseCovScalar) ); //DIFFICULT! 1 is too sensitive and 10 is too insensitive?
	setIdentity( ht->KF.errorCovPost, Scalar::all(1) ); //TODO drop arg scalar all?
	
	// Initialise both state estimates to the centre of the tracking rect.
	ht->KF.statePre = (Mat_<float>(2, 1) << (trackingRect.x + trackingRect.width/2), (trackingRect.y + trackingRect.height/2) );
	ht->KF.statePost = (Mat_<float>(2, 1) << (trackingRect.x + trackingRect.width/2), (trackingRect.y + trackingRect.height/2) );
	
	// -- visualisation -- //
	{
		namedWindow( "trackingTest", 1 );
		namedWindow( "classFail", 1 );
		// Use the C++ API (moveWindow) instead of the legacy C API
		// cvMoveWindow, consistent with namedWindow/imshow above.
		moveWindow( "classFail", 100, 700 );
	}
	
	return ht;
}


// Tracks the hand one frame forward.
// Selects corner features inside the current tracking window, follows them
// with pyramidal Lucas-Kanade optical flow, feeds each significant
// displacement into the Kalman filter, and recentres the tracking rect on
// the filtered position estimate.
// - nextImage: the new BGR frame.
// Returns the updated tracking rectangle (clamped to the frame bounds).
Rect HandTrackerLKHSVCD::track(cv::Mat& nextImage ) {
	
	// -- run classifier -- //
	
	// ROI view of the current tracking window (shares data with nextImage).
	Mat handImg( nextImage, trackingRect );
	
	// Remember the pre-update rect: the classifier mask is relative to this
	// window, so the visualisation overlay below must use it.
	Rect oldTrackingRect(trackingRect);
	
	bool classified = gClassifier->classify( handImg );
	//TODO decide what to do when classification fails; currently tracking
	//continues, just without the classifier's contour mask (see below).
	
	// -- swap pointers to prepare for next image -- //
	// After the swap *imgPtr1 holds the previous frame and *imgPtr2 is
	// overwritten with the converted new frame.
	Mat* imgPtrTmp = imgPtr1;
	imgPtr1 = imgPtr2;
	imgPtr2 = imgPtrTmp;
	
	Mat featureImg;
	
	if ( useHSV ) {
	
		//  -- convert image to HSV -- //
		cvtColor( nextImage, *imgPtr2, CV_BGR2HSV );
		
		// goodFeaturesToTrack needs a single-channel image, so build a
		// separate grey ROI just for feature selection.
		Mat imgGrey;
		cvtColor( nextImage, imgGrey, CV_BGR2GRAY );
		featureImg = Mat( imgGrey, trackingRect );
		
	} else {
		//  -- convert image to GRAY -- //
		cvtColor( nextImage, *imgPtr2, CV_BGR2GRAY );
		
		// Grey frame is already single-channel; select features on its ROI.
		featureImg = Mat( *imgPtr2, trackingRect );
	}

	
	// -- select features (corners) to track -- //
	
	vector<Point2f> corners;
	int maxCorners = 100;
	double qualityLevel = 0.01;
	double minDistance = 3;
	//FIX : Test these values //FULLTEST
	
	if (classified) {
		// Restrict feature selection to the classifier's hand contour mask.
		goodFeaturesToTrack( featureImg, corners, maxCorners, qualityLevel, minDistance, gClassifier->getContourImage());
	} else {
		goodFeaturesToTrack( featureImg, corners, maxCorners, qualityLevel, minDistance );
	}
	//FIXNOW what to do on track fail? Give up?
	//FIX dilate mask?
	
	// Corners were found in ROI coordinates; shift them into the frame of
	// reference of the full image.
	for( size_t i = 0; i < corners.size(); i++) {
		corners[i].x += trackingRect.x;
		corners[i].y += trackingRect.y;
	}
	
	// -- track features -- //
	
	vector<Point2f> nextCorners;
	vector<uchar> cornersFound;
	vector<float> cornerErrors;
	
	calcOpticalFlowPyrLK( *imgPtr1, *imgPtr2, corners, nextCorners, cornersFound, cornerErrors );
	//FIX test / tweak, use derivLambda? how many levels? etc.
	
	// -- feed per-corner displacements into the Kalman filter -- //
	
	// BUGFIX: was declared uninitialised and then incremented/read, which is
	// undefined behaviour.
	int trackedCornerCount = 0;
	// BUGFIX: was a variable-length array (non-standard C++, and UB when
	// corners is empty). Entries stay false for corners LK did not find.
	vector<bool> shortDistIgnore( corners.size(), false );
	
	double startX = KF.statePost.at<float>(0);
	double startY = KF.statePost.at<float>(1);
	
	for( size_t j = 0; j < corners.size(); j++ ) {
		if (cornersFound[j] == 0) { //TODO also discard if error is very high?
			//corner not found
			continue;
		}
		
		// Squared displacement of this corner between the two frames.
		double sqDisplacement = (nextCorners[j].x  - corners[j].x) * (nextCorners[j].x  - corners[j].x);
		sqDisplacement += (nextCorners[j].y  - corners[j].y) * (nextCorners[j].y  - corners[j].y);
		
		// NOTE: threshold is on the SQUARED distance, so moves shorter than
		// ~7px are treated as noise and ignored.
		double shortDistIgnoreThreshold = 50.0; //FIXNOW should be scale dependent? -- needs FULL TESTING //FIXNOW //FULLTEST
		if (sqDisplacement < shortDistIgnoreThreshold) {
			shortDistIgnore[j] = true;
		} else {
			shortDistIgnore[j] = false;
			
			// Measurement = previous filtered position plus this corner's
			// displacement. //FIXNOW does that make sense?
			Mat measurement = (Mat_<float>(2, 1) << (startX+(nextCorners[j].x  - corners[j].x)), (startY + (nextCorners[j].y - corners[j].y)) );
			
			KF.correct( measurement );
			KF.predict(); //FIXNOW is that necessary each time?
			
			trackedCornerCount++;
			
		} //FIXNOW: only ignore if some points do move?
	}
	
	if ( trackedCornerCount > 0 ) {
		
		// Recentre the tracking rect on the filtered position estimate.
		trackingRect.x = cvRound( KF.statePost.at<float>(0) ) - trackingRect.width/2;
		trackingRect.y = cvRound( KF.statePost.at<float>(1) ) - trackingRect.height/2;
		
		//forcing rectangle to stay inside frame
		if ( trackingRect.x < 0 ) trackingRect.x = 0;
		if ( nextImage.cols < trackingRect.x + trackingRect.width ) trackingRect.x = nextImage.cols - trackingRect.width;
		if ( trackingRect.y < 0 ) trackingRect.y = 0;
		if ( nextImage.rows < trackingRect.y + trackingRect.height ) trackingRect.y = nextImage.rows - trackingRect.height;
		
	}
	
	// -- visualisation -- //
	{
		Mat imgToDraw;
		nextImage.copyTo( imgToDraw );
		
		//Tracking rect
		rectangle( imgToDraw, Point( trackingRect.x, trackingRect.y ), Point( trackingRect.x + trackingRect.width, trackingRect.y + trackingRect.height ),
				  Scalar::all(255) );
		
		
		if (classified) {
			// Overlay the classifier's contour mask (tinted) onto the OLD
			// tracking window -- the mask was computed relative to it.
			Mat maskInBGR;
			
			cvtColor( gClassifier->getContourImage(), maskInBGR, CV_GRAY2BGR );
			
			Mat imgToDraw_handRect( imgToDraw, oldTrackingRect );
			
			imgToDraw_handRect = imgToDraw_handRect + ( 0.3 * maskInBGR );
		}
		
		
		
		for( size_t j = 0; j < corners.size(); j++ ) {
			if (cornersFound[j] == 0) { //TODO also discard if error is very high?
				//corner not found
				continue;
			}
			
			Scalar colour;
			
			if (shortDistIgnore[j]) {
				// Ignored (short-move) corners: full red channel, random B/G.
				colour = Scalar( (rand()&255)+10, (rand()&255)+10, 255 );
			} else {
				// Tracked corners: greenish, brighter for lower LK error.
				colour = Scalar( 55 * ( 600 - cornerErrors[j] ) / 600, 200 + 55 * ( 600 - cornerErrors[j] ) / 600, 55 * ( 600 - cornerErrors[j] ) / 600);
			}
			
			Point p0(
					 cvRound( corners[j].x ),
					 cvRound( corners[j].y )
					 );
			Point p1(
					 cvRound( nextCorners[j].x ),
					 cvRound( nextCorners[j].y )
					 );
			line( imgToDraw, p0, p1, colour ); //draw by thickness?
		}
		
		imshow( "trackingTest", imgToDraw );
	}
	
	return trackingRect;
	
}



