#include "HandTrackerLKGREYCD.hpp"

#include <iostream>

using namespace cv;
using namespace std;

// Stores the initial tracking rectangle and the gesture classifier; the
// greyscale image buffers / pointers are wired up in init(), not here.
HandTrackerLKGREYCD::HandTrackerLKGREYCD( Rect trackingRect, Ptr< GeometricGestureClassifier > gClassifier ) : trackingRect( trackingRect ), gClassifier( gClassifier ) {}

// Factory for an LK/grey/contour-detection hand tracker.
//
// @param dummy        unused (kept for interface compatibility)
// @param trackingRect initial hand position inside initImage
// @param initImage    first BGR frame of the sequence
// @param gClassifier  classifier used each frame to segment the hand
// @return heap-allocated tracker; the caller takes ownership
AbstractHandTracker* HandTrackerLKGREYCD::init( int dummy, Rect trackingRect, Mat& initImage, Ptr< GeometricGestureClassifier > gClassifier ) {
	
	HandTrackerLKGREYCD* tracker = new HandTrackerLKGREYCD( trackingRect, gClassifier );
	
	//TODO could maybe move this to constructor?
	tracker->imgPtr1 = &( tracker->imgA );
	tracker->imgPtr2 = &( tracker->imgB );
	
	// The first greyscale frame goes into imgB: track() swaps the two pointers
	// before using them, so imgB becomes the "previous" image of the first call.
	cvtColor( initImage, tracker->imgB, CV_BGR2GRAY );
	
	// -- visualisation -- //
	namedWindow( "trackingTest", 1 );
	
	return tracker;
}


// Tracks the hand rectangle into the next frame.
//
// Pipeline: run the geometric classifier on the current rect to get a hand
// mask; pick good features inside that mask on the PREVIOUS greyscale frame;
// track them into the new frame with pyramidal Lucas-Kanade; move the rect by
// the error-weighted average displacement of the corners that actually moved.
//
// @param nextImage next BGR frame of the sequence
// @return the updated tracking rectangle (unchanged if the classifier rejects
//         the region or no usable corner displacement was found)
Rect HandTrackerLKGREYCD::track(cv::Mat& nextImage ) {
	
	// -- run classifier -- //
	
	// get hand image for classifier (view into nextImage, no copy)
	Mat handImg( nextImage, trackingRect );
	
	// keep the pre-update rect: the classifier mask below is aligned to it
	//TODO RAT : for visualisation only!
	Rect oldTrackingRect(trackingRect);
	
	Mat mask;
	if ( gClassifier->classify( handImg ) ) {
		mask = gClassifier->getContourImage();
		//FIXNOW dilate?
	} else {
		// classifier rejected the region: draw the unchanged rect and bail out
		// -- visualisation -- //
		{
			Mat imgToDraw;
			nextImage.copyTo( imgToDraw );
			
			rectangle( imgToDraw, Point( trackingRect.x, trackingRect.y ), Point( trackingRect.x + trackingRect.width, trackingRect.y + trackingRect.height ),
					  Scalar::all(255) );
			
			imshow( "trackingTest", imgToDraw );
		}
		
		return trackingRect; //FIXNOW really?
	}
	
	// -- swap pointers to prepare for next image -- //
	// after the swap, *imgPtr1 is the previous frame and *imgPtr2 receives the new one
	Mat* imgPtrTmp = imgPtr1;
	imgPtr1 = imgPtr2;
	imgPtr2 = imgPtrTmp;
	
	//  -- convert image to grayscale -- //
	cvtColor( nextImage, *imgPtr2, CV_BGR2GRAY );
	
	// -- set ROI for feature selection (on the PREVIOUS frame) -- //
	Mat featureImg( *imgPtr1, trackingRect );
	
	// -- select features (corners) to track -- //
	
	vector<Point2f> corners;
	const int maxCorners = 100;
	const double qualityLevel = 0.01; // was "0.01;;" — stray semicolon removed
	const double minDistance = 3;
	//FIX : Test these values
	
	goodFeaturesToTrack( featureImg, corners, maxCorners, qualityLevel, minDistance, mask );
	
	// Change frame of reference for corners to be the full image
	for( size_t i = 0; i < corners.size(); i++ ) {
		corners[i].x += trackingRect.x;
		corners[i].y += trackingRect.y;
	}
	
	// -- track features -- //
	
	vector<Point2f> nextCorners;
	vector<uchar> cornersFound;
	vector<float> cornerErrors;
	
	calcOpticalFlowPyrLK( *imgPtr1, *imgPtr2, corners, nextCorners, cornersFound, cornerErrors );
	//FIX test / tweak, use derivLambda? how many levels? etc.
	
	// -- determine displacement of tracking rect -- //
	
	double sumDeltaX = 0;
	double sumDeltaY = 0;
	double invErrSum = 0;
	
	// Replaces the non-standard VLA bool[corners.size()]; initialised to false
	// so entries skipped below (corner not found) are never read uninitialised.
	vector<bool> shortDistIgnore( corners.size(), false );
	
	// NOTE: compared against the SQUARED displacement (no sqrt taken)
	const double shortDistIgnoreThreshold = 5.0;
	
	for( size_t j = 0; j < corners.size(); j++ ) {
		if (cornersFound[j] == 0) { //TODO also discard if error is very high?
			//corner not found
			continue;
		}
		
		const double dx = nextCorners[j].x - corners[j].x;
		const double dy = nextCorners[j].y - corners[j].y;
		const double sqDist = dx * dx + dy * dy;
		
		if ( sqDist < shortDistIgnoreThreshold ) {
			// (nearly) stationary corner — ignore it so the rect does not get
			// anchored on background corners
			shortDistIgnore[j] = true;
		} else {
			const double invErr = 1 / ( cornerErrors[j] + 1 ); //+1 avoids division by zero (very unlikely, but still)
			
			sumDeltaX += dx * invErr;
			sumDeltaY += dy * invErr;
			
			invErrSum += invErr;
		} //FIXNOW: only ignore if some points do move?
	}
	
	// Guard against 0/0: if every corner was lost or stationary, invErrSum is 0
	// and the division would produce NaN (cvRound(NaN) is undefined) — in that
	// case leave the rect where it is.
	if ( invErrSum > 0 ) {
		trackingRect.x += cvRound( sumDeltaX / invErrSum );
		trackingRect.y += cvRound( sumDeltaY / invErrSum );
	}
	
	//forcing rectangle to stay inside frame
	if ( trackingRect.x < 0 ) trackingRect.x = 0;
	if ( nextImage.cols < trackingRect.x + trackingRect.width ) trackingRect.x = nextImage.cols - trackingRect.width;
	if ( trackingRect.y < 0 ) trackingRect.y = 0;
	if ( nextImage.rows < trackingRect.y + trackingRect.height ) trackingRect.y = nextImage.rows - trackingRect.height;
	
	
	// -- visualisation -- //
	{
		Mat imgToDraw;
		nextImage.copyTo( imgToDraw );
		
		//Tracking rect
		rectangle( imgToDraw, Point( trackingRect.x, trackingRect.y ), Point( trackingRect.x + trackingRect.width, trackingRect.y + trackingRect.height ),
				  Scalar::all(255) );
		
		//mask for LK features, overlaid (faintly) on the OLD rect position
		Mat maskInBGR;
		
		cvtColor( mask, maskInBGR, CV_GRAY2BGR );
		
		Mat imgToDraw_handRect( imgToDraw, oldTrackingRect );
		
		imgToDraw_handRect = imgToDraw_handRect + ( 0.3 * maskInBGR );
		
		// flow vectors: cyan = ignored as stationary, blue = contributed to the move
		for( size_t j = 0; j < corners.size(); j++ ) {
			if (cornersFound[j] == 0) { //TODO also discard if error is very high?
				//corner not found
				continue;
			}
			
			Scalar colour;
			
			if (shortDistIgnore[j]) {
				colour = Scalar( 255, 255, 0 );
			} else {
				colour = Scalar( 255, 0, 0 );
			}
			
			Point p0(
					 cvRound( corners[j].x ),
					 cvRound( corners[j].y )
					 );
			Point p1(
					 cvRound( nextCorners[j].x ),
					 cvRound( nextCorners[j].y )
					 );
			line( imgToDraw, p0, p1, colour ); //draw by thickness?
		}
		
		
		imshow( "trackingTest", imgToDraw );
	}
	
	
	return trackingRect;
	
}



