#include "abstractFilter.hpp"
#include "HandTrackerLKGREYwMSwBPwFOx.hpp"

#include <iostream>

using namespace cv;

// Constructs a tracker that combines Lucas-Kanade optical flow on grayscale
// frames with a mean-shift refinement step driven by a probability back-projection.
//
// probTrans    - transformer that produces the back-projection image consumed by meanShift
// trackingRect - initial region of interest; updated in place by every track() call
// filter       - filter applied in place to the back-projection before meanShift
//
// NOTE(review): the grayscale buffer pointers (imgPtr1/imgPtr2) are NOT set here;
// init() wires them up — see the TODO there about moving that into this constructor.
HandTrackerLKGREYwMSwBPwFOx::HandTrackerLKGREYwMSwBPwFOx( Ptr< AbstractProbabilityTransformer > probTrans,
						 Rect trackingRect,
						 Ptr< AbstractFilter > filter
						 ) : probTrans( probTrans ),
							trackingRect( trackingRect ),
							filter( filter ) {}

// Factory: allocates a tracker, wires up the double-buffered grayscale frame
// pointers, and seeds one buffer with the grayscale version of the initial
// frame so that the first track() call has a valid "previous" image to flow from.
//
// probTrans    - probability transformer handed to the constructor
// trackingRect - initial tracking rectangle
// initImage    - first BGR frame of the sequence (converted, not retained)
// filter       - back-projection filter handed to the constructor
// Returns an owning raw pointer to the new tracker (caller must delete).
AbstractHandTracker* HandTrackerLKGREYwMSwBPwFOx::init( Ptr< AbstractProbabilityTransformer > probTrans, Rect trackingRect, Mat& initImage, Ptr< AbstractFilter > filter ) {
	
	HandTrackerLKGREYwMSwBPwFOx* tracker = new HandTrackerLKGREYwMSwBPwFOx( probTrans, trackingRect, filter );
	
	// Point the ping-pong buffer pointers at the two Mat members; track()
	// swaps them every frame so *imgPtr1 is always the previous gray frame.
	//TODO could maybe move this to constructor?
	tracker->imgPtr1 = &(tracker->imgA);
	tracker->imgPtr2 = &(tracker->imgB);
	
	// track() swaps before converting, so imgB becomes the "previous" frame
	// on the first call — seed it with the grayscale initial image.
	cvtColor( initImage, tracker->imgB, CV_BGR2GRAY );
	
	return tracker;
}
							  

// Tracks the hand region into nextImage and returns the updated rectangle.
//
// Pipeline:
//   1. Grayscale-convert the new frame into the "current" ping-pong buffer.
//   2. Pick good features inside trackingRect on the PREVIOUS gray frame.
//   3. Flow them into the current frame with pyramidal Lucas-Kanade and move
//      trackingRect by the error-weighted mean displacement.
//   4. Clamp the rect to the frame, then refine it with meanShift on a
//      filtered probability back-projection of the new frame.
//
// nextImage - current BGR frame; read-only except for the debug drawing
//             snippet (commented out below).
// Returns the updated trackingRect (also stored as member state).
Rect HandTrackerLKGREYwMSwBPwFOx::track(cv::Mat& nextImage ) {
	
	// -- swap pointers so *imgPtr1 is the previous frame and *imgPtr2
	//    will receive the current one -- //
	Mat* imgPtrTmp = imgPtr1;
	imgPtr1 = imgPtr2;
	imgPtr2 = imgPtrTmp;
	
	//  -- convert image to grayscale -- //
	
	cvtColor( nextImage, *imgPtr2, CV_BGR2GRAY );

	// -- set ROI for feature selection (a view into *imgPtr1, no copy) -- //
	Mat featureRegionImg( *imgPtr1, trackingRect );
	
	// -- select features (corners) to track -- //
	
	vector<Point2f> corners;
	int maxCorners = 100;
	double qualityLevel = 0.01;
	double minDistance = 3;
	//FIX : Test these values
	
	goodFeaturesToTrack( featureRegionImg, corners, maxCorners, qualityLevel, minDistance );
	
	// Only run optical flow when corners were actually found;
	// calcOpticalFlowPyrLK rejects an empty previous-point set.
	if ( !corners.empty() ) {
		
		// Change frame of reference for corners to be the full image
		for( size_t i=0; i<corners.size(); i++) {
			corners[i].x += trackingRect.x;
			corners[i].y += trackingRect.y;
		}
		
		// -- track features -- //
		
		vector<Point2f> nextCorners;
		vector<uchar> cornersFound;
		vector<float> cornerErrors;
		
		calcOpticalFlowPyrLK( *imgPtr1, *imgPtr2, corners, nextCorners, cornersFound, cornerErrors );
		//FIX test / tweak, use derivLambda? how many levels? etc.
		
		// -- determine displacement of tracking rect: weighted average of the
		//    per-corner displacements, lower tracking error => higher weight -- //
		
		double sumDeltaX = 0;
		double sumDeltaY = 0;
		double invErrSum = 0;
		
		for( size_t j=0; j<corners.size(); j++ ) {
			if (cornersFound[j] == 0) { //TODO also discard if error is very high?
				//corner not found
				continue;
			}
			
			double invErr = 1/(cornerErrors[j]+1); //+1 avoids division by zero (very unlikely, but still)
			
			sumDeltaX += (nextCorners[j].x  - corners[j].x) * invErr;
			sumDeltaY += (nextCorners[j].y  - corners[j].y) * invErr;
			
			invErrSum += invErr;
		}
		
		// Guard the all-corners-lost case: invErrSum would be 0 and the
		// division below would shift the rect by cvRound(NaN) — undefined.
		if ( invErrSum > 0 ) {
			trackingRect.x += cvRound( sumDeltaX / invErrSum );
			trackingRect.y += cvRound( sumDeltaY / invErrSum );
		}
	}
	
	//forcing rectangle to stay inside frame
	if ( trackingRect.x < 0 ) trackingRect.x = 0;
	if ( nextImage.cols < trackingRect.x + trackingRect.width ) trackingRect.x = nextImage.cols - trackingRect.width;
	if ( trackingRect.y < 0 ) trackingRect.y = 0;
	if ( nextImage.rows < trackingRect.y + trackingRect.height ) trackingRect.y = nextImage.rows - trackingRect.height;
	
	
	// -- meanshift adjustment -- //
	
	//Compute HSV for backprojection
	cvtColor(nextImage, imgHSV, CV_BGR2HSV); //FIXNOW : use one buffer for HSV (3 channel) and another for probproj (1 channel)
	
	// Compute backprojection into *imgPtr1. This clobbers the previous-frame
	// gray buffer, which is safe: the next track() call swaps the pointers and
	// overwrites this buffer with the fresh grayscale conversion.
	probTrans->getBackProjection( imgHSV, *imgPtr1 );
	
	filter->filterInPlace( *imgPtr1 );
	
	meanShift( *imgPtr1, trackingRect, TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS , 3, 0.01) );
	
//	rectangle( nextImage, Point(trackingRect.x, trackingRect.y),
//			  Point(trackingRect.x + trackingRect.width, trackingRect.y + trackingRect.height),
//			  Scalar::all(250));
//	
//	imshow( "tracking test 1", nextImage );
	
	return trackingRect;

}
	

	
