#include <highgui.h>
#include <iostream>
#include <cv.h>

#include "HandInterface.hpp"
using namespace cv;
using namespace std;
using namespace hi;

// Harness that replays a tagged test video through the hand localiser /
// tracker / gesture classifier pipeline and reports a confusion matrix plus
// classification-timing statistics (human-readable on stdout, CSV fields on
// stderr). One instance is used per video (see main()).
class ClassificationTester {
public:
	ClassificationTester();
	
	// Pipeline components; all are created lazily inside test()/initTest().
	cv::Ptr< AbstractGestureClassifier > gc;	// open/closed-hand classifier under test
	cv::Ptr< ResourceTracker > rt;	// resolves videos and tag files from the config XML
	cv::Ptr< AbstractHandTracker > ht;	// tracks the hand rect from frame to frame
	cv::Ptr< HandLocaliser > hl;	// finds the initial hand/face position
	
	// Per-video test data and state.
	cv::Ptr< cv::VideoCapture > capPtr;	// the test video being replayed
	cv::Ptr< VideoPointTagSequence > pointTagSequencePtr;	// ground-truth hand-position tags
	cv::Ptr< VideoGestureTagSequence > gestureTagSequencePtr;	// ground-truth gesture tags ('o'/'c')
	cv::Mat frame;	// most recently read video frame
	cv::Rect trackingRect;	// current hand bounding box
	double errorThreshold;	// set in initTest() from the initial rect size; not read in this file
	int localisedHandInFrame;	// NOTE(review): never assigned or read in this file -- confirm before removing
	
	// Runs the full classification test: mlName/pcaName select the trained
	// classifier data, videoNameString selects the tagged test video.
	void test( const char* mlName, string pcaName, const char* videoNameString );
private:
	// Loads resources and localises the hand; returns the index of the frame
	// in which localisation succeeded.
	int initTest( const char* videoNameString );
};

// Default constructor: intentionally empty -- all members are initialised
// lazily in initTest()/test(). (Removed the stray ';' after the function
// body; it is ill-formed before C++11 and noise afterwards.)
ClassificationTester::ClassificationTester() {
}

/**
 * Runs the gesture-classification test over one tagged video.
 *
 * @param mlName           name/path of the trained boosted classifier model
 * @param pcaName          name/path of the PCA data used by the classifier
 * @param videoNameString  key of the test video and its tag files
 *
 * For every frame after localisation: tracks the hand, classifies the
 * cropped hand image as active/inactive, compares the result with the
 * ground-truth gesture tag ('o' = open/active, 'c' = closed/inactive) and
 * accumulates a confusion matrix plus per-classification timing.
 * Human-readable results go to stdout; CSV fields go to stderr (continuing
 * the record started by initTest()). Throws via CV_Error on an
 * unrecognised gesture tag.
 */
void ClassificationTester::test( const char* mlName, string pcaName, const char* videoNameString ) { 
	//TODO refactor to test other GestureClassifiers by using callback for constructor
	//OR by adding setter methods and half-way constructors to classifiers
	
	int localisationFrameNumber = initTest( videoNameString ); //returns number of first frame where face was found
	
	int firstTrackingFrame = localisationFrameNumber+1;
	
	// Tracker is seeded with the rect/frame produced by initTest()'s localisation.
	ht = HandTracker::init( 10, 0.1, 100, 1, trackingRect, frame, Ptr< Segmenter >( new Segmenter( hl->getProbabilityTransformer() ) ) );
	
	gc = new BoostGestureClassifier( mlName, pcaName, hl->getProbabilityTransformer() );
	
	int tagCount = pointTagSequencePtr->tagCount;
	
	// Confusion-matrix counters ("positive" == classified as active hand).
	int truePositive = 0;
	int falsePositive = 0;
	int trueNegative = 0;
	int falseNegative = 0;
	
	// -- Prepare timing data -- //
	// Sum and sum-of-squares of per-frame classification times (in ticks),
	// used below to derive mean and standard deviation.
	double totalClassificationTime = 0;
	double totalSquareClassificationTime = 0;
	
	for( int frameCount = firstTrackingFrame; frameCount<tagCount; frameCount++) {
		// -- get next frame -- //
		(*capPtr) >> frame;	
		//VISUALISATION: draw onto a copy so the frame itself stays untouched
		{
			Mat imgToDraw;
			frame.copyTo( imgToDraw );
			HiVisualiser::windowMatrixMap["handtracker"] = imgToDraw;
		}
		
		// -- tracking frame -- //
		trackingRect = ht->track( frame );
		
		// Crop the tracked region (shares data with frame, no copy).
		Mat handImg( frame, trackingRect );
		
		// Time only the classification call itself.
		double trackTime = (double)cvGetTickCount();
		bool classifiedActive = gc->isHandActive( handImg );
		trackTime = (double)cvGetTickCount() - trackTime;
		
		totalClassificationTime += trackTime;
		totalSquareClassificationTime += trackTime*trackTime;
		
		//VISUALISATION: classifier verdict -- larger red circle = active, smaller green = inactive
		{
			if (classifiedActive) {
				circle(HiVisualiser::windowMatrixMap["handtracker"], Point(15, 5), 4, Scalar(0, 0, 255), 2);
			} else {
				circle(HiVisualiser::windowMatrixMap["handtracker"], Point(15, 5), 2, Scalar(0, 255, 0), 2);
			}
		}
		
		// -- get and display gesture tag -- //
		// Consuming one tag per frame keeps the sequence in step with the video.
		char gestureTag = gestureTagSequencePtr->getNextGestureTag();
		bool taggedActive;
		switch (gestureTag) {
			case 'o':
				taggedActive = true;
				break;
			case 'c':
				taggedActive = false;
				break;
			default:
				CV_Error( CV_StsError, "Unrecognised tag in gesture tags!"); //throws exception!
				break;
		}
		
		//VISUALISATION: ground-truth tag, drawn next to the classifier verdict
		{
			if (taggedActive) {
				circle(HiVisualiser::windowMatrixMap["handtracker"], Point(5, 5), 4, Scalar(0, 0, 255), 2);//VISUALISATION
			} else {
				circle(HiVisualiser::windowMatrixMap["handtracker"], Point(5, 5), 2, Scalar(0, 255, 0), 2);//VISUALISATION
			}
		}
		
		//Could consider only testing classification when error in tracking is small?
		if ( classifiedActive ) {
			if ( taggedActive ) {
				truePositive++;
			} else {
				falsePositive++;
			}
		} else {
			if ( taggedActive ) {
				falseNegative++;
			} else {
				trueNegative++;
			}
		}
		
		//VISUALISATION
		HiVisualiser::refreshWindow( "handtracker" );
		waitKey(1);
	}
	
	// Human-readable confusion matrix on stdout.
	cout << "truePositive=" << truePositive << endl;
	cout << "falsePositive=" << falsePositive << endl;
	cout << "falseNegative=" << falseNegative  << endl;
	cout << "trueNegative=" << trueNegative << endl << endl;
	
	double classificationCount = truePositive + falsePositive + falseNegative + trueNegative;
	
	// Ticks -> seconds (cvGetTickFrequency() is ticks per microsecond).
	double timeClassifyingOnly = totalClassificationTime/((double)cvGetTickFrequency()*1e6);
	
	// Population variance of per-frame times, in ticks^2: E[t^2] - E[t]^2.
	double timeVariance = (totalSquareClassificationTime - totalClassificationTime*totalClassificationTime/classificationCount)/classificationCount;
	
	//To record data to a csv -- //NOTE: Also data recorded during init
	std::cerr << truePositive << ";";
	std::cerr << falsePositive << ";";
	std::cerr << falseNegative << ";";
	std::cerr << trueNegative << ";";
	std::cerr << timeClassifyingOnly << endl;
	// Standard deviation of per-frame classification time, converted to seconds.
	std::cerr << std::sqrt(timeVariance)/((double)cvGetTickFrequency()*1e6) << std::endl;
}

int ClassificationTester::initTest( const char* videoNameString ) {
	rt = new ResourceTracker( "../config/runtimeSettings/cfg.xml" );
	rt->loadCategory("TestData");
	
	cerr << videoNameString << ";" ;
	
	capPtr = rt->getFileVideoCapture( videoNameString );
	pointTagSequencePtr = rt->getPointTags( videoNameString );
	gestureTagSequencePtr = rt->getGestureTags( videoNameString );
	
	int tagCount = pointTagSequencePtr->tagCount; //TODO ok?
	
	hl = new HandLocaliser();
	
	int localisationframeNumber = 0; //NOTE: frame zero is never tracked as it is always used by 
	
	for ( ; localisationframeNumber < tagCount; localisationframeNumber++ ) {
		try {
			(*capPtr) >> frame;
			pointTagSequencePtr->getNextPointTag();
			gestureTagSequencePtr->getNextGestureTag();
			trackingRect = hl->localiseHand( frame );
			
			cout << "Face found. First tracking frame:" << endl;
			
			cerr << localisationframeNumber + 1 << ";" ;
			
			break;
		}
		catch (cv::Exception e) {
			if ( e.code == HI_ERR_NOFACEFOUND ) {
				continue;
			} else {
				throw;
			}
			
		}
	}
	
	errorThreshold  = 0.75 * (trackingRect.width + trackingRect.height);
	
	forceInside( frame.size(), trackingRect);
	
	return localisationframeNumber;
}

/**
 * Usage: <program> <mlName> <pcaName> <video> [video ...]
 *
 * Runs one classification test per video argument, using a fresh
 * ClassificationTester for each so no state leaks between runs.
 * Returns 1 (with a usage message) when too few arguments are given;
 * previously this case silently did nothing.
 */
int main( int argc, char** argv ) {
	
	if (argc < 4) {
		std::cerr << "Usage: " << argv[0] << " <mlName> <pcaName> <video> [video ...]" << std::endl;
		return 1;
	}
	
	for (int i = 3; i<argc; i++) {
		ClassificationTester cTester;
		cTester.test( argv[1], argv[2], argv[i] );
	}
	
	return 0;
}


