#include "HandInterface.hpp"
#include <cv.h>
#include <ml.h>
#include "HIString-inl.hpp"
#include <string>

//TODO

/*
 *  MLTrainer.cpp
 *  
 *
 *  Created by Rasmus Kyng on 29/01/2011.
 *  Copyright 2011 __MyCompanyName__. All rights reserved.
 *
 */


using namespace cv;
using namespace hi;

#include <iostream>
using namespace std;


// Stand-in classifier used only at training time: instead of classifying, it
// records each tracked hand image (via the isHandActive hook) into externally
// owned training buffers, labelled with the next tag from the gesture tag
// sequence.
class ClassificationDataLoader : public MLGestureClassifier {
public:
	// trainingData / trainingResponses / dataSampleIndex are owned by the
	// caller; dataSampleIndex acts as a shared write cursor that is advanced
	// once per recorded sample.
	ClassificationDataLoader( Ptr< AbstractProbabilityTransformer > probTrans, Ptr< VideoGestureTagSequence > gestureSequencePtr,
							 Mat& trainingData, Mat& trainingResponses, int& dataSampleIndex );
	
	virtual bool classify( Mat& handImg );     // deprecated - see definition
	virtual bool isHandActive( Mat& handImg ); // hijacked hook: records one training sample per call
protected:
	Mat trainingData;      // Mat header copy - shares pixel storage with the caller's matrix
	Mat trainingResponses; // Mat header copy - shares pixel storage with the caller's matrix
	Ptr< VideoGestureTagSequence > gestureSequencePtr;
	
	// Copies one continuous 8UC1 image into row dataSampleIndex of
	// trainingData, with pixel values scaled from [0,255] to [0,1].
	void putSample_8UC1( Mat& sample );
	
	int& dataSampleIndex;  // reference to the caller's shared sample counter
};

// Binds the loader to externally owned training buffers.
// The Mat members are header copies, so they share pixel storage with the
// caller's matrices - samples written here are visible to the caller.
// dataSampleIndex is held by reference so several loaders (one per video)
// can advance a single shared write cursor.
ClassificationDataLoader::ClassificationDataLoader( Ptr< AbstractProbabilityTransformer > probTrans,
												   Ptr< VideoGestureTagSequence > gestureSequencePtr,
												   Mat& trainingData, Mat& trainingResponses,
												   int& dataSampleIndex )
// Initialiser list reordered to match the member declaration order: members
// are always constructed in declaration order, so the previous ordering was
// misleading and triggered -Wreorder.
: MLGestureClassifier( probTrans ),
trainingData( trainingData ), trainingResponses( trainingResponses ),
gestureSequencePtr( gestureSequencePtr ),
dataSampleIndex( dataSampleIndex ) {
}

//METHOD deprecated!
// Delegates straight to the geometric classifier. The explicitly qualified
// call bypasses virtual dispatch and runs that implementation directly.
// NOTE(review): this only compiles if MLGestureClassifier derives from
// GeometricGestureClassifier (hierarchy not visible in this file) - confirm.
bool ClassificationDataLoader::classify( Mat& handImg ) {
	bool geomResult = GeometricGestureClassifier::classify( handImg );
	return geomResult;
}

// Training-time hijack of the isHandActive hook: rather than answering the
// question, it records one training sample (the resized back-projection of
// handImg) plus its label (from the gesture tag sequence) into the shared
// training buffers, then advances the shared sample cursor.
// Always returns false; the return value is not meaningful here.
// Throws (via CV_Error) on an unrecognised gesture tag.
bool ClassificationDataLoader::isHandActive( Mat& handImg ) {
	
	Mat backProj, handHSV;
	//Compute HSV for backprojection
	cvtColor(handImg, handHSV, CV_BGR2HSV);
	//Compute backprojection
	m_ProbTransPtr->getBackProjection( handHSV, backProj);
	
	Mat resizedBP = getResizedImg( backProj );
	
	// Store the sample into row dataSampleIndex of trainingData (scaled to [0,1]).
	putSample_8UC1( resizedBP );
	
	// Label for this frame: 'o' -> 0.0, 'c' -> 1.0 (presumably open/closed hand
	// - tag semantics not defined in this file).
	char gestureTag = gestureSequencePtr->getNextGestureTag();
	
	switch (gestureTag) {
		case 'o':
			trainingResponses.at<float>( dataSampleIndex, 0 ) = 0.0;
			break;
		case 'c':
			trainingResponses.at<float>( dataSampleIndex, 0 ) = 1.0;
			break;
		default:
			CV_Error( CV_StsError, "Unrecognised tag in gesture tags!"); //throws exception!
			break;
	}
	
	//VISUALISATION: draw the just-stored sample with a label marker.
	{
		// Alias the 100x100 header onto the freshly written training row by
		// swapping the data pointer - no pixels are copied here.
		// NOTE(review): assumes MLGestureClassifier::perSampleSize is 100x100,
		// matching the allocation in MLTrainer::train - confirm.
		Mat testDataOut( 100, 100, CV_32FC1, Scalar::all(0) );
		CV_Assert( testDataOut.isContinuous() );
		testDataOut.data = (uchar*) trainingData.ptr<float>( dataSampleIndex );
		
		Mat testDataToDraw;
		testDataOut.copyTo( testDataToDraw );
		HiVisualiser::windowMatrixMap["bpHand"] = testDataToDraw;
		
		// Shadows the char gestureTag above; reads back the float label just
		// stored. The (char) cast truncates, but is harmless for 0.0/1.0.
		float gestureTag = (char) trainingResponses.at<float>( dataSampleIndex, 0 );
		
		// Marker encodes the label: black-inside-white ring for 0.0 ('o'),
		// white-inside-black ring for 1.0 ('c').
		if ( gestureTag == 0.0) {
			circle(testDataToDraw, Point(5, 5), 2, Scalar::all(0), 2);
			circle(testDataToDraw, Point(5, 5), 4, Scalar::all(1.0), 2);
		} else if ( gestureTag == 1.0) {
			circle(testDataToDraw, Point(5, 5), 2, Scalar::all(1.0), 2);
			circle(testDataToDraw, Point(5, 5), 4, Scalar::all(0.0), 2);
		} else {
			CV_Error( CV_StsError, "Unexpected tag!"); //throws exception!
		}	
		
		HiVisualiser::refreshWindow( "bpHand" );
		waitKey(33);
		
	}
	// Advance the shared cursor only after sample and label are both written.
	dataSampleIndex++;
	
	return false;
}

// Flattens one continuous 8-bit single-channel image into row
// `dataSampleIndex` of trainingData, scaling each pixel to [0,1].
void ClassificationDataLoader::putSample_8UC1( Mat& sample ) {
	CV_Assert( sample.isContinuous() );
	CV_Assert( sample.type() == CV_8UC1 );
	
	const int pixelCount = sample.rows * sample.cols;
	
	const uchar* src = sample.ptr<uchar>(0);
	float* dst = trainingData.ptr<float>( dataSampleIndex );
	for (int p = 0; p < pixelCount; ++p) {
		dst[p] = src[p]/255.0;
	}
}

// Offline gesture-classifier trainer: replays tagged videos through the hand
// tracker to build a training matrix, compresses the samples with a
// pre-computed PCA basis, and trains/saves an SVM.
class MLTrainer {
public:
	MLTrainer();
	// Runs the full pipeline. SVM is written to <svmName>.svm.xml; the PCA
	// basis is read from <pcaName>.pca.yml. (Parameter renamed from the
	// typo "svnName" to match the definition; callers are unaffected.)
	void train( const char* svmName, int pcaVectorCount, string pcaName );
	// Localises the hand in the video's early frames and queues the video
	// (with its tracker seed state) for training.
	void prepareVideo( Ptr< VideoCapture > capPtr, Ptr< VideoGestureTagSequence > gestureTagSequencePtr );
	// Computes a PCA over the row-major sample matrix.
	PCA doPCA( Mat& trainingData, int pcaVectorCount );
private:
	// Pumps the remaining tagged frames through the tracker; the tracker's
	// classifier callback records the samples as a side effect.
	void trackAndLoadSamples( Ptr< VideoCapture > capPtr,
							 int tagsLeft,
							 Ptr< AbstractHandTracker > ht,
							 Mat frame,
							 Rect trackingRect
							 );
	// Trains the SVM on (compressed) samples and saves it to <svmName>.svm.xml.
	void trainAndSaveSVM(Mat& trainingData, Mat& trainingResponses, const char* svmName );
	// Index of the first eigenvector at which the cumulative eigenvalue mass
	// exceeds `quantile`, or -1 if never reached.
	int findEigVecForQuantile( PCA& pca, float quantile );
	
	//void putTrainingData( Mat& trainingData, int dataSampleIndex, Mat& handImg ); //TODO RAT / MOVE
	
	// Parallel per-video state, filled by prepareVideo (index i describes video i).
	vector< Ptr< VideoCapture > > videos;
	vector< int > tagsLeftList;
	vector< Ptr< VideoGestureTagSequence > > gestureSequences;
	vector< Ptr< AbstractProbabilityTransformer > > probTransList;
	vector< Mat > localisationFrameList;
	vector< Rect > trackingRectList;
	
	int totalSampleCount; // total tagged frames still to be tracked, over all videos
};

// Starts with no prepared videos; totalSampleCount grows as videos are added.
MLTrainer::MLTrainer()
: totalSampleCount( 0 ) {
}

// Localises the hand in the first usable frame of the video and queues the
// video, together with the tracker seed state (frame, tracking rect, colour
// probability model), for later training. Frames consumed while localising
// also consume their gesture tags, so the recorded tagsLeft is reduced
// accordingly. Rethrows any cv::Exception other than "no face found".
void MLTrainer::prepareVideo( Ptr< VideoCapture > capPtr, Ptr< VideoGestureTagSequence > gestureTagSequencePtr ) {
	Ptr< HandLocaliser > hl = new HandLocaliser();
	int tagCount = gestureTagSequencePtr->tagCount;
	Mat frame;
	Rect trackingRect;
	
	int localisationframeNumber = 0; //NOTE: frame zero is never tracked as it is always used by localiser
	for ( ; localisationframeNumber < tagCount; localisationframeNumber++ ) {
		try {
			(*capPtr) >> frame;
			gestureTagSequencePtr->getNextGestureTag(); //Skip this tag
			trackingRect = hl->localiseHand( frame );
			forceInside( frame.size(), trackingRect );
			break;
		}
		// Catch by const reference: the original caught by value, copying the
		// exception object on every failed localisation attempt.
		catch (const cv::Exception& e) {
			if ( e.code == HI_ERR_NOFACEFOUND ) {
				continue; // no face in this frame - try the next one
			} else {
				throw;
			}
		}
	}
	// NOTE(review): if localisation never succeeds the loop falls through with
	// localisationframeNumber == tagCount, so tagsLeft below becomes -1 and
	// trackAndLoadSamples simply tracks nothing - confirm this is intended.
	
	int firstFrameToTrack = localisationframeNumber + 1; //start tracking on the frame following the one on which localisation succeeded
	
	videos.push_back( capPtr );
	tagsLeftList.push_back( tagCount - firstFrameToTrack );
	gestureSequences.push_back( gestureTagSequencePtr );
	probTransList.push_back( hl->getProbabilityTransformer() );
	localisationFrameList.push_back( frame );
	trackingRectList.push_back( trackingRect );
	
	totalSampleCount += tagCount - firstFrameToTrack; //tags left
}

// Full training pipeline:
//  1. replays every prepared video through a HandTracker whose classifier is a
//     ClassificationDataLoader, filling trainingData/trainingResponses;
//  2. loads a previously saved PCA basis from <pcaName>.pca.yml and projects
//     every sample into it (recomputing the PCA is currently disabled, so
//     pcaVectorCount is unused - see the TODO RAT sections);
//  3. trains an SVM on the compressed samples and saves it to <svmName>.svm.xml.
// Throws (via CV_Error) if the PCA file is missing or a gesture tag is bad.
void MLTrainer::train( const char* svmName, int pcaVectorCount, string pcaName ) {
	// -- ML Classifier memory allocation -- //
	Size perSampleSize = MLGestureClassifier::perSampleSize;
	// One row per sample, one column per pixel of the resized back-projection.
	Mat trainingData( totalSampleCount, perSampleSize.width*perSampleSize.height, CV_32FC1, Scalar::all(0) );	
	Mat trainingResponses( totalSampleCount, 1, CV_32FC1, Scalar::all(0) );
	int dataSampleIndex = 0; // shared write cursor, advanced by the data loaders
	
	cout << "in MLTRAINER trainingData.cols=" << trainingData.cols << endl;
	
	// -- Load training data into memory -- // 
	
	for (size_t i = 0; i < videos.size(); i++) { // size_t avoids the signed/unsigned comparison
		Ptr< VideoCapture > video = videos[i];
		int tagsLeft = tagsLeftList[i];
		Ptr< VideoGestureTagSequence > gestureSequencePtr = gestureSequences[i];
		Ptr< AbstractProbabilityTransformer > probTrans = probTransList[i];
		Mat frame = localisationFrameList[i];
		Rect trackingRect = trackingRectList[i];
		
		
		// initialising HandTracker
		// IMPORTANT this config is paramset1
		bool useHSV = false;
		float meanshiftWeightFactor = 5.0;
		float processNoiseCovScalar = 0.5;
		float measurementNoiseCovScalar = 20.0;
		float shortDistIgnoreThreshold = 10.0;
		
		// The "classifier" is really a recorder: on every tracked frame it
		// stores the back-projected hand image and its tag into trainingData.
		Ptr< GeometricGestureClassifier > classificationDataLoader = new ClassificationDataLoader(
																								  probTrans, gestureSequencePtr,
																								  trainingData, trainingResponses,
																								  dataSampleIndex
																								  );
		
		Ptr< AbstractHandTracker > ht = HandTracker::init( useHSV, meanshiftWeightFactor, processNoiseCovScalar,
														  measurementNoiseCovScalar, shortDistIgnoreThreshold,
														  trackingRect, frame,
														  classificationDataLoader );
		
		trackAndLoadSamples( video, tagsLeft, ht, frame, trackingRect );
	}
	
	// -- PCA -- //
	//TODO RAT Reenable
//	PCA pca = doPCA( trainingData, pcaVectorCount );
//	int eigenvectorIndexForQuantile = findEigVecForQuantile( pca, 0.95 );
//	
//	Mat reducedEigVecs( pca.eigenvectors, Rect( 0, 0, pca.eigenvectors.cols, eigenvectorIndexForQuantile+1 ) ); //NOTE: detail, but using indexForQuantile+1 because end is exclusive
//	Mat reducedEigVals( pca.eigenvalues, Rect( 0, 0, 1, eigenvectorIndexForQuantile+1 ) ); //only one column!
	
	//TODO RAT
		// -- PCA load/prepare -- //
		PCA reducedPCA;
		
		FileStorage fs( pcaName + ".pca.yml" , FileStorage::READ);
		
		if ( !fs.isOpened() ) {
			CV_Error( CV_StsError, "PCA file not found!"); //throws exception!
		}
		
		Mat eigenvectorsForLoading;
		Mat eigenvaluesForLoading;
		Mat meanForLoading;
		
		fs["eigenvectors"] >> eigenvectorsForLoading;
		fs["eigenvalues"] >> eigenvaluesForLoading;
		fs["mean"] >> meanForLoading;
		
		reducedPCA.eigenvectors = eigenvectorsForLoading.clone();
		reducedPCA.eigenvalues = eigenvaluesForLoading.clone();
		reducedPCA.mean = meanForLoading.clone();
	
	//TODO RAT reenable
//	PCA reducedPCA;
//	reducedPCA.eigenvectors = reducedEigVecs.clone();//reducedEigVecs.clone();
//	reducedPCA.eigenvalues = reducedEigVals.clone();
//	reducedPCA.mean = pca.mean.clone();
//	
//
//	pca.mean.rows = 100;
//	pca.mean.cols = 100;
//	
//	HiVisualiser::windowMatrixMap["Mean"] = pca.mean;
//	HiVisualiser::moveWindow( "Mean", 0, 480);
//	HiVisualiser::refreshWindow("Mean");
	
	//CLEAN UP!!!
	waitKey(33);
	
	Mat trainingDataCompressed( totalSampleCount, reducedPCA.eigenvectors.rows, CV_32FC1, Scalar::all(0) ); //One col coordinate per eigenvector
	
	
	//VISUALISATION: project/back-project every sample, accumulate the PCA
	//reconstruction error statistics, and show the intermediate images.
	{
		Mat reconstructed;
		
		double pcaSquareErrorSum = 0; // sum over samples of |x - reconstruct(x)|^2
		double totalSquareDev = 0;    // sum over samples of |x - mean|^2
		
		//TODO RAT
		double totalMoment2 = 0;      // sum over samples of |x|^2
		
		for( int i = 0; i < trainingData.rows; i++ )
		{
			Mat vec = trainingData.row(i);
			Mat compressed = trainingDataCompressed.row(i);

			// compress the vector
			reducedPCA.project(vec, compressed);
			// and then reconstruct it
			reducedPCA.backProject(compressed, reconstructed);
			// and measure the error
			

			//TODO RAT
			//Mat zeroMat = Mat( vec.rows, 1, CV_8UC1, Scalar::all(0) );
			
			Mat diff;
			absdiff( vec, reconstructed, diff );
			
			Mat diffFromMean;
			absdiff( vec, reducedPCA.mean, diffFromMean );

			double pcaErrorSquare = 0; //seriously, you actually get overflow with int?
			double devSquare = 0;
			double moment2 = 0;
			
			CV_Assert( diff.isContinuous() );
			int cols = diff.rows*diff.cols;
			for(int j = 0; j < cols; j++) {
				// BUGFIX: all three matrices are CV_32FC1, so both factors must
				// be read as float. The previous at<uchar> reads reinterpreted
				// raw bytes of the float data and produced meaningless sums.
				pcaErrorSquare += diff.at<float>( 0, j )*diff.at<float>( 0, j );
				devSquare += diffFromMean.at<float>( 0, j )*diffFromMean.at<float>( 0, j );
				moment2 += vec.at<float>( 0, j )*vec.at<float>( 0, j );
			}

			pcaSquareErrorSum += pcaErrorSquare;
			totalSquareDev += devSquare;
			totalMoment2 += moment2;
			
//			cout << "frame=" << i << endl;
//			cout << "pcaError^2=" << pcaError*pcaError << endl;
//			cout << "dev^2=" << dev*dev << endl;
//			cout << "pcaSquareErrorPreSum=" << pcaSquareErrorSum << endl;
//			cout << "cumulativeSquareDev=" << totalSquareDev << endl;
			
			// Display sample / reconstruction / diffs as 100x100 images.
			// BUGFIX: reshape() builds a header with the correct row stride;
			// the previous direct assignment to rows/cols kept the 1xN stride,
			// so every display row after the first was read from the wrong
			// memory (out of bounds for the owning matrices).
			// NOTE(review): the hard-coded 100 assumes perSampleSize is
			// 100x100 - confirm against MLGestureClassifier.
			HiVisualiser::windowMatrixMap["Precomp"] = vec.reshape( 0, 100 );
			HiVisualiser::moveWindow( "Precomp", 0, 120);
			HiVisualiser::refreshWindow("Precomp");
			
			HiVisualiser::windowMatrixMap["Recons"] = reconstructed.reshape( 0, 100 );
			HiVisualiser::moveWindow( "Recons", 0, 240);
			HiVisualiser::refreshWindow("Recons");
			
			HiVisualiser::windowMatrixMap["Diff"] = diff.reshape( 0, 100 );
			HiVisualiser::moveWindow( "Diff", 0, 360);
			HiVisualiser::refreshWindow("Diff");
			
			HiVisualiser::windowMatrixMap["MDiff"] = diffFromMean.reshape( 0, 100 );
			HiVisualiser::moveWindow( "MDiff", 0, 480);
			HiVisualiser::refreshWindow("MDiff");
		
			waitKey(100);
			
		}
		cout << "pca residual square error  =" << pcaSquareErrorSum << endl;
		cout << "total square dev           =" << totalSquareDev << endl;
		cout << "pca residual error fraction=" << pcaSquareErrorSum/totalSquareDev << endl;
		cout << "total moment2              =" << totalMoment2 << endl;
	}

	//TODO RAT Reenable
//	FileStorage fs( pcaName + ".pca.yml" , FileStorage::WRITE);
//	fs << "eigenvectors" << reducedPCA.eigenvectors;
//	fs << "eigenvalues" << reducedPCA.eigenvalues;
//	fs << "mean" << reducedPCA.mean;

	trainAndSaveSVM( trainingDataCompressed, trainingResponses, svmName );
}


// Computes a PCA over the row-major sample matrix (one sample per row),
// retaining at most pcaVectorCount principal components.
PCA MLTrainer::doPCA( Mat& trainingData, int pcaVectorCount ) {
	return PCA( trainingData, Mat(), CV_PCA_DATA_AS_ROW, pcaVectorCount );
}

// Returns the index of the first eigenvector at which the cumulative
// eigenvalue mass (fraction of total variance) exceeds `quantile`, or -1 if
// the quantile is never reached.
int MLTrainer::findEigVecForQuantile( PCA& pca, float quantile ) {
	
	float eigValSum = sum(pca.eigenvalues)[0]; // total variance
	cout << "eigValSum=" << eigValSum << endl;
	float acc = 0;
	
	int eigVecForQuantile = -1;
	
	for( int i = 0; i < pca.eigenvalues.rows ; i++ ) {
		acc += pca.eigenvalues.at<float>( i, 0 );
		//cout << "index i=" << i << "  cumulative eigval frac=" << (acc/eigValSum) << endl;
		if ( (acc/eigValSum) > quantile ) {
			eigVecForQuantile = i;
			break; // later eigenvalues cannot change the answer - stop scanning
		}
	}
	
	cout << "quantile=" << quantile << endl; 
	cout << "eigVecForQuantile=" << eigVecForQuantile << endl;
	
	return eigVecForQuantile;
	
}


//Parallelise?
// Pumps every remaining tagged frame of the video through the tracker. The
// tracker's classifier callback (a ClassificationDataLoader) records one
// training sample per frame as a side effect; nothing is returned here.
// frame and trackingRect are taken by value, so they serve purely as
// scratch state for this loop.
void MLTrainer::trackAndLoadSamples( Ptr< VideoCapture > capPtr,
									int tagsLeft,
									Ptr< AbstractHandTracker > ht,
									Mat frame,
									Rect trackingRect
									) {
	int framesProcessed = 0;
	while ( framesProcessed < tagsLeft ) {
		// -- get next frame -- //
		(*capPtr) >> frame;
		// -- tracking frame -- //
		trackingRect = ht->track( frame );
		framesProcessed++;
		
		//VISUALISATION
		{
//			HiVisualiser::windowMatrixMap["Basic"] = frame;
//			HiVisualiser::refreshWindow("Basic");
//			waitKey(1);		
		}
	}
}


// Trains an SVM on the (PCA-compressed) training samples with automatic
// parameter search, then saves it to <svmName>.svm.xml under the model name
// svmName.
void MLTrainer::trainAndSaveSVM(Mat& trainingData, Mat& trainingResponses, const char* svmName ) {
	
	//FIXNOW
	//FULLTEST //FULL TEST
	// Raw values 100 and 2 correspond to CvSVM::C_SVC and CvSVM::RBF in the
	// legacy OpenCV ML enums (as the inline comments indicate).
	CvSVMParams svmParams( 100, //CvSVM::C_SVC,
						  2, //CvSVM::RBF,
						  1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
						  NULL,
						  TermCriteria( TermCriteria::MAX_ITER | TermCriteria::EPS, 100, 1e-20 ) );
	//FULLTEST!!! OR at least try and find out what iteration count is appropriate.
	//No idea if this is appropriate
	
	
	CvSVM svm; // default-construct directly; copy-initialising a CvStatModel buys nothing
	
	svm.train_auto( trainingData, trainingResponses, Mat(), Mat(), svmParams, 2 ); //Not ok idx? //586, 10, 20
	
	// BUGFIX: build the file name with std::string instead of
	// strAppend_orphanOut. The orphaned char* was released with `delete`,
	// although helpers like this typically allocate with new[] (mismatched
	// delete is undefined behaviour); RAII removes both the mismatch risk and
	// the leak-on-exception risk.
	string svmFileName = string( svmName ) + ".svm.xml";
	
	svm.save( svmFileName.c_str(), svmName ); //TODO save via resource tracker?
	
}


// -- Main Method -- //
// Entry point.
//
//	Usage:
//	MLTrainer.out <svmName> <pcaName> <video> [more videos]
//  SVM stored in svmName.svm.xml
//  PCA stored in pcaName.pca.yml
int main(int argc, char **argv) {
	
	// Guard clause: need at least one video after the two name stems.
	if (argc <= 3) {
		cout << "Usage:\n\t MLTrainer.out <svmName> <pcaName> <video> [more videos]\n\t SVM stored in svmName.svm.xml \n\t PCA stored in pcaName.pca.yml" << endl;
		return -1;
	}
	
	// -- Set up resource tracker -- //
	Ptr< ResourceTracker > videoRT = new ResourceTracker( "../config/runtimeSettings/cfg.xml" );
	videoRT->loadCategory("TestData");
	
	// -- prepare MLTrainer with videos -- //
	MLTrainer mlt;
	
	for (int vidArg = 3; vidArg < argc; vidArg++) {
		mlt.prepareVideo( videoRT->getFileVideoCapture( argv[vidArg] ),
						 videoRT->getGestureTags( argv[vidArg] ) );
	}
	
	mlt.train( argv[1], 1000, argv[2] ); //1000 eig vec in PCA
	
	return 0;
}









