
#include <iostream>


#include <cv.h>
#include <highgui.h>
#include <ml.h>
#include "resourceTracker.h"
#include "handLocaliser.hpp"
#include "abstractProbabilityTransformer.hpp"

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"


/*
 *  GestureClassifier_scrap.cpp
 *  
 *
 *  Created by Rasmus Kyng on 11/01/2011.
 *  Copyright 2011 __MyCompanyName__. All rights reserved.
 *
 */


//class GestureClassifier {
//public:
//	train( CvDTreeTrainData*
//
//}

using namespace cv;

// Shared state between initTest() and main().
// NOTE(review): raw owning pointers (deleted at end of main); fine for a
// scrap/test program, but a leak occurs if main exits early.
ResourceTracker* rt;   // loads config + test resources (video, point tags)
HandLocaliser* hl;     // provides initial hand detection and the probability transformer
cv::Ptr< cv::VideoCapture > capPtr;       // test video stream, obtained from rt
cv::Ptr< VideoPointTagSequence > seqPtr;  // ground-truth point tags — presumably one per frame (main consumes tagCount of them); verify against ResourceTracker

Rect handRect; // current hand region of interest, kept clamped inside the frame

Mat frame; // most recently captured video frame

// Loads test resources for the named video, grabs the first frame, and
// initialises the global hand ROI (handRect) from the hand localiser,
// clamped so it always lies fully inside the frame.
// Side effects: assigns all file-scope globals (rt, hl, capPtr, seqPtr,
// frame, handRect).
void initTest(const char* videoNameString) {
	
	rt = new ResourceTracker( "../config/runtimeSettings/cfg.xml" );
	rt->loadCategory("TestData");
	
	capPtr = rt->getFileVideoCapture( videoNameString );
	seqPtr = rt->getPointTags( videoNameString );
	
	(*capPtr) >> frame;
	
	hl = new HandLocaliser();
	
	handRect = hl->localiseHand( frame );
	
	// Force the rectangle to stay inside the frame. Clipping the rect (rather
	// than padding the frame) keeps downstream code simple.
	// FIX: clamp the size first — otherwise a rect wider/taller than the frame
	// makes `frame.cols - handRect.width` negative, producing an invalid ROI
	// that crashes Mat(frame, handRect) later.
	if ( handRect.width > frame.cols ) handRect.width = frame.cols;
	if ( handRect.height > frame.rows ) handRect.height = frame.rows;
	if ( handRect.x < 0 ) handRect.x = 0;
	if ( frame.cols < handRect.x + handRect.width ) handRect.x = frame.cols - handRect.width;
	if ( handRect.y < 0 ) handRect.y = 0;
	if ( frame.rows < handRect.y + handRect.height ) handRect.y = frame.rows - handRect.height;
}


int main(int argc, const char* argv[]) {
	
	const char* videoNameString;
	
	if (argc == 1) {
		videoNameString = "locVid1c";
	} else {
		videoNameString = argv[1];
	}
	
	
	initTest( videoNameString );
	
	int tagCount = seqPtr->tagCount;
	
	// -- visualisation: set up -- //
	
	char* windows[] = { "classifier scrap", "1", "2", "3", "4", "5", "6", "7", "8", "9"  };
	int wc = 5;
	
	//Note: using OpenCV/C function as this has not been implemented in OpenCV/C++
	
	int xbase = frame.cols;
	int xskip = 250;
	int ybase = 100;
	int yskip = 100; //handRect.height
	
	namedWindow( windows[0], 1);
	cvMoveWindow( windows[0], 0, ybase);
	
	for (int i = 1; i<wc; i++) {
		namedWindow( windows[i], 1);
		if ( i % 2 == 1) {
			cvMoveWindow( windows[i], xbase, ybase+(i-1)*yskip);
		} else {
			cvMoveWindow( windows[i], xbase+xskip, ybase+(i-2)*yskip);
		}


	}
	
	
	AbstractProbabilityTransformer* pt = hl->getProbabilityTransformer();
	
	
	for( int frameCount = 0; frameCount<tagCount; frameCount++) {
		
		
		
		// -- tracking frame -- //
		Point groundTruth = seqPtr->getNextPointTag();
		handRect = Rect( groundTruth.x - handRect.width/2, groundTruth.y - handRect.height/2, handRect.width, handRect.height );
		
		
		//forcing rectangle to stay inside frame
		if ( handRect.x < 0 ) handRect.x = 0;
		if ( frame.cols < handRect.x + handRect.width ) handRect.x = frame.cols - handRect.width;
		if ( handRect.y < 0 ) handRect.y = 0;
		if ( frame.rows < handRect.y + handRect.height ) handRect.y = frame.rows - handRect.height;
		
		
		// -- segmentation on hand ROI -- //
		
		Mat handImg(frame, handRect);
		
		Mat backProj, handHSV;
		
		//Compute HSV for backprojection
		cvtColor(handImg, handHSV, CV_BGR2HSV);
		
		pt->getBackProjection( handHSV, backProj);
		
		Mat bpThreshed;
		
		threshold( backProj, bpThreshed, 30, 255, THRESH_BINARY); //FIX HOW to pick?
		
		// contours
		Mat bptForContour; //NOTE: is consumed!
		
		bpThreshed.copyTo( bptForContour );
		
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		
		findContours( bptForContour, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE ); //TODO CV_RETR_CCOMP may be more appropriate?
		
		
		//get largest contour:
		int contourCount = contours.size();
		int largestContourIndex = -1;
		
		double largestContourArea = 0;
		
		if ( contourCount > 0 ) {
			for( int i = 0; i<contourCount; i++) {
				double currentContourArea = contourArea( Mat( contours[i] ) );
				if ( currentContourArea > largestContourArea ) {
					largestContourArea = currentContourArea;
					largestContourIndex = i;
				}
			}
		}
		
//		double contourAreaThreshold = handRect.x*handRect.y / 4; //FIX What thres?
//		
//		if (largestContourArea < contourAreaThreshold) {
//			//FIXNOW handle tracking lost!
//		}
		
		
		vector<Point> convexHullVector;
		
		convexHull( Mat( contours[largestContourIndex] ), convexHullVector );
		
		double convexHullArea = contourArea( Mat(convexHullVector) );
		
		std::cout << "Contour area          = " << largestContourArea << std::endl;
		std::cout << "Convex hull area      = " << convexHullArea << std::endl;
		std::cout << "Convexity defect area = " << convexHullArea - largestContourArea << std::endl;
		
		// -- visualisation: display -- //
		
		{
			rectangle( frame, Point(handRect.x, handRect.y),
					  Point(handRect.x + handRect.width, handRect.y + handRect.height),
					  Scalar::all(250));
			
			circle( frame, groundTruth, 1, Scalar(0, 255, 0), 2 ); //radius 1, thickness 2, colour green
			//NOTE: BGR colour!
			
			Mat contoursImg = Mat::zeros( bptForContour.size(), CV_8UC3);
			
			if( largestContourIndex != -1 )
			{
				Scalar colorBlue( 255, 0 , 0 );
				drawContours( contoursImg, contours, largestContourIndex, colorBlue, CV_FILLED, 8, hierarchy );
				
				
				Point* contourVertices = &convexHullVector[0];
				const Point** contourVerticesPrts  = (const Point**)&contourVertices;
				int contoursVerticesCount[] = { convexHullVector.size() };
				int contoursToDraw = 1;
				bool isClosed = true;
				Scalar colorGreen( 0, 255, 0 );
				polylines( contoursImg, contourVerticesPrts, contoursVerticesCount, contoursToDraw, isClosed, colorGreen );
				
			}
			
			
			Mat visuals[] = {frame, handImg, backProj, bpThreshed, contoursImg };
			
			for (int i = 0; i<wc; i++) {
				imshow( windows[i], visuals[i] );
			}
	
		}
		
		// -- get next frame -- //
		(*capPtr) >> frame;
		
		//waitKey(frameCount*2+1);
		char c = waitKey(0);
		if( c == 27 ) break;
	}
	
	
	
	delete rt;
	delete hl;
	
	//delete boostClassifier;
	
	return 0;
	
}