
#include <stdio.h>


#include <cv.h>
#include <highgui.h>
#include <ml.h>
#include "resourceTracker.h"
#include "handLocaliser.hpp"
#include "abstractProbabilityTransformer.hpp"

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"


/*
 *  GestureClassifier_scrap.cpp
 *  
 *
 *  Created by Rasmus Kyng on 11/01/2011.
 *  Copyright 2011 __MyCompanyName__. All rights reserved.
 *
 */


//class GestureClassifier {
//public:
//	train( CvDTreeTrainData*
//
//}

using namespace cv;

// --- Globals shared between initTest() and main() ---
// NOTE(review): raw owning pointers; rt and hl are new'ed in initTest()
// and deleted at the end of main(). Fine for a scrap/test program.

ResourceTracker* rt;   // config/resource loader (project type) — created in initTest()
HandLocaliser* hl;     // hand detector (project type) — created in initTest()
cv::Ptr< cv::VideoCapture > capPtr;          // video source obtained from rt
cv::Ptr< VideoPointTagSequence > seqPtr;     // ground-truth point tags for the video (project type)

Rect handRect;  // current hand ROI, kept clamped inside the frame bounds

Mat frame;      // most recently grabbed video frame (BGR, per VideoCapture convention)

void initTest(const char* videoNameString) {
	
	rt = new ResourceTracker( "../config/runtimeSettings/cfg.xml" );
	rt->loadCategory("TestData");
	
	capPtr = rt->getFileVideoCapture( videoNameString );
	seqPtr = rt->getPointTags( videoNameString );
	
	(*capPtr) >> frame;
	
	hl = new HandLocaliser();
	
	handRect = hl->localiseHand( frame );
	
	//forcing rectangle to stay inside frame
	//FIX: this approach is simplistic?
	//used clipped frame instead?
	//maybe not, this approach does seem quite easy to use as user
	if ( handRect.x < 0 ) handRect.x = 0;
	if ( frame.cols < handRect.x + handRect.width ) handRect.x = frame.cols - handRect.width;
	if ( handRect.y < 0 ) handRect.y = 0;
	if ( frame.rows < handRect.y + handRect.height ) handRect.y = frame.rows - handRect.height;
}


int main(int argc, const char* argv[]) {
	
	const char* videoNameString;
	
	if (argc == 1) {
		videoNameString = "locVid1c";
	} else {
		videoNameString = argv[1];
	}
	
	
	initTest( videoNameString );
	
	int tagCount = seqPtr->tagCount;
	
	// -- visualisation: set up -- //
	
	char* windows[] = { "classifier scrap", "1", "2", "3", "4", "5", "6", "7", "8", "9"  };
	int wc = 10;
	
	//Note: using OpenCV/C function as this has not been implemented in OpenCV/C++
	
	int xbase = frame.cols;
	int xskip = 250;
	int ybase = 100;
	int yskip = 100; //handRect.height;
	
	printf("%d\n", handRect.height);
	
	namedWindow( windows[0], 1);
	cvMoveWindow( windows[0], 0, ybase);
	
	for (int i = 1; i<wc; i++) {
		namedWindow( windows[i], 1);
		if ( i % 2 == 1) {
			cvMoveWindow( windows[i], xbase, ybase+(i-1)*yskip);
		} else {
			cvMoveWindow( windows[i], xbase+xskip, ybase+(i-2)*yskip);
		}


	}
	
	
	AbstractProbabilityTransformer* pt = hl->getProbabilityTransformer();
	
	
	for( int frameCount = 0; frameCount<tagCount; frameCount++) {
		
		
		
		// -- tracking frame -- //
		Point groundTruth = seqPtr->getNextPointTag();
		handRect = Rect( groundTruth.x - handRect.width/2, groundTruth.y - handRect.height/2, handRect.width, handRect.height );
		
		
		//forcing rectangle to stay inside frame
		if ( handRect.x < 0 ) handRect.x = 0;
		if ( frame.cols < handRect.x + handRect.width ) handRect.x = frame.cols - handRect.width;
		if ( handRect.y < 0 ) handRect.y = 0;
		if ( frame.rows < handRect.y + handRect.height ) handRect.y = frame.rows - handRect.height;
		
		
		// -- segmentation on hand ROI -- //
		
		Mat handImg(frame);
		
		Mat backProj, handHSV;
		
		//Compute HSV for backprojection
		cvtColor(handImg, handHSV, CV_BGR2HSV); //FIXNOW : use one buffer for HSV (3 channel) and another for probproj (1 channel)
		
		pt->getBackProjection( handHSV, backProj);
		
		Mat bpThreshed;
		
		threshold( backProj, bpThreshed, 30, 255, THRESH_BINARY); //HOW to pick?
		
		// convert image to grayscale
		Mat handGreyImg;
		cvtColor( handImg, handGreyImg, CV_BGR2GRAY );
		
		//Detect edges
		Mat edgeImg;
		Canny( handGreyImg, edgeImg, 5, 70 );
		
		//Thicken edges
		Mat thickEdge;
		dilate( edgeImg, thickEdge, Mat(), Point(-1, -1), 1);
		
		//Get thresholded bp with edges zeroed
		
		Mat bptez;
		
		bptez = bpThreshed - thickEdge;
		
		
		// contours
		Mat bptezForContour; //NOTE: is consumed!
		
		bptez.copyTo( bptezForContour );
		
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		
		findContours( bptezForContour, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
		
		
		// contours
		Mat bptForContour; //NOTE: is consumed!
		
		bpThreshed.copyTo( bptForContour );
		
		vector<vector<Point> > contours1;
		vector<Vec4i> hierarchy1;
		
		findContours( bptForContour, contours1, hierarchy1, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
		
		// -- visualisation: display -- //
		
		{
			rectangle( frame, Point(handRect.x, handRect.y),
					  Point(handRect.x + handRect.width, handRect.y + handRect.height),
					  Scalar::all(250));
			
			circle( frame, groundTruth, 1, Scalar(0, 255, 0), 2 ); //radius 1, thickness 2, colour green
			//NOTE: BGR colour!
			
			Mat contoursImg = Mat::zeros( bptezForContour.size(), CV_8UC3);
			
			if( contours.size() > 0 )
			{
				// iterate through all the top-level contours,
				// draw each connected component with its own random color
				int idx = 0;
				for( ; idx >= 0; idx = hierarchy[idx][0] )
				{
					Scalar color( (rand()&255)+10, (rand()&255)+10, (rand()&255)+10 );
					drawContours( contoursImg, contours, idx, color, CV_FILLED, 8, hierarchy );
				}
			}
			
			
			Mat contoursImg1 = Mat::zeros( bptForContour.size(), CV_8UC3);
			
			if( contours1.size() > 0 )
			{
				// iterate through all the top-level contours,
				// draw each connected component with its own random color
				int idx = 0;
				for( ; idx >= 0; idx = hierarchy1[idx][0] )
				{
					Scalar color( (rand()&255)+10, (rand()&255)+10, (rand()&255)+10 );
					drawContours( contoursImg1, contours1, idx, color, CV_FILLED, 8, hierarchy1 );
				}
			}
			
			
			Mat visuals[] = {frame, handImg, backProj, bpThreshed, bptez, handGreyImg, edgeImg, thickEdge, contoursImg, contoursImg1 };
			
//			for (int i = 0; i<wc; i++) {
//				imshow( windows[i], visuals[i] );
//			}
			
			imshow( windows[0], visuals[0] );
			imshow( windows[8], visuals[8] );
			imshow( windows[9], visuals[9] );
			
			
			
		}
		
		// -- get next frame -- //
		(*capPtr) >> frame;
		
		//waitKey(frameCount*2+1);
		char c = waitKey(0);
		if( c == 27 ) break;
	}
	
	
	
	delete rt;
	delete hl;
	
	//delete boostClassifier;
	
	return 0;
	
}