//#define DEBUG 100
//#define DEBUG_MATCH false
//
//#ifdef DEBUG
//#define DEBUG_MSG(str) do { std::cout << str << std::endl; } while( false )
//#else
//#define DEBUG_MSG(str) do { } while ( false )
//#endif
//
//#include <stdio.h>
//#include <iostream>
//#include <fstream>
//#include <opencv2/core/core.hpp>
//#include <opencv2/features2d/features2d.hpp>
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/calib3d/calib3d.hpp>
////#include <opencv2/nonfree/nonfree.hpp>
//#include <opencv2/imgproc/imgproc.hpp>
//#include <opencv2/flann/flann.hpp>
//
//#define NUM_MARKER 6
//#define MINHESSIAN 400
//#define FRAMERATE 3 
//#define MARKER_DIRECTORY "C:/Documents and Settings/Roboteam/Desktop/Robot/Debug/markerqr"
//
//using namespace cv;
//using namespace std;
//
//// Per-marker drawing colors, indexed by marker id 1..NUM_MARKER-1 (slot 0 is unused —
//// all loops start at i=1 — which is presumably why indices 0 and 1 are both green).
//Scalar color[]={Scalar(0, 255, 0),Scalar(0, 255, 0),Scalar(0, 104, 255),Scalar(255, 64, 64),Scalar(255, 127, 36),Scalar(255, 255, 0)};
//
//
//// Computes the homography from the matched marker/scene points, projects the marker's
//// corners into the scene, optionally draws the outline, and returns the centre point.
//// NOTE(review): dead code — the whole file is commented out; Italian comments translated to English.
//Point2f disegna_marker_identificato(std::vector<Point2f>& obj, std::vector<Point2f>& scene, std::vector<Point2f>& obj_corners,std::vector<Point2f>& scene_corners,
//								 Mat& img_matches,Mat (&marker_vector)[NUM_MARKER],int best)	
//{
//	Mat H;
//	
//	// findHomography needs at least 4 point correspondences.
//	if(obj.size() >= 4 && scene.size() >= 4)
//	{
//		H = findHomography( obj , scene, CV_RANSAC );
//
//		// Get the corners of the object in img_1 (the marker-only image)
//		scene_corners.clear();
//		scene_corners.resize(4);
//
//		// NOTE(review): obj_corners[0] is assumed to already be (0,0); it is set once in
//		// main() before the loop — fragile coupling between this function and its caller.
//		obj_corners[1] = cvPoint( marker_vector[best].cols, 0 );
//		obj_corners[2] = cvPoint( marker_vector[best].cols, marker_vector[best].rows ); 
//		obj_corners[3] = cvPoint( 0, marker_vector[best].rows );
//
//		perspectiveTransform( obj_corners, scene_corners, H);
//		DEBUG_MSG("Coordinate Marker:" << obj_corners[0] << " - "<< obj_corners[1] << " - " << obj_corners[2] << " - " << obj_corners[3] << endl);
//		DEBUG_MSG("Coordinate Marker Scene:" << scene_corners[0] << " - " << scene_corners[1] << " - "<< scene_corners[2] << " - " <<  scene_corners[3] << endl);
//
//		// Draw the object's borders
//		if(DEBUG_MATCH)
//		{
//			line( img_matches, scene_corners[0], scene_corners[1], color[best], 9 );
//			line( img_matches, scene_corners[1], scene_corners[2], color[best], 9 );
//			line( img_matches, scene_corners[2], scene_corners[3], color[best], 9 );
//			line( img_matches, scene_corners[3], scene_corners[0], color[best], 9 );
//		}
//
//		// Centroid of the four projected corners, drawn as a thick dot (zero-length line).
//		Point2f punto_centrale((scene_corners[0].x + scene_corners[1].x+ scene_corners[2].x+ scene_corners[3].x)/4, (scene_corners[0].y + scene_corners[1].y+ scene_corners[2].y+ scene_corners[3].y)/4);
//		line( img_matches,  punto_centrale, punto_centrale,	color[best], 13);
//
//		return punto_centrale;
//	}
//
//	// NOTE(review): `return NULL` from a Point2f-returning function does not compile in
//	// standard C++ (no int->Point2f conversion) — leftover bug in this dead code.
//	return NULL;
//}
//
//
//// Loads the marker images markerqr1.jpg .. markerqr{NUM_MARKER-1}.jpg from
//// MARKER_DIRECTORY (slot 0 of each array is left unused) and precomputes their
//// SIFT keypoints and descriptors. Exits with code -5 if any image fails to load.
//// NOTE(review): dead code — whole file is commented out; comments translated from Italian.
//void processa_marker(std::vector<KeyPoint> (&keypoints_object) [NUM_MARKER],Mat (&marker_vector)[NUM_MARKER],Mat (&descriptors_object)[NUM_MARKER] )	
//{
//	SiftFeatureDetector detector;
//	SiftDescriptorExtractor extractor;
//
//	// Markers are numbered starting at 1; index 0 of every array stays empty.
//	for(int i=1;i<NUM_MARKER;i++)
//	{
//		std::stringstream sstm;
//		sstm << MARKER_DIRECTORY << i << ".jpg";
//	
//		marker_vector[i]=imread(sstm.str(), CV_LOAD_IMAGE_GRAYSCALE);
//
//		if( !marker_vector[i].data)
//		{ 
//			std::cout<< " --(!) Error reading images " << std::endl; 
//			exit(-5); 
//		}
//	
//		//GaussianBlur( img_o, img_object, Size( 9, 9 ), 0, 0 );
//
//		// Find the keypoints (NOTE(review): original comment said "SURF detector",
//		// but a SIFT detector is actually used above).
//		detector.detect( marker_vector[i], keypoints_object[i]);
//  
//		// Compute the descriptors (feature vectors)
//		extractor.compute( marker_vector[i], keypoints_object[i], descriptors_object[i]);
//	}
//
//	DEBUG_MSG("OK marker" << endl);
//
//}
//
//// Empty stub — the scene-processing steps were moved inline into main();
//// only the original (disabled) body remains here as a reference.
//void processa_scena( )	
//{
//	/*cvtColor(edges, img_scene, CV_RGB2GRAY);
//
//	detector.detect( img_scene, keypoints_scene );
//	extractor.compute( img_scene, keypoints_scene, descriptors_scene );*/
//
//}
//
//// Opens the capture device and returns it; exits with code -1 on failure.
//// NOTE(review): despite the name, this opens an OpenNI device (CV_CAP_OPENNI,
//// e.g. a Kinect-style sensor), not a regular webcam — confirm intent.
//VideoCapture init_webcam()	
//{
//	VideoCapture cap(CV_CAP_OPENNI); 
//    if(!cap.isOpened())  // exit the program on failure
//        exit(-1);
//
//	/*cap.set(CV_CAP_PROP_FPS, FRAMERATE);*/
//	DEBUG_MSG( "OK Video Open" << endl);
//
//	return cap;
//}
//
//// Abandoned k-means clustering experiment on the scene keypoints: the entire
//// body is disabled and the function unconditionally returns 1 (one "cluster").
//int keypoint_clustering(std::vector<KeyPoint>& keypoints_scene)	
//{
//	//Mat labels; 
//	//Mat points(keypoints_scene.size(),2, CV_32F,Scalar(1,1));
//	//Mat centers(5, 1, points.type());
//	//int ncluster=0; 
//
//	//for(int k=0;k<keypoints_scene.size();k++)
//	//{
//	//	DEBUG_MSG(keypoints_scene[k].pt << endl);
//	//	points.at<float>(k,0)=keypoints_scene[k].pt.x;
//	//	points.at<float>(k,1)=keypoints_scene[k].pt.y;
//	//}
//
//	//DEBUG_MSG("kmeans - inizio" << endl);
//
//	//kmeans(points,3, labels, TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0), 10, KMEANS_PP_CENTERS, centers);
//
//
//	//DEBUG_MSG("Label: " << labels << endl);
//	//DEBUG_MSG("Centri: " << centers << endl);
//	//DEBUG_MSG("kmeans - fine" << endl);
//
//	return 1;
//}
//
//// For marker i: finds the minimum match distance, keeps matches closer than
//// 2.5*min_dist in good_matches[i], and accumulates the total distance (distv[i])
//// and the number of good matches (nmatch[i]) for later averaging in min_distance().
//// NOTE(review): dead code — comments translated from Italian.
//void elab_matches(std::vector< DMatch > (&matches)[NUM_MARKER],std::vector< DMatch > (&good_matches)[NUM_MARKER],
//				  vector<double>& distv, vector<double>& nmatch , int rows, int i)	
//{
//	double min_dist=9999999;
//
//	nmatch[i]=0;
//	
//	//-- Find the minimum distance between matched keypoints
//	//   (original comment claimed "max and min", but only the min is computed).
//	for( int j = 0; j < rows; j++ )
//	{ 
//		double dist = (matches[i])[j].distance;
//		if( dist < min_dist ) min_dist = dist;
//	}
//
//	// Keep the satisfactory matches — NOTE(review): original comment said
//	// "distance < 3*min_dist" but the code actually uses 2.5*min_dist.
//	good_matches[i].clear();
//
//	for( int j = 0; j < rows; j++ )
//	{ 
//		if( (matches[i])[j].distance < 2.5*min_dist )
//			good_matches[i].push_back( (matches[i])[j]); 
//	}  
//
//	// Accumulate total distance and count of good matches for marker i
//	// (nmatch[i] ends up equal to good_matches[i].size()).
//	distv[i] = 0;
//	for( int j = 0; j < good_matches[i].size(); j++ )
//	{ 
//		distv[i] += (good_matches[i])[j].distance;
//		(nmatch[i])++;
//	}
//
//}
//
//// Converts each marker's accumulated distance into an average (distv[y]/nmatch[y])
//// and returns the index of the marker with the smallest average distance.
//// NOTE(review): if nmatch[y] is 0 the division yields inf/NaN (no guard), and
//// best_dist is a long int compared against double averages (silent truncation
//// on assignment) — latent bugs in this dead code.
//int min_distance(vector<double>& distv,vector<double>& nmatch)	
//{
//	long int best=0,best_dist=99999999;
//
//	// nmatch
//
//	// Marker ids start at 1; slot 0 is unused.
//	for(int y=1;y<NUM_MARKER;y++)
//	{ 
//		distv[y]=distv[y]/nmatch[y];
//
//		if( distv[y] < best_dist )
//		{
//			best_dist = distv[y];
//			best=y;
//		}
//	}
//
//	DEBUG_MSG(endl << " Best: " <<  best << endl);
//
//	return best;
//}
//
//// Converts the good matches into two parallel Point2f lists: the query-side
//// keypoints (keypoints_best, indexed by queryIdx) into obj and the train-side
//// keypoints (keypoints_scene, indexed by trainIdx) into scene.
//// NOTE(review): the first three parameters are passed by value — each call copies
//// the keypoint vectors; also the call in main() passes the arguments with swapped
//// roles (scene keypoints as "best", marker keypoints as "scene"), which is
//// consistent with the query/train order of matcher.match() but easy to misread.
//void extract_keypoint(std::vector<KeyPoint> keypoints_best,std::vector<KeyPoint> keypoints_scene,std::vector< DMatch > best_match,
//					 std::vector<Point2f>& obj, std::vector<Point2f>& scene)	
//{
//	// Locate the object from img_1 inside img_2
//	obj.clear();
//	scene.clear();
//
//	for( int j = 0; j < best_match.size(); j++ )
//	{
//		// Take the keypoints corresponding to the good matches
//		obj.push_back( keypoints_best[ best_match[j].queryIdx].pt );
//		scene.push_back( keypoints_scene[ best_match[j].trainIdx ].pt ); 
//	}
//
//	DEBUG_MSG(endl << "obj:" << obj.size() << "   scene:" << scene.size() << endl);
//
//}
//
//// Main loop: loads the markers, opens the OpenNI capture, then for every 20th
//// frame extracts SIFT features, FLANN-matches them against each marker, picks
//// the marker with the lowest average match distance, and draws/shows the result.
//// NOTE(review): dead code — whole file is commented out; comments translated from Italian.
//int main( int argc, char** argv )
//{	
//	std::vector<KeyPoint> keypoints_object[NUM_MARKER], keypoints_scene;
//	Mat marker_vector[NUM_MARKER], descriptors_object[NUM_MARKER], descriptors_scene, img_scene,prova, img_matches;
//
//	// Load the markers and their keypoints and descriptors
//	processa_marker(keypoints_object, marker_vector, descriptors_object);
//
//	// Open the default capture device (actually an OpenNI sensor — see init_webcam)
//	VideoCapture cap = init_webcam(); 
//
//	int conta_frame=0;
//	SiftFeatureDetector detector;
//	SiftDescriptorExtractor extractor;
//	FlannBasedMatcher matcher;
//	/*BFMatcher matcher(NORM_L2);*/
//	std::vector< DMatch > matches[NUM_MARKER], good_matches[NUM_MARKER];
//	std::vector<Point2f> obj,scene,obj_corners(4),scene_corners(4);	
//
//	// Corner 0 of the marker quad; the other three are filled per-frame in
//	// disegna_marker_identificato() — fragile cross-function coupling.
//	obj_corners[0] = cvPoint(0,0);
//
//	while(cap.grab())
//	{
//		cap.retrieve(img_scene, CV_CAP_OPENNI_BGR_IMAGE); // get a new frame from camera
//
//		// Throttle: only every 20th grabbed frame is fully processed.
//		conta_frame++;
//		if(conta_frame!=20)
//			continue;
//
//		conta_frame=0;
//
//		// NOTE(review): the frame is retrieved as BGR but converted with CV_RGB2GRAY;
//		// CV_BGR2GRAY would be the conventional choice (grayscale weights differ) — confirm.
//		cvtColor(img_scene, img_scene, CV_RGB2GRAY);
//
//		// Extract the keypoints and their descriptors
//		detector.detect( img_scene, keypoints_scene );
//		extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//	
//		// Cluster the keypoints with kmeans (disabled experiment)
//		/*int ncluster=keypoint_clustering(keypoints_scene); */
//
//		vector<double> distv(NUM_MARKER), nmatch(NUM_MARKER);
//
//		// Marker ids start at 1; slot 0 of every array is unused.
//		for(int i=1;i<NUM_MARKER;i++)
//		{
//			// Match the descriptors using a matcher (scene = query, marker = train)
//			matches[i].clear();
//			matcher.match( descriptors_scene, descriptors_object[i], matches[i]);
//
//			DEBUG_MSG(endl << "MARKER KEYP=" << descriptors_object[i].rows);
//			DEBUG_MSG(endl << "IMAGE KEYP=" << descriptors_scene.rows);
//			DEBUG_MSG(endl << "Matching KEYP=" << matches[i].size());
//			DEBUG_MSG(endl << "number=" << i);
//
//			// Select the best matches and accumulate the distance sum
//			elab_matches(matches, good_matches, distv, nmatch, matches[i].size(),i);
//		}
//
//		// Identify which marker is in the image (lowest average distance)
//		int best = min_distance(distv, nmatch);
//
//		DEBUG_MSG(endl << "BEST = " << best);
//
//		if(DEBUG_MATCH)
//		{
//			drawMatches(img_scene, keypoints_scene, marker_vector[best], keypoints_object[best], good_matches[best], img_matches,
//						Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); 
//		}
//		else
//			img_matches=img_scene;
//
//		// Locate the object from img_1 inside img_2 — note the argument order:
//		// scene keypoints are the query side, marker keypoints the train side.
//		extract_keypoint(keypoints_scene,keypoints_object[best],good_matches[best], scene, obj);
//		disegna_marker_identificato(obj, scene, obj_corners, scene_corners, img_matches, marker_vector, best);	
//
//		// Display
//		imshow( "Object detection", img_matches );
//		
//		/*cvWaitKey(20);*/
//
//		// Exit on any key press.
//		if(waitKey(30) >= 0) break;
//		
//	} 	 
//
//	return 0;
//}
//
