////#include <cv.h>
////#include <highgui.h>
////#include <Aria.h>
////
////using namespace cv;
////using namespace std;
//////void riconosciColoriHSV(Mat in, Mat out);
////int main(int argc, char** argv) {
////
////	VideoCapture capture;
////	Mat input;
////	Mat output;
////	capture.open( CV_CAP_OPENNI );
////	if( !capture.isOpened() )
////	    {
////	        cout << "Can not open a capture object." << endl;
////	        return -1;
////	    }
////	while (true)
////	{
////		capture.grab();
////		capture.retrieve( input, CV_CAP_OPENNI_DEPTH_MAP);
////		Mat hsv;
////		cvtColor(input, hsv, CV_BGR2HSV);
////		imshow()
////
////
////
////		//riconosciColoriHSV(input, output);
////			imshow( "rgb image", input);
////
////			if( waitKey( 10 ) == 27 )
////		            break;
////
////	}
////
////	return 0;
////}
////int main()
////{
////    Mat src = imread("color-balls.jpg");
////    if (src.empty())
////        return -1;
////
////    Mat hsv;
////    cvtColor(src, hsv, CV_BGR2HSV);
////
////    Mat bw;
////    inRange(hsv, Scalar(19, 204, 153), Scalar(27, 255, 255), bw);
////
////    vector<vector<Point> > contours;
////    findContours(bw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
////
////    Mat dst = Mat::zeros(src.size(), src.type());
////    drawContours(dst, contours, -1, Scalar::all(255), CV_FILLED);
////
////    dst &= src;
////
////    imshow("src", src);
////    imshow("dst", dst);
////    waitKey(0);
////
////    return 0;
////}
////
////void riconosciColoriHSV(Mat in, Mat out){
////	int r,c;
////	double hMinYellow,hMaxYellow,sMinYellow,sMaxYellow,vMinYellow,vMaxYellow;
////	double hMinOrange,hMaxOrange,sMinOrange,sMaxOrange,vMinOrange,vMaxOrange;
////	CvScalar p,yellow,orange;
////	Mat tmp(in.rows, in.cols, in.depth());
////
////	//tmp.create(in.rows, in.cols, in.depth());
////
////	/* Conversione dei colori BGR in HSV*/
////	cvCvtColor(&in,&tmp,CV_BGR2HSV)
////
////	/* Settaggio dei parametri delle labels */
////	yellow.val[0]=0;
////	yellow.val[1]=255;
////	yellow.val[2]=255;
////	orange.val[0]=0;
////	orange.val[1]=127;
////	orange.val[2]=255;
////
////	/* Settaggio dei range*/
////	hMinYellow=25;
////	hMaxYellow=40;
////	sMinYellow=114;
////	sMaxYellow=255;
////	vMinYellow=127.5;
////	vMaxYellow=255;
////	hMinOrange=4;
////	hMaxOrange=17.5;
////	sMinOrange=126;
////	sMaxOrange=255;
////	vMinOrange=117.5;
////	vMaxOrange=255;
////
////	/* Formule di conversione (conversion formulas to OpenCV ranges) */
////
////	//	H: val_opencv = val*180/360;
////	//	S: val_opencv = val*255/100;
////	//	V: val_opencv = val*255/100;
////
////	for(r=0; r<out.rows; r++)
////		   for(c=0; c<out.cols; c++){
////			   p = cvGet2D(&tmp, r, c);
////			   if( (p.val[0]>=hMinYellow && p.val[0]<=hMaxYellow )
////					   && (p.val[1]>=sMinYellow && p.val[1]<=sMaxYellow )
////					   && (p.val[2]>=vMinYellow && p.val[2]<=vMaxYellow) ){
////				   //cvSet2D(tmp, r, c, yellowHSV);
////			   	   cvSet2D(&out,r,c,yellow);
////			   }
////			   else if( (p.val[0]>=hMinOrange && p.val[0]<=hMaxOrange )
////					   && (p.val[1]>=sMinOrange && p.val[1]<=sMaxOrange )
////					   && (p.val[2]>=vMinOrange && p.val[2]<=vMaxOrange) )
////				   cvSet2D(&out, r, c, orange);
////			   else
////				   cvSet2D(&out, r, c, cvScalarAll(0));
////		   }
////	tmp.deallocate();
////}
//
//#include <opencv2/opencv.hpp>
//#include <opencv/cv.h>
//#include <sstream>
//#include <string>
//#include <iostream>
//#include <opencv/highgui.h>
//#include "VisionControllerBW.h"
//
//using namespace cv;
////initial min and max HSV filter values.
////these will be changed using trackbars
//int Y_MIN = 0;
//int Y_MAX = 256;
//int U_MIN = 0;
//int U_MAX = 256;
//int V_MIN = 0;
//int V_MAX = 256;
////default capture width and height
//const int FRAME_WIDTH = 640;
//const int FRAME_HEIGHT = 480;
////max number of objects to be detected in frame
//const int MAX_NUM_OBJECTS = 50;
////minimum and maximum object area
//const int MIN_OBJECT_AREA = 20 * 20;
//const int MAX_OBJECT_AREA = FRAME_HEIGHT * FRAME_WIDTH / 1.5;
////names that will appear at the top of each window
//const string windowName = "Original Image(peppe)";
//const string windowName1 = "YUV Image";
//const string windowName2 = "Thresholded Image";
//const string windowName3 = "After Morphological Operations";
//const string trackbarWindowName = "Trackbars";
//
//void on_trackbar(int, void*) { //This function gets called whenever a
//							   // trackbar position is changed
//
//}
//string intToString(int number) {
//
//	std::stringstream ss;
//	ss << number;
//	return ss.str();
//}
//void createTrackbars() {
//	//create window for trackbars
//
//	namedWindow(trackbarWindowName, 0);
//	//create memory to store trackbar name on window
//	char TrackbarName[50];
//	sprintf(TrackbarName, "Y_MIN", Y_MIN);
//	sprintf(TrackbarName, "Y_MAX", Y_MAX);
//	sprintf(TrackbarName, "U_MIN", U_MIN);
//	sprintf(TrackbarName, "U_MAX", U_MAX);
//	sprintf(TrackbarName, "V_MIN", V_MIN);
//	sprintf(TrackbarName, "V_MAX", V_MAX);
//	//create trackbars and insert them into window
//	//3 parameters are: the address of the variable that is changing when the trackbar is moved(eg.H_LOW),
//	//the max value the trackbar can move (eg. H_HIGH),
//	//and the function that is called whenever the trackbar is moved(eg. on_trackbar)
//	//                                  ---->    ---->     ---->
//	createTrackbar("Y_MIN", trackbarWindowName, &Y_MIN, Y_MAX, on_trackbar);
//	createTrackbar("Y_MAX", trackbarWindowName, &Y_MAX, Y_MAX, on_trackbar);
//	createTrackbar("U_MIN", trackbarWindowName, &U_MIN, U_MAX, on_trackbar);
//	createTrackbar("U_MAX", trackbarWindowName, &U_MAX, U_MAX, on_trackbar);
//	createTrackbar("V_MIN", trackbarWindowName, &V_MIN, V_MAX, on_trackbar);
//	createTrackbar("V_MAX", trackbarWindowName, &V_MAX, V_MAX, on_trackbar);
//
//}
//void drawObject(int x, int y, Mat &frame) {
//
//	//use some of the openCV drawing functions to draw crosshairs
//	//on your tracked image!
//
//	//UPDATE:JUNE 18TH, 2013
//	//added 'if' and 'else' statements to prevent
//	//memory errors from writing off the screen (ie. (-25,-25) is not within the window!)
//
//	circle(frame, Point(x, y), 20, Scalar(0, 255, 0), 2);
//	if (y - 25 > 0)
//		line(frame, Point(x, y), Point(x, y - 25), Scalar(0, 255, 0), 2);
//	else
//		line(frame, Point(x, y), Point(x, 0), Scalar(0, 255, 0), 2);
//	if (y + 25 < FRAME_HEIGHT)
//		line(frame, Point(x, y), Point(x, y + 25), Scalar(0, 255, 0), 2);
//	else
//		line(frame, Point(x, y), Point(x, FRAME_HEIGHT), Scalar(0, 255, 0), 2);
//	if (x - 25 > 0)
//		line(frame, Point(x, y), Point(x - 25, y), Scalar(0, 255, 0), 2);
//	else
//		line(frame, Point(x, y), Point(0, y), Scalar(0, 255, 0), 2);
//	if (x + 25 < FRAME_WIDTH)
//		line(frame, Point(x, y), Point(x + 25, y), Scalar(0, 255, 0), 2);
//	else
//		line(frame, Point(x, y), Point(FRAME_WIDTH, y), Scalar(0, 255, 0), 2);
//
//	putText(frame, intToString(x) + "," + intToString(y), Point(x, y + 30), 1,
//			1, Scalar(0, 255, 0), 2);
//
//}
//void morphOps(Mat &thresh) {
//
//	//create structuring element that will be used to "dilate" and "erode" image.
//	//the element chosen here is a 3px by 3px rectangle
//
//	morphologyEx(thresh, thresh, MORPH_OPEN, Mat(), Point(-1, -1), 2);
//	morphologyEx(thresh, thresh, MORPH_CLOSE, Mat(), Point(-1, -1), 2);
//
//}
//int main(int argc, char* argv[]) {
//	//some boolean variables for different functionality within this
//	//program
//	bool trackObjects = false;
//	bool useMorphOps = false;
//	int b=50;
//	VisionControllerBW* v = new VisionControllerBW(0.5,2,5,&b,&b);
////	while(1){
////		v->processFrame();
////	}
//
//	//Matrix to store each frame of the webcam feed
//	Mat cameraFeed;
//	//matrix storage for HSV image
//	Mat YUV;
//	//matrix storage for binary threshold image
//	Mat threshold;
//	//x and y values for the location of the object
//	int x = 0, y = 0;
//	//create slider bars for HSV filtering
//	createTrackbars();
//	//video capture object to acquire webcam feed
//	cv::VideoCapture capture;
//	//open capture object at location zero (default location for webcam)
//	capture.open(CV_CAP_OPENNI);
//	//capture.open(0);
//	//set height and width of capture frame
//	int i = 0;
//	//start an infinite loop where webcam feed is copied to cameraFeed matrix
//	//all of our operations will be performed within this loop
//	while (1) {
//		i++;
////		std::cout << "peppe!"<< std::endl;
////		std::cout << i <<std::endl;
//		if (capture.grab()) {
//			Target* t =v->processFrame();
//			//delete t;
//			vector<uint16_t>* left =v->getLeftObstaclesDistances();
//			//delete left;
//
//			Mat depth, depth8, out, blurred;
//			capture.retrieve(cameraFeed, CV_CAP_OPENNI_BGR_IMAGE);
//			capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);
//			depth.convertTo(depth8, CV_8UC1, ((double) 1 / (double) 256));
//			//capture.retrieve(cameraFeed);
//			imshow("depth8", depth8);
////			Canny(depth8,out,40,100);
////			imshow("canny",out);
//			medianBlur(cameraFeed, blurred, 7);
//			//convert frame from BGR to HSV colorspace
//			cvtColor(blurred, YUV, COLOR_BGR2HSV);
////			vector<Mat> splitted;
////			split(YUV,splitted);
////			imshow("Hue" ,splitted.at(0));
////			imshow("Sat" ,splitted.at(1));
////			imshow("Value" ,splitted.at(2));
//			//filter HSV image between values and store filtered image to
//			//threshold matrix
//			inRange(YUV, Scalar(Y_MIN, U_MIN, V_MIN),
//					Scalar(Y_MAX, U_MAX, V_MAX), threshold);
//			//perform morphological operations on thresholded image to eliminate noise
//			//and emphasize the filtered object(s)
//			if (useMorphOps)
//				morphOps(threshold);
//			//pass in thresholded frame to our object tracking function
//			//this function will return the x and y coordinates of the
//			//filtered object
//
//			//show frames
////			imshow(windowName2, threshold);
////			imshow(windowName, cameraFeed);
////			imshow(windowName1, YUV);
//
//			//delay 30ms so that screen can refresh.
//			//image will not appear without this waitKey() command
//			time_t timestamp;
//			time(&timestamp);
//			int key = 0;
//			key = waitKey(1);
//			//cout << intToString(key) <<endl;
//			cout << key << endl;
//			std::stringstream ss;
//
//			if (key == 1048586 || key == 10) //invio
//					{
//				ss << "/home/raf/Scrivania/img" << timestamp << ".jpg";
//				cout << ss.str();
//				imwrite(ss.str(), cameraFeed);
//			}
//
//		}
//	}
//
//	return 0;
//}
//
//int pixelStretching(int x, int r1, int s1, int r2, int s2) {
//	float result;
//	if (0 <= x && x <= r1) {
//		result = s1 / r1 * x;
//	} else if (r1 < x && x <= r2) {
//		result = ((s2 - s1) / (r2 - r1)) * (x - r1) + s1;
//	} else if (r2 < x && x <= 255) {
//		result = ((255 - s2) / (255 - r2)) * (x - r2) + s2;
//	}
//	return (int) result;
//}
//
//void contrastStretching(Mat& img, int r1, int s1, int r2, int s2) {
//	for (int y = 0; y < img.rows; y++) {
//		for (int x = 0; x < img.cols; x++) {
//			int output = pixelStretching(img.at<uchar>(y, x), r1, s1, r2, s2);
//			img.at<uchar>(y, x) = saturate_cast<uchar>(output);
//
//		}
//	}
//}

