/*
*  vision_control_server.c
*
*
*  Created by Martin Neururer on 8/30/09.
*  Copyright 2009 ITBA. All rights reserved.
*
*/

#ifdef _CH_
	#pragma package <opencv>
#endif

#include <string.h>
#include "find_patterns.h"

#define ERROR -1
#define EXIT_SUCCESS 0


/*int thresh = 50;
CvPoint pt[4]; */

static const char *pstrWndName[2] = { "Image", "Processed_Image" };

static void createWindow(const char *pstrName, int iPosX, int iPosY);
static int createCapture(CvCapture **pCapture);
static void processImages(CvCapture **pCapture);
static int grabFrame(CvCapture **pCapture, IplImage **pFrame);

/*CvSeq* findSquares4( IplImage* img, CvMemStorage* storage );
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 );
void drawSquares( IplImage* img, CvSeq* squares ); */


// Vision Control Framewok to observe the robots in the arena.s
int main() {
	int iStatus = EXIT_SUCCESS;
	static CvCapture* pCapture = NULL;

	// try to allocate memory for the capture device ...
	iStatus = createCapture(&pCapture);

	// create a window for presenting the captuerd images ...
	createWindow(*(pstrWndName + 0), 50, 50);
	// create a second window to show the modified images for pattern recognition ...
	createWindow(*(pstrWndName + 1), 600, 50);

	//createWindow("contours", 500, 100);


	processImages(&pCapture);

	cvDestroyAllWindows();
	// release the capture device housekeeping
	//cvReleaseCapture(&pCapture); // causes memory leaks (double free) ...

	// finally ...
	return 0;
}

/*
 * Creates a named, auto-sized display window and places it at the
 * requested screen position (offset from the upper-left corner).
 */
void createWindow(const char *pstrName, int iPosX, int iPosY) {
	cvNamedWindow(pstrName, CV_WINDOW_AUTOSIZE);
	cvMoveWindow(pstrName, iPosX, iPosY);
}

/*
 * Lazily allocates the capture device for the default camera.
 * Returns 0 on success (or if the device already exists), ERROR otherwise.
 */
int createCapture(CvCapture **pCapture) {
	// already initialized? nothing to do ...
	if(*pCapture != NULL)
		return 0;

	// allocate and initialize the memory for capturing camera frames ...
	*pCapture = cvCaptureFromCAM(CV_CAP_ANY);
	if(*pCapture == NULL) {
		fprintf(stderr, "ERROR: pCapture is NULL!\n");
		return ERROR;
	}
	return 0;
}

/*
 * Grabs frames from the capture device in a loop and runs the detection
 * pipeline on each one: grey conversion -> Gaussian blur -> threshold ->
 * Canny edge detection -> probabilistic Hough transform.  The raw frame
 * and the processed (line-annotated) image are shown in the two windows
 * until ESC is pressed or grabbing fails.
 *
 * pCapture: address of an initialized CvCapture pointer; the capture
 *           device (and with it the frame memory behind pImgFrame) is
 *           released before returning.
 */
void processImages(CvCapture **pCapture) {
	int i = 0;
	int iNElem = 0;
	CvSize frameSize;

	static CvCapture *pCapCpy = NULL;
	static IplImage* pImgFrame = NULL;
	static IplImage* pImgGrey = NULL;
	static IplImage* pImgGrayBlur = NULL;
	static IplImage* pImgCanny = NULL;
	static IplImage* pImgThres = NULL;
	static IplImage* pImgDst = NULL;

	static CvSeq* pLineSeq = NULL;
	static CvPoint* pLine = NULL;
	static CvMemStorage* pMemStorage = NULL;

	static rightAngle rAnlge;

	// create a direct copy of the capture structure
	// (try to prevent const-casting) ...
	pCapCpy = *pCapture;

	if(pCapCpy != NULL) {
		// grab the first frame of the camera to learn the frame geometry ...
		if(grabFrame(&pCapCpy, &pImgFrame) == ERROR) {
			fprintf(stderr, "ERROR: could not grab the image frame of the camera!\n");
			cvReleaseCapture(&pCapCpy);
			return;
		}
		// get the size of the captured frame ...
		frameSize = cvGetSize(pImgFrame);

		/// memory allocation for image processing:
		// single-channel grey image with the depth of the original frame ...
		pImgGrey = cvCreateImage(frameSize, pImgFrame->depth, 1);
		pImgGrey->origin = pImgFrame->origin; // position of the origin of the frame ...
		// blurred grey image: allocated ONCE here, not inside the capture
		// loop — the original code leaked one full image per iteration ...
		pImgGrayBlur = cvCreateImage(frameSize, IPL_DEPTH_8U, 1);
		// clone the allocated image memory for the Canny Operator
		// and also for the Threshold Operator ...
		pImgCanny = cvCloneImage(pImgGrey);
		pImgThres = cvCloneImage(pImgGrey);
		// allocate memory for the destination image (with 3 channels) ...
		pImgDst = cvCreateImage(frameSize, IPL_DEPTH_8U, 3);
		// create a memory storage with the default size of 64 Kb ...
		pMemStorage = cvCreateMemStorage(0);

		/// image capturing and processing:
		// parameters for the rainbow colouring of the detected lines ...
		int iRed, iGrn, iBlu;
		double dFreq[3] = {0.1, 0.1, 0.1};   // sine frequency per channel ...
		int iPhase[3] = {0, 2, 4};           // phase shift per channel ...
		int iCenter = 128;                   // sine wave centre value ...
		int iWidth = 127;                    // sine wave amplitude ...
		while(1) {
			// grab one frame (reuses memory owned by the CvCapture) ...
			if(grabFrame(&pCapCpy, &pImgFrame) == ERROR) break;

			// convert the colored image frame into a grey frame ...
			cvCvtColor(pImgFrame, pImgGrey, CV_BGR2GRAY);

			// Gaussian blur (3x3 kernel) to suppress pixel noise ...
			cvSmooth(pImgGrey, pImgGrayBlur, CV_GAUSSIAN, 3, 3, 0, 0);
			// uniform thresholding to get a bi-level (binary) image ...
			cvThreshold(pImgGrayBlur, pImgThres, 160, 255, CV_THRESH_BINARY);

			/// Canny edge detection operator:
			cvCanny(pImgGrayBlur, pImgCanny, 45, 220, 3);

			/// Hough Transform for finding lines:
			cvCvtColor(pImgCanny, pImgDst, CV_GRAY2BGR);
			pLineSeq = cvHoughLines2(pImgCanny,
														pMemStorage,
														CV_HOUGH_PROBABILISTIC,
														1,          // rho resolution (pixels)
														CV_PI/180,  // theta resolution (1 degree)
														10,         // accumulator threshold
														30,         // minimum line length
														10);        // maximum gap between segments
			// draw the detected lines (rainbow coloured) into the frame ...
			for(i=0; i < pLineSeq->total; i++) {
				pLine = (CvPoint*)cvGetSeqElem(pLineSeq, i);
				// calculate rainbow colors ...
				iRed = round(sin(dFreq[0]*i + iPhase[0]) * iWidth + iCenter);
				iGrn = round(sin(dFreq[1]*i + iPhase[1]) * iWidth + iCenter);
				iBlu = round(sin(dFreq[2]*i + iPhase[2]) * iWidth + iCenter);
				cvLine(pImgDst, *(pLine + 0), *(pLine + 1), CV_RGB(iRed,iGrn,iBlu), 2, 8, 0);
			}
			/// try to find rectangles in the line sequence ...
			iNElem = findRectangles(pImgFrame, pLineSeq);
			// NOTE(review): the found right angles are only copied out of
			// angleParamList here; drawing them is not implemented yet ...
			for(i=0; i < iNElem; i++) {
				rAnlge = *(angleParamList + i);
			}

			cvShowImage(*(pstrWndName + 0), pImgFrame);
			cvShowImage(*(pstrWndName + 1), pImgDst);

			/// remove all elements (lines) from the line sequence BEFORE
			/// resetting the storage that owns it [time complexity: O(1)] ...
			cvClearSeq(pLineSeq);
			/// clear the memory storage (reset the free space boundary) ...
			cvClearMemStorage(pMemStorage);

			// If ESC key pressed, Key=0x10001B under OpenCV 0.9.7 (linux version);
			// remove the higher bits using the AND operator ...
			if( (cvWaitKey(10) & 255) == 27 ) break;
		}
	} else {
		fprintf(stderr, "ERROR: capture structure is NULL!\n");
	}

	/// finally (do housekeeping) ...
	// try to deallocate all storage memory blocks of the given memory storage ...
	if(pMemStorage != NULL)
		cvReleaseMemStorage(&pMemStorage);

	/// release the allocated memory of the image processing frames ...
	// do NOT release pImgFrame: cvQueryFrame returns memory already owned
	// by the CvCapture structure; it is freed when the capture is released!
	cvReleaseImage(&pImgGrey);
	cvReleaseImage(&pImgGrayBlur);
	cvReleaseImage(&pImgCanny);
	cvReleaseImage(&pImgThres);
	cvReleaseImage(&pImgDst);
	// release the memory of the copied capture device (including the
	// frame image memory of the frame pointer "pImgFrame") ...
	cvReleaseCapture(&pCapCpy);
}

/*
 * Queries the next frame from the capture device into *pFrame.
 * Returns 0 on success, ERROR if the capture device is NULL or the
 * grab failed — previously a NULL device returned success while
 * leaving *pFrame untouched, letting callers use a stale frame.
 */
int grabFrame(CvCapture **pCapture, IplImage **pFrame) {
	// guard: without a valid capture device no frame can be grabbed ...
	if(*pCapture == NULL) {
		fprintf(stderr, "ERROR: capture device is NULL!\n");
		return ERROR;
	}
	// the returned frame memory is owned by the capture structure ...
	*pFrame = cvQueryFrame(*pCapture);
	if(! *pFrame) {
		fprintf(stderr, "ERROR: image frame is NULL!\n");
		return ERROR;
	}
	return 0;
}


/*CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
		CvSeq* contours;
		int i, c, l, N = 11;
		CvSize sz = cvSize( img->width & -2, img->height & -2 );
		IplImage* timg = cvCloneImage( img ); // make a copy of input image
		IplImage* gray = cvCreateImage( sz, 8, 1 ); 
		IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
		IplImage* tgray;
		CvSeq* result;
		double s, t;
		// create empty sequence that will contain points -
		// 4 points per square (the square's vertices)
		CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
		
		// select the maximum ROI in the image
		// with the width and height divisible by 2
		cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));
		
		// down-scale and upscale the image to filter out the noise
		cvPyrDown( timg, pyr, 7 );
		cvPyrUp( pyr, timg, 7 );
		tgray = cvCreateImage( sz, 8, 1 );
		
		// find squares in every color plane of the image
		for( c = 0; c < 3; c++ )
		{
				// extract the c-th color plane
				cvSetImageCOI( timg, c+1 );
				cvCopy( timg, tgray, 0 );
				
				// try several threshold levels
				for( l = 0; l < N; l++ )
				{
						// hack: use Canny instead of zero threshold level.
						// Canny helps to catch squares with gradient shading   
						if( l == 0 )
						{
								// apply Canny. Take the upper threshold from slider
								// and set the lower to 0 (which forces edges merging) 
								cvCanny( tgray, gray, 0, thresh, 5 );
								// dilate canny output to remove potential
								// holes between edge segments 
								cvDilate( gray, gray, 0, 1 );
						}
						else
						{
								// apply threshold if l!=0:
								//     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
								cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
						}
						
						// find contours and store them all as a list
						cvFindContours( gray, storage, &contours, sizeof(CvContour),
														CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
														//CV_RETR_LIST, CV_CHAIN_APPROX_TC89_L1, cvPoint(0,0) );
														//CV_RETR_LIST, CV_LINK_RUNS, cvPoint(0,0) );

						/// DRAW ALL FOUND CONTOURS ON THE SCREEN ...
						//if(contours->total > 0) { // yields a BUS ERROR (contours may be NULL) ...
						if(contours != NULL) {
							CvPoint *pLine = NULL;

							//printf("SEQ. OF CONTOURS:\n");
							//printf("length: %d\n\n", contours->total);
							int j;
							for(j=0; j < contours->total; ++j) {
								pLine = (CvPoint*)cvGetSeqElem(contours, j);
								//printf("[%d]: (%d, %d)\n", j, pLine->x, pLine->y);
								cvLine(img, *(pLine + 0), *(pLine + 1), CV_RGB(255,165,0), 2, 8, 0);
							}
							//printf("END OF SEQ.\n\n");
						}
						/// END OF DRAWING CONTOURS ...
					
						// test each contour
						while( contours )
						{
								// approximate contour with accuracy proportional
								// to the contour perimeter
								result = cvApproxPoly( contours, sizeof(CvContour), storage,
										CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );

								/// DRAW ALL RESULTS ON THE SCREEN ...
								if(result != NULL) {
									printf("result length: %d\n", result->total);
									CvPoint *pLine = NULL;
									int j;
									for(j=0; j < result->total; ++j) {
										pLine = (CvPoint*)cvGetSeqElem(result, j);
										//cvLine(img, *(pLine + 0), *(pLine + 1), CV_RGB(250,250,210), 2, 8, 0);
									}
								}
								/// END OF DRAWING ALL RESULTS ...

								// square contours should have 4 vertices after approximation
								// relatively large area (to filter out noisy contours)
								// and be convex.
								// Note: absolute value of an area is used because
								// area may be positive or negative - in accordance with the
								// contour orientation
								if( result->total == 4 &&
										fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
										cvCheckContourConvexity(result) )
								{
										/// DRAW ALL SQUARE CONTOURS ON THE SCREEN ...
										if(result != NULL) {
											CvPoint *pLine = NULL;
											int j;
											for(j=0; j < result->total; ++j) {
												pLine = (CvPoint*)cvGetSeqElem(result, j);
												cvLine(img, *(pLine + 0), *(pLine + 1), CV_RGB(255,192,203), 2, 8, 0);
											}
										}
										/// END OF DRAWING ALL SQUARE CONTOURS ...

										s = 0;
										
										for( i = 0; i < 5; i++ )
										{
												// find minimum angle between joint
												// edges (maximum of cosine)
												if( i >= 2 )
												{
														t = fabs(angle(
														(CvPoint*)cvGetSeqElem( result, i ),
														(CvPoint*)cvGetSeqElem( result, i-2 ),
														(CvPoint*)cvGetSeqElem( result, i-1 )));
														s = s > t ? s : t;
												}
										}
										
										// if cosines of all angles are small
										// (all angles are ~90 degree) then write quandrange
										// vertices to resultant sequence 
										if( s < 0.3 )
												for( i = 0; i < 4; i++ )
														cvSeqPush( squares,
																(CvPoint*)cvGetSeqElem( result, i ));
								}
								
								// take the next contour
								contours = contours->h_next;
						}
				}
		}
		
		// release all the temporary images
		cvReleaseImage( &gray );
		cvReleaseImage( &pyr );
		cvReleaseImage( &tgray );
		cvReleaseImage( &timg );
		
		return squares;
}

double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
		double dx1 = pt1->x - pt0->x;
		double dy1 = pt1->y - pt0->y;
		double dx2 = pt2->x - pt0->x;
		double dy2 = pt2->y - pt0->y;
		return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

// the function draws all the squares in the image
void drawSquares( IplImage* img, CvSeq* squares )
{
		CvSeqReader reader;
		IplImage* cpy = cvCloneImage( img );
		int i;
		
		// initialize reader of the sequence
		cvStartReadSeq( squares, &reader, 0 );
		
		// read 4 sequence elements at a time (all vertices of a square)
		for( i = 0; i < squares->total; i += 4 )
		{
				CvPoint* rect = pt;
				int count = 4;
				
				// read 4 vertices
				memcpy( pt, reader.ptr, squares->elem_size );
				CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
				memcpy( pt + 1, reader.ptr, squares->elem_size );
				CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
				memcpy( pt + 2, reader.ptr, squares->elem_size );
				CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
				memcpy( pt + 3, reader.ptr, squares->elem_size );
				CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
				
				// draw the square as a closed polyline 
				cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 2, 8, 0 );
		}
		
		// show the resultant image
		cvShowImage("contours",cpy);
		cvReleaseImage( &cpy );
} */

