#include <iostream>
#include <stdio.h>
#include "string.h"
#include "Algebra3D.h"
#include "cv.h"
#include "highgui.h"
#include "vision.h"
#include "scorbot.h"
#define DEBUG 0

using namespace std;
int THRESHHOLD = 10;


CvCapture *capture;

void mapImage(IplImage *src, IplImage *dst, float targetHue, float tolerance);
float getHue(CvScalar rgb);
boxCoordinates *getBoxCoordinates(IplImage* img_orig, int targetDimension, int type);


float getHue(CvScalar rgb)
{
  // Convert a pixel's color to a hue angle in degrees.
  // NOTE: CvScalar channels from cvGet2D are ordered B,G,R (val[0..2]).
  // Returns:
  //   h in [0, 360) for chromatic colors,
  //   360 for achromatic colors (r == g == b, hue ambiguous),
  //   -1 for black (max channel is 0, hue undefined).
  // (Dead stores to saturation/value from the original were removed;
  // only the hue was ever returned.)
  float r = rgb.val[2];
  float g = rgb.val[1];
  float b = rgb.val[0];

  float min = MIN( r, MIN(g, b ));
  float max = MAX( r, MAX(g, b ));
  float delta = max - min;

  if( max == 0 )    // r = g = b = 0: hue undefined
    return -1;
  if( delta == 0 )  // gray: no dominant channel; callers treat 360 as "no hue"
    return 360;

  float h;
  if( r == max )    // between yellow & magenta
    h = ( g - b ) / delta;
  else if( g == max )  // between cyan & yellow
    h = 2 + ( b - r ) / delta;
  else  // between magenta & cyan
    h = 4 + ( r - g ) / delta;

  h *= 60;          // sextant -> degrees
  if( h < 0 )
    h += 360;
  return h;
}

void mapImage(IplImage* src1, IplImage* dst, float targetHue, float tolerance)
{
	// Hue-threshold src1 into dst: pixels whose hue lies within `tolerance`
	// of `targetHue` get channel 0 set to 0 (foreground), all others 255
	// (background). dst receives the HSV pixel with channel 0 rewritten.
	//
	// NOTE(review): the conversion flag is CV_RGB2HSV although OpenCV
	// captures store pixels as BGR; the hue targets used by callers (28, 13)
	// were presumably tuned against this exact conversion, so it is kept.
	CvScalar s;
	int histogram[360];	// counts of matched hues, printed in DEBUG builds
	for(int i = 0; i < 360; i++)
		histogram[i] = 0;
	float maxMapped = 0;	// highest mapped channel-0 value seen so far

	IplImage* src = cvCreateImage(cvGetSize(src1), IPL_DEPTH_8U, 3);
	cvCvtColor( src1, src , CV_RGB2HSV);
	int nl= src->height; // number of rows
	int nc= src->width;  // number of columns

	for (int i=0; i<nl; i++) {
		for (int j=0; j<nc; j++) {
			s = cvGet2D(src, i, j);
			float hue = s.val[0];
			float difference = hue-targetHue;
			if (difference < 0)
				difference *= -1;
			if (difference > tolerance)
				s.val[0] = 255;	// out of band -> background
			else{
				s.val[0] = 0;	// in band -> foreground
				histogram[(int)hue]++;
			}
			cvSet2D(dst, i, j, s);
			// Log the first time a background pixel appears (values are
			// only ever 0 or 255, so this fires at most twice).
			if (maxMapped < s.val[0]){
				printf("hue=%f\n",s.val[0]);
				maxMapped = s.val[0];
			}
		}
	}

	// Fix: the temporary HSV image was leaked on every call.
	cvReleaseImage(&src);

#if DEBUG == 1
	for(int i = 0; i < 360; i++)
		if (histogram[i] > 0)
			cout << "i: " << i << "   val: " << histogram[i]<< endl;
#endif
}

int initializeCamera()
{
	// Open camera device 1 and configure 640x480 capture, storing the
	// handle in the file-global `capture`. In debug builds also create
	// the preview windows. Returns 0 on success, -1 on failure.
	capture = cvCreateCameraCapture(1);
	if (capture == NULL) {
		cout << "ERROR ON CREATING CAMERA CAPTURE!\n";
		return -1;
	}

	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);

	if (DEBUG == 1) {
		cvNamedWindow("Camera", CV_WINDOW_AUTOSIZE);
		cvNamedWindow("Map", CV_WINDOW_AUTOSIZE);
	}

	return 0;
}

void destroyCamera()
{
	// Release the global capture handle and, in debug builds, tear down
	// the preview windows created by initializeCamera().
	cvReleaseCapture(&capture);
	if (DEBUG != 1)
		return;
	cvDestroyWindow("Camera");
	cvDestroyWindow("Map");
}

boxCoordinates getContourInfo(CvSeq* contour)
{
	// Find the most distant pair of contour points (the contour's
	// "diameter"): returns its length in pixels, the midpoint of the
	// pair, and the angle of the segment joining them.
	boxCoordinates b;
	b.pixelWidth = 0;
	// Fix: these were left uninitialized when the contour yielded no
	// qualifying point pair, and callers read them unconditionally.
	b.centerX = 0;
	b.centerY = 0;
	b.angle = 0;

	// Fix: iterate over the actual element count instead of a magic
	// 10000 bound; the fast CV_GET_SEQ_ELEM macro is not guaranteed to
	// return NULL past the end of the sequence.
	int n = contour->total;
	for( int i=0; i < n; ++i ){
		CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, contour, i );
		if (p == NULL) break;
		for(int j = 0; j < n; j++){
			CvPoint* q = CV_GET_SEQ_ELEM( CvPoint, contour, j );
			if (q == NULL) break;
			double distance = sqrt((double)((p->x-q->x)*(p->x-q->x) + (p->y-q->y)*(p->y-q->y)));
			if (distance > b.pixelWidth)
			{
				b.pixelWidth = distance;
				b.centerX = (p->x + q->x)/2;
				b.centerY = (p->y + q->y)/2;
				b.angle = atan2((double)(p->y-q->y), (double)(p->x-q->x));
			}
		}
	}
	return b;
}

boxCoordinates *getBoxCoordinates(IplImage* img_orig, int targetDimension, int type)
{
	// Hue-segment img_orig, then pick the contour whose diameter is
	// closest to targetDimension and return its geometry. The caller
	// owns the returned boxCoordinates and must delete it. Returns NULL
	// when the best candidate falls outside the plausible size bounds
	// for the requested target type.
	IplImage* img_map = cvCreateImage( cvGetSize(img_orig), IPL_DEPTH_8U, 1);
	cout << "width: " << cvGetSize(img_orig).width << endl;
	cout << "height: " << cvGetSize(img_orig).height << endl;
	IplImage* img_filter = cvCreateImage( cvGetSize(img_orig), 8, 1 );
	double lbound, ubound;	// accepted pixel-width range for this target

	if (type == BLOCK) {
		cout << "Looking for block" << endl;
		mapImage(img_orig, img_map, 28, 6); // block hue band
		lbound = 60;
		ubound = 200;
	}
	else	{
		cout << "looking for area" << endl;
		mapImage(img_orig, img_map, 13, 10); // landing-zone hue band
		lbound = 150;
		ubound = 350;
	}

	boxCoordinates *best = new boxCoordinates;
	// Fix: *best was used uninitialized below when no contour was found.
	best->pixelWidth = 0;
	best->centerX = 0;
	best->centerY = 0;
	best->angle = 0;

	// cvFindContours modifies its input, so run it on a copy of the map.
	cvCopy( img_map, img_filter, NULL );

	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* first_contour = NULL;
	double bestDimensionScore = 1000000000;

	cvFindContours(
			img_filter,
			storage,
			&first_contour,
			sizeof(CvContour),
			CV_RETR_LIST );

	// Keep the contour whose diameter is nearest the expected dimension.
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ){
		boxCoordinates contourCoord = getContourInfo(c);
		double dimensionScore = contourCoord.pixelWidth-targetDimension;
		if (dimensionScore < 0) dimensionScore *= -1;
		if (dimensionScore < bestDimensionScore) {
			bestDimensionScore = dimensionScore;
			memcpy(best, &contourCoord, sizeof(*best));
		}
	}

	// Annotate the capture with the detected circle and orientation line.
	CvPoint center = {best->centerX, best->centerY};
	CvPoint corner = {best->centerX + cos(best->angle)*best->pixelWidth/2, best->centerY+sin(best->angle)*best->pixelWidth/2};
	cvCircle(img_orig, center, best->pixelWidth/2, cvScalar(0,255,255));
	cvLine(img_orig, center, corner, cvScalar(0, 255, 255));

	if (DEBUG == 1)
	{
		cvShowImage( "Camera", img_orig );
		cvShowImage( "Map", img_map);
	}

	// Fix: these were leaked on every frame.
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&img_filter);
	cvReleaseImage(&img_map);

	if (best->pixelWidth < lbound || best->pixelWidth > ubound) {
		cout << "Returning here!!!!!" << endl << best->pixelWidth << endl;
		delete(best);
		return NULL;
	}
	return best;
}

boxCoordinates *take_picture(IMAGE_TARGET type) {
	// Grab one frame from the global capture and locate the requested
	// target in it. Returns NULL for unknown target types (and forwards
	// getBoxCoordinates' NULL on detection failure).
	IplImage *frame = cvQueryFrame(capture);
	switch (type) {
	case BLOCK:
		return getBoxCoordinates(frame, 70, type);   // locate block
	case AREA:
		return getBoxCoordinates(frame, 150, type);  // locate landing zone
	default:
		return NULL;
	}
}

poDesc_t *getBlockCoordinates(H_Matrix *h, IMAGE_TARGET type){
	// Take a picture, find the requested target (BLOCK or AREA), and
	// transform its pixel position into a pose in the robot base frame.
	//
	// h:    gripper-with-respect-to-base homogeneous transform at the
	//       moment of capture (copied into t_g2b below).
	// type: forwarded to take_picture().
	// Returns a heap-allocated poDesc_t (caller owns it), or NULL when
	// no target was detected.

	double ici[] = {0.0012266462, 0, -0.4098195063, 0, 0.0012316967, -0.3181708228, 0, 0, 1}; //intrinsic camera inverse.
	double time = 0;	// NOTE(review): never used
	const double PI = 3.1415926535;

	//0 1 2 3
	//4 5 6 7
	//8 9 10 11

	Matrix4 t_c2g = Matrix4(-0.0108, 0.0116, 0.9999, -784.0, //-770  // camera with respect to gripper
						    -0.0307, 0.9995, -.0119, -714.1, 
						    -0.9995, -.0308, -.0105, 16.59, 
						     0,       0,     0,       1);

	// gripper with respect to base
	Matrix4 t_g2b = Matrix4(h->e[0][0], h->e[0][1], h->e[0][2], h->e[0][3],
						    h->e[1][0], h->e[1][1], h->e[1][2], h->e[1][3],
						    h->e[2][0], h->e[2][1], h->e[2][2], h->e[2][3],
						    h->e[3][0], h->e[3][1], h->e[3][2], h->e[3][3]);

	t_g2b.print();
	poDesc_t *result;
	boxCoordinates* b = take_picture(type);
	if (b == NULL) return NULL;
	// NOTE(review): b is heap-allocated by take_picture and never freed
	// here -- leaks one boxCoordinates per successful call.

	//b->centerX *= 2;
	//b->centerY *= 2;
	if(DEBUG == 1)
		cout << "centerX centerY: " << b->centerX << " " << b->centerY << endl;

	//b->centerX and b->centerY are the center of the block with respect to the camera
	//calculate the position with respect to the camera
/*	MAGIC NUMBERS:
	335 mm is x length
	640 pixels is x pixels

	265 mm is y length
	480 pixels is y pixels
	the camera is 425 mm from the table

	the angle field of view of the camera to whatever is 0.3754 radians for x direction within the table
	the angle field of view of the camera to whatever is 0.3022 radians for y direction within the table
*/
	// block with respect to camera: back-project the pixel through the
	// inverse intrinsics (ici) and scale by the assumed depth 4350
	// (presumably tenths of mm, camera-to-table -- TODO confirm units).
//	Point3 c2Block = Point3((b->centerX - 320) * 3350.0 / 640.0, (b->centerY - 240) * 3350.0 / 640.0, 4250.0);
	Point3 c2Block = Point3(((b->centerX * ici[0]) + ici[2])*4350.0, ((b->centerY * ici[4]) + ici[5])*4350.0, 4350.0);

	// block w.r.t. base = g2b * c2g * bl2c
	Point3 g2Block = t_c2g * c2Block;
	Point3 b2Block = t_g2b * g2Block;

	// Rotation of the block about the camera's optical axis. The PI/4
	// offset presumably converts the detected diameter (a diagonal of
	// the square block) to an edge direction -- TODO confirm.
	double a = b->angle + PI/4;
	Matrix4 t_blockrotation = Matrix4(cos(a), -sin(a), 0, 0,
									  sin(a),  cos(a), 0, 0,
									       0,       0, 1, 0,
										   0,		0, 0, 1);
	t_blockrotation = t_c2g * t_blockrotation;
	t_blockrotation = t_g2b * t_blockrotation;

	if(DEBUG == 1){
		cout << "Position vector from camera to block: " << endl;
		c2Block.print();
		cout << endl;

		cout << "Position vector from gripper to block: " << endl;
		g2Block.print();

		cout << "G2B:\n";
		t_g2b.print();
		cout << endl;
	
		cout << "Position vector from base to block: " << endl;
		b2Block.print();
		cout << endl;
	}

	result = new poDesc_t;
	result->x = b2Block.x;
	result->y = b2Block.y;
//	result->z = b2Block.z;
	result->z = 0;	// vision z is discarded; target height forced to 0
//	result->rho = 0;

	// Compensate for the arm's lateral offset when computing the yaw
	// toward the target.
	double arm_offset = 325.0;
	double offset_theta = asin(arm_offset / (sqrt(pow(result->x,2) + pow(result->y,2))));
	double ntheta = atan2(result->y, result->x) + offset_theta; //get yaw from y and x
	
	// Wrist roll: the *-1800/PI conversion and the wrap into [-450, 450]
	// suggest tenths of degrees with a +/-45 degree symmetry -- TODO
	// confirm the controller's angle units.
	result->rho = (acos(t_blockrotation.m[0]) + ntheta) * -1800 / PI;
	cout << "Initial rotation: " << result->rho << endl;
	while (result->rho > 450)
		result->rho -= 900;
	while (result->rho < -450)
		result->rho += 900;
	cout << "Final rotation: " << result->rho << endl;
	result->phi = -900;	// pitch (presumably -90.0 deg: gripper pointing down -- confirm)
	result->theta = 0;

	return result;
}

int camera_setup()
{
	// Thin wrapper over initializeCamera(); returns its status code
	// (0 on success, -1 on failure).
	const int status = initializeCamera();
	return status;
}

