#include "main.h"

// Occupancy grid, indexed [row][col] as [MAPSIZEY][MAPSIZEX]; zeroed at startup in main().
int objectMap[MAPSIZEY][MAPSIZEX];


/****************************
*   Image processing        *
****************************/

// Shared frame buffers for edge detection. tempFrame must be allocated as a
// single-channel 8-bit image (same size as the camera frame) before
// findEdges() is called; pProcessedFrame ends up aliasing findEdges()'s
// return value, which is tempFrame itself.
IplImage* pProcessedFrame;
IplImage* tempFrame;

// Slider for the low threshold value of our edge detection
int maxLowThreshold = 1024;    // trackbar maximum
int lowSliderPosition = 150;   // current Canny low threshold

// Slider for the high threshold value of our edge detection
int maxHighThreshold = 1024;   // trackbar maximum
int highSliderPosition = 250;  // current Canny high threshold

// Edge-detect a colour frame: converts sourceFrame to greyscale into the
// shared global tempFrame, runs Canny edge detection in place, and returns
// tempFrame. NOTE(review): the returned pointer aliases the global
// tempFrame, so the result is overwritten by the next call, and tempFrame
// must already be an allocated single-channel image matching sourceFrame's
// size -- it is never allocated in this translation unit's globals.
IplImage* findEdges(IplImage* sourceFrame, double thelowThreshold, double theHighThreshold, double theAperture)
{
	// Convert source frame to greyscale version (tempFrame has already been initialised to use greyscale colour settings)
	cvCvtColor(sourceFrame, tempFrame, CV_RGB2GRAY);

	// Perform canny edge finding on tempframe, and push the result back into itself!
	cvCanny(tempFrame, tempFrame, thelowThreshold, theHighThreshold, theAperture);

	// Pass back our now processed frame!
	return tempFrame;
}

// Trackbar callback: records the new low-threshold position whenever the
// user drags the "Low Threshold" slider.
void onLowThresholdSlide(int newValue)
{
	lowSliderPosition = newValue;
}

// Trackbar callback: records the new high-threshold position whenever the
// user drags the "High Threshold" slider.
void onHighThresholdSlide(int newValue)
{
	highSliderPosition = newValue;
}


// Mouse callback: report the Kinect depth (in metres) at the clicked pixel.
// params carries the KinectOpenCV device pointer passed at registration.
void onMouse(int event, int x, int y, int flags, void* params) {
	KinectOpenCV* dev = static_cast<KinectOpenCV*>(params);
	const double metres = dev->GetDepthInMetres(x, y);

	// GetDepthInMetres signals "no reading" with -1.
	std::cout << "Depth(" << x << ", " << y << "): ";
	if (metres == -1) {
		std::cout << "cannot compute value" << std::endl;
	} else {
		std::cout << metres << " metres" << std::endl;
	}
	std::cout.flush();
}

// Per-pixel sample derived from the Kinect depth image (populated in main):
// ycor holds the measured depth in metres, xcor a lateral offset derived
// from the depth and the pixel column, and angle the bearing atanf(x/y).
struct points{
    float xcor;   // lateral offset derived from depth and pixel column
    float ycor;   // depth in metres (from KinectOpenCV::GetDepthInMetres)
    float angle;  // bearing in radians: atanf(xcor / ycor)
};

/****************************
*            Main           *
****************************/


int main(int argc, char *argv[]) {
    points kinectmap[640][480];
	IMU imu;
	KinectOpenCV kinect(0);
	GPS gps(16);
    RoboTeq roboteq(17);

    for(int i = 0 ; i < MAPSIZEX; i++){
        for(int j = 0 ; j < MAPSIZEX; j++){
            objectMap[i][j] = 0;
        }
    }

    //Create OpenCV images
    IplImage *camera = cvCreateImage( cvSize(640, 480), IPL_DEPTH_8U, 3);
	IplImage *depth = cvCreateImage( cvSize(640, 480), IPL_DEPTH_8U, 3);

	//Potentially remove IplImage *
    IplImage *tempFrame = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
    IplImage *pProcessedFrame = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);

    cvZero(camera);
	cvZero(depth);

	cvNamedWindow("Camera", 1);
    cvNamedWindow("Processed Image", CV_WINDOW_AUTOSIZE);

	cvShowImage("Camera", camera);
	cvShowImage("Depth", depth);

    gps.getCoords();
    cout << gps.getLat();
    cout << gps.getLong();

	// Create Sliders
	cvCreateTrackbar("Low Threshold", "Processed Image", &lowSliderPosition, maxLowThreshold, onLowThresholdSlide);
	cvCreateTrackbar("High Threshold", "Processed Image", &highSliderPosition, maxHighThreshold, onHighThresholdSlide);

	// Loop controling vars
	bool quit = false;
    char key;
    CvScalar d, p, temp;

	while (quit == false)
    {
        for(;;) {
            key = (cvWaitKey(5) & 255);
            if( key == 27 ) {
                quit = true;
            }
            if( key == 'w' ) {
                kinect.SetTiltAngle(20);
            }
            if( key == 'x' ) {
                kinect.SetTiltAngle(-20);
            }
            if( key == 's' ) {
                kinect.SetTiltAngle(0);
            }

            std::cout.flush();

            kinect.GetColourImage(camera);
            kinect.GetDepthImage(depth);

            pProcessedFrame = findEdges(camera, lowSliderPosition, highSliderPosition, 3);
            // Showed the processed output in our other window
            cvShowImage("Processed WebCam", pProcessedFrame);
            cvShowImage("Camera", camera);
            cvShowImage("Depth", depth);

            std::cout << kinect.GetDepthInMetres(320,240) << std::endl;
            for(int i = 0; i < 640; i++) {
                for(int j = 0; j < 480; j++) {
                    p = cvGet2D(pProcessedFrame,j,i);
                    if(p.val[0] == 0) {
                        cvSet2D(depth,j,i,p);
                    } else{
                        kinectmap[i][j].ycor=kinect.GetDepthInMetres(i,j);
                        kinectmap[i][j].xcor=tanf(30.3)*kinectmap[i][j].ycor*(i-320)/320;
                        kinectmap[i][j].angle=atanf(kinectmap[i][j].xcor/kinectmap[i][j].ycor);

                    }
                }
            }


        }
	} // End of while loop

	kinect.StopKinect();

	cvDestroyWindow("Camera");
	cvReleaseImage(&camera);
	cvReleaseImage(&pProcessedFrame);


	return 0;
}

