/*
 * File:   main.cpp
 * Author: samcrow
 *
 * Created on March 9, 2012, 3:48 PM
 */

#include <cstdlib>
#include <iostream>
#include <vector>
#include <cmath>
#include <pthread.h>

#include <libfreenect.hpp>
#include <libfreenect/libfreenect.h>

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/wimage.hpp>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/types_c.h>
#include <opencv2/imgproc/imgproc_c.h>

//Local includes
#include "ServerSocket.hpp"

//Definitions not in the recent OpenCV headers
#define FREENECT_FRAME_W 640
#define FREENECT_FRAME_H 480
#define FREENECT_FRAME_PIX (FREENECT_FRAME_H*FREENECT_FRAME_W)
#define FREENECT_VIDEO_RGB_SIZE (FREENECT_FRAME_PIX*3)
#define FREENECT_DEPTH_11BIT_SIZE (FREENECT_FRAME_PIX*sizeof(uint16_t))

/**
 * A thin RAII wrapper around a pthread mutex.
 * The mutex is created on construction and destroyed on destruction.
 */
class Mutex {
public:

	Mutex() {
		pthread_mutex_init(&m_mutex, NULL);
	}

	//Release the OS mutex (previously this was leaked: init without destroy)
	~Mutex() {
		pthread_mutex_destroy(&m_mutex);
	}

	void lock() {
		pthread_mutex_lock(&m_mutex);
	}

	void unlock() {
		pthread_mutex_unlock(&m_mutex);
	}
private:
	//Copying a pthread mutex is undefined behavior, so forbid copying
	//(declared but not defined, pre-C++11 style to match this file)
	Mutex(const Mutex&);
	Mutex& operator=(const Mutex&);

	pthread_mutex_t m_mutex;
};

using namespace cv;
using namespace std;

/**
 * Extends FreenectDevice and implements methods to get the most recent image data
 * independently of the libfreenect callbacks.
 */
class Team751FreenectDevice : public Freenect::FreenectDevice {
public:

	/**
	 * Get the reference to the device
     * @param _ctx The Freenect context to create the device in
     * @param _index The device index. For setups with only one Kinect, this should be zero.
     */
	Team751FreenectDevice(freenect_context *_ctx, int _index) : Freenect::FreenectDevice(_ctx, _index),
	m_buffer_depth(FREENECT_DEPTH_11BIT_SIZE),
	m_buffer_rgb(FREENECT_VIDEO_RGB_SIZE),
	m_gamma(2048),
	m_new_rgb_frame(false),
	m_new_depth_frame(false),
	depthMat(Size(FREENECT_FRAME_W, FREENECT_FRAME_H), CV_16UC1),
	rgbMat(Size(FREENECT_FRAME_W, FREENECT_FRAME_H), CV_8UC3),
	ownMat(Size(FREENECT_FRAME_W, FREENECT_FRAME_H), CV_8UC3, Scalar(0)) {
		//Build the gamma lookup table used to visualize 11-bit depth values.
		//The input to the curve must vary with the table index: the previous
		//code used the constant (1 / 2048.0), which filled every entry with
		//the same value. The libfreenect C++ samples use i / 2048.0.
		for (unsigned int i = 0; i < 2048; i++) {
			float v = i / 2048.0f;
			v = pow(v, 3) * 6;
			m_gamma[i] = v * 6 * 256;
		}
	}

	/**
	 * libfreenect callback: called from the freenect thread when a new
	 * RGB frame is available.
	 * @param _rgb Pointer to the raw RGB frame data (owned by libfreenect)
	 * @param timestamp Frame timestamp supplied by libfreenect
	 */
	void VideoCallback(void* _rgb, uint32_t timestamp) {
		//cout << "Got a video callback timestamped " << timestamp << "." << endl;
		m_rgb_mutex.lock();
		uint8_t* rgb = static_cast<uint8_t*> (_rgb);
		//NOTE(review): this points rgbMat at libfreenect's internal buffer
		//rather than copying; getVideo() copies via cvtColor before use.
		//Presumably the buffer stays valid until the next callback - verify.
		rgbMat.data = rgb;
		m_new_rgb_frame = true;
		m_rgb_mutex.unlock();
	}

	/**
	 * libfreenect callback: called from the freenect thread when a new
	 * depth frame is available.
	 * @param _depth Pointer to the raw 11-bit depth data (owned by libfreenect)
	 * @param timestamp Frame timestamp supplied by libfreenect
	 */
	void DepthCallback(void* _depth, uint32_t timestamp) {
		//cout << "Got a depth callback timestamped " << timestamp << "." << endl;
		m_depth_mutex.lock();
		uint16_t* depth = static_cast<uint16_t*> (_depth);
		//Same non-owning aliasing as VideoCallback; getDepth() copies out.
		depthMat.data = (uchar*) depth;
		m_new_depth_frame = true;
		m_depth_mutex.unlock();
	}

	/**
	 * Copy the most recent RGB frame, converted to BGR channel order,
	 * into the given matrix.
	 * @param output Destination matrix
	 * @return true if a new frame was available, false otherwise
	 */
	bool getVideo(Mat& output) {
		m_rgb_mutex.lock();
		if (m_new_rgb_frame) {
			//cvtColor also performs the copy out of the shared buffer
			cv::cvtColor(rgbMat, output, CV_RGB2BGR);
			m_new_rgb_frame = false;
			m_rgb_mutex.unlock();
			return true;
		} else {
			m_rgb_mutex.unlock();
			return false;
		}
	}

	/**
	 * Copy the most recent depth frame (CV_16UC1) into the given matrix.
	 * @param output Destination matrix
	 * @return true if a new frame was available, false otherwise
	 */
	bool getDepth(Mat& output) {
		m_depth_mutex.lock();
		if (m_new_depth_frame) {
			depthMat.copyTo(output);
			m_new_depth_frame = false;
			m_depth_mutex.unlock();
			return true;
		} else {
			m_depth_mutex.unlock();
			return false;
		}
	}

private:
	vector<uint8_t> m_buffer_depth;
	vector<uint8_t> m_buffer_rgb;
	vector<uint16_t> m_gamma; //depth-to-intensity lookup table
	Mat depthMat;             //aliases the latest libfreenect depth buffer
	Mat rgbMat;               //aliases the latest libfreenect RGB buffer
	Mat ownMat;
	Mutex m_rgb_mutex;        //guards rgbMat and m_new_rgb_frame
	Mutex m_depth_mutex;      //guards depthMat and m_new_depth_frame
	bool m_new_rgb_frame;
	bool m_new_depth_frame;
};

/**
 * Compute the cosine of the angle formed at pt0 by the segments
 * pt0->pt1 and pt0->pt2 (the dot product divided by the product of the
 * vector lengths). The result is dimensionless and lies in [-1, 1];
 * a value near 0 means the vectors are close to perpendicular.
 * @param pt1 The end point of the first vector
 * @param pt2 The end point of the second vector
 * @param pt0 The common start point of both vectors
 * @return The cosine of the angle between the two vectors
 */
double angle(Point pt1, Point pt2, Point pt0) {
	const double ax = pt1.x - pt0.x;
	const double ay = pt1.y - pt0.y;
	const double bx = pt2.x - pt0.x;
	const double by = pt2.y - pt0.y;
	const double dot = ax * bx + ay * by;
	//The 1e-10 term guards against division by zero for degenerate vectors
	const double lengthProduct = sqrt((ax * ax + ay * ay)*(bx * bx + by * by) + 1e-10);
	return dot / lengthProduct;
}

// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage

void findSquares(const Mat& image, vector<vector<Point> >& squares) {
	squares.clear();

	int thresh = 50, N = 11;

	Mat pyr, timg, gray0(image.size(), CV_8U), gray;

	// down-scale and upscale the image to filter out the noise
	pyrDown(image, pyr, Size(image.cols / 2, image.rows / 2));
	pyrUp(pyr, timg, image.size());
	vector<vector<Point> > contours;

	// find squares in every color plane of the image
	for (int c = 0; c < 3; c++) {
		int ch[] = {c, 0};
		mixChannels(&timg, 1, &gray0, 1, ch, 1);

		// try several threshold levels
		for (int l = 0; l < N; l++) {
			// hack: use Canny instead of zero threshold level.
			// Canny helps to catch squares with gradient shading
			if (l == 0) {
				// apply Canny. Take the upper threshold from slider
				// and set the lower to 0 (which forces edges merging)
				Canny(gray0, gray, 0, thresh, 5);
				// dilate canny output to remove potential
				// holes between edge segments
				dilate(gray, gray, Mat(), Point(-1, -1));
			} else {
				// apply threshold if l!=0:
				//     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
				gray = gray0 >= (l + 1)*255 / N;
			}

			// find contours and store them all as a list
			findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

			vector<Point> approx;

			// test each contour
			for (size_t i = 0; i < contours.size(); i++) {
				// approximate contour with accuracy proportional
				// to the contour perimeter
				approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

				// square contours should have 4 vertices after approximation
				// relatively large area (to filter out noisy contours)
				// and be convex.
				// Note: absolute value of an area is used because
				// area may be positive or negative - in accordance with the
				// contour orientation
				if (approx.size() == 4 &&
						fabs(contourArea(Mat(approx))) > 1000 &&
						isContourConvex(Mat(approx))) {
					double maxCosine = 0;

					for (int j = 2; j < 5; j++) {
						// find the maximum cosine of the angle between joint edges
						double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
						maxCosine = MAX(maxCosine, cosine);
					}

					// if cosines of all angles are small
					// (all angles are ~90 degree) then write quandrange
					// vertices to resultant sequence
					if (maxCosine < 0.3)
						squares.push_back(approx);
				}
			}
		}
	}
}

int main(int argc, char** argv) {
	cout << "It works!" << endl;

	Freenect::Freenect freenect;
	if(freenect.deviceCount() < 1){
		cout << "Zero Kinect devices available!" << endl;
		exit(EXIT_FAILURE);
	}
	Team751FreenectDevice& device = freenect.createDevice<Team751FreenectDevice >(0);

	device.startVideo();
	device.startDepth();

	//Sockets!
	ServerSocket socket(7510);//This blocks while waiting for a connection


	while (true) {

		Mat depth(Size(FREENECT_FRAME_W, FREENECT_FRAME_H), CV_16UC1);

		if (device.getDepth(depth)) {

			//Get the average depth across the whole image
			double total = 0;
			for (int i = 0; i < FREENECT_FRAME_W; i++) {
				for (int j = 0; j < FREENECT_FRAME_H; j++) {
					total += depth.at<double>(j, i); //This function is y, then x
				}
			}
			double average = total / (double) FREENECT_FRAME_PIX;
			average *= pow(10, 271); //Change it from 10^-271 to something closer to zero
			average = 1 / average; //Invert it and see if that is more proportional

			cout << "Average distance: " << average << endl;

			device.setTiltDegrees(0);
		}

		Mat video(Size(FREENECT_FRAME_W, FREENECT_FRAME_H), CV_8UC3);

		if (device.getVideo(video)) {

			vector<uchar> compressedImage;
			vector<int> params;
			params.insert(params.end(), CV_IMWRITE_JPEG_QUALITY);//Add key for JPEG quality
			params.insert(params.end(), 70);//Add value of 50

			imencode("jpg", video, compressedImage, params);//Compress the image into JPEG

			cout << string(compressedImage.begin(), compressedImage.end()) << endl;//Test printing it

			const char header[] = {1, 0, 0, 0};
			socket.send(string(header));//Send the header that the dashboard program expects
			socket.send(string(compressedImage.begin(), compressedImage.end()));
		}

	}

	cout << "Stopping..." << endl;
	device.stopVideo();
	device.stopDepth();

	return 0;
}

