#include <iostream>
#include <process.h>
#include <vector>
#include <fstream>

#include <cv.h>
#include <highgui.h>

#include "PGRCamera.h"
#include "utils.h"
#include "Tracking/BlobDetector.h"
#include "Tracking/Tracker.h"
#include "Filtering/Pipelining.h"
#include "Event/TouchMessenger.h"
#include "Coordination/SelfCalibrator.h"

#include "DrawInformationWrapping.h"

/* Runtime-tweakable processing parameters (adjusted via keyboard in main()). */
bool SHOW_PROCESS = false;

// Working resolution for capture and processing; height keeps a 4:3 aspect ratio.
const int WIDTH = 320;
const int HEIGHT = (WIDTH * 3) / 4;
const int HOLE_PROCESSING_TIME = 33; // ms — per-frame time budget (~30 fps)

// Filter-pipeline parameters; upper-case keys increase, lower-case decrease (see main()).
int PARAM_SMOOTHNESS = 2;
int PARAM_HIGH_PASS_BLUR = 1;
int PARAM_HIGH_PASS_NOISE = 5;
double PARAM_HIGH_PASS_AMPLIFY = 500.0;
double PARAM_BINALIZE_THREHOLD = 0.20;
int PARAM_DILATION_STEP = 1;

// Accepted blob sizes, in pixels of the WIDTH x HEIGHT image.
int MIN_BLOB_AREA = 9;
int MAX_BLOB_AREA = WIDTH * HEIGHT / 200;

// Maximum per-frame movement (in pixels) for a blob to keep its track identity.
double MAX_TRACKING_DISTANCE = (double)(WIDTH + HEIGHT) / 15.0;

// TUIO endpoint; "default"/0 presumably selects the messenger's built-in
// host/port — TODO confirm against TouchMessenger.
const char* TUIO_HOST = "default";
const int TUIO_PORT = 0;

/* Filtering pipeline and blob detection/tracking singletons (shared with the
   worker thread; access to detector/tracker is guarded by criticalSection). */
Pipelining* pipeline = new Pipelining(WIDTH, HEIGHT, IPL_DEPTH_32F);
BlobDetector* detector = new BlobDetector();
BlobTracker* tracker = new BlobTracker();

/* TUIO protocol messenger */
TouchMessenger* messenger = new TouchMessenger(TUIO_HOST, TUIO_PORT);

/* Global scratch images used by ThreadFunc (allocated once, released in main). */
IplImage* inputImage			= cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
IplImage* floatingInputImage	= cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_32F, 1);
IplImage* floatingOutputImage	= cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_32F, 1);
IplImage* integerImage			= cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
IplImage* tempImage				= cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);

/* Coordinate converter (self-calibration homography), loaded in main() from
   SELF_CALIBRATION_FILE. */
windage::SelfCalibrator* convertor;
const char* SELF_CALIBRATION_FILE = "selfcalibration.txt";

/* Shared state between main() and the processing thread.
   NOTE(review): processingTime is written by the worker and read by main
   without synchronization — benign for a display value, but not strictly safe. */
double processingTime = 0;
CRITICAL_SECTION criticalSection;
bool threadWorking = false;
void ThreadFunc(void* proc)
{
	int64 startTime = cvGetTickCount();

	std::vector<Blob> trackedBlobList;
	std::vector<Blob> removedBlobList;

	// processing(filtering)
	cvCvtScale(inputImage, floatingInputImage, 1.0/255.0);

	// filtering
	pipeline->SetInputImage(floatingInputImage);
	pipeline->SetOutputImage(floatingOutputImage);
	pipeline->Do();

	EnterCriticalSection(&criticalSection);
	{
		// blob detection
		cvCvtScale(floatingOutputImage, integerImage, 255.0);
		detector->SetInputImage(integerImage);
		detector->Do();
		
		// blob tracking
		std::vector<Blob>* newBlob = detector->GetBlogList();
		for(unsigned int i=0; i<newBlob->size(); i++)
			tracker->PushNewBlob((*newBlob)[i]);
		tracker->Do();

		// coordination normalization
		for(unsigned int i=0; i<tracker->trackedBlobList.size(); i++)
		{
			Blob blob = tracker->trackedBlobList[i];
			blob.pt.x /= (double)WIDTH;
			blob.pt.y /= (double)HEIGHT;

			// convert coordination
			// use self calibration
			windage::Vector2 outPoint;
			outPoint = convertor->ConvertPoint(windage::Vector2(blob.pt.x, blob.pt.y));
			blob.pt.x = outPoint.x;
			blob.pt.y = outPoint.y;

			trackedBlobList.push_back(blob);
		}
		for(unsigned int i=0; i<tracker->removedBlobList.size(); i++)
		{
			Blob blob = tracker->removedBlobList[i];
			blob.pt.x /= (double)WIDTH;
			blob.pt.y /= (double)HEIGHT;

			// convert coordination
			// use self calibration
			windage::Vector2 outPoint;
			outPoint = convertor->ConvertPoint(windage::Vector2(blob.pt.x, blob.pt.y));
			blob.pt.x = outPoint.x;
			blob.pt.y = outPoint.y;

			removedBlobList.push_back(blob);
		}

		if(SHOW_PROCESS)
		{
			ShowPipelineImage(pipeline);
			ShowBlobImage(tempImage, newBlob);
		}
	}
	LeaveCriticalSection(&criticalSection);

	// send message using TUIO
	messenger->Do(&trackedBlobList, &removedBlobList);

	processingTime = (cvGetTickCount() - startTime) / (cvGetTickFrequency() * 1000.0);
}

/* main procedure */
void main()
{
	char message[200];
	// create window
	cvNamedWindow("result image");

	// initialize
	IplImage* resizeImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* rawImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* backgroundImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	cvSetZero(backgroundImage);

	// setting filter pipelining
//	Pipelining* pipeline = new Pipelining(WIDTH, HEIGHT, IPL_DEPTH_32F);
	pipeline->SetInputImage(floatingInputImage);
	pipeline->SetOutputImage(floatingOutputImage);

	Filter* smooth		= new Smooth(WIDTH, HEIGHT, IPL_DEPTH_32F);
	Filter* highpass	= new Highpass(WIDTH, HEIGHT, IPL_DEPTH_32F);
	Filter* binalize	= new Binalize(WIDTH, HEIGHT, IPL_DEPTH_32F);
	Filter* dilate		= new Dilate(WIDTH, HEIGHT, IPL_DEPTH_32F);
	pipeline->PushBack(smooth);
	pipeline->PushBack(highpass);
	pipeline->PushBack(binalize);
	pipeline->PushBack(dilate);
	
	// for blob detection & tracking
//	BlobDetector* detector = new BlobDetector();
//	BlobTracker* tracker = new BlobTracker();
	detector->SetInputImage(integerImage);
	tracker->SetMaxBlobMovement(MAX_TRACKING_DISTANCE);


	// for coordinate convertor
	int gridX, gridY;
	convertor = new windage::SelfCalibrator();
	{
		std::ifstream in(SELF_CALIBRATION_FILE);
		in >> gridX >> gridY;
		convertor->SetGridSize(gridX, gridY);
		std::vector<windage::Vector2>* adjustmentPoints = convertor->GetAdjustmentPoints();
		for(unsigned int i=0; i<adjustmentPoints->size(); i++)
		{
			in >> (*adjustmentPoints)[i].x >> (*adjustmentPoints)[i].y;
		}
		convertor->UpdateHomography();
	}

	// Initialize Thread
	InitializeCriticalSection(&criticalSection);

	// camera connection
	CPGRCamera* camera = new CPGRCamera();
	camera->open();
	camera->start();

	bool first = true;
	bool processing = true;
	bool trackingInfo = false;
	while(processing)
	{
		int64 startTime = cvGetTickCount();

		// grab frame
		camera->update();
		IplImage* grab = camera->GetIPLImage();
		cvResize(grab, resizeImage);
		
		// TODO
		// undistortion & warping & combining multiple images
		cvCopyImage(resizeImage, rawImage);

		// backgruond subtraction
		{
			if(first)
			{
				cvCopyImage(rawImage, backgroundImage);
				first = false;
			}
			cvSub(rawImage, backgroundImage, inputImage);
		}

		// set/update parameter
		{
			smooth->SetParameter(	Filter::SMOOTHNESS,			PARAM_SMOOTHNESS);
			highpass->SetParameter(	Filter::HIGH_PASS_BLUR,		PARAM_HIGH_PASS_BLUR);
			highpass->SetParameter(	Filter::HIGH_PASS_NOISE,	PARAM_HIGH_PASS_NOISE);
			highpass->SetParameter(	Filter::HIGH_PASS_AMPLIFY,	PARAM_HIGH_PASS_AMPLIFY);
			binalize->SetParameter(	Filter::BINALIZE_THRESHOLD, PARAM_BINALIZE_THREHOLD);
			dilate->SetParameter(	Filter::DILATE_STEP,		PARAM_DILATION_STEP);

			detector->SetThreshold(MIN_BLOB_AREA, MAX_BLOB_AREA);
		}

		// do processing
#if USE_THREAD
		_beginthread(ThreadFunc, 0, NULL);
#else
		ThreadFunc(NULL);
#endif

		// draw information
		{
			cvCvtColor(rawImage, resultImage, CV_GRAY2BGR);
			DrawResultInformation(resultImage, &tracker->trackedBlobList, trackingInfo, SHOW_PROCESS);

			if(SHOW_PROCESS)
			{
				sprintf_s(message, "_S_mooth : %d", PARAM_SMOOTHNESS);
				DrawTextToImage(resizeImage, cvPoint(5, 15), message);
				sprintf_s(message, "Highpass _B_lur : %d, _N_oise : %d", PARAM_HIGH_PASS_BLUR, PARAM_HIGH_PASS_NOISE);
				DrawTextToImage(resizeImage, cvPoint(5, 30), message);
				sprintf_s(message, "_A_mplify : %.2f", PARAM_HIGH_PASS_AMPLIFY);
				DrawTextToImage(resizeImage, cvPoint(5, 45), message);
				sprintf_s(message, "_T_hreshold : %.2f", PARAM_BINALIZE_THREHOLD);
				DrawTextToImage(resizeImage, cvPoint(5, 60), message);
				sprintf_s(message, "Blob : %d < area < %d", MIN_BLOB_AREA, MAX_BLOB_AREA);
				DrawTextToImage(resizeImage, cvPoint(5, 75), message);

				cvShowImage("input image", resizeImage);
			}
		}

		// result
		double processingWholeTime = (cvGetTickCount() - startTime)/(cvGetTickFrequency() * 1000.0);
		sprintf_s(message, "Prcessing time : %.2f ms | Thread time : %.2f ms",  processingWholeTime, processingTime);
		std::cout << message << std::endl;
		DrawTextToImage(resultImage, cvPoint(5, HEIGHT-15), message);

		cvShowImage("result image", resultImage);

		// calculate wating time for next step
		// to guarantee the interval between frames
		int delay = HOLE_PROCESSING_TIME - cvRound(processingWholeTime);
		if(delay < 1) delay = 1;
		char ch = cvWaitKey(delay);

		switch(ch)
		{
		case 27:
		case 'q':
		case 'Q':
			processing = false;
			break;

		case 'S': PARAM_SMOOTHNESS++; break;
		case 's': if(PARAM_SMOOTHNESS > 1) PARAM_SMOOTHNESS--; break;
		case 'B': PARAM_HIGH_PASS_BLUR++; break;
		case 'b': if(PARAM_HIGH_PASS_BLUR > 1) PARAM_HIGH_PASS_BLUR--; break;
		case 'N': PARAM_HIGH_PASS_NOISE++; break;
		case 'n': if(PARAM_HIGH_PASS_NOISE > 1) PARAM_HIGH_PASS_NOISE--; break;
		case 'A': PARAM_HIGH_PASS_AMPLIFY += 5; break;
		case 'a': PARAM_HIGH_PASS_AMPLIFY -= 5; break;
		case 'T': PARAM_BINALIZE_THREHOLD += 0.05; break;
		case 't': PARAM_BINALIZE_THREHOLD -= 0.05; break;

		case 'g':
		case 'G':
			cvResize(rawImage, backgroundImage);
			break;

		case 'i':
		case 'I':
			trackingInfo = !trackingInfo;
			break;
				
		case 'm':
		case 'M':
			cvWaitKey();
			break;
		case ' ':
			SHOW_PROCESS = !SHOW_PROCESS;
			cvDestroyAllWindows();
			cvNamedWindow("result image");
			if(SHOW_PROCESS)
				CreateWindows(pipeline);				
			break;
		}
		
	}

	// waiting to terminate thread
	cvWaitKey(100);

	if(messenger) delete messenger;

	cvReleaseImage(&resultImage);
	cvReleaseImage(&inputImage);
	cvReleaseImage(&resizeImage);
	
	cvReleaseImage(&tempImage);
	cvReleaseImage(&integerImage);
	cvReleaseImage(&floatingInputImage);
	cvReleaseImage(&floatingOutputImage);

	cvReleaseImage(&backgroundImage);

	camera->stop();
	camera->close();
	delete camera;

	cvDestroyAllWindows();
}