//includes for ROS and messages
#include "ros/ros.h"
#include <std_msgs/Int32.h>
#include <std_msgs/Int8MultiArray.h>

//includes for OpenCV (legacy C API)
#include "cv.h"
#include "highgui.h"
//#include "features2d.h"

//C system headers
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

//C++ standard library
#include <cmath>     // sqrt, pow in identify_feature
#include <fstream>   // std::ofstream in recognize_tag
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

//project headers
#include "saint_nr_9/RobotPosition.h"
#include "StateMachine.h"
#include "queen.h"


// Incoming camera frames are raw 320x240, 3-channel byte arrays.
#define IMAGEWIDTH 320
#define IMAGEHEIGHT 240
// Minimum count of non-zero (thresholded) pixels before a tag counts as present.
#define DETECT_THRESHOLD 30

// Result of is_centered_image(); LEFT_OUT is declared but never returned there.
enum center_definition{PERFECT, LEFT_OUT, RIGHT_OUT};

using namespace std;

// Latest robot pose, updated by posi_update() from the /position topic.
int posX;
int posY;
int heading;
int ID = 0;  // tag id used when naming saved scene images

int counter = 0;  // NOTE(review): appears unused in this file — confirm before removing
ros::Publisher detect_Pub;            // Signals/state_tag
ros::Publisher finished_detec_Pub;    // Signals/state_tag_finish
ros::Publisher finished_process_Pub;  // Signals/state_process_finish

int global_state;   // last state received on State/Info (values from StateMachine.h)
bool seen = false;  // debounce flag: true while the current tag is still in view

IplImage *image = 0;  // NOTE(review): file-scope image is shadowed by locals; looks unused
float pairs = 0;      // matched-pair count, written by locatePlanarObject(), read by surf()
// Top three match scores and their file indices; uninitialized globals are
// zero-initialized (static storage), so maxV/second start at 0 as well.
float maxV, second, third = 0;
int indexMax, indexSec, indexTh = 0;
vector<string> files = vector<string>();



// Collect every directory entry name in `dir` (including "." and "..") into
// `file`. Returns 0 on success, otherwise the errno left by opendir().
int getdir (string dir, vector<string> &file)
{
    DIR *handle = opendir(dir.c_str());
    if (handle == NULL)
    {
        cout << "Error(" << errno << ") opening " << dir << endl;
        return errno;
    }

    for (struct dirent *entry = readdir(handle); entry != NULL; entry = readdir(handle))
        file.push_back(string(entry->d_name));

    closedir(handle);
    return 0;
}

// Accumulate the squared Euclidean distance between two SURF descriptors,
// four components per step. Stops early once the running total exceeds
// `best`, since the caller only cares about distances better than that bound.
double compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
{
    assert( length % 4 == 0 );  // SURF descriptors come in multiples of 4 floats
    double acc = 0;
    for( int i = 0; i < length && acc <= best; i += 4 )
    {
        const double a = d1[i]   - d2[i];
        const double b = d1[i+1] - d2[i+1];
        const double c = d1[i+2] - d2[i+2];
        const double e = d1[i+3] - d2[i+3];
        acc += a*a + b*b + c*c + e*e;
    }
    return acc;
}
int naiveNearestNeighbor( const float* vec, int laplacian,
                      const CvSeq* model_keypoints,
                      const CvSeq* model_descriptors ){
                      
    int length = (int)(model_descriptors->elem_size/sizeof(float));
    int i, neighbor = -1;
    double d, dist1 = 1e6, dist2 = 1e6;
    CvSeqReader reader, kreader;
    cvStartReadSeq( model_keypoints, &kreader, 0 );
    cvStartReadSeq( model_descriptors, &reader, 0 );

    for( i = 0; i < model_descriptors->total; i++ )
    {
        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
        const float* mvec = (const float*)reader.ptr;
    	CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
        if( laplacian != kp->laplacian )
            continue;
        d = compareSURFDescriptors( vec, mvec, dist2, length );
        if( d < dist1 )
        {
            dist2 = dist1;
            dist1 = d;
            neighbor = i;
        }
        else if ( d < dist2 )
            dist2 = d;
    }
    if ( dist1 < 0.6*dist2 )
        return neighbor;
    return -1;
}

void findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
           const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs )
{
    int i;
    CvSeqReader reader, kreader;
    cvStartReadSeq( objectKeypoints, &kreader );
    cvStartReadSeq( objectDescriptors, &reader );
    ptpairs.clear();

    for( i = 0; i < objectDescriptors->total; i++ )
    {
        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
        const float* descriptor = (const float*)reader.ptr;
        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
        int nearest_neighbor = naiveNearestNeighbor( descriptor, kp->laplacian, imageKeypoints, imageDescriptors );
        if( nearest_neighbor >= 0 )
        {
            ptpairs.push_back(i);
            ptpairs.push_back(nearest_neighbor);
        }
    }
}

// Estimate a homography between the object and scene keypoint matches and
// project the object's four corners into the scene.
// Returns 1 on success (dst_corners filled), 0 when fewer than 4 pairs match
// or cvFindHomography fails.
// Side effect: writes the matched-pair count into the file-global `pairs`,
// which surf() later reads to compute its score.
int locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                    const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                    const CvPoint src_corners[4], CvPoint dst_corners[4] ){
    double h[9];
    CvMat _h = cvMat(3, 3, CV_64F, h);  // 3x3 homography backed by h[]
    vector<int> ptpairs;
    vector<CvPoint2D32f> pt1, pt2;
    CvMat _pt1, _pt2;
    int i, n;

    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );

    // ptpairs holds (object, scene) index pairs interleaved, so n = pair count.
    n = ptpairs.size()/2;
    pairs = (float)n;  // exported to the global BEFORE the early-out below
    if( n < 4 )
        return 0;  // cvFindHomography needs at least 4 correspondences

    pt1.resize(n);
    pt2.resize(n);
    for( i = 0; i < n; i++ )
    {
        pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
        pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
    }

    // Wrap the point vectors as 1xN 2-channel matrices for the C API.
    _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
    _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );
    if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))
        return 0;

    // Apply the homography (with perspective divide) to each source corner.
    for( i = 0; i < 4; i++ )
    {
        double x = src_corners[i].x, y = src_corners[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
    }

    return 1;
}

// Score how well template `object` matches `image` using SURF features:
// (number of accepted descriptor pairs) / (object descriptor count).
// Scores backed by two or fewer pairs are clamped to 0.
// Side effect: updates the file-global `pairs` via locatePlanarObject().
// `count` is unused but kept for interface compatibility with callers.
float surf(IplImage *image, IplImage *object, int count){
	CvMemStorage* storage = cvCreateMemStorage(0);

	CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
	CvSeq *imageKeypoints = 0, *imageDescriptors = 0;

	CvSURFParams params = cvSURFParams(500, 1);
	cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
	cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );

	// locatePlanarObject() performs the matching and stores the accepted
	// pair count in the global `pairs`; the projected corners themselves
	// are not used by any caller, nor was the side-by-side visualization
	// image the original built here (removed: it leaked every call).
	CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
	CvPoint dst_corners[4];
	locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, src_corners, dst_corners );

	// Too few pairs is indistinguishable from noise.
	if(pairs <= 2)
		pairs = 0;

	// Fix: guard the division when the template yields no descriptors.
	const float score = objectDescriptors->total > 0
		? pairs/objectDescriptors->total
		: 0.0f;

	// Fix: the original leaked `storage` on every call; releasing it frees
	// all four keypoint/descriptor sequences at once.
	cvReleaseMemStorage( &storage );
	return score;
}

// Decide whether the detected blob is usable: RIGHT_OUT when the binary
// image is nearly empty or any lit pixel touches the two right-most columns,
// PERFECT otherwise. (LEFT_OUT is declared in the enum but never produced.)
int is_centered_image(IplImage *img)
{
  // Too few white pixels: nothing reliable in view.
  if (cvCountNonZero(img) < DETECT_THRESHOLD)
    return RIGHT_OUT;

  // Scan the last two columns; a hit means the blob is cut off on the right.
  const int first_col = img->width - 2;
  for (int row = 0; row < img->height; row++)
    for (int col = first_col; col < img->width; col++)
      if (CV_IMAGE_ELEM(img, uchar, row, col))
        return RIGHT_OUT;

  return PERFECT;
}


// Identify the object in `img` in two passes:
//   1) SHAPE: match a grayscale copy of `img` against every template in
//      objects_black/ and keep the best SURF score;
//   2) COLOR: match the B/G/R channels of `img` against every template in
//      the directory picked by the winning shape, tracking the top three.
// Prints the winning tag to stdout.
// Side effects: resets the file-global score bookkeeping (maxV/second/third,
// index*) and the `files` list; smooths `img` in place during pass 2.
void identify_feature(IplImage *img){
  string dir = string("./image_recognition/objects/objects_black/");
  getdir(dir,files);

	/////////////////////////////////////////////////
	/////			FIND SHAPE				                /////
	/////////////////////////////////////////////////

	int temp = (int)files.size();
	for (int i = 0; i < temp; i++) {
	   if(files[i].find(".jpg") == string::npos)
	   		continue;

		IplImage* object = cvLoadImage( (dir + files[i]).c_str(), CV_LOAD_IMAGE_GRAYSCALE );
		if(!object){
			cout<<"Object!\n";
			exit(-1);
		}
		IplImage *image = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1);
		if(!image ){
			cout<<"Image!\n";
			exit(-1);
		}
		cvCvtColor(img, image, CV_RGB2GRAY );

		cvSmooth(object, object, CV_GAUSSIAN, 5);
		cvSmooth(image, image, CV_GAUSSIAN, 1);

		float wtf = surf(image, object, i);
		printf("%s   \t%f\n", files[i].c_str(), wtf);

		if(wtf > maxV){
			maxV = wtf;
			indexMax = i;
		}

		// Fix: the original leaked both images on every iteration.
		cvReleaseImage(&object);
		cvReleaseImage(&image);
  }

	printf("\nShape is %s with value %f.\n", files[indexMax].c_str(), maxV);
	maxV = 0;

	/////////////////////////////////////////////////
	/////			FIND COLOR	                      /////
	/////////////////////////////////////////////////
	string dir2;
	// NOTE(review): "apple" mapped to the dryer directory in the original
	// code — kept as-is, but confirm this is intentional.
	if(files[indexMax].find("apple") != string::npos)
		dir2 = string("./image_recognition/objects/dryer/");
	else if(files[indexMax].find("banana") != string::npos)
		dir2 = string("./image_recognition/objects/banana/");
	else if(files[indexMax].find("book") != string::npos)
		dir2 = string("./image_recognition/objects/book/");
	else if(files[indexMax].find("camera") != string::npos)
		dir2 = string("./image_recognition/objects/camera/");
	else if(files[indexMax].find("computer") != string::npos)
		dir2 = string("./image_recognition/objects/computer/");
	else if(files[indexMax].find("dryer") != string::npos)
		dir2 = string("./image_recognition/objects/dryer/");
	// Fix: "glasses" must be tested before "glass"; with the original order
	// every glasses_* template fell into the glass/ branch.
	else if(files[indexMax].find("glasses") != string::npos)
		dir2 = string("./image_recognition/objects/glasses/");
	else if(files[indexMax].find("glass") != string::npos)
		dir2 = string("./image_recognition/objects/glass/");
	else if(files[indexMax].find("hammer") != string::npos)
		dir2 = string("./image_recognition/objects/hammer/");
	else if(files[indexMax].find("cup") != string::npos)
		dir2 = string("./image_recognition/objects/cup/");
	else if(files[indexMax].find("scissors") != string::npos)
		dir2 = string("./image_recognition/objects/scissors/");
	else if(files[indexMax].find("teddy") != string::npos)
		dir2 = string("./image_recognition/objects/teddy/");

	vector<string> files2 = vector<string>();
	getdir(dir2,files2);
	indexMax = 0;
	for (int i = 0; i < (int)files2.size(); i++) {
	   if(files2[i].find(".jpg") == string::npos)
	   		continue;

		IplImage* object = cvLoadImage( (dir2 + files2[i]).c_str(), CV_LOAD_IMAGE_UNCHANGED );
		if(!object){
			cout<<"Object!\n";
			exit(-1);
		}
		IplImage *image = img;  // alias: pass 2 works on the live camera image
		if(!image ){
			cout<<"Image!\n";
			exit(-1);
		}

		cvSmooth(object, object, CV_GAUSSIAN, 5);
		cvSmooth(image, image, CV_GAUSSIAN, 1);

		// Split both images into their B/G/R planes so each channel can be
		// scored independently.
		CvSize s = cvSize(object->width, object->height);
		int d = object->depth;
		IplImage* Ro = cvCreateImage(s, d, 1);
		IplImage* Go = cvCreateImage(s, d, 1);
		IplImage* Bo = cvCreateImage(s, d, 1);
		cvSplit(object, Bo, Go, Ro, 0);

		CvSize s2 = cvSize(image->width, image->height);
		d = image->depth;
		IplImage* Ri = cvCreateImage(s2, d, 1);
		IplImage* Gi = cvCreateImage(s2, d, 1);
		IplImage* Bi = cvCreateImage(s2, d, 1);
		cvSplit(image, Bi, Gi, Ri, 0);

		// Euclidean norm of the three per-channel SURF scores.
		float wtf = sqrt(pow(surf(Ri, Ro, i), 2) + pow(surf(Gi, Go, i), 2) + pow(surf(Bi, Bo, i), 2));
		printf("%s   \t%f\n", files2[i].c_str(), wtf);

		// Keep the three best scores (and their indices) in order.
		if(wtf > maxV){
			third = second;
			second = maxV;
			indexTh = indexSec;
			indexSec = indexMax;

			maxV = wtf;
			indexMax = i;
		}
		else if(wtf > second){
			third = second;
			indexTh = indexSec;

			second = wtf;
			indexSec = i;
		}
		else if(wtf > third){
			third = wtf;
			indexTh = i;
		}

		// Fix: the original leaked the template and all six channel planes.
		cvReleaseImage(&Ro);
		cvReleaseImage(&Go);
		cvReleaseImage(&Bo);
		cvReleaseImage(&Ri);
		cvReleaseImage(&Gi);
		cvReleaseImage(&Bi);
		cvReleaseImage(&object);
  }

	// Fix: the original indexed files2[indexMax] unconditionally, crashing
	// when no shape matched (empty dir2) or the directory had no templates.
	if(!files2.empty()){
		printf("\n1: %f %s\n2: %f %s\n3: %f %s\n", maxV, files2[indexMax].c_str(), second, files2[indexSec].c_str(), third, files2[indexTh].c_str());

		if((maxV - second)/maxV > 0.08)
			printf("Tag is %s!\n", files2[indexMax].c_str());
		else{
			// Scores too close to call: fall back to the common prefix before "_".
			int pos = (int)files2[indexMax].find("_");
			string tag = files2[indexMax].substr(0, pos);

			printf("Tag is %s!\n", tag.c_str());
		}
	}

	// Reset the global bookkeeping for the next call.
	files.clear();
	files2.clear();
	maxV = 0;
	second = 0;
	third = 0;
	indexMax = 0;
	indexSec = 0;
	indexTh = 0;
}

// Handle a tag candidate: if the thresholded blob passes is_centered_image(),
// signal the state machine, save the raw frame to disk named by
// ID/position/heading, append the sighting to the shared tag log, then signal
// that processing is finished. Does nothing when the blob is not PERFECT.
void recognize_tag(IplImage *img, IplImage *img_b_w)
{
  std_msgs::Int32 p;
  p.data = 1;
  if (is_centered_image(img_b_w) == PERFECT)
  {
    // Tell the state machine detection is done before the slow disk I/O.
    finished_detec_Pub.publish(p);
    ros::spinOnce();

    // Archive the frame as <ID>_<x>_<y>_<heading>.jpg for later processing.
    std::stringstream scene_name;
    scene_name<<"./image_recognition/scenes/"<<ID<<"_"<<posX<<"_"<<posY<<"_"<<heading<<".jpg";
    cvSaveImage((scene_name.str()).c_str(), img);

    // Append the sighting to the tag log (path declared in queen.h).
    std::ofstream myfile;
    myfile.open(TAG_PATH_1.c_str(),ios::app);
    // Fix: the original wrote without checking that the file opened.
    if (myfile.is_open())
    {
      myfile << ID << " " << posX << " " << posY << " " << heading << endl;
      myfile.close();
    }
    else
    {
      printf("Could not open tag log %s\n", TAG_PATH_1.c_str());
    }

    sleep(1);  // give the state machine time to react before the next signal
    finished_process_Pub.publish(p);
    ros::spinOnce();
  }
}

// Threshold `img` for the tag's red hue (both ends of the HSV hue wheel) and
// decide whether a tag just appeared. Returns true exactly once per sighting
// (the `seen` flag debounces until the tag leaves the frame). While the state
// machine is in TAG_HANDLE, the frame is forwarded to recognize_tag() instead
// and false is returned.
bool isThereSomethingInTheImage(IplImage *img)
{
	CvSize size = cvGetSize(img);
	IplImage* hsv_frame = cvCreateImage(size, 8, 3);
	IplImage* thresholded = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* thresholded2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	// Red wraps around hue 0/180, so threshold both ends and OR them.
	CvScalar hsv_min = cvScalar(0, 165, 105, 0);
	CvScalar hsv_max = cvScalar(10,256, 256, 0);
	CvScalar hsv_min2 = cvScalar(175, 165, 105, 0);
	CvScalar hsv_max2 = cvScalar(180, 256, 256, 0);

	cvCvtColor(img, hsv_frame, CV_BGR2HSV);
	cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
	cvInRangeS(hsv_frame, hsv_min2, hsv_max2, thresholded2);
	cvOr(thresholded, thresholded2, thresholded);
	int countWhite = cvCountNonZero(thresholded);

	bool result = false;
	if (global_state == TAG_HANDLE)
	{
		printf("Reset Tag\n");
		seen = true;
		recognize_tag(img, thresholded);
	}
	else if (countWhite > DETECT_THRESHOLD && !seen)
	{
		printf("Detected Tag\n");
		result = true;
	}
	else if (countWhite < DETECT_THRESHOLD)
	{
		// Tag left the frame: re-arm the detector.
		// Fix: this branch fell off the end of the function in the original
		// (undefined return value); it now returns false explicitly.
		seen = false;
	}

	// Fix: the original leaked all three scratch images on every call.
	cvReleaseImage(&hsv_frame);
	cvReleaseImage(&thresholded);
	cvReleaseImage(&thresholded2);
	return result;
}

// State/Info subscriber: cache the state machine's current state so the
// camera callbacks can tell whether we are in TAG_HANDLE mode.
void state_callback(const std_msgs::Int32::ConstPtr& state)
{
  global_state = state->data;
}

// /position subscriber: cache the robot's pose; recognize_tag() uses these
// globals when naming saved frames and writing the tag log.
void posi_update(const saint_nr_9::RobotPosition::ConstPtr& pos)
{
    posX = pos->x;
    posY = pos->y;
    heading = pos->heading;
}

// Callback for camera 0: rebuild an IplImage from the flat int8 array and
// publish on Signals/state_tag when the frame contains the tag color.
void cam0_cb(const std_msgs::Int8MultiArray::ConstPtr& array)
{
	printf("cam0_cb\n");

	const size_t expected = (size_t)IMAGEWIDTH * IMAGEHEIGHT * 3;
	// Fix: the original indexed array->data.at(i) blindly; a short message
	// threw std::out_of_range (terminating the node) and leaked the image.
	if (array->data.size() < expected)
	{
		printf("cam0_cb: dropped short frame (%lu of %lu bytes)\n",
		       (unsigned long)array->data.size(), (unsigned long)expected);
		return;
	}

	IplImage *img = cvCreateImage(cvSize(IMAGEWIDTH, IMAGEHEIGHT), IPL_DEPTH_8U, 3);
	// NOTE(review): assumes widthStep == width*3 (320*3 = 960 is already
	// 4-byte aligned, so there is no row padding) — flat copy is safe here.
	memcpy(img->imageData, &array->data[0], expected);

	if (isThereSomethingInTheImage(img))
	{
		std_msgs::Int32 p;
		p.data = 1;
		detect_Pub.publish(p);
	}
	ros::spinOnce();
	cvReleaseImage(&img);
}

// Callback for camera 1: identical handling to cam0_cb — rebuild an IplImage
// from the flat int8 array and publish when the frame contains the tag color.
void cam1_cb(const std_msgs::Int8MultiArray::ConstPtr& array)
{
	printf("cam1_cb\n");

	const size_t expected = (size_t)IMAGEWIDTH * IMAGEHEIGHT * 3;
	// Fix: the original indexed array->data.at(i) blindly; a short message
	// threw std::out_of_range (terminating the node) and leaked the image.
	if (array->data.size() < expected)
	{
		printf("cam1_cb: dropped short frame (%lu of %lu bytes)\n",
		       (unsigned long)array->data.size(), (unsigned long)expected);
		return;
	}

	IplImage *img = cvCreateImage(cvSize(IMAGEWIDTH, IMAGEHEIGHT), IPL_DEPTH_8U, 3);
	// NOTE(review): assumes widthStep == width*3 (320*3 = 960 is already
	// 4-byte aligned, so there is no row padding) — flat copy is safe here.
	memcpy(img->imageData, &array->data[0], expected);

	if (isThereSomethingInTheImage(img))
	{
		std_msgs::Int32 p;
		p.data = 1;
		detect_Pub.publish(p);
	}
	ros::spinOnce();
	cvReleaseImage(&img);
}

// Entry point: register the camera, state and position subscriptions plus
// the three tag-related publishers, then hand control to the ROS event loop.
int main(int argc, char **argv)
{
	printf("Starting TagDetecter\n");
	ros::init(argc, argv, "TagDetecter");
	ros::NodeHandle n;

	// Inputs.
	ros::Subscriber img0_sub  = n.subscribe("/camera0_img", 1, cam0_cb);
	ros::Subscriber img1_sub  = n.subscribe("/camera1_img", 1, cam1_cb);
	ros::Subscriber state_sub = n.subscribe("State/Info", 1, state_callback);
	ros::Subscriber posi      = n.subscribe<saint_nr_9::RobotPosition>("/position", 1, posi_update);

	// Outputs (globals used by the callbacks above).
	detect_Pub           = n.advertise<std_msgs::Int32>("Signals/state_tag",10);
	finished_detec_Pub   = n.advertise<std_msgs::Int32>("Signals/state_tag_finish",10);
	finished_process_Pub = n.advertise<std_msgs::Int32>("Signals/state_process_finish",10);

	//centered_image_client = n.serviceClient<saint_nr_9::is_centered_image>("test_image_centered");
	ros::spin();
	return 0;
}
