#include "Utilities.h"

#include <cmath>
#include <stdarg.h>

static float range[] = { 0,256 };	// The upper boundary is exclusive
static const float* histRange = { range };
static int histSize = 256;
static std::vector< std::vector<int> > offset(121, std::vector<int>(2) );	// Addition
static int r=15;

//====================================================================================================================//
cv::Mat histCalc256( const cv::Mat& inputImage, const cv::Mat& mask )
{
	// Compute the 256-bin histogram of a single channel of "inputImage"
	// (restricted to "mask" if given) and min-max rescale the bin counts
	// into the range [0, inputImage.rows].
	const bool uniform = true;
	const bool accumulate = false;

	cv::Mat rawHist;
	cv::calcHist( &inputImage, 1, 0, mask, rawHist, 1, &histSize, &histRange, uniform, accumulate );

	cv::Mat scaledHist;
	cv::normalize( rawHist, scaledHist, 0, inputImage.rows, cv::NORM_MINMAX, -1 );

	return scaledHist;
}

//====================================================================================================================//
float getHistBinVal( const cv::Mat& hist,const int binNum )
{
	// Return the float count stored in histogram bin "binNum".
	const float binValue = hist.at<float>( binNum );
	return binValue;
}

//====================================================================================================================//
float getFreqOfBin( const cv::Mat& histogram )
{
	// Sum the magnitudes of all histogram bins, starting at bin 1.
	// NOTE(review): bin 0 is deliberately skipped here (and in shannonEntropy),
	// presumably to ignore pure-black/background pixels -- confirm with callers.
	float frequency = 0.0f;
	for( int i=1; i<histSize; i++ )
	{
		// std::fabs guarantees the floating-point overload; the previous
		// unqualified abs() could resolve to the integer version and truncate.
		frequency += std::fabs( getHistBinVal( histogram,i ) );
	}
	return frequency;
}

//====================================================================================================================//
float shannonEntropy( const cv::Mat& Im, const cv::Mat& mask )
{
	// Estimate the Shannon entropy (base-10 log) of a single image channel,
	// optionally restricted to "mask". Returns 0 for an empty image.
	float entropy = 0.0f;

	if (!Im.empty())
	{
		cv::Mat imHist = histCalc256( Im, mask );

		// Total mass over bins 1..histSize-1 (bin 0 skipped -- see getFreqOfBin).
		float frequency = getFreqOfBin( imHist );

		for( int i=1; i<histSize; i++ )
		{
			// std::fabs avoids accidental resolution to integer abs().
			float Hc = std::fabs( getHistBinVal( imHist,i ) );
			if( Hc!=0 )
				entropy += -(Hc/frequency) * std::log10( Hc/frequency );
		}
	}
	return entropy;
}

//====================================================================================================================//
float shannonEntropy( const cv::Mat& Im )
{
	// Convenience overload: entropy over the whole image (no mask).
	const cv::Mat emptyMask;
	return shannonEntropy( Im, emptyMask );
}

//====================================================================================================================//
std::vector< cv::Mat > splitFrame( const cv::Mat& img )
{
	// Split a multi-channel image into its individual planes.
	std::vector< cv::Mat > planes;
	cv::split( img, planes );
	return planes;
}

cv::Mat hessianFilter( cv::Mat inputImage, int sigma )
{
	// NOTE(review): this function looks unfinished -- "inputImage" is never
	// used, and the X/Y grid construction below was abandoned (commented out).
	// Round sigma up to an odd value, then scale by 3.
	if( sigma %2 == 0 )
		sigma = sigma+1;
	sigma = sigma*3;
	/*
	cv::Mat filterX,filterY,X,Y;
	for( int i=-sigma;i<sigma;i++)
	{
		for( int j=-sigma;j<sigma;j++ )
		{
			X(i)(j) = i;
			Y[i][j] = j;
		}
	}
	*/
	// NOTE(review): the third argument of cv::Mat::ones is the *type* flag
	// (e.g. CV_32F), not a dimension -- passing "sigma" here is almost
	// certainly a bug; confirm the intended element type before relying on it.
	cv::Mat X = cv::Mat::ones( sigma,sigma,sigma );
	return X;
}

cv::Mat& keypoint2Image( cv::Mat &inputImage, KeypointList &keypoints, StatusVector &status /*= StatusVector()*/, ErrorVector& errors /*= ErrorVector() */, cv::Matx33d &H /*= cv::Mat()*/ )
{
	// Warp "keypoints" by homography H (in place) and draw a circle on
	// "inputImage" at every point whose tracking status is non-zero.
	// Returns the annotated image (the same object as "inputImage").
	const int radius = 10;	// was hard-coded inline; also avoids shadowing the file-static "r"

	cv::perspectiveTransform( keypoints, keypoints, H );

	// If the caller supplied no per-point status/error data, treat every
	// point as valid.
	if (status.size() != keypoints.size())
	{
		status.resize( keypoints.size(), 1 );
		errors.resize( keypoints.size(), 1 );
	}

	// size_t index avoids the signed/unsigned comparison of the original loop.
	for( size_t i = 0; i < keypoints.size(); i++ )
	{
		if (status[i])
			cv::circle( inputImage, keypoints[i], radius, cv::Scalar::all(255), 2, 8, 0 );
	}

	return inputImage;
}

//====================================================================================================================//
void showImageChannels( char *title, cv::Mat &inputImage, cv::Mat &frame1, cv::Mat &frame2, cv::Mat &frame3 )
{
	// Displays a 2x2 mosaic in window "title": the original image (converted
	// to gray) in the top-left quadrant and the three supplied channel images
	// in the other quadrants, each labelled via cv::putText.
	// Assumes all four inputs share the same size and that inputImage is
	// 3-channel BGR (CV_BGR2GRAY below) -- TODO confirm with callers.
	/*std::vector< cv::Mat > channels;*/
	cv::Mat tempImage=inputImage.clone(), H=frame1.clone(), S=frame2.clone(), V=frame3.clone();
	/*cv::split( inputImage,channels );
	H = channels[0];
	S = channels[1];
	V = channels[2];*/
	
	// Label each quadrant in-place on the cloned images.
	/*tempImage = inputImage.clone();*/
	std::string temp = "Original";
	cv::putText( tempImage, temp,cvPoint(50,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2,cv::Scalar::all(255),2);
	
	temp = "Channel-0";
	cv::putText( H, temp,cvPoint(50,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2,cv::Scalar::all(255),2);
//	std::string maxV_1 = "Max Value = ";
//	double max;
////	max = std::max_element( H.begin<double>(),H.end<double>() );
//	maxV_1 += toString( max );
//	cv::putText( H, maxV_1,cvPoint(0,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2.0,cv::Scalar::all(255), 2);

	temp = "Channel-1";
	cv::putText( S, temp,cvPoint(50,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2,cv::Scalar::all(255),2);
	/*std::string maxV_2 = "Max Value = ";
	maxV_2 += toString( std::max_element( S.begin<double>(),S.end<double>() ) );
	cv::putText( S, maxV_2,cvPoint(0,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2.0,cv::Scalar::all(255), 2);*/

	temp = "Channel-2";
	cv::putText( V, temp,cvPoint(50,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2,cv::Scalar::all(255),2);
	/*std::string maxV_3 = "Max Value = ";
	maxV_3 += toString( std::max_element( V.begin<double>(),V.end<double>() ) );
	cv::putText( V, maxV_3,cvPoint(0,50),cv::FONT_HERSHEY_COMPLEX_SMALL,2.0,cv::Scalar::all(255), 2);*/


	// Build the mosaic: a canvas twice the width and height of one frame.
	// NOTE(review): the Mat constructor expects a *type* (e.g. CV_8UC1);
	// depth() happens to coincide with the type only for single-channel 8-bit
	// data -- verify tempImage's format.
	cv::Size size = inputImage.size();
	cv::Mat dispImage( size.height*2, size.width*2, tempImage.depth() );
	cv::Mat temp1;
	cv::cvtColor( tempImage,temp1,CV_BGR2GRAY );

	// Copy each labelled frame into its quadrant via ROI views of dispImage.
	cv::Mat topLeft( dispImage, cv::Rect( 0,0,size.width,size.height ) );
	temp1.copyTo( topLeft );
	cv::Mat topRight( dispImage,cv::Rect( size.width,0,size.width,size.height ) );
	H.copyTo( topRight );
	cv::Mat bottomLeft( dispImage,cv::Rect( 0,size.height,size.width,size.height ) );
	S.copyTo( bottomLeft );
	cv::Mat bottomRight( dispImage,cv::Rect( size.width,size.height,size.width,size.height ) );
	V.copyTo( bottomRight );
	
	cv::namedWindow( title,cv::WINDOW_NORMAL );
	cv::imshow( title,dispImage );
}

//====================================================================================================================//
std::vector< cv::KeyPoint > keypointFAST( const cv::Mat &inputImage, const cv::Mat &mask /*= cv::Mat()*/ )
{
	// Detect FAST keypoints on a vessel-enhanced version of "inputImage".
	// For a 3-channel input only channel 0 is used; a single-channel input
	// is processed directly.
	cv::Mat tempImage;

	if ( inputImage.channels() == 3 )
	{
		std::vector< cv::Mat > chann;
		cv::split( inputImage, chann );
		tempImage = chann[0].clone();	// channel 0 only; other planes were never used
	}
	else // already single channel
		tempImage = inputImage.clone();

	// Enhance dark curvilinear (vessel-like) structures before detection.
	cv::Mat frameGray;
	vesselnessMorphology( tempImage,frameGray,5,5 );

	// Preserved from the original debugging flow (the display windows were
	// already commented out, but the 20 ms wait remains observable behaviour).
	cvWaitKey(20);

	std::vector< cv::KeyPoint > keypoint;
	cv::FastFeatureDetector fast( 10,true );	// threshold 10, non-max suppression on
	fast.detect( frameGray, keypoint, mask );

	return keypoint;
}

std::vector< cv::KeyPoint > keypointTomasi( const cv::Mat &inputImage, const cv::Mat &mask /*= cv::Mat()*/ )
{
	// Shi-Tomasi / Harris corner detection, returned as cv::KeyPoint objects.
	cv::Mat tempImage;

	if ( inputImage.channels() == 3 ) // color: reduce to luminance first
	{
		std::vector< cv::Mat > chann;
		cv::split( inputImage, chann );
		// BT.601 luma weights; channels are stored B,G,R.
		tempImage = 0.299*chann[2] + 0.587*chann[1] + 0.114*chann[0];
	}
	else // already gray
		tempImage = inputImage;

	// Detector parameters.
	const int maxCorners = 200;
	const double qualityLevel = 0.01;
	const double minDistance = 15;
	const int blockSize = 31;
	const bool useHarrisDetector = true;
	const double k = 0.04;	// Harris free parameter

	KeypointList corners;
	goodFeaturesToTrack( tempImage,corners,maxCorners,qualityLevel,minDistance,mask,blockSize,
		useHarrisDetector,k );

	std::vector< cv::KeyPoint > keypoint;
	cv::KeyPoint::convert( corners,keypoint );

	return keypoint;
}

//====================================================================================================================//
std::vector< cv::KeyPoint > keypointORB( const cv::Mat &inputImage, const cv::Mat &mask /*= cv::Mat()*/ )
{
	// Detect ORB keypoints. For a 3-channel input only channel 0 is used.
	cv::Mat tempImage;
	if ( inputImage.channels() == 3 )
	{
		std::vector< cv::Mat > chann;
		cv::split( inputImage, chann );
		tempImage = chann[0].clone();	// channel 0 only; other planes were never used
	}
	else // already single channel
		tempImage = inputImage.clone();

	// ORB: 500 features, scale 1.2, 8 levels, edge threshold 30,
	// first level 0, WTA_K 2, HARRIS score, patch size 30.
	std::vector< cv::KeyPoint > keypoint;
	cv::ORB orb( 500,1.2f,8,30,0,2,0,30 );
	orb( tempImage, mask, keypoint );

	return keypoint;
}

std::vector< cv::KeyPoint > keypointBRISK( const cv::Mat &inputImage, const cv::Mat &mask )
{
	// Detect BRISK keypoints on a LoG-filtered version of the input and run
	// compute() so weak keypoints are pruned the same way the original did.
	// NOTE(review): "mask" is currently unused -- detect() below ignores it.
	cv::Mat tempImage;
	if ( inputImage.channels() == 3 )
	{
		std::vector< cv::Mat > chann;
		cv::split( inputImage, chann );
		tempImage = chann[0].clone();	// channel 0 only; other planes were never used
	}
	else // already single channel
		tempImage = inputImage.clone();

	// Edge-enhance before detection.
	cv::Mat imEdge_V = LoGFilter( tempImage );

	// BRISK parameters.
	int Threshl=60;					// detection threshold
	int Octaves=2;					// pyramid layers
	float PatternScales = 1.0f;

	cv::BRISK BRISKD( Threshl,Octaves,PatternScales );
	BRISKD.create("Feature2D.BRISK");	// NOTE(review): return value discarded; likely unnecessary

	std::vector<cv::KeyPoint> keypoint2;
	cv::Mat descriptors2;
	BRISKD.detect( imEdge_V, keypoint2 );
	BRISKD.compute( imEdge_V, keypoint2,descriptors2 );	// may drop keypoints near borders

	return keypoint2;
}

cv::Mat customEdge( const cv::Mat& inputImage, int scale /*= 1*/, int delta /*= 0 */ )
{
	// Sobel-based edge map of a grayscale image: enhance vessel-like
	// structures, take |dI/dx| + |dI/dy| with equal weight, then smooth.
	cv::Mat outputImage = inputImage.clone();
	const int ddepth = CV_16S;	// signed depth keeps negative gradients

	// Vessel enhancement before differentiation.
	vesselnessMorphology( inputImage,outputImage );

	// Gradients along x and y.
	cv::Mat grad_x, grad_y;
	cv::Sobel( outputImage, grad_x, ddepth, 1, 0, 3, scale, delta, cv::BORDER_DEFAULT );
	cv::Sobel( outputImage, grad_y, ddepth, 0, 1, 3, scale, delta, cv::BORDER_DEFAULT );

	cv::Mat abs_grad_x, abs_grad_y;
	cv::convertScaleAbs( grad_x, abs_grad_x );
	cv::convertScaleAbs( grad_y, abs_grad_y );

	// Equal-weight combination of the absolute gradients, then a final blur.
	cv::addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, outputImage );
	cv::GaussianBlur( outputImage, outputImage, cv::Size(9,9), 0, 0, cv::BORDER_DEFAULT );

	return outputImage;
}

int contrastStretch( int x, int r1, int r2, int s1, int s2 )
{
	// Piecewise-linear contrast stretch of an 8-bit intensity x:
	//   [0,r1]   -> [0,s1]
	//   (r1,r2]  -> (s1,s2]
	//   (r2,255] -> (s2,255]
	// Out-of-range x is clamped to [0,255] first; the original left "result"
	// uninitialized in that case (undefined behaviour).
	if (x < 0)   x = 0;
	if (x > 255) x = 255;

	float result;
	if (x <= r1)
	{
		result = (float) s1/r1 * x;
	}
	else if (x <= r2)
	{
		// Promote to float BEFORE dividing: the original computed
		// (s2-s1)/(r2-r1) in integer arithmetic, truncating the slope.
		result = (float)(s2 - s1) / (r2 - r1) * (x - r1) + s1;
	}
	else
	{
		result = (float)(255 - s2) / (255 - r2) * (x - r2) + s2;
	}

	return (int)result;
}

// ================================================================================= //
/// Color space conversion
//cv::Mat convertImageRGBtoHSV(const cv::Mat imageRGB)
//{
//	float fR, fG, fB;
//	float fH, fS, fV;
//	const float FLOAT_TO_BYTE = 255.0f;
//	const float BYTE_TO_FLOAT = 1.0f / FLOAT_TO_BYTE;
//
//	// Create a blank HSV image
//	cv::Size size = imageRGB.size();
//	cv::Mat imageHSV(size.height,size.width,imageRGB.depth() );
//
//	int h = size.height;		// Image height.
//	int w = size.width;			// Image width.
//	int rowSizeRGB = imageRGB->widthStep;	// Size of row in bytes, including extra padding.
//	char *imRGB = imageRGB->imageData;	// Pointer to the start of the image pixels.
//	int rowSizeHSV = imageHSV->widthStep;	// Size of row in bytes, including extra padding.
//	char *imHSV = imageHSV->imageData;	// Pointer to the start of the image pixels.
//	for (int y=0; y<h; y++) 
//	{
//		for (int x=0; x<w; x++) 
//		{
//			// Get the RGB pixel components. NOTE that OpenCV stores RGB pixels in B,G,R order.
//			uchar *pRGB = (uchar*)(imRGB + y*rowSizeRGB + x*3);
//			int bB = *(uchar*)(pRGB+0);	// Blue component
//			int bG = *(uchar*)(pRGB+1);	// Green component
//			int bR = *(uchar*)(pRGB+2);	// Red component
//
//			// Convert from 8-bit integers to floats.
//			fR = bR * BYTE_TO_FLOAT;
//			fG = bG * BYTE_TO_FLOAT;
//			fB = bB * BYTE_TO_FLOAT;
//
//			// Convert from RGB to HSV, using float ranges 0.0 to 1.0.
//			float fDelta;
//			float fMin, fMax;
//			int iMax;
//			// Get the min and max, but use integer comparisons for slight speedup.
//			if (bB < bG) 
//			{
//				if (bB < bR) 
//				{
//					fMin = fB;
//					if (bR > bG) 
//					{
//						iMax = bR;
//						fMax = fR;
//					}
//					else 
//					{
//						iMax = bG;
//						fMax = fG;
//					}
//				}
//				else 
//				{
//					fMin = fR;
//					fMax = fG;
//					iMax = bG;
//				}
//			}
//			else 
//			{
//				if (bG < bR) 
//				{
//					fMin = fG;
//					if (bB > bR) 
//					{
//						fMax = fB;
//						iMax = bB;
//					}
//					else 
//					{
//						fMax = fR;
//						iMax = bR;
//					}
//				}
//				else 
//				{
//					fMin = fR;
//					fMax = fB;
//					iMax = bB;
//				}
//			}
//			fDelta = fMax - fMin;
//			fV = fMax;				// Value (Brightness).
//			if (iMax != 0) {			// Make sure its not pure black.
//				fS = fDelta / fMax;		// Saturation.
//				float ANGLE_TO_UNIT = 1.0f / (6.0f * fDelta);	// Make the Hues between 0.0 to 1.0 instead of 6.0
//				if (iMax == bR) {		// between yellow and magenta.
//					fH = (fG - fB) * ANGLE_TO_UNIT;
//				}
//				else if (iMax == bG) {		// between cyan and yellow.
//					fH = (2.0f/6.0f) + ( fB - fR ) * ANGLE_TO_UNIT;
//				}
//				else {				// between magenta and cyan.
//					fH = (4.0f/6.0f) + ( fR - fG ) * ANGLE_TO_UNIT;
//				}
//				// Wrap outlier Hues around the circle.
//				if (fH < 0.0f)
//					fH += 1.0f;
//				if (fH >= 1.0f)
//					fH -= 1.0f;
//			}
//			else {
//				// color is pure Black.
//				fS = 0;
//				fH = 0;	// undefined hue
//			}
//
//			// Convert from floats to 8-bit integers.
//			int bH = (int)(0.5f + fH * 255.0f);
//			int bS = (int)(0.5f + fS * 255.0f);
//			int bV = (int)(0.5f + fV * 255.0f);
//
//			// Clip the values to make sure it fits within the 8bits.
//			if (bH > 255)
//				bH = 255;
//			if (bH < 0)
//				bH = 0;
//			if (bS > 255)
//				bS = 255;
//			if (bS < 0)
//				bS = 0;
//			if (bV > 255)
//				bV = 255;
//			if (bV < 0)
//				bV = 0;
//
//			// Set the HSV pixel components.
//			uchar *pHSV = (uchar*)(imHSV + y*rowSizeHSV + x*3);
//			*(pHSV+0) = bH;		// H component
//			*(pHSV+1) = bS;		// S component
//			*(pHSV+2) = bV;		// V component
//		}
//	}
//	return imageHSV;
//}
//
//cv::Mat convertImageHSVtoRGB(const cv::Mat imageHSV)
//{
//	float fH, fS, fV;
//	float fR, fG, fB;
//	const float FLOAT_TO_BYTE = 255.0f;
//	const float BYTE_TO_FLOAT = 1.0f / FLOAT_TO_BYTE;
//
//	// Create a blank RGB image
//	IplImage *imageRGB = cvCreateImage(cvGetSize(imageHSV), 8, 3);
//	if (!imageRGB || imageHSV->depth != 8 || imageHSV->nChannels != 3) {
//		printf("ERROR in convertImageHSVtoRGB()! Bad input image.\n");
//		exit(1);
//	}
//
//	int h = imageHSV->height;			// Pixel height.
//	int w = imageHSV->width;			// Pixel width.
//	int rowSizeHSV = imageHSV->widthStep;		// Size of row in bytes, including extra padding.
//	char *imHSV = imageHSV->imageData;		// Pointer to the start of the image pixels.
//	int rowSizeRGB = imageRGB->widthStep;		// Size of row in bytes, including extra padding.
//	char *imRGB = imageRGB->imageData;		// Pointer to the start of the image pixels.
//	for (int y=0; y<h; y++) {
//		for (int x=0; x<w; x++) {
//			// Get the HSV pixel components
//			uchar *pHSV = (uchar*)(imHSV + y*rowSizeHSV + x*3);
//			int bH = *(uchar*)(pHSV+0);	// H component
//			int bS = *(uchar*)(pHSV+1);	// S component
//			int bV = *(uchar*)(pHSV+2);	// V component
//
//			// Convert from 8-bit integers to floats
//			fH = (float)bH * BYTE_TO_FLOAT;
//			fS = (float)bS * BYTE_TO_FLOAT;
//			fV = (float)bV * BYTE_TO_FLOAT;
//
//			// Convert from HSV to RGB, using float ranges 0.0 to 1.0
//			int iI;
//			float fI, fF, p, q, t;
//
//			if( bS == 0 ) {
//				// achromatic (grey)
//				fR = fG = fB = fV;
//			}
//			else {
//				// If Hue == 1.0, then wrap it around the circle to 0.0
//				if (fH >= 1.0f)
//					fH = 0.0f;
//
//				fH *= 6.0;			// sector 0 to 5
//				fI = floor( fH );		// integer part of h (0,1,2,3,4,5 or 6)
//				iI = (int) fH;			//		"		"		"		"
//				fF = fH - fI;			// factorial part of h (0 to 1)
//
//				p = fV * ( 1.0f - fS );
//				q = fV * ( 1.0f - fS * fF );
//				t = fV * ( 1.0f - fS * ( 1.0f - fF ) );
//
//				switch( iI ) {
//					case 0:
//						fR = fV;
//						fG = t;
//						fB = p;
//						break;
//					case 1:
//						fR = q;
//						fG = fV;
//						fB = p;
//						break;
//					case 2:
//						fR = p;
//						fG = fV;
//						fB = t;
//						break;
//					case 3:
//						fR = p;
//						fG = q;
//						fB = fV;
//						break;
//					case 4:
//						fR = t;
//						fG = p;
//						fB = fV;
//						break;
//					default:		// case 5 (or 6):
//						fR = fV;
//						fG = p;
//						fB = q;
//						break;
//				}
//			}
//
//			// Convert from floats to 8-bit integers
//			int bR = (int)(fR * FLOAT_TO_BYTE);
//			int bG = (int)(fG * FLOAT_TO_BYTE);
//			int bB = (int)(fB * FLOAT_TO_BYTE);
//
//			// Clip the values to make sure it fits within the 8bits.
//			if (bR > 255)
//				bR = 255;
//			if (bR < 0)
//				bR = 0;
//			if (bG > 255)
//				bG = 255;
//			if (bG < 0)
//				bG = 0;
//			if (bB > 255)
//				bB = 255;
//			if (bB < 0)
//				bB = 0;
//
//			// Set the RGB pixel components. NOTE that OpenCV stores RGB pixels in B,G,R order.
//			uchar *pRGB = (uchar*)(imRGB + y*rowSizeRGB + x*3);
//			*(pRGB+0) = bB;		// B component
//			*(pRGB+1) = bG;		// G component
//			*(pRGB+2) = bR;		// R component
//		}
//	}
//	return imageRGB;
//}

cv::Mat LoGFilter( cv::Mat &inputImage )
{
	// Laplacian-of-Gaussian edge enhancement:
	// Gaussian(5x5) -> Laplacian(3x3, scale 3) -> Gaussian(3x3) smoothing.
	cv::Mat smoothed = inputImage.clone();
	cv::GaussianBlur( inputImage,smoothed,cv::Size( 5,5 ),0,0,cv::BORDER_DEFAULT );

	cv::Mat result = inputImage.clone();
	cv::Laplacian( smoothed,result,inputImage.depth(),3,3,0,cv::BORDER_DEFAULT );
	cv::GaussianBlur( result,result,cv::Size( 3,3 ),0,0,cv::BORDER_DEFAULT );

	return result;
}

// Tile up to 12 cv::Mat* images (passed as varargs) into one display image.
// Layout and per-tile size depend on the argument count; each tile keeps its
// aspect ratio. Returns a heap-allocated cv::Mat the CALLER must delete, or
// NULL on bad arguments.
// NOTE(review): each iteration leaks the cvCloneImage() result ("img" is
// never released), and DispImage is only released on the error path.
cv::Mat* mergeImages(int nArgs, ...) {

	// img - Used for getting the arguments 
	IplImage *img;

	// DispImage - the image in which input images are to be copied
	IplImage *DispImage;

	int size;
	int i;
	int m, n;
	int x, y;

	// w - Maximum number of images in a row 
	// h - Maximum number of images in a column 
	int w, h;

	// scale - How much we have to resize the image
	float scale;
	int max;

	// If the number of arguments is lesser than 0 or greater than 12
	// return without displaying 
	if(nArgs <= 0) {
		printf("Number of arguments too small....\n");
		return NULL;
	}
	else if(nArgs > 12) {
		printf("Number of arguments too large....\n");
		return NULL;
	}
	// Determine the size of the image, 
	// and the number of rows/cols 
	// from number of arguments 
	else if (nArgs == 1) {
		w = h = 1;
		size = 300;
	}
	else if (nArgs == 2) {
		w = 1; h = 2;
		size = 300;
	}
	else if (nArgs == 3 || nArgs == 4) {
		w = 2; h = 2;
		size = 300;
	}
	else if (nArgs == 5 || nArgs == 6) {
		w = 3; h = 2;
		size = 200;
	}
	else if (nArgs == 7 || nArgs == 8) {
		w = 4; h = 2;
		size = 200;
	}
	else {
		w = 4; h = 3;
		size = 150;
	}

	// Create a new 3 channel image
	DispImage = cvCreateImage( cvSize(100 + size*w, 60 + size*h), 8, 3 );

	// Used to get the arguments passed
	va_list args;
	va_start(args, nArgs);

	// Loop for nArgs number of arguments
	for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {

		// Get the Pointer to the IplImage
		cv::Mat *img_temp = va_arg(args, cv::Mat*);

		// NOTE(review): this clone is never released -- one IplImage leaks
		// per argument on every call.
		img=cvCloneImage(&(IplImage)(*img_temp));

		// Check whether it is NULL or not
		// If it is NULL, release the image, and return
		if(img == 0) {
			printf("Invalid arguments");
			cvReleaseImage(&DispImage);
			return NULL;
		}

		// Find the width and height of the image
		x = img->width;
		y = img->height;

		// Find whether height or width is greater in order to resize the image
		max = (x > y)? x: y;

		// Find the scaling factor to resize the image
		scale = (float) ( (float) max / size );

		// Used to Align the images: start a new row every w images.
		if( i % w == 0 && m!= 20) {
			m = 20;
			n+= 20 + size;
		}

		// Set the image ROI to display the current image
		cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));

		// Resize the input image and copy the it to the Single Big Image
		cvResize(img, DispImage);

		// Reset the ROI in order to display the next image
		cvResetImageROI(DispImage);
	}

	// Wrap the composed IplImage in a heap cv::Mat; ownership passes to the
	// caller. (Releasing DispImage here would invalidate the returned data.)
	cv::Mat * ret = new cv::Mat(cv::cvarrToMat(DispImage));
	//cvReleaseImage(&DispImage);

	return ret;
}

//====================================================================================================================//
void histogramEqualization( cv::Mat &input, cv::Mat &output )
{
	// Equalize the brightness (V) channel of "input" in HSV space and write
	// the re-converted RGB result into "output".
	cv::Mat frameHSV;
	cv::cvtColor( input, frameHSV,CV_RGB2HSV_FULL );

	std::vector< cv::Mat > channels;
	cv::split( frameHSV, channels );

	cv::Mat frameV_corr;
	cv::equalizeHist( channels[2], frameV_corr);
	channels[2] = frameV_corr;

	cv::merge(channels, output);
	cv::cvtColor( output, output, CV_HSV2RGB_FULL);
}

cv::Mat SURFkeypoints( cv::Mat &inputImage )
{
	// Detect SURF keypoints on a grayscale copy of the input, draw them,
	// show the result in the "newKP" window and save it to "newKP.png".
	const int minHessian = 100;

	cv::Mat outputImage;
	cv::cvtColor( inputImage,outputImage,CV_RGB2GRAY );

	std::vector< cv::KeyPoint > keypoints;
	cv::SurfFeatureDetector detector( minHessian );
	detector.detect( outputImage,keypoints );

	cv::drawKeypoints( outputImage,keypoints,outputImage,cv::Scalar::all(255),cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	cv::namedWindow( "newKP",cv::WINDOW_NORMAL );
	cv::imshow( "newKP",outputImage );
	cv::imwrite( "newKP.png",outputImage );
	return outputImage;
}


KeypointList keypoint2point( std::vector< cv::KeyPoint > keypoint )
{
	std::vector <cv::KeyPoint >::iterator it;
	KeypointList keypointsLIST;
	
	for( it= keypoint.begin(); it!= keypoint.end();it++)
	{
		keypointsLIST.push_back(it->pt);
	}

	return keypointsLIST;
}

//====================================================================================================================//
void vesselnessMorphology( const cv::Mat& img, cv::Mat& vessIm, int morph_size /*= 9*/, 
	int kernel_size /*= 1*/ )
{
	// Enhance dark curvilinear (vessel-like) structures: black-hat with a
	// cross-shaped element, min-max normalization to 8-bit, then a
	// threshold-to-zero to suppress weak responses.
	// NOTE(review): "kernel_size" is currently unused (the Gaussian blurs that
	// used it are commented out); kept for interface compatibility.
	(void)kernel_size;

	vessIm = img.clone();

	cv::Mat element = cv::getStructuringElement( cv::MORPH_CROSS, cv::Size( 2*morph_size + 1, 2*morph_size+1 ), cv::Point( morph_size, morph_size ) );

	// Black-hat = closing(img) - img: highlights dark structures thinner
	// than the structuring element.
	cv::morphologyEx( vessIm, vessIm, cv::MORPH_BLACKHAT, element );

	cv::normalize(vessIm, vessIm, 0, 255, cv::NORM_MINMAX, CV_8UC1);

	// Zero out responses below 25 to remove background noise.
	cv::threshold(vessIm,vessIm,25,255,CV_THRESH_TOZERO);
}

// take number image type number (from cv::Mat.type()), get OpenCV's enum string.
std::string getImgType(int imgTypeInt)
{
	// 7 base depths, each in plain and C1..C4 channel variants.
	static const int enum_ints[] = {CV_8U,  CV_8UC1,  CV_8UC2,  CV_8UC3,  CV_8UC4,
		CV_8S,  CV_8SC1,  CV_8SC2,  CV_8SC3,  CV_8SC4,
		CV_16U, CV_16UC1, CV_16UC2, CV_16UC3, CV_16UC4,
		CV_16S, CV_16SC1, CV_16SC2, CV_16SC3, CV_16SC4,
		CV_32S, CV_32SC1, CV_32SC2, CV_32SC3, CV_32SC4,
		CV_32F, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4,
		CV_64F, CV_64FC1, CV_64FC2, CV_64FC3, CV_64FC4};

	static const std::string enum_strings[] = {"CV_8U",  "CV_8UC1",  "CV_8UC2",  "CV_8UC3",  "CV_8UC4",
		"CV_8S",  "CV_8SC1",  "CV_8SC2",  "CV_8SC3",  "CV_8SC4",
		"CV_16U", "CV_16UC1", "CV_16UC2", "CV_16UC3", "CV_16UC4",
		"CV_16S", "CV_16SC1", "CV_16SC2", "CV_16SC3", "CV_16SC4",
		"CV_32S", "CV_32SC1", "CV_32SC2", "CV_32SC3", "CV_32SC4",
		"CV_32F", "CV_32FC1", "CV_32FC2", "CV_32FC3", "CV_32FC4",
		"CV_64F", "CV_64FC1", "CV_64FC2", "CV_64FC3", "CV_64FC4"};

	// Derive the count from the table itself so the arrays can never get out
	// of sync with a hand-maintained constant (the original hard-coded 35).
	const int numImgTypes = (int)( sizeof(enum_ints) / sizeof(enum_ints[0]) );

	for(int i=0; i<numImgTypes; i++)
	{
		if(imgTypeInt == enum_ints[i]) return enum_strings[i];
	}
	return "unknown image type";
}

//cv::Mat descriptorORB( const cv::Mat &inputImage, std::vector< cv::KeyPoint > &keypoint, const cv::Mat &mask /*= cv::Mat()*/ )
//{
//	cv::Mat tempImage,descriptor;
//	std::vector< cv::KeyPoint > kp = keypoint;
//
//	if ( inputImage.channels() == 3 ) // RGB
//	{
//		std::vector< cv::Mat > chann;
//		cv::split( inputImage, chann );	
//		cv::Mat frameB = chann[0];
//		cv::Mat frameG = chann[1];
//		cv::Mat frameR = chann[2];
//		tempImage = frameG.clone();
//	}
//	else // single frame
//		tempImage = inputImage.clone();
//
//	cv::ORB orb( 500,1.2f,8,25,0,2,0,25 );
//	orb.compute( tempImage,kp,descriptor );
//	//orb( tempImage, mask, keypoint, descriptor,true );
//
//	cv::namedWindow( "descriptorORB",CV_WINDOW_NORMAL );
//	cv::imshow( "descriptorORB",descriptor );
//
//	return descriptor;
//
//	// Note : BRIEF descriptors are not tested since ORB is basically BRIEF with orientation information
//}


cv::Mat descriptorFREAK( const cv::Mat &inputImage, std::vector< cv::KeyPoint > &keypoint, const cv::Mat &mask /*= cv::Mat() */ )
{
	// Compute FREAK descriptors for "keypoint" (compute() may prune the list)
	// and display the descriptor matrix in the "FREAK" window.
	// NOTE(review): "mask" is currently unused.
	cv::Mat imageGray,descriptor;

	// Descriptors need a grayscale image. The original tested
	// inputImage.depth() > 1, but depth() reports the element type
	// (CV_8U == 0, ...), not the channel count -- use channels() instead.
	if( inputImage.channels() == 3 )
		cv::cvtColor( inputImage,imageGray,CV_RGB2GRAY );
	else
		imageGray = inputImage.clone();

	// Orientation- and scale-normalized FREAK, pattern scale 20, 4 octaves.
	cv::FREAK freak( true,true,20.0f,4 );
	freak.compute( imageGray,keypoint,descriptor );

	cv::namedWindow( "FREAK",cv::WINDOW_NORMAL );
	cv::imshow( "FREAK",descriptor );
	
	return descriptor;	
}

cv::Mat descriptorBRISK( const cv::Mat &inputImage, std::vector<cv::KeyPoint> keypoint, const cv::Mat &mask )
{
	// Compute BRISK descriptors for the supplied keypoints. For a 3-channel
	// input only the green channel (index 1) is used.
	// NOTE(review): "mask" is currently unused.
	cv::Mat tempImage;
	if ( inputImage.channels() == 3 )
	{
		std::vector< cv::Mat > chann;
		cv::split( inputImage, chann );
		tempImage = chann[1].clone();	// green channel; other planes were never used
	}
	else // already single channel
		tempImage = inputImage.clone();

	// BRISK parameters.
	int Threshl=60;					// detection threshold
	int Octaves=2;					// pyramid layers
	float PatternScales = 1.0f;

	cv::BRISK BRISKD( Threshl,Octaves,PatternScales );
	BRISKD.create("Feature2D.BRISK");	// NOTE(review): return value discarded; likely unnecessary

	cv::Mat descriptor;
	BRISKD.compute( tempImage, keypoint,descriptor );

	return descriptor;
}

//====================================================================================================================//
double compareHistArray( const std::vector<MatUniqPtr>& array1, const std::vector<MatUniqPtr>& array2 )
{
	double corr=0;
	int comparingmethod=0; // CORRELATION
	int n = array1.size();

	if (array2.size() == n)
	{
		for (int i = 0; i<n; i++)
			corr+=abs(cv::compareHist(*array1[i].get(),*array2[i].get(),comparingmethod));

		corr/=n;
	}

	return corr;
}


// Builds a binary foreground mask from the red channel: normalize it to the
// full 8-bit range, then keep everything brighter than 20.
// NOTE(review): inputImage_H (hue) is currently unused -- the HSV-based
// refinement below (hue threshold + morphological open/close on a downscaled
// mask) is disabled; confirm whether it should be re-enabled.
void getMask( cv::Mat &inputImage_R,cv::Mat &inputImage_H, cv::Mat &mask )
{
	//cv::Mat* mask = new cv::Mat();
	
	cv::Mat frameR;
	// Stretch the red channel to [0,255] so the fixed threshold below is
	// meaningful regardless of the input's dynamic range.
	cv::normalize(inputImage_R, frameR, 0, 255, cv::NORM_MINMAX, CV_8UC1);
	threshold( frameR, mask, 20, 255, CV_THRESH_BINARY );
	

	// --- Disabled experimental refinement (kept for reference) -------------
	//cv::Mat frameH;
	//cv::normalize(inputImage_H, frameH, 0, 255, cv::NORM_MINMAX, CV_8UC1);
	////cv::medianBlur( frameH, frameH, 15);
	//
	//cv::Mat hsvMask;
	//threshold( frameH, hsvMask, 30, 255, CV_THRESH_BINARY_INV);
	//cv::multiply(hsvMask, mask, mask);
	//
	//cv::Mat tempMask;
	//cv::Size s( mask.size().width /4, mask.size().height /4 );
	//cv::resize( mask,tempMask,s, 0,0, CV_INTER_AREA );
	//
	//// Refine segmentation.
	///*cv::Mat morphStr(11,11,CV_8UC1,cv::Scalar(1));
	//cv::morphologyEx(*mask,*mask,cv::MORPH_OPEN,morphStr);
	//cv::morphologyEx(*mask,*mask,cv::MORPH_CLOSE,morphStr);*/
	//
	//cv::Mat morphStr(11,11,CV_8UC1,cv::Scalar(1));
	//cv::morphologyEx(tempMask,tempMask,cv::MORPH_OPEN,morphStr);
	//cv::morphologyEx(tempMask,tempMask,cv::MORPH_CLOSE,morphStr);
	//
	//cv::Size s2( mask.size().width, mask.size().height );
	//cv::resize( tempMask,tempMask,s2,0,0,CV_INTER_AREA );
	//
	//cv::bitwise_and( tempMask,mask,mask );
}

// Additions to RetinaTracker ========================== >>>>>>>>>>>>>>>


bool InitializeVector()
{
	// Fills the file-static "offset" table with 121 (row,col) displacement
	// pairs: 120 points sampled every 3 degrees on a circle of radius r,
	// plus (0,0) -- the centre -- as the final entry.  Returns true so it
	// can be used as a one-shot static initializer.
	//
	// BUGFIX: the original used 3.145 as pi, which skews the sampled circle;
	// replaced with a proper constant.
	const double pi = 3.14159265358979323846;
	for( int i = 0; i < 120; i++ )
	{
		const double angle = (i * 3) * pi / 180.0;	// degrees -> radians
		// NOTE(review): values are still truncated to int (as before);
		// rounding would give a slightly more accurate circle -- confirm
		// before changing, since keypoint positions depend on it.
		offset[i][0] = r * cos( angle );
		offset[i][1] = r * sin( angle );
	}
	offset[120][0] = 0;
	offset[120][1] = 0;
	return true;
}


std::vector< int > doubleDerivative( CircROI roiSignal )
{
	// Discrete second derivative of the circular ROI signal:
	// for each interior sample, deriv = next + previous - 2*current.
	// (Per the original note, roiSignal's last member is expected to hold
	// the centre pixel value.)
	CircROI deriv;

	// Guard: with fewer than 3 samples there is no interior point, and the
	// original begin()+1 / end()-1 iterator arithmetic was undefined
	// behaviour on an empty or single-element signal.
	if( roiSignal.size() >= 3 )
	{
		for( CircROI::iterator it = roiSignal.begin()+1; it < roiSignal.end()-1; ++it )
			deriv.push_back( *(it+1) + *(it-1) - 2*(*it) );
	}

	// Debug dump of the derivative signal (side effect kept from the
	// original code -- callers may rely on derv.txt).
	std::ofstream output_deriv( "./derv.txt" );
	std::ostream_iterator< int > output_iterator( output_deriv, "\n" );
	std::copy( deriv.begin(), deriv.end(), output_iterator );

	return deriv;
}

std::vector< cv::KeyPoint > getCircularROI( cv::Mat inputImage, cv::Mat &mask )
{
	// Scans the normalized image and, for every mid-intensity pixel, samples
	// a circle of radius r around it (via the precomputed "offset" table),
	// takes the discrete second derivative of that circular signal, and
	// keeps the pixel as a keypoint when the derivative has a sharp sign
	// change (magnitude jump > 3).
	// NOTE(review): "mask" is accepted but not applied (the bitwise_and was
	// already disabled) -- confirm whether masking should be re-enabled.
	std::vector< cv::KeyPoint > keypoint;

	cv::normalize( inputImage, inputImage, 0, 255, cv::NORM_MINMAX, CV_8UC1 );
	cv::imwrite( "test.png", inputImage );	// debug dump (kept from original)

	for( int i = r; i < inputImage.rows - r; i++ )
	{
		for( int j = r; j < inputImage.cols - r; j++ )
		{
			// Only consider mid-range intensities.
			if( (inputImage.at<uchar>( i,j ) < 160) && (inputImage.at<uchar>( i,j ) > 50) )
			{
				// Second derivative of the circular intensity signal
				// around (i,j).  offset[m][0]/offset[m][1] are the row/col
				// displacements; m == 120 is the centre entry (0,0).
				std::vector< int > deriv;
				for( int m = 1; m < 120; m++ )
				{
					deriv.push_back( inputImage.at<uchar>( i + offset[m-1][0], j + offset[m-1][1] ) +
						inputImage.at<uchar>( i + offset[m+1][0], j + offset[m+1][1] ) -
						2*inputImage.at<uchar>( i + offset[m][0], j + offset[m][1] ) );
				}

				// Look for a sharp zero crossing in the derivative signal.
				for( std::vector<int>::iterator it = deriv.begin()+1; it < deriv.end()-1; ++it )
				{
					// BUGFIX: the original second condition read "(*it+1)>0",
					// which is (*it)+1 > 0, not the next sample *(it+1) > 0,
					// so negative-to-positive crossings were misdetected.
					if( ( *it > 0 && *(it+1) < 0 ) || ( *it < 0 && *(it+1) > 0 ) )
					{
						if( abs( *it - *(it+1) ) > 3 )
						{
							// NOTE(review): cv::Point takes (x,y) = (col,row);
							// Point(i,j) here is (row,col) -- confirm the
							// intended coordinate order against consumers.
							keypoint.push_back( cv::KeyPoint( cv::Point(i,j), 1 ) );
							break;	// replaces the original "goto NEXT"
						}
					}
				}
			}
		}
	}
	return keypoint;
}

void DoGFilter( cv::Mat inputImage, cv::Mat outputImage )
{
	cv::Mat tempImage, temp;
	if ( inputImage.channels() == 3 ) // RGB
		cv::cvtColor( inputImage,tempImage,CV_BGR2GRAY );
	else // single frame
		tempImage = inputImage.clone();

	cv::GaussianBlur( tempImage,temp,cv::Size( 3,3 ),0,0,cv::BORDER_DEFAULT );
	cv::GaussianBlur( tempImage,tempImage,cv::Size( 5,5 ),0,0,cv::BORDER_DEFAULT );

	outputImage = temp-tempImage;

	cv::namedWindow( "DoG-Test",CV_WINDOW_NORMAL );
	cv::imshow( "DoG-Test",outputImage );
}


void convolveDFT( cv::Mat A, cv::Mat B, cv::Mat C)
{
    // reallocate the output array if needed
    C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());
    cv::Size dftSize;
    // calculate the size of DFT transform
    dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
    dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);

    // allocate temporary buffers and initialize them with 0's
    cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
    cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));

    // copy A and B to the top-left corners of tempA and tempB, respectively
    cv::Mat roiA(tempA, cv::Rect(0,0,A.cols,A.rows));
    A.copyTo(roiA);
    cv::Mat roiB(tempB, cv::Rect(0,0,B.cols,B.rows));
    B.copyTo(roiB);

    // now transform the padded A & B in-place;
    // use "nonzeroRows" hint for faster processing
    cv::dft(tempA, tempA, 0, A.rows);
    cv::dft(tempB, tempB, 0, B.rows);

    // multiply the spectrums;
    // the function handles packed spectrum representations well
	cv::mulSpectrums( tempA, tempB, tempA,cv::DFT_ROWS );

    // transform the product back from the frequency domain.
    // Even though all the result rows will be non-zero,
    // you need only the first C.rows of them, and thus you
    // pass nonzeroRows == C.rows
    dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);

    // now copy the result back to C.
    tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);

    // all the temporary buffers will be deallocated automatically
}