
#include <deque>
#include <string>
#include <sstream>

#include <cv.h>
#include <highgui.h>

#include <tiff.h>
#include <tiffio.h>

#include <iostream>
#include "utils/Features.h"
#include "utils/Homography.h"
#include "utils/Settings.h"
#include "utils/Output.h"
#include "utils/ImageUtils.h"
#include "utils/Pixel.h"

#include "input/HomographyFileParser.h"
#include "input/PTOParser.h"



/* global state shared between the pipeline stages in main() */
std::deque<Homography>	homographies;	// one entry per successfully estimated image pair
std::deque<std::string> imageNames;	// file names parsed from homographies.txt
std::deque<IplImage*>	images;		// loaded grayscale images, indexed by image ID (see main)
std::deque<std::deque<Features::FeaturePair> > featurePairs;	// per-homography grid matches produced in main




/**
 * Loads a TIFF image from disk into a freshly allocated single-channel
 * 8-bit IplImage.
 *
 * TIFFReadRGBAImage delivers the picture bottom-up as packed ABGR words;
 * the low byte of each word (the red channel) is kept as the gray value
 * and the rows are flipped vertically while copying.
 *
 * @param filename  path of the TIFF file to open
 * @param image     out: receives the new image on success (caller owns it);
 *                  left untouched on failure
 * @return errorCodes::SUCCESS on success, errorCodes::FAILURE if the file
 *         cannot be opened, the raster cannot be allocated, or decoding fails
 */
errorCodes::errorCode loadImage(std::string filename, IplImage* &image)
{
	Output::message("Loading image %s", Output::VERBOSE, filename.c_str());

	uint32 width = 0, height = 0;
	errorCodes::errorCode returnCode = errorCodes::FAILURE;

	//open image with LibTiff
	TIFF* tif = TIFFOpen(filename.c_str(), "r");

	if (tif)
	{
		TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width);
		TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height);

		size_t npixels = (size_t)width * (size_t)height;
		uint32* raster = (uint32*) _TIFFmalloc(npixels * sizeof(uint32));

		//OpenCV can't handle TIFF* => copy from raster to IplImage *image
		if (raster != NULL)
		{
			if (TIFFReadRGBAImage(tif, width, height, raster, 0))
			{
				image = cvCreateImage( cvSize(width,height), IPL_DEPTH_8U, 1 );

				//copy row by row, flipping vertically (raster is bottom-up)
				for (uint32 i = 0; i < height; i++)
				{
					uchar* row = (uchar*)(image->imageData + image->widthStep * (height - i - 1));
					for (uint32 j = 0; j < width; j++)
					{
						row[j * image->nChannels] = (uchar)raster[i*width + j];
					}
				}

				returnCode = errorCodes::SUCCESS;
			} else {
				Output::message("ERROR: TIFFReadRGBAImage failed", Output::CRITICAL);
			}

			_TIFFfree(raster);
		} else {

			Output::message("ERROR: Raster = NULL", Output::CRITICAL);
		}

		TIFFClose(tif);

	} else {
		Output::message("ERROR: could not open file", Output::CRITICAL);
	}

	return returnCode;
}



/* Computes, for each pyramid level i in [0, Settings::pyramidalDepth), the
 * mean and variance of the pixels sampled on a (2*neighborhood)^2 grid around
 * the feature position with stride 2^i, and appends the results to
 * feature->mean and feature->variance (one entry per level).
 * Samples falling outside the image are skipped.
 *
 * NOTE(review): valueSum, varianceSum and totalValues are NOT reset between
 * pyramid levels, so the mean/variance pushed for level i also include the
 * accumulated samples of all previous levels — confirm whether this
 * accumulation is intentional or the accumulators should be zeroed per level. */
void calcMeanAndVariance(IplImage* image, Features::Feature* feature, int neighborhood){
	
	
	int valueSum;
	double currentVariance;
	double varianceSum;
	int totalValues;
	double mean;
	
	//totalValues = this->neighborhood * this->neighborhood;
	totalValues = 0;
	
	valueSum = 0;
	varianceSum = 0.0;
	
	
	/* get the mean value */
	for (int i = 0; i<Settings::pyramidalDepth; ++i) {
		int step=pow(2.f,i);	// sampling stride doubles per pyramid level
		for (int x = -neighborhood*step; x < neighborhood*step; x+=step) {
			for (int y = -neighborhood*step; y < neighborhood*step; y+=step) {
				// only count samples that lie inside the image
				if((int)(feature->x) - x >= 0 && (int)(feature->y) - y >= 0 && (int)(feature->x) - x < image->width && (int)(feature->y) - y < image ->height){
					valueSum += ImageUtils::getPixelValue(image, cvPoint( ((int)(feature->x) - x), ((int)(feature->y)) - y));
					totalValues++;
				}
			}
		}
		mean = ((double)valueSum)/totalValues;
		feature->mean.push_back(mean);

	
		/* get the variance (sum of squared deviations from the mean, over the
		 * same sample grid) */
		for (int x = -neighborhood*step; x < neighborhood*step; x+=step) {
			for (int y = -neighborhood*step; y < neighborhood*step; y+=step) {
				if((int)(feature->x) - x >= 0 && (int)(feature->y) - y >= 0 && (int)(feature->x) - x < image->width && (int)(feature->y) - y < image ->height){
					currentVariance = mean - ImageUtils::getPixelValue(image, cvPoint( ((int)(feature->x) - x), ((int)(feature->y)) - y));
					currentVariance = pow(currentVariance, 2);
					varianceSum += currentVariance;
				}
			}
		}	
		feature->variance.push_back(varianceSum/totalValues);
	}
	
	//Debug::message("Mean: %f, Variance: %f", Debug::DEBUG, mean, feature->variance);
}




/* Computes the variance-normalized correlation (VNC) between the
 * neighborhoods of feature1 (in image1) and feature2 (in image2), averaged
 * over the pyramid levels k in [1, Settings::pyramidalDepth].
 * Per level it sums (pixel - mean) products over the sample grid and
 * normalizes by the square roots of the per-feature squared deviations.
 *
 * NOTE(review): the sampling stride here is 2^k while calcMeanAndVariance
 * stores mean[k-1] computed with stride 2^(k-1) — the two sample grids do not
 * match; confirm which stride is intended.
 * NOTE(review): if quadDiff1 or quadDiff2 stays 0 (e.g. no overlapping valid
 * samples, or a flat patch), normalizer divides by zero and the result becomes
 * inf/NaN — callers compare the result with '>', so this may silently discard
 * candidates. */
double getVarianceNormalizedCorrelation(IplImage* image1, IplImage* image2, Features::Feature* feature1, Features::Feature* feature2, int neighborhood){
	
	double normalizer=0;
	double vnc=0;
	double meanDiffSum;
	double meanDiff1, meanDiff2;
	double quadDiff1=0, quadDiff2=0;
	int currentPixelValue1;
	int currentPixelValue2;
	
	
	meanDiffSum = 0.0;
	
	for (int k=1; k<=Settings::pyramidalDepth; ++k) {
		int step=pow(2.f,k);
		for (int i = -neighborhood*step; i < neighborhood*step; i+=step) {
			for (int j = -neighborhood*step; j < neighborhood*step; j+=step) {

				
				// sample must lie inside image1
				if((int)(feature1->x) - i >= 0 && (int)(feature1->y) - j >= 0 && (int)(feature1->x) - i < image1->width && (int)(feature1->y) - j < image1 ->height){
				
					currentPixelValue1 = ImageUtils::getPixelValue(image1, cvPoint( ((int)(feature1->x) - i), ((int)(feature1->y)) - j));
					meanDiff1 = currentPixelValue1 - feature1->mean[k-1];
					quadDiff1 += pow(meanDiff1,2);
					
					// the cross term only accumulates where image2's sample is valid too
					if((int)(feature2->x) - i >= 0 && (int)(feature2->y) - j >= 0 && (int)(feature2->x) - i < image2->width && (int)(feature2->y) - j < image2 ->height){
						
						currentPixelValue2 = ImageUtils::getPixelValue(image2, cvPoint( ((int)(feature2->x) - i), ((int)(feature2->y)) - j));
						meanDiff2 = currentPixelValue2 - feature2->mean[k-1];
						meanDiffSum += meanDiff1 * meanDiff2;
						quadDiff2 += pow(meanDiff2,2);
						
					}
					
					
				}
			}
		}
		
		/* The sizes of the valid neighborhoods may differ between the two
		 * features; the per-feature means/variances already account for that.
		 * Normalize this level's correlation and reset the accumulators for
		 * the next level. */
		normalizer = 1.0 / (sqrt(quadDiff1) * sqrt(quadDiff2));
		vnc+=normalizer * meanDiffSum;
		quadDiff1=0;
		quadDiff2=0;
		meanDiffSum=0;
	}
	return vnc/Settings::pyramidalDepth;
}










/**
 * Estimates the 3x3 homography mapping left-image points onto right-image
 * points from a set of feature correspondences.
 *
 * @param seqMatches  feature pairs (left/right pixel coordinates)
 * @return newly allocated 3x3 CV_32F matrix (caller owns it and must release
 *         it with cvReleaseMat), or NULL if fewer pairs than
 *         Settings::ransacMinNumOfSupporters are given
 */
CvMat* findHomography(std::deque<Features::FeaturePair> seqMatches){

	if (seqMatches.size() < Settings::ransacMinNumOfSupporters) {
		Output::message("Not enough pairs for finding homography (given: %d, needed: %d)", Output::IMPORTANT, seqMatches.size(), Settings::ransacMinNumOfSupporters);
		return NULL;
	}

	/* one column per correspondence; row 0 holds x, row 1 holds y */
	CvMat* leftInliers  = cvCreateMat(2, seqMatches.size(), CV_32F);
	CvMat* rightInliers = cvCreateMat(2, seqMatches.size(), CV_32F);
	CvMat* homography   = cvCreateMat(3, 3, CV_32F);

	/* fill the point matrices */
	for (unsigned int i = 0; i < seqMatches.size(); i++) {
		const Features::FeaturePair& currentMatch = seqMatches[i];

		CV_MAT_ELEM(*leftInliers, float, 0, i) = (float)currentMatch.left.x;
		CV_MAT_ELEM(*leftInliers, float, 1, i) = (float)currentMatch.left.y;

		CV_MAT_ELEM(*rightInliers, float, 0, i) = (float)currentMatch.right.x;
		CV_MAT_ELEM(*rightInliers, float, 1, i) = (float)currentMatch.right.y;
	}

	cvFindHomography(leftInliers, rightInliers, homography);

	cvReleaseMat(&leftInliers);
	cvReleaseMat(&rightInliers);

	return homography;
}




std::deque<Homography> recalcHomographies(){
	
	PTOParser* ptoParser = new PTOParser();
	
	std::deque<Features::FeaturePair> parsedPairs;
	std::deque< std::deque<Features::FeaturePair> > dissectedPairs;
	std::deque<Homography> homographies;
	
	
	ptoParser->parse("input_filtered.pto");
	
	parsedPairs = ptoParser->getFeaturePairs();
	
	std::deque<Features::IDPair> idCombinations;
	
	
	bool idCombinationFound;
	int combinationIndex;
	
	
	for (unsigned int i = 0; i < parsedPairs.size(); ++i) {
		idCombinationFound  = false;
		
		for (unsigned int j = 0; j < idCombinations.size(); ++j) {
			if (parsedPairs[i].rightImageID == idCombinations[j].rightImageID && 
				parsedPairs[i].leftImageID  == idCombinations[j].leftImageID){
				idCombinationFound = true;
				combinationIndex = j;
				break;
			}
		}
		
		if (!idCombinationFound){
			/* this ID pair is not present in the 'database', so add it. */
			Features::IDPair newPair;
			newPair.rightImageID = parsedPairs[i].rightImageID;
			newPair.leftImageID  = parsedPairs[i].leftImageID;
			idCombinations.push_back(newPair);
			
			std::deque<Features::FeaturePair> newPairSet;
			dissectedPairs.push_back(newPairSet);
			
			combinationIndex = idCombinations.size() - 1;
		}
		
		dissectedPairs[combinationIndex].push_back(parsedPairs[i]);
			
	}
	
	
	for (unsigned int i = 0; i < dissectedPairs.size(); ++i) {
		
		Homography currentHomography;
		
		/* homography will always be 3x3 */
		/* the result of findHomography may be NULL. This has to be filtered in a later step.
		 * We can't do that here, because we need the same indices as the dissectedPairs. */
		currentHomography.data = findHomography(dissectedPairs[i]);
		if(currentHomography.data != NULL){
			Output::message("Found Homography");
			currentHomography.leftImageID = dissectedPairs[i][0].leftImageID;
			currentHomography.rightImageID = dissectedPairs[i][0].rightImageID;
			
			Output::message("%f %f %f", Output::TESTING, cvmGet(currentHomography.data, 0, 0), cvmGet(currentHomography.data, 0, 1), cvmGet(currentHomography.data, 0, 2));
			Output::message("%f %f %f", Output::TESTING, cvmGet(currentHomography.data, 1, 0), cvmGet(currentHomography.data, 1, 1), cvmGet(currentHomography.data, 1, 2));
			Output::message("%f %f %f", Output::TESTING, cvmGet(currentHomography.data, 2, 0), cvmGet(currentHomography.data, 2, 1), cvmGet(currentHomography.data, 2, 2));
			
			
			homographies.push_back(currentHomography);
		}
	}
	
	
	
	
	/* ================= TEST =====================*/
	
	CvMat* leftPosition;
	CvMat* rightPosition;
	leftPosition  = cvCreateMat(3, 1, CV_32F);
	rightPosition = cvCreateMat(3, 1, CV_32F);
	
	
	for (unsigned int i = 0; i < dissectedPairs.size(); ++i) {
		for (unsigned int j = 0; j < dissectedPairs[i].size(); ++j) {

			Features::FeaturePair currentPair;
			
			cvmSet(leftPosition, 0, 0, dissectedPairs[i][j].left.x);
			cvmSet(leftPosition, 1, 0, dissectedPairs[i][j].left.y);
			cvmSet(leftPosition, 2, 0, 1.0);
			
				
			
			cvMatMul(homographies[i].data, leftPosition, rightPosition);
			
			//currentPair.left.x = (int)(cvmGet(leftPosition, 0, 0));				
			//currentPair.left.y = (int)(cvmGet(leftPosition, 1, 0));	
			
			
			currentPair.right.x = (int)((cvmGet(rightPosition, 0, 0))  / (cvmGet(rightPosition, 2, 0)));				
			currentPair.right.y = (int)((cvmGet(rightPosition, 1, 0))  / (cvmGet(rightPosition, 2, 0)));	
					
			//cvSetZero(&leftPosition);
			//cvSetZero(&rightPosition);
			
			
		}
		Output::separator(Output::TESTING);
	}
	Output::separator(Output::TESTING);
	Output::separator(Output::TESTING);
	
	
	
	/* ======================= END TEST ===================*/
	
	return homographies;
	
}









/*
 * Program entry point.
 *
 * Pipeline:
 *  1. parse homographies.txt for the image file names,
 *  2. recompute the pairwise homographies from input_filtered.pto,
 *  3. load all referenced TIFF images,
 *  4. for every homography, lay a grid over the overlap region of the left
 *     image, pick the strongest SURF point per grid cell, project it into the
 *     right image with the homography and refine the match via
 *     variance-normalized correlation,
 *  5. render the grids and matches into side-by-side BMP images.
 *
 * NOTE(review): parser, the loaded images, the per-iteration color copies
 * (leftImage/rightImage) and leftPosition/rightPosition are never
 * released/deleted.
 */
int main(){
	
	Output::setOutputLevel(Output::DEBUG);
	
	HomographyFileParser* parser = new HomographyFileParser();
	
	parser->parse("homographies.txt");
	
	//homographies = parser->getHomographies();
	homographies = recalcHomographies();
	imageNames   = parser->getFileNames();
	
	/* load every referenced image.
	 * NOTE(review): the loadImage return code is ignored; on failure
	 * currentImage is pushed into 'images' uninitialized. */
	for (unsigned int i = 0; i < imageNames.size(); ++i) {
		IplImage* currentImage;
		loadImage(imageNames[i], currentImage);
		images.push_back(currentImage);
	}
	
	IplImage* leftImage;
	IplImage* rightImage;
	
	int gridWidthStep;
	int gridHeightStep;
	
	int overlappingImageArea;
	
	int horizontalBorderDistance;
	int verticalBorderDistance;
	
	/* homogeneous 3x1 vectors used to project grid points via the homography */
	CvMat* leftPosition;
	CvMat* rightPosition;
	leftPosition  = cvCreateMat(3, 1, CV_32F);
	rightPosition = cvCreateMat(3, 1, CV_32F);
	
	/* ---- matching stage: one feature-pair set per homography ---- */
	for (unsigned int i = 0; i < homographies.size(); ++i) {
		
		std::deque<Features::FeaturePair> currentSet;
		
		/* get current images */
		leftImage  = images[homographies[i].leftImageID];
		rightImage = images[homographies[i].rightImageID];
		
		/* width (in px) of the assumed overlap strip on the right edge of the left image */
		overlappingImageArea = (int)((float)leftImage->width * Settings::maxImageOverlap);
		
		/* make a grid */
		gridWidthStep  = overlappingImageArea  / (Settings::horizontalGridPoints + 1); //+1 to get a distance from the borders
		gridHeightStep = leftImage->height     / (Settings::verticalGridPoints   + 1);
		
		horizontalBorderDistance = gridWidthStep /2;
		verticalBorderDistance   = gridHeightStep/2;
		
			
		/* go through grid */
		for (int x = 0; x <= Settings::horizontalGridPoints; ++x) {
			for (int y = 0; y <= Settings::verticalGridPoints; ++y) {
				
				Features::FeaturePair currentPair;
				CvSeq *objectKeypoints=0, *objectDescriptors=0;
				CvSURFParams params = cvSURFParams(100, 1);
				// NOTE(review): 'storage' is created per grid cell and never
				// released with cvReleaseMemStorage — confirm / fix.
				CvMemStorage* storage = cvCreateMemStorage(0);
				CvPoint2D32f pos=cvPoint2D32f(0,0);
				float maxHessian=0;
				/* top-left corner of the current grid cell inside the overlap strip */
				CvPoint actGridPos=cvPoint((leftImage->width - overlappingImageArea)+(gridWidthStep*x),y*gridHeightStep);

				/* set left image ROI to actual grid-pos & search f  or featurepoints within */
				cvSetImageROI(leftImage, cvRect(actGridPos.x,actGridPos.y,gridWidthStep,gridHeightStep));
				cvExtractSURF(leftImage,0,&objectKeypoints,&objectDescriptors,storage,params);

				cvResetImageROI(leftImage);

				/* go through all found featurepoints & determine best one
				 * (highest Hessian response) */
				for (int j=0; j<objectDescriptors->total; j++) {
					CvSURFPoint* tmpPoint = (CvSURFPoint*)cvGetSeqElem(objectKeypoints, j);
					if (tmpPoint->hessian > maxHessian) {
						maxHessian=tmpPoint->hessian;
						pos=tmpPoint->pt;
						currentPair.left.SURF=(CvSURFPoint*)cvGetSeqElem(objectKeypoints, j);
					}
				}

				/* project the best point (ROI-relative pos + cell origin) */
				cvmSet(leftPosition, 0, 0, (float)(pos.x + actGridPos.x));
				cvmSet(leftPosition, 1, 0, (float)(pos.y + actGridPos.y));
				cvmSet(leftPosition, 2, 0, 1.0);		
				
				
				cvMatMul(homographies[i].data, leftPosition, rightPosition);
				
				currentPair.left.x = (int)((cvmGet(leftPosition, 0, 0)));				
				currentPair.left.y = (int)((cvmGet(leftPosition, 1, 0)));	
				
				calcMeanAndVariance(leftImage, &(currentPair.left), Settings::correlationNeighborhood);
				
				/* homogeneous divide to get the predicted right-image position */
				currentPair.right.x = (int)((cvmGet(rightPosition, 0, 0))  / (cvmGet(rightPosition, 2, 0)));				
				currentPair.right.y = (int)((cvmGet(rightPosition, 1, 0))  / (cvmGet(rightPosition, 2, 0)));	
				
				
				/* set right image ROI to area around calculated point & search for featurepoints within.
				 * NOTE(review): horizontalBorderDistance is used for the y offset and
				 * gridWidthStep for the rect height — verticalBorderDistance /
				 * gridHeightStep look intended; confirm. */
				cvSetImageROI(rightImage, cvRect(currentPair.right.x-horizontalBorderDistance,currentPair.right.y-horizontalBorderDistance,gridWidthStep,gridWidthStep));
				cvExtractSURF(rightImage,0,&objectKeypoints,&objectDescriptors,storage,params);
				
				cvResetImageROI(rightImage);

				currentPair.maxVNC=0;
				double currentVNC=0;
				float distance=0;
				float maxDistance=2 * pow((float)(horizontalBorderDistance),2);

				Output::message("Testing matches for Point (%i/%i)", Output::TESTING, currentPair.left.x, currentPair.left.y);
				

				/* calc VNC for the featurepoint of left image and all determined FPs in right image //
				   determine best match & save it as best pair										 */
				for (int j=0; j<objectDescriptors->total; j++) {
					CvSURFPoint* tmpPoint = (CvSURFPoint*)cvGetSeqElem(objectKeypoints, j);
					
					//sort out featurepoints with wrong laplacian or wrong direction
					if (tmpPoint->laplacian != currentPair.left.SURF->laplacian)
						continue;

					// NOTE(review): abs() on the float members may resolve to the
					// integer overload depending on included headers — confirm
					// fabs was intended.
					float nearestNeighbourDist = abs(tmpPoint->size - currentPair.left.SURF->size) + abs(tmpPoint->dir - currentPair.left.SURF->dir) + abs(tmpPoint->hessian - currentPair.left.SURF->hessian);

					Features::Feature currentTestFeature;
					currentTestFeature.x=(int)(tmpPoint->pt.x + (currentPair.right.x-horizontalBorderDistance));
					currentTestFeature.y=(int)(tmpPoint->pt.y + (currentPair.right.y-horizontalBorderDistance));
					calcMeanAndVariance(rightImage, &(currentTestFeature), Settings::correlationNeighborhood);

					/* score = VNC, weighted by descriptor similarity and by
					 * distance from the predicted position */
					currentVNC = getVarianceNormalizedCorrelation(leftImage, rightImage, &(currentPair.left), &(currentTestFeature), Settings::correlationNeighborhood);
					distance = pow((float)(currentPair.right.x - currentTestFeature.x),2)+pow((float)(currentPair.right.y - currentTestFeature.y),2);
					
					currentVNC *= nearestNeighbourDist;
					currentVNC *= 1 - (distance / maxDistance);

					if(currentVNC > currentPair.maxVNC){
						currentPair.maxVNC = currentVNC;
						currentPair.correctedRight.x = currentTestFeature.x;
						currentPair.correctedRight.y = currentTestFeature.y;
					}
				}

				Output::message("Point found at pos (%i/%i) with VNC=%f\n", Output::TESTING, currentPair.correctedRight.x, currentPair.correctedRight.y, currentPair.maxVNC);
				//std::cout<<"Point found at pos ("<<currentPair.correctedRight.x<<","<<currentPair.correctedRight.y<<") with VNC="<<currentPair.maxVNC<<std::endl<<std::endl;

				currentSet.push_back(currentPair);
				
			}
			Output::separator();
		}
		
		featurePairs.push_back(currentSet);
		
		Output::separator();
	}
	
	IplImage* jointImage;
	
	/* ---- visualization stage: draw grids and matches, save side-by-side BMPs.
	 * NOTE(review): overlappingImageArea, gridWidthStep and gridHeightStep
	 * still hold the values from the LAST iteration of the matching loop when
	 * first used below (L599 equivalent); the locals redeclared inside the
	 * inner loop shadow them only from their declaration onward — confirm. ---- */
	for (unsigned int i = 0; i < homographies.size(); ++i) {
		
		/* color copies of the grayscale images so we can draw in color */
		leftImage = cvCreateImage(cvGetSize(images[homographies[i].leftImageID]), 8, 3);
		rightImage = cvCreateImage(cvGetSize(images[homographies[i].rightImageID]), 8, 3);
		
		cvCvtColor(images[homographies[i].leftImageID], leftImage, CV_GRAY2BGR);
		cvCvtColor(images[homographies[i].rightImageID], rightImage, CV_GRAY2BGR);
		
		//IplImage* leftImage  = images[homographies[i].leftImageID];
		//IplImage* rightImage = images[homographies[i].rightImageID];
	
		
		CvPoint leftFeature;
		CvPoint rightFeature;
		CvPoint correctedRightFeature;
		
		
		for (unsigned int j = 0; j < featurePairs[i].size(); ++j) {
			/* reconstruct the grid cell this pair came from.
			 * NOTE(review): the column index divides by horizontalGridPoints-1
			 * while the matching loop iterated x = 0..horizontalGridPoints —
			 * the cell mapping may be off; confirm. */
			CvPoint actGridPosLeft=cvPoint((leftImage->width - overlappingImageArea)+(gridWidthStep*((int)(j/(Settings::horizontalGridPoints-1)))),((j%(Settings::verticalGridPoints+1))*gridHeightStep));
			int gridWidthStep  = overlappingImageArea  / (Settings::horizontalGridPoints + 1); //+1 to get a distance from the borders
			int gridHeightStep = leftImage->height     / (Settings::verticalGridPoints   + 1);
			CvPoint actGridPosRight;
			std::ostringstream stream1, stream2;
			CvFont font;
			cvInitFont(&font, CV_FONT_HERSHEY_PLAIN,1,1);

			//currentPair = featurePairs[i][j];
			leftFeature = cvPoint((int)(featurePairs[i][j].left.x), (int)(featurePairs[i][j].left.y));
			rightFeature = cvPoint((int)(featurePairs[i][j].right.x), (int)(featurePairs[i][j].right.y));
			correctedRightFeature = cvPoint((int)(featurePairs[i][j].correctedRight.x), (int)(featurePairs[i][j].correctedRight.y));

			/* translate the left grid cell into the right image via the match offset */
			actGridPosRight.x = (actGridPosLeft.x - leftFeature.x) + rightFeature.x;
			actGridPosRight.y = (actGridPosLeft.y - leftFeature.y ) + rightFeature.y; 
			
			
			//Output::message("Drawing left feature at (%d, %d)", Output::DEBUG, leftFeature.x, leftFeature.y);
			//Output::message("Drawing right feature at (%d, %d)", Output::DEBUG, rightFeature.x, rightFeature.y);
			
			/* blue rectangles: grid cells */
			cvRectangle(leftImage, actGridPosLeft, cvPoint(actGridPosLeft.x+gridWidthStep, actGridPosLeft.y+gridHeightStep), CV_RGB(0,0,255), 1);
			cvRectangle(rightImage, actGridPosRight, cvPoint(actGridPosRight.x+gridWidthStep, actGridPosRight.y+gridHeightStep), CV_RGB(0,0,255), 1);

			/* red: feature neighborhoods; blue: VNC-corrected right position */
			cvRectangle(leftImage,  cvPoint(leftFeature.x  - Settings::correlationNeighborhood*2, leftFeature.y  - Settings::correlationNeighborhood*2), cvPoint(leftFeature.x  + Settings::correlationNeighborhood*2, leftFeature.y  + Settings::correlationNeighborhood*2), CV_RGB(255,0,0), 1);
			cvRectangle(rightImage, cvPoint(rightFeature.x - Settings::correlationNeighborhood*2, rightFeature.y - Settings::correlationNeighborhood*2), cvPoint(rightFeature.x + Settings::correlationNeighborhood*2, rightFeature.y + Settings::correlationNeighborhood*2), CV_RGB(255,0,0), 1);
			cvRectangle(rightImage, cvPoint(correctedRightFeature.x - Settings::correlationNeighborhood*2, correctedRightFeature.y - Settings::correlationNeighborhood*2), cvPoint(correctedRightFeature.x + Settings::correlationNeighborhood*2, correctedRightFeature.y + Settings::correlationNeighborhood*2), CV_RGB(0,0,255), 1);
			
			/* annotate cells with hessian/10 (left) and max VNC (right) */
			stream1<<(int)(featurePairs[i][j].left.SURF->hessian / 10);
			cvPutText(leftImage, stream1.str().c_str(), actGridPosLeft, &font, cvScalar(255,40,40));

			stream2<<(int)featurePairs[i][j].maxVNC;
			cvPutText(rightImage, stream2.str().c_str(), actGridPosRight, &font, cvScalar(255,40,40));

			//cvRectangle(rightImage, cvPoint(rightFeature.x - horizontalBorderDistance, rightFeature.y - verticalBorderDistance), cvPoint(rightFeature.x + horizontalBorderDistance, rightFeature.y + verticalBorderDistance), CV_RGB(0,0,255), 1);
			
			/*
			cvNamedWindow("blub",0);
						cvShowImage("blub",leftImage);
						cvResizeWindow("blub",leftImage->width/4,leftImage->height/4);
						cvWaitKey(0);
						cvDestroyAllWindows();*/
			
			
		}
		
		
		/* side-by-side composite: left image | right image */
		jointImage = cvCreateImage(cvSize(leftImage->width + rightImage->width, leftImage->height), 8, 3);
		
		for (int x = 0; x < leftImage->width; ++x) {
			for (int y = 0; y < leftImage->height; ++y) {
				/*int currentPixelValue = ImageUtils::getPixelValue(leftImage, x, y);
				
				ImageUtils::setPixelValue(jointImage, x, y, 0, currentPixelValue);
				ImageUtils::setPixelValue(jointImage, x, y, 1, currentPixelValue);
				ImageUtils::setPixelValue(jointImage, x, y, 2, currentPixelValue);*/
				
				// NOTE(review): setPixel receives no target coordinates —
				// presumably the Pixel returned by getPixel carries its own
				// position; confirm against ImageUtils.
				ImageUtils::setPixel(jointImage, ImageUtils::getPixel(leftImage, x, y));
			}
		}
		
		/* right image goes into the right half, shifted by leftImage->width */
		for (int x = 0; x < rightImage->width; ++x) {
			for (int y = 0; y < rightImage->height; ++y) {
				int blue  = ImageUtils::getPixelValue(rightImage, x, y, 0);
				int green = ImageUtils::getPixelValue(rightImage, x, y, 1);
				int red   = ImageUtils::getPixelValue(rightImage, x, y, 2);
				
				ImageUtils::setPixelValue(jointImage, x + leftImage->width, y, 0, blue);
				ImageUtils::setPixelValue(jointImage, x + leftImage->width, y, 1, green);
				ImageUtils::setPixelValue(jointImage, x + leftImage->width, y, 2, red);
				
				
				
				//ImageUtils::setPixel(jointImage, ImageUtils::getPixel(rightImage, x, y));
			}
		}
		
		
		/* save as "Calculated Grid_<leftID>_<rightID>.bmp" */
		std::stringstream outputFileName;
			
		outputFileName.str("");
		outputFileName << "Calculated Grid";
		outputFileName << "_";
		//outputFileName << frameNo;
		outputFileName << homographies[i].leftImageID << "_" << homographies[i].rightImageID;
		outputFileName << ".bmp";
		
		Output::message("Saving file %s", Output::DEBUG, outputFileName.str().c_str());
		cvSaveImage(outputFileName.str().c_str(), jointImage);
	
		
		cvReleaseImage(&jointImage);
		
			
	}
	
	
	return 1;
}


