#pragma once

#include "stdafx.h"

// Process-lifetime configuration strings. They are intentionally never freed
// (reclaimed by the OS at exit); kept as pointers to preserve the existing
// interface for any other translation unit including this header.
//const std::string *DATA_DIR = new std::string("D:/Projects/Data/US/08/");
const std::string* INPUT_FILE_1 = new std::string("volume0135.mhd");
const std::string* INPUT_FILE_2 = new std::string("volume0136.mhd");
const std::string* OUTPUT_FILE = new std::string("volume0136");
std::string rawFilename;              // base name (no directory/extension) of the volume currently processed
const std::string extension = ".mhd"; // metaimage extension searched for by fileSearch()

// Switch between the CPU (cv::Mat) and CUDA (cv::cuda::GpuMat) processing paths.
//typedef cv::Mat custom_cvMatType;
typedef cv::cuda::GpuMat custom_cvMatType;

// FIX: `bool twoDistinctLines, poly_line = false;` initialized only poly_line;
// twoDistinctLines was read with an indeterminate value. One declaration per
// line, both explicitly initialized.
bool twoDistinctLines = false; // set when two well-separated line slopes are found in a slice
bool poly_line = false;        // enable 3-D polyline approximation of the detected points

// A 3-D line segment between two points plus cached metrics used for sorting
// and median selection (distance in voxels, slope in degrees).
struct line_segment
{
	cv::Point3d point_1;   // cv::Point3d value-initializes to (0, 0, 0)
	cv::Point3d point_2;
	double distance = 0.0; // FIX: zero-init; previously read uninitialized in some code paths
	double slope = 0.0;    // FIX: zero-init for the same reason
};

bool compareDistances(line_segment line_1, line_segment line_2) 
{
	return (line_1.distance < line_2.distance);
}

bool compareSlopes(line_segment line_1, line_segment line_2) 
{
	return (line_1.slope < line_2.slope);
}

// Returns the median segment of `lines_trunc` ordered by slope.
//   - empty input : an all-zero segment
//   - odd count   : the middle segment
//   - even count  : component-wise average of the two middle segments
line_segment medianLine(std::vector<line_segment> lines_trunc)
{
	// Explicitly zero every field so the empty-input return is well defined.
	line_segment line_median;
	line_median.point_1 = cv::Point3d(0, 0, 0);
	line_median.point_2 = cv::Point3d(0, 0, 0);
	line_median.slope = 0;
	line_median.distance = 0;

	if (lines_trunc.empty())
		return line_median;

	// The parameter is already a copy (pass-by-value), so sort it in place;
	// the previous second copy (`lines_trunc_sorted`) was unnecessary.
	std::sort(lines_trunc.begin(), lines_trunc.end(), compareSlopes);

	const size_t n = lines_trunc.size();
	const size_t mid = n / 2;

	if (n > 1 && n % 2 == 0)
	{
		// Even count: average the two middle segments field by field.
		const line_segment& lo = lines_trunc[mid - 1];
		const line_segment& hi = lines_trunc[mid];
		line_median.point_1 = (lo.point_1 + hi.point_1) * 0.5;
		line_median.point_2 = (lo.point_2 + hi.point_2) * 0.5;
		line_median.slope = (lo.slope + hi.slope) / 2;
	}
	else
	{
		// Odd count (including n == 1): take the middle element. The former
		// std::floor(static_cast<double>(size()/2)) was a no-op — the integer
		// division had already truncated.
		line_median = lines_trunc[mid];
	}

	return line_median;
}

template <class MatType>
static void showImageInWindow (const std::string &windowName, const MatType customMat)
{
	cv::Mat temp_Mat(customMat);
	cv::namedWindow(windowName, CV_WINDOW_AUTOSIZE);
	cv::imshow(windowName, temp_Mat);
	//cvWaitKey(); // This is commented out since multiple images can be displayed
}

// Map an OpenCV image type id (from cv::Mat::type()) to its enum's string name.
// NOTE: several ids alias each other (e.g. CV_8U == CV_8UC1), so for aliased
// values the first matching name in declaration order is returned.
std::string getImgType(int imgTypeInt)
{
	// 7 base depths, each with five channel spellings (no suffix, C1..C4).
	// FIX: tables are now static const — built once, not on every call.
	static const int enum_ints[] = {CV_8U,  CV_8UC1,  CV_8UC2,  CV_8UC3,  CV_8UC4,
		CV_8S,  CV_8SC1,  CV_8SC2,  CV_8SC3,  CV_8SC4,
		CV_16U, CV_16UC1, CV_16UC2, CV_16UC3, CV_16UC4,
		CV_16S, CV_16SC1, CV_16SC2, CV_16SC3, CV_16SC4,
		CV_32S, CV_32SC1, CV_32SC2, CV_32SC3, CV_32SC4,
		CV_32F, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4,
		CV_64F, CV_64FC1, CV_64FC2, CV_64FC3, CV_64FC4};

	static const std::string enum_strings[] = {"CV_8U",  "CV_8UC1",  "CV_8UC2",  "CV_8UC3",  "CV_8UC4",
		"CV_8S",  "CV_8SC1",  "CV_8SC2",  "CV_8SC3",  "CV_8SC4",
		"CV_16U", "CV_16UC1", "CV_16UC2", "CV_16UC3", "CV_16UC4",
		"CV_16S", "CV_16SC1", "CV_16SC2", "CV_16SC3", "CV_16SC4",
		"CV_32S", "CV_32SC1", "CV_32SC2", "CV_32SC3", "CV_32SC4",
		"CV_32F", "CV_32FC1", "CV_32FC2", "CV_32FC3", "CV_32FC4",
		"CV_64F", "CV_64FC1", "CV_64FC2", "CV_64FC3", "CV_64FC4"};

	// FIX: derive the count from the table instead of a hand-maintained literal (35).
	const int numImgTypes = static_cast<int>(sizeof(enum_ints) / sizeof(enum_ints[0]));

	for (int i = 0; i < numImgTypes; i++)
	{
		if (imgTypeInt == enum_ints[i]) return enum_strings[i];
	}
	return "unknown image type";
}

// Download the GPU-side Hough segment list and draw every segment in red on a
// BGR copy of `hough_Mat`. Returns the annotated host image.
template <class MatType>
static cv::Mat drawHoughLinesOnMat_simple (MatType& hough_Mat, MatType& houghLines)
{
	// Convert the grayscale source to BGR so colored lines can be drawn on it.
	cv::Mat canvas;
	cv::cvtColor(cv::Mat(hough_Mat), canvas, CV_GRAY2BGR);

	std::vector<cv::Vec4i> segments;
	if (!houghLines.empty())
	{
		segments.resize(houghLines.cols);
		// cv::Vec4i is a 4-component vector of 32-bit ints, i.e. CV_32SC4.
		// Wrapping the vector's storage in a non-owning cv::Mat header lets
		// GpuMat::download (which checks/reallocates on type mismatch) write
		// straight into `segments`.
		cv::Mat host_header (1, houghLines.cols, CV_32SC4, &segments[0]);
		houghLines.download (host_header);
	}

	// Each entry is (x1, y1, x2, y2); draw in red (BGR 0,0,255), thickness 1.
	for (const cv::Vec4i& seg : segments)
		cv::line(canvas, cv::Point(seg[0], seg[1]), cv::Point(seg[2], seg[3]), cv::Scalar(0, 0, 255), 1, 8);

	return canvas;
}

// Download the GPU-side Hough segments for one slice, discard slope outliers,
// draw the median surviving segment on a BGR copy of `hough_Mat`, and return:
//   (annotated image, collected/approximated 3-D points,
//    median endpoint 1, median endpoint 2, two-distinct-lines flag).
// Endpoint coordinates are shifted from ROI space back to full-image space and
// carry the slice number as their z component.
template <class MatType>
std::tuple <cv::Mat, std::vector<cv::Point3d>, cv::Point3d, cv::Point3d, bool> drawHoughLinesOnMat_complicated (MatType& hough_Mat, MatType& houghLines, int slice_number, cv::Rect& region)
{
	cv::Mat output_Mat;
	cv::cvtColor(cv::Mat(hough_Mat), output_Mat, CV_GRAY2BGR);

	std::vector<cv::Vec4i> lines;

	// Explicitly zeroed so the no-lines return path is well defined.
	line_segment line_median;
	line_median.slope = 0;
	line_median.distance = 0;

	// FIX: these were declared inside the `if` block below but referenced by
	// the return statements at the end of the function (out of scope).
	std::vector<cv::Point3d> input_points, output_points;

	if (!houghLines.empty())
	{
		lines.resize(houghLines.cols);

		// cv::Vec4i is CV_32SC4; a non-owning cv::Mat header over the vector's
		// storage lets GpuMat::download write directly into `lines`.
		cv::Mat temp_Mat (1, houghLines.cols, CV_32SC4, &lines[0]);
		houghLines.download (temp_Mat);

		std::vector<line_segment> lines_slopes(lines.size());
		input_points.resize(2 * lines.size());

		for (size_t i = 0; i < lines.size(); ++i)
		{
			// Shift ROI-relative endpoints back to full-image coordinates.
			lines_slopes[i].point_1 = cv::Point3d(lines[i][0] + region.x, lines[i][1] + region.y, slice_number);
			lines_slopes[i].point_2 = cv::Point3d(lines[i][2] + region.x, lines[i][3] + region.y, slice_number);

			// FIX: this statement was previously interleaved with the
			// input_points block (a brace block in the middle of the atan2
			// call), which did not compile. Slope in degrees via atan2.
			lines_slopes[i].slope = std::atan2(static_cast<double>(lines[i][3] - lines[i][1]),
				static_cast<double>(lines[i][2] - lines[i][0])) * 180 / CV_PI;

			// Collect both endpoints for the (optional) polyline approximation.
			// FIX: the old code wrote input_points[i].point_1/.point_2, but
			// cv::Point3d has no such members; store the two endpoints at
			// consecutive indices instead.
			input_points[2 * i]     = lines_slopes[i].point_1;
			input_points[2 * i + 1] = lines_slopes[i].point_2;
		}

		if (poly_line)
			// cv::Point3d variant of approxPolyDP3D. Has not been pulled into
			// the OpenCV main trunk yet.
			cv::approxPolyDP3D(input_points, output_points, 3, false);

		// Indices of segments whose slope agrees with enough of the others.
		std::vector<size_t> slopes_trunc;

		switch (lines_slopes.size())
		{
		case 1:
			slopes_trunc.push_back(0);
			break;
		case 2:
			// Two segments more than 15 degrees apart count as distinct lines.
			if (std::abs(lines_slopes[0].slope - lines_slopes[1].slope) > 15)
			{
				std::cout << "There are two distinct lines detected for " << rawFilename.c_str()
					<< ", slice " << slice_number << ". Please check." << std::endl;
				twoDistinctLines = true;
			}
			slopes_trunc.push_back(0);
			slopes_trunc.push_back(1);
			break;
		default:
			{
				// Keep a segment when its slope lies within 7 degrees of more
				// than half of the other segments; exactly half is kept too but
				// flagged as a possible second line.
				for (size_t i = 0; i < lines_slopes.size(); ++i)
				{
					int count = 0;
					for (size_t j = 0; j < lines_slopes.size(); ++j)
					{
						if ((i != j) && (std::abs(lines_slopes[i].slope - lines_slopes[j].slope) < 7))
							count++;
					}

					const double half = std::ceil(static_cast<double>(lines_slopes.size() / 2));
					if (count > half)
						slopes_trunc.push_back(i);
					else if (count == half)
					{
						slopes_trunc.push_back(i);
						std::cout << "There are two distinct lines detected for " << rawFilename.c_str()
							<< ", slice " << slice_number << ". Please check." << std::endl;
						twoDistinctLines = true;
					}
				}
			}
			break;
		}

		std::vector<line_segment> lines_trunc(slopes_trunc.size());
		for (size_t i = 0; i < slopes_trunc.size(); ++i)
			lines_trunc[i] = lines_slopes[slopes_trunc[i]];

		// Draw the median of the surviving segments in red.
		line_median = medianLine(lines_trunc);
		cv::line(output_Mat, cv::Point(line_median.point_1.x, line_median.point_1.y),
			cv::Point(line_median.point_2.x, line_median.point_2.y), cv::Scalar(0, 0, 255), 1, 8);
	}

	if (!poly_line)
		return std::make_tuple(output_Mat, input_points, line_median.point_1, line_median.point_2, twoDistinctLines);
	else
		return std::make_tuple(output_Mat, output_points, line_median.point_1, line_median.point_2, twoDistinctLines);
}

// Draw generalized-Hough detections on a BGR copy of `hough_Mat`.
// Each entry of `houghLines` is (center x, center y, scale, angle); a rotated
// rectangle of the template's scaled size is outlined in red per detection.
template <class MatType>
static cv::Mat drawGeneralHTonMat (MatType hough_Mat, std::vector<cv::Vec4f> houghLines, MatType template_image)
{
	cv::Mat output_Mat;
	cv::cvtColor(cv::Mat(hough_Mat), output_Mat, CV_GRAY2BGR);
	cv::Mat templ (template_image);

	// FIX: iterate the parameter directly with a size_t index instead of
	// copying the entire vector into a local and indexing with a signed int.
	for (size_t i = 0; i < houghLines.size(); ++i)
	{
		const cv::Vec4f& detection = houghLines[i];
		cv::Point2f pos(detection[0], detection[1]);
		float scale = detection[2];
		float angle = detection[3];

		cv::RotatedRect rect;
		rect.center = pos;
		rect.size = cv::Size2f(templ.cols * scale, templ.rows * scale);
		rect.angle = angle;

		cv::Point2f pts[4];
		rect.points(pts);

		// Outline the rotated rectangle in red (BGR 0,0,255), thickness 3.
		cv::line(output_Mat, pts[0], pts[1], cv::Scalar(0, 0, 255), 3);
		cv::line(output_Mat, pts[1], pts[2], cv::Scalar(0, 0, 255), 3);
		cv::line(output_Mat, pts[2], pts[3], cv::Scalar(0, 0, 255), 3);
		cv::line(output_Mat, pts[3], pts[0], cv::Scalar(0, 0, 255), 3);
	}
	return output_Mat;
}

// Recursively collect the names (not paths) of regular files under
// `curr_directory` whose filenames end with `extension`.
// Returns an empty vector when the directory cannot be opened.
std::vector<std::string> fileSearch(std::string curr_directory, std::string extension)
{
	std::vector<std::string> results;

	// FIX: opendir() can fail (bad path, permissions). Previously the null
	// DIR* was passed straight to readdir(), crashing the program.
	DIR* dir_point = opendir(curr_directory.c_str());
	if (dir_point == NULL)
		return results;

	for (dirent* entry = readdir(dir_point); entry != NULL; entry = readdir(dir_point))
	{
		std::string fname = entry->d_name;

		if (entry->d_type == DT_DIR) // subdirectory: recurse
		{
			if (fname != "." && fname != "..")
			{
				// FIX: recurse with the full path (d_name alone is relative to
				// the subdirectory) and merge the recursive results, which the
				// old code silently discarded.
				std::vector<std::string> sub = fileSearch(curr_directory + "/" + fname, extension);
				results.insert(results.end(), sub.begin(), sub.end());
			}
		}
		else if (entry->d_type == DT_REG) // regular file: check the suffix
		{
			// Match only when `extension` occurs at the very end of the name.
			if (fname.length() >= extension.length() &&
				fname.compare(fname.length() - extension.length(), extension.length(), extension) == 0)
				results.push_back(fname);
		}
	}

	// FIX: the directory stream was previously never closed (resource leak).
	closedir(dir_point);
	return results;
}


// Entry point. For every .mhd volume found under DATA_DIR, detect the needle
// line slice by slice with a CUDA pipeline (bilateral filter -> MOG background
// subtraction -> Canny -> probabilistic Hough), write each annotated slice as
// a PNG, then record the longest endpoint pair across slices ("the needle")
// in needleCoordinates.txt.
//
// Usage: prog [data_dir] [verbose]
//   data_dir — directory containing the .mhd volumes (hard-coded default otherwise)
//   verbose  — the presence of any second argument enables per-volume timing
int main(int argc, char** argv)
{
	/// Initialize vtk and OpenCV classes
	vtkSmartPointer<vtkMetaImageReader> reader = vtkSmartPointer<vtkMetaImageReader>::New();
	vtkSmartPointer<vtkImageData> imageData_in = vtkSmartPointer<vtkImageData>::New();

	//cv::Ptr<cv::cuda::HoughLinesDetector> simpleHT = cv::cuda::createHoughLinesDetector(1.0f, (float)(CV_PI/180.0f), 30);
	cv::Ptr<cv::cuda::HoughSegmentDetector> probHT = cv::cuda::createHoughSegmentDetector(1.0f, (float)(CV_PI/180.0f), 30, 50);

	// Recursive segmentation: MOG [KB2001] background subtraction feeding Canny.
	// NOTE: OpenCV's legacy "ConDensation" particle filter was considered and
	// rejected (inefficient SIR implementation); this MOG requires the custom
	// OpenCV fork from https://customopencv.codeplex.com because of TBB/IPP
	// integration issues in the upstream branch. Generalized Hough (Ballard and
	// Guil) was also evaluated and removed — template matching performs poorly
	// on noisy ultrasound.
	cv::Ptr<cv::cuda::BackgroundSubtractorMOG> mog = cv::cuda::createBackgroundSubtractorMOG(200, 5, 0.7, 0); // [KB2001] --> works pretty well
	cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(5, 60, 3);

	std::ofstream needleCoordinates_text;
	std::string DATA_DIR;
	bool verbose = false;

	// FIX: the old switch was off by one — `case 1` (program name only, no user
	// arguments) dereferenced argv[1], and `case 2` read the nonexistent
	// argv[2]: undefined behavior. argc == 2 means one user argument.
	switch (argc)
	{
	case 2:
		DATA_DIR = argv[1];
		break;
	case 3:
		DATA_DIR = argv[1];
		verbose = true; // any second argument enables verbose timing (matches the old pointer-to-bool conversion)
		break;
	default:
		DATA_DIR = "D:/Projects/Data/US/2014-02-26_01/";
	}

	needleCoordinates_text.open(DATA_DIR + std::string("needleCoordinates.txt"));

	needleCoordinates_text << "volumeID\t\t" <<  "p1.X" << "\t" << "p1.Y" << "\t" << "p1.Z" << "\t" <<
		"p2.X" << "\t" << "p2.Y" << "\t"	<< "p2.Z" << "\t" << "dist" << "\t" << "twoLines" <<
		"\tSec" << std::endl;

	std::vector<std::string> fileList = fileSearch(DATA_DIR, extension);

	std::vector<int> compression_params; // cv::imwrite: PNG, compression 0 (fastest writes)
	compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
	compression_params.push_back(0);

	int file_count = 0;

	for (std::vector<std::string>::iterator it=fileList.begin(); it!= fileList.end(); ++it)
	{
		reader->SetFileName(std::string(DATA_DIR + *it).c_str());
		file_count ++;

		reader->Update();
		imageData_in = reader->GetOutput();
		twoDistinctLines = false; // reset the per-volume "two lines" flag

		// Strip the directory and the extension to get the bare volume name.
		int lastindex = static_cast<int>(std::string(DATA_DIR + *it).find_last_of("/"));
		rawFilename = std::string(DATA_DIR + *it).substr(lastindex+1, (std::string(DATA_DIR + *it)).size());
		lastindex = static_cast<int>(rawFilename.find_last_of("."));
		rawFilename = rawFilename.substr(0, lastindex);

		std::vector<custom_cvMatType> cvVector_in;

		clock_t tstart = clock();
		// FIX: the bridge is deleted at the end of this iteration below; it was
		// previously leaked on every volume (the commented-out delete[] also
		// used the wrong form for a scalar new).
		vtkOpenCVBridge* bridgeInputStructure = new vtkOpenCVBridge(imageData_in);
		cvVector_in = bridgeInputStructure->VTKtoOpenCV();
		std::cout << "Reading 1 volume took " << (double)(clock() - tstart)/(CLOCKS_PER_SEC) << " second(s)" << std::endl;

		long count = 0; // slice index used in the output image filenames

		// Central ROI: trim 1/7th of the image from each border before detection.
		int row_range = std::floor(static_cast<double>(cvVector_in[0].rows/7));
		int col_range = std::floor(static_cast<double>(cvVector_in[0].cols/7));
		cv::Rect region(col_range, row_range, cvVector_in[0].cols - 2*col_range, cvVector_in[0].rows - 2*row_range);

		std::vector<line_segment> line_coordinates;

		// FIX: time_pr was declared inside the `if (verbose)` block further
		// down but written to the results file after that scope had closed
		// (a compile error); declare it at loop scope instead.
		double time_pr = 0.0;

		if (verbose)
			tstart = clock();

		custom_cvMatType input, bil_out, thresh, hough_lines, input_smallROI, segmented, image_canny;

		// This loop must stay sequential (no parallel_for): the MOG background
		// model is updated recursively from slice to slice.
		for (size_t i=0; i<cvVector_in.size(); ++i)
		{
			input = cvVector_in[i];
			cv::Mat output = cv::Mat(input);
			input_smallROI = input(region).clone();

			double minVal, maxVal;
			cv::Point minLoc, maxLoc;
			cv::cuda::minMaxLoc(input_smallROI, &minVal, &maxVal, &minLoc, &maxLoc);

			// Cheap brightness/occupancy gate: skip nearly-empty slices.
			if ( (maxVal>100) && (cv::countNonZero(cv::Mat(input_smallROI))>250) )
			{
				int bilateral_thresh = 5; // 5 == 0.085s; 15 == 0.467s at run-time

				cv::cuda::bilateralFilter(input_smallROI, bil_out, bilateral_thresh, bilateral_thresh*2, bilateral_thresh/2 );

				mog->apply (bil_out, segmented, 0.01);

				canny->detect(segmented, image_canny);

				//simpleHT->detect(thresh, hough_lines);
				probHT->detect(image_canny, hough_lines);

				std::vector<cv::Point3d> points_on_line;
				line_segment temp_coords;
				std::tie(output, points_on_line, temp_coords.point_1, temp_coords.point_2, twoDistinctLines) = drawHoughLinesOnMat_complicated(input, hough_lines, static_cast<int>(i), region);

				line_coordinates.push_back(temp_coords);

				// For debugging purposes
				/*cv::Mat(segmented).copyTo(output(region));
				showImageInWindow("bilateral", bil_out);
				showImageInWindow("mog", segmented);
				showImageInWindow("canny", image_canny);
				showImageInWindow("output", output);
				cvWaitKey(15);*/
			}

			cv::imwrite( std::string(DATA_DIR + "results/29-04/" + rawFilename + "_" + cv::format("%03d",count) + ".png" ),
				output, compression_params ); // write the annotated slice
			count++;
		}

		if (verbose)
		{
			time_pr = (double)(clock() - tstart)/(CLOCKS_PER_SEC);
			std::cout << "Processing 1 volume took " << time_pr << " second(s)" << std::endl;
		}

		// The needle is the endpoint pair (taken from different slices) with
		// the largest 3-D separation.
		std::vector<line_segment> line_max_distances(line_coordinates.size());

		for (size_t i=0; i<line_coordinates.size(); ++i)
		{
			line_max_distances[i].distance = 0;
			for (size_t j=0; j<line_coordinates.size(); ++j)
			{
				cv::Point3d diff = line_coordinates[i].point_1 - line_coordinates[j].point_2;
				double temp_distance = std::sqrt(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z);
				if ((i!=j) && (temp_distance>line_max_distances[i].distance))
				{
					line_max_distances[i].point_1 = line_coordinates[i].point_1;
					line_max_distances[i].point_2 = line_coordinates[j].point_2;
					line_max_distances[i].distance = temp_distance;
				}
			}
		}
		std::sort(line_max_distances.begin(), line_max_distances.end(), compareDistances);

		// FIX: back() on an empty vector is undefined behavior — possible when
		// no slice produced a line; fall back to an all-zero needle record.
		line_segment needle;
		needle.distance = 0;
		needle.slope = 0;
		if (!line_max_distances.empty())
			needle = line_max_distances.back();

		line_coordinates.clear();
		line_max_distances.clear();

		needleCoordinates_text << rawFilename << "\t\t" <<
			needle.point_1.x << "\t" << needle.point_1.y << "\t" << needle.point_1.z << "\t"
			<< needle.point_2.x << "\t" << needle.point_2.y << "\t" << needle.point_2.z << "\t"
			<< needle.distance << "\t" << twoDistinctLines << "\t\t" << time_pr << std::endl;

		delete bridgeInputStructure; // release the per-volume bridge (see FIX above)

	} // end of fileList for-loop

	needleCoordinates_text.close();

	return EXIT_SUCCESS;
}


