/**
 * File:    AmbigousZoneExtractor.cpp
 * Author:  Bui Quang Anh
 * Email:   quang_anh.bui@univ-lr.fr
 * Version: 1.0
 * 
 * DESCRIPTION:
 * Implementation file for class AmbigousZoneExtractor.
 * Provides algorithms for the extraction of ambiguous zones in an image.
 * 
 * CHANGE HISTORY:
 * (23/10/2011 12:55) File first time created - BUI Quang Anh
 */
#include "StdAfx.h"
#include "AmbigousZoneExtractor.h"
#include "ThinningAlgorithm.h"
#include "Tools.h"
#include "GrahamAlgorithm.h"

// Constructor: start with an unknown (zero) estimated stroke width.
AmbigousZoneExtractor::AmbigousZoneExtractor(void)
{
	estimated_stroke_width = 0;
}

// Destructor: no owned resources to release; images created during run()
// are released inside run() itself.
AmbigousZoneExtractor::~AmbigousZoneExtractor(void)
{
}

// Get estimated stroke width
int AmbigousZoneExtractor::getEstimatedStrokeWidth()
{
	return this->estimated_stroke_width;
}

// Run algorithm: extract ambiguous zones from an image file.
// Pipeline: thinning -> feature point detection -> spurious fork point
// filtering -> grouping of fork points into zone centers -> zone polygon
// extraction (DetectAmbigousZones).
// @input_filename: Name of source image file
// @stroke_graph: Output graph of sub-strokes and ambiguous zones
// @ambigous_zones_map: Output ambiguous zones map (cloned image; caller releases)
// Returns 0 on success, 1 on failure (source image could not be loaded).
int AmbigousZoneExtractor::run(const char* input_filename, vector<StrokeNode*> &stroke_graph, IplImage** ambigous_zones_map)
{
	IplImage* source_image = cvLoadImage(input_filename,CV_LOAD_IMAGE_GRAYSCALE);
	if (!source_image)
	{
		cerr << "Couldn't open the image file" << endl;
		return 1;
	}	

	//Tools::remove_spurious_loops(source_image,source_image);

	// Find thinned image and half thinned image
	IplImage *thinned_image = cvCreateImage(cvSize(source_image->width,source_image->height),source_image->depth,source_image->nChannels);
	IplImage *half_thinned_image = cvCreateImage(cvSize(source_image->width,source_image->height),source_image->depth,source_image->nChannels);
	// Apply thinning algorithm to find thinned image and half thinned image.
	// Scoped stack object (RAII): released automatically, no new/delete needed.
	{
		ThinningAlgorithm thinning_algorithm;
		thinning_algorithm.thinning(source_image,thinned_image);
		thinning_algorithm.half_thinning(source_image,half_thinned_image);
	}
	// Calculate estimated stroke width
	this->estimated_stroke_width = Tools::calculate_estimated_stroke_width(source_image);
	// connected_feature_points_sets: list of the set of feature points that are in the same connected region
	vector<set<FeaturePoint>> connected_feature_points_sets;
	// Find feature points and put them to sets: Sf, Se
	// All feature points that are in the same connected region is in the same set in the list of set: connected_feature_points_sets
	this->FindFeaturePoints(thinned_image,connected_feature_points_sets);
	// S1 is the set of spurious candidate fork points
	set<FeaturePoint> S1;
	// Find spurious candidate fork points 
	this->FindSpuriousCandidateForkPoints(source_image, thinned_image,connected_feature_points_sets,S1);

	// Create set S2 that contains fork points without spurious fork points
	// Sf: Set of fork points
	// S2 = Sf - S1
	
	// components_S2 contains groups of fork points in S2. Each group contains points that is connected in thinned image
	vector<set<FeaturePoint>> components_S2;
	set<FeaturePoint> S2;
	components_S2.clear();
	S2.clear();
	for (int i = 0; i< (int) connected_feature_points_sets.size(); i++)
	{
		set<FeaturePoint> current_feature_points_set = connected_feature_points_sets[i];
		set<FeaturePoint> subset_S2;
		subset_S2.clear();
		for(set<FeaturePoint>::iterator it = current_feature_points_set.begin(); it != current_feature_points_set.end(); it++)
		{
			// If current feature point is a fork point (Nb > 1)
			if ((*it).Nb > 1)
			{
				// Keep it only if it is not a spurious fork point
				if (S1.find((*it)) == S1.end())
				{
					// Put to subset_S2 (per-region) and to S2 (global)
					subset_S2.insert((*it));
					S2.insert((*it));
				}
			}
		}
		// Keep only non-empty per-region subsets
		if (subset_S2.size() > 0)
		{
			components_S2.push_back(subset_S2);
		}
	}

	vector<CvPoint> half_thinning_contour;
	Tools::find_contour(half_thinned_image,half_thinning_contour);

	// S3 is the set of contour pixels after half_thinning
	set<FeaturePoint> S3;
	S3.clear();
	for(int i = 0; i< (int) half_thinning_contour.size(); i++)
	{
		FeaturePoint p;
		p.position = half_thinning_contour[i];
		S3.insert(p);
	}
	// Clear half_thinning_contour for saving memory
	half_thinning_contour.clear();

	// S4 is the set of skeleton pixels after thinning
	set<FeaturePoint> S4;
	S4.clear();
	for(int i = 0; i< (int) thinned_image->height; i++)
	{
		for(int j = 0; j < (int) thinned_image->width; j++)
		{
			// If current point is an image pixel (value 0 = stroke)
			if (((uchar *)(thinned_image->imageData + i * thinned_image->widthStep))[j] == 0)
			{
				// Put it to set S4
				FeaturePoint p;
				p.position = cvPoint(j,i);
				S4.insert(p);
			}
		}
	}

	// S43 = S4 - S3
	set<FeaturePoint> S43;
	S43.clear();
	for (set<FeaturePoint>::iterator it = S4.begin(); it != S4.end(); it++)
	{
		if (S3.find((*it)) == S3.end())
		{
			S43.insert((*it));
		}
	}

	// Clear S3 and S4 for saving memory
	S3.clear();
	S4.clear();

	// Find connected components in S43
	vector<set<FeaturePoint>> components_S43;
	Tools::find_connected_components(S43,components_S43);

	// S5 = S2 ^ (S4 - S3)
	// Each set group in S5 (subset_S5) is connected components in set S43
	vector<set<FeaturePoint>> S5;
	S5.clear();
	for(int i = 0; i< (int) components_S43.size(); i++)
	{
		set<FeaturePoint> subset_S5;
		for(set<FeaturePoint>::iterator it = components_S43[i].begin(); it != components_S43[i].end(); it++)
		{
			if (S2.find((*it)) != S2.end())
			{
				subset_S5.insert((*it));
			}
		}
		if (subset_S5.size() > 0)
		{
			S5.push_back(subset_S5);
		}
	}
	
	// Clear S2 to save memory
	S2.clear();

	// Set Sa is the set of groups of fork points
	// Each group e of fork points is inside the same ambigous zone
	vector<set<FeaturePoint>> Sa;
	Sa.clear();
	for(int i = 0; i < (int) S5.size(); i++)
	{
		set<FeaturePoint> subset_S5;
		set<FeaturePoint> e;
		subset_S5 = S5[i];
		FeaturePoint subset_S5_first_element = (*subset_S5.begin());

		// Find which subset of components_S2 contains elements of subset_S5
		int idx = -1;
		for (int j = 0; j < (int) components_S2.size(); j++)
		{
			if (components_S2[j].find(subset_S5_first_element) != components_S2[j].end())
			{
				idx = j;
				break;
			}
		}
		
		if (idx > -1)
		{
			// Grow group e by repeatedly adding fork points of components_S2[idx]
			// that lie within D1 * stroke_width of any point already in the group
			list<FeaturePoint> queue;
			for(set<FeaturePoint>::iterator it = subset_S5.begin(); it != subset_S5.end(); it++)
			{
				queue.push_back(*it);
			}

			while(!queue.empty())
			{
				FeaturePoint front_queue = queue.front();
				queue.pop_front();
				for(set<FeaturePoint>::iterator it = components_S2[idx].begin(); it != components_S2[idx].end(); it++)
				{
					double distance = Tools::distance((*it).position, front_queue.position);
					if (distance < SE_THRESHOLD_D1 * this->estimated_stroke_width)
					{
						if (e.find(*it) == e.end())
						{
							e.insert(*it);
							queue.push_back(*it);
						}
					}
				}
			}
		}

		if (e.size() > 0)
		{
			Sa.push_back(e);
		}
		// Erase all elements of components_S2[idx] that are in e.
		// Guard on idx: when idx == -1 the set e is necessarily empty, but
		// indexing components_S2[-1] must never happen.
		if (idx > -1)
		{
			for(set<FeaturePoint>::iterator it = e.begin(); it != e.end(); it++)
			{
				components_S2[idx].erase(*it);
			}
		}
	}

	// Clear S5 to save memory
	S5.clear();

	// Group the remaining fork points of each component by mutual distance
	for(int i = 0; i < (int) components_S2.size(); i++)
	{
		while(!components_S2[i].empty())
		{
			list<FeaturePoint> queue;
			set<FeaturePoint> e;
			// Get first point of current component
			FeaturePoint first_point = (*components_S2[i].begin());
			// Put to set e
			e.insert(first_point);
			// Put to queue
			queue.push_back(first_point);
			while(!queue.empty())
			{
				FeaturePoint front_queue = queue.front();
				queue.pop_front();
				// Find points whose distance to front_queue is less than threshold D1
				for(set<FeaturePoint>::iterator it = components_S2[i].begin(); it != components_S2[i].end(); it++)
				{
					double distance = Tools::distance((*it).position, front_queue.position);
					if (distance < SE_THRESHOLD_D1 * this->estimated_stroke_width)
					{
						if (e.find(*it) == e.end())
						{
							e.insert(*it);
							queue.push_back(*it);
						}
					}
				}
			}

			// Put e to Sa
			if (e.size() > 0)
			{
				Sa.push_back(e);
			}
			// Erase all elements in components_S2[i] that are in e
			for(set<FeaturePoint>::iterator it = e.begin(); it != e.end(); it++)
			{
				components_S2[i].erase(*it);
			}
		}
	}

	// Clear components_S2 to save memory
	components_S2.clear();

	// Replace each multi-point group of Sa by its single center (centroid) point
	for(int i = 0; i< (int) Sa.size(); i++)
	{
		FeaturePoint center_point;
		if (Sa[i].size() > 1)
		{
			// BUG FIX: CvPoint members are not zero-initialized by default;
			// the accumulator must be reset before summing.
			center_point.position = cvPoint(0,0);
			for(set<FeaturePoint>::iterator it = Sa[i].begin(); it != Sa[i].end(); it++)
			{
				center_point.position.x+= (*it).position.x;
				center_point.position.y+= (*it).position.y;
			}
			center_point.position.x = center_point.position.x / Sa[i].size();
			center_point.position.y = center_point.position.y / Sa[i].size();
			Sa[i].clear();
			Sa[i].insert(center_point);
		}
	}

	// Debug visualization: draw a circle around each zone center
	IplImage* test_image = cvLoadImage(input_filename,CV_LOAD_IMAGE_COLOR);
	for(int i = 0; i< (int) Sa.size(); i++)
	{
		if (Sa[i].size() > 0)
		{
			FeaturePoint p = (*Sa[i].begin());
			cvCircle(test_image,p.position,this->estimated_stroke_width,cvScalar(0,0,255));
		}
	}

	cvSaveImage("Test Image.png",test_image);
	cvReleaseImage(&test_image);

	// Save half thinned image
	cvSaveImage("Half_thinned_image.png",half_thinned_image);
	// Visualize fork points, end points and spurious candidate fork points
	this->VisualyzeFeaturePoint("Feature_Points.png",thinned_image,connected_feature_points_sets,S1);
	// Clear S1 to save memory
	S1.clear();
	// Release images to save memory space
	cvReleaseImage(&thinned_image);
	cvReleaseImage(&half_thinned_image);
	cvReleaseImage(&source_image);

	IplImage *img = this->DetectAmbigousZones(input_filename,stroke_graph,Sa);
	if (!img)
	{
		// DetectAmbigousZones could not re-load the image; propagate the error
		// instead of dereferencing NULL in cvCloneImage.
		return 1;
	}
	*ambigous_zones_map = cvCloneImage(img);
	cvReleaseImage(&img);
	return 0;
}
// Find candidate fork points based on thinned image.
// Performs a breadth-first flood fill over each connected region of the
// skeleton; every visited skeleton pixel is classified with AnalysePoint and
// fork points (Nb > 1) / end points (Nb == 1) are collected into one set per
// connected region.
// @thinned_image: source (thinned) image; 255 = background, 0 = stroke pixel
// @connected_feature_points_sets: output list of the set of feature points that is in the same connected region
void AmbigousZoneExtractor::FindFeaturePoints(IplImage* thinned_image, vector<set<FeaturePoint>> &connected_feature_points_sets)
{

	// Clone source image: visited pixels are erased (set to 255) in the clone
	// so each connected region is traversed exactly once
	IplImage* temp = cvCloneImage(thinned_image);
	for(int i = 0; i< (int) temp->height; i++)
	{
		for(int j = 0; j< (int) temp->width; j++)
		{
			// An unvisited stroke pixel starts a new connected region
			CvPoint first_point = cvPoint(j,i);
			unsigned char first_point_value = ((uchar *)(temp->imageData + first_point.y * temp->widthStep))[first_point.x];
			if (first_point_value == 0)
			{
				set<FeaturePoint> feature_points_set;
				feature_points_set.clear();			
				list<CvPoint> point_queue;
				point_queue.clear();
				point_queue.push_back(first_point);
				// Breadth-first traversal of the current connected region
				while(!point_queue.empty())
				{
					// Take the first point of the queue
					CvPoint front_queue = point_queue.front();
					// Set this point to background pixel (mark as visited)
					((uchar *)(temp->imageData + front_queue.y * temp->widthStep))[front_queue.x] = 255;
					// Analyze this point against the ORIGINAL thinned image
					// (temp is mutated, so classification must not use it)
					int analyze_result = this->AnalysePoint(thinned_image,front_queue.x, front_queue.y);
					// If this is a fork point, create fork point object and push it to set
					if (analyze_result > 1)
					{
						FeaturePoint p;
						p.position = front_queue;
						p.Nb = analyze_result;
						feature_points_set.insert(p);
					}
					// If this is an end point, create end point object and push it to set
					else if (analyze_result == 1)
					{
						FeaturePoint p;
						p.position = front_queue;
						p.Nb = 1;
						feature_points_set.insert(p);
					}

					// Pop out this point from queue
					point_queue.pop_front();
					// Browse neighbour points and push them to queue
					for (int k = -1; k < 2; k++)
					{
						for (int l = -1; l < 2; l++)
						{
							// Skip the center pixel and out-of-bounds neighbours
							if (((k == 0) && (l == 0)) || (front_queue.y + k < 0) || (front_queue.y + k >= thinned_image->height) || (front_queue.x + l < 0) || (front_queue.x + l >= thinned_image->width))
							{
								continue;
							}
							else
							{
								unsigned char pixel_value = ((uchar *)(temp->imageData + (front_queue.y + k) * temp->widthStep))[front_queue.x + l] == 255 ? 0 : 1;
								// If this neighbour point is image point ( pixel_value == 1), push it to queue
								if (pixel_value == 1)
								{
									// Push neighbour point to queue
									CvPoint neighbour_point = cvPoint(front_queue.x + l, front_queue.y + k);
									point_queue.push_back(neighbour_point);
									// Set this point to background pixel immediately,
									// so the same pixel is never enqueued twice
									((uchar *)(temp->imageData + neighbour_point.y * temp->widthStep))[neighbour_point.x] = 255;
								}
							}
						}
					}

				}
			
				// Push currently obtained feature points set to list
				// (may be empty if the region contains no fork/end point)
				connected_feature_points_sets.push_back(feature_points_set);
			}
		}
	}
	cvReleaseImage(&temp);
	return;
}

// Find spurious fork points.
// A fork point is declared spurious when either (a) it lies closer than
// d1 = SE_THRESHOLD_D1 * w to an end point, or (b) it is within dh of an end
// point AND the skeleton branch joining them runs too far from the stroke
// contour (branch-width test below), suggesting the branch is a thinning
// artifact rather than a real stroke.
// @source_image: original image (used for distance-to-contour measurements)
// @thinned_image: thinned (skeleton) image; 0 = stroke pixel
// @connected_feature_points_sets: list of the set of feature points that is in the same connected region
// @S1: set of Spurious Candidate Fork Points returned
void AmbigousZoneExtractor::FindSpuriousCandidateForkPoints(IplImage* source_image, IplImage* thinned_image, const vector<set<FeaturePoint>> &connected_feature_points_sets, set<FeaturePoint> &S1)
{
	set<FeaturePoint>::iterator it;
	set<FeaturePoint>::iterator itp;
	set<FeaturePoint>::iterator ite;

	for (int i = 0; i< (int) connected_feature_points_sets.size(); i++)
	{
		set<FeaturePoint> current_feature_points_set = connected_feature_points_sets[i];
		// Set of fork points
		set<FeaturePoint> fork_points_set;
		// Set of end points
		set<FeaturePoint> end_points_set;
		// Split the region's feature points by Nb: fork (Nb > 1) vs end (Nb == 1)
		for(it = current_feature_points_set.begin(); it != current_feature_points_set.end(); it++)
		{
			FeaturePoint p = (*it);
			if (p.Nb > 1)
			{
				fork_points_set.insert(p);
			}
			else
			{
				end_points_set.insert(p);
			}
		}
		
		// BFS bookkeeping, reused across (end point, fork point) pairs
		set<FeaturePoint> visited;
		map<FeaturePoint,FeaturePoint> precedent;
		list<CvPoint> branch;

		// Test every (end point, fork point) pair of this connected region
		for(ite = end_points_set.begin(); ite != end_points_set.end(); ite++)
		{
			FeaturePoint currentEndPoint = (*ite);
			for (itp = fork_points_set.begin(); itp != fork_points_set.end(); itp++)
			{
				FeaturePoint currentForkPoint = (*itp);
				// Lb: straight-line distance used as a proxy for branch length
				double Lb = Tools::distance(currentEndPoint.position,currentForkPoint.position);
				// If length of branch satisfy condition: Lb < d1, d1 = 0.8 * w
				// Then push it all its 8-neighbour fork points to S1
				if (Lb < SE_THRESHOLD_D1 * this->estimated_stroke_width)
				{
					S1.insert(currentForkPoint);
				}
				// If length of branch satisfy condition: d1 <= Lb <= dh, d1 = 0.8 * w, dh = 2.0 * w
				else if (Lb <= SE_THRESHOLD_DH * this->estimated_stroke_width)
				{
					// Find a branch (pixel path on the skeleton) between
					// currentEndPoint and currentForkPoint via BFS
					visited.clear();
					precedent.clear();
					branch.clear();
					list<pair<FeaturePoint,FeaturePoint>> node_queue;
					node_queue.clear();
					// zero_point is a dummy predecessor for the BFS root
					FeaturePoint zero_point;
					node_queue.push_back(make_pair(currentEndPoint,zero_point));
					// Put currentEndPoint to visited nodes list
					visited.insert(currentEndPoint);
					while(!node_queue.empty())
					{
						// Get the front element of queue
						pair<FeaturePoint,FeaturePoint> front_queue = node_queue.front();
						// Pop out the front element of queue
						node_queue.pop_front();
						// Mark precedent point of this point
						precedent[front_queue.first] = front_queue.second;
						// If this point is the target fork point, stop and
						// reconstruct the branch by walking the precedent map back
						if (front_queue.first == currentForkPoint)
						{
							FeaturePoint temp = front_queue.first;
							do
							{
								branch.push_front(temp.position);
								temp = precedent[temp];
							}
							while (!(temp == currentEndPoint));
							branch.push_front(currentEndPoint.position);
							break;
						}
						// If this point is not the target fork point, continue to expand
						else
						{
							for (int k = -1; k < 2; k++)
							{
								for (int l = -1; l < 2; l++)
								{
									// Skip out-of-bounds neighbours
									if ((front_queue.first.position.y + k < 0) || (front_queue.first.position.x + l < 0) || (front_queue.first.position.y + k >= thinned_image->height) || (front_queue.first.position.x + l >= thinned_image->width) )
									{
										continue;
									}
									else
									{
										unsigned char pixel_value = ((uchar *)(thinned_image->imageData + (front_queue.first.position.y + k) * thinned_image->widthStep))[front_queue.first.position.x + l];
										// If current neighbour point is an image pixel, then put it to stack
										if (pixel_value == 0)
										{
											FeaturePoint neighbour_point;
											neighbour_point.position = cvPoint(front_queue.first.position.x + l, front_queue.first.position.y + k);
											// Search the visited points to verify that this neighbour point is visited or not
											// If this neighbour point is not visited
											if (visited.find(neighbour_point) == visited.end())
											{
												// Put it to queue and mark as visited point
												// Mark front_queue as precedent point of neighbour_point
												node_queue.push_back(make_pair(neighbour_point,front_queue.first));
												visited.insert(neighbour_point);
											}
										}
									}
								}
							}

						}
					}
					
					// For each pixel pi in branch: measure distance to the contour
					double max_dc = 0.0;
					double sum = 0.0;
					// NOTE(review): if the BFS failed to reach currentForkPoint,
					// branch stays empty and Np == 0, making the division below
					// undefined — presumably unreachable because both points lie
					// in the same connected region; verify against the caller.
					int Np = (int) branch.size();
					for (list<CvPoint>::iterator bi = branch.begin(); bi != branch.end(); bi++)
					{
						// Find shortest distance from pi to contour
						double dc = Tools::find_approximate_shortest_distance_to_contour(source_image,(*bi));
						if (max_dc < dc)
						{
							max_dc = dc;
						}
						sum+= dc;
					}
					sum = sum / Np;
					// Check second condition
					// If SUM(dc(pi)/Np) > 0.6 * w OR MAX(dc(pi)) > w
					// Then this fork point is a spurious candidate fork point
					// Push this fork point to S1
					if ((sum > SE_THRESHOLD_DC * this->estimated_stroke_width) || (max_dc > this->estimated_stroke_width))
					{
						S1.insert(currentForkPoint);
					}

				}
			}
		}
	}
	return;
}

// Analyse a point in the thinned image to find out whether it is a fork
// point, an end point or a normal point, using the crossing number Nc and
// the neighbour count Nb of its 8-neighbourhood.
// @thinned_image: thinned (skeleton) image; 255 = background, other = stroke
// @x, @y: coordinates of the pixel to analyse
// Return 1: End point (exactly one stroke neighbour)
// Return > 1: Fork point. Value returned is Nb
// Return 0: Normal point
int AmbigousZoneExtractor::AnalysePoint(IplImage* thinned_image, int x, int y)
{
	// 3x3 binary mask of the 8-neighbourhood: 1 = stroke pixel, 0 = background.
	// Out-of-bounds neighbours are treated as background.
	unsigned char mask[3][3];

	// Fill mask value
	for (int k = -1; k < 2; k++)
	{
		for (int l = -1; l < 2; l++)
		{
			if ((y + k < 0) || (y + k >= thinned_image->height) || (x + l < 0) || (x + l >= thinned_image->width))
			{
				mask[k+1][l+1] = 0;
			}
			else
			{
				unsigned char neighbour_pixel_value = (((uchar *)(thinned_image->imageData + (y + k) * thinned_image->widthStep))[x + l] == 255) ? 0 : 1;
				mask[k+1][l+1] = neighbour_pixel_value;
			}
		}
	}

	// Nc: number of 0/1 transitions walking once around the 8-neighbourhood
	// (each transition counted twice, hence the division by 2)
	// Nb: number of stroke pixels among the 8 neighbours
	int Nc = 0; 
	int Nb = 0;
	Nc =  abs(mask[0][1] - mask[0][0]) + abs(mask[0][2] - mask[0][1]) + abs(mask[1][2] - mask[0][2]) + abs(mask[2][2] - mask[1][2])
		+ abs(mask[2][1] - mask[2][2]) + abs(mask[2][0] - mask[2][1]) + abs(mask[1][0] - mask[2][0]) + abs(mask[0][0] - mask[1][0]);
	Nc = Nc / 2;
	Nb = mask[0][0] + mask[0][1] + mask[0][2] + mask[1][2] + mask[2][2] + mask[2][1] + mask[2][0] + mask[1][0];

	// Three or more branches meet here, or the neighbourhood is dense: fork point
	if ((Nc >= 3) || (Nb >= 4))
	{
		return Nb;
	}
	// Exactly one neighbour: end of a stroke
	else if (Nb == 1)
	{
		return 1;
	}
	return 0;
}

// Visualyze feature points
// @image_name: name of output image file 
// @thinned_image: thinned image 
// @connected_feature_points_sets: list of the set of feature points that is in the same connected region
// @S1: set of spurious candidate fork points
void AmbigousZoneExtractor::VisualyzeFeaturePoint(const char* image_name, IplImage* thinned_image,const vector<set<FeaturePoint>> &connected_feature_points_sets, const set<FeaturePoint> &S1)
{
	IplImage* img = cvCreateImage(cvSize(thinned_image->width,thinned_image->height),IPL_DEPTH_8U,3);
	for (int i = 0; i< (int) thinned_image->height; i++)
	{
		for (int j = 0; j < (int) thinned_image->width; j++)
		{
			unsigned char pixel_value = ((uchar *)(thinned_image->imageData + i*thinned_image->widthStep))[j];
			((uchar *)(img->imageData + i*img->widthStep))[j*img->nChannels + 0] = pixel_value; // B
			((uchar *)(img->imageData + i*img->widthStep))[j*img->nChannels + 1] = pixel_value; // G
			((uchar *)(img->imageData + i*img->widthStep))[j*img->nChannels + 2] = pixel_value; // R
		}
	}
	
	// Draw feature points
	set<FeaturePoint>::iterator it;
	for (int i = 0; i< (int) connected_feature_points_sets.size(); i++)
	{
		set<FeaturePoint> current_feature_points_set = connected_feature_points_sets[i];
		for(it = current_feature_points_set.begin(); it != current_feature_points_set.end(); it++)
		{
			FeaturePoint feature_point = (*it);
			// If this is a fork point, draw a red point
			if (feature_point.Nb > 1)
			{
				((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 0] = 0; // B
				((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 1] = 0; // G
				((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 2] = 255; // R
			}
			// If this is an end point, draw a green point
			else
			{
				((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 0] = 0; // B
				((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 1] = 255; // G
				((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 2] = 0; // R
			}
		}
	}
	
	// Draw spurious candidate fork points
	for(it = S1.begin(); it!= S1.end(); it++)
	{
		FeaturePoint feature_point = (*it);
		// Draw a blue point for spurious candidate fork point
		((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 0] = 255; // B
		((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 1] = 0; // G
		((uchar *)(img->imageData + feature_point.position.y * img->widthStep))[feature_point.position.x * img->nChannels + 2] = 0; // R
	}
	// Save image to file
	cvSaveImage(image_name,img);
	// Release image to save memory
	cvReleaseImage(&img);
}

// Detect ambigous zones.
// Loads the source image, extracts its contours as chain codes, and for each
// zone center in Sa builds the polygon of the surrounding ambiguous zone,
// paints it into the returned map image and records it as a StrokeNode.
// @input_filename: name of the source image file
// @stroke_graph: Output graph of sub-strokes and ambigous zones
// @Sa: groups of fork points; each group holds the single center point of one zone
// Return: Ambigous zones map (caller releases), or NULL if the image cannot be loaded
IplImage* AmbigousZoneExtractor::DetectAmbigousZones(const char* input_filename, vector<StrokeNode*> &stroke_graph, const vector<set<FeaturePoint>> &Sa)
{
	stroke_graph.clear();

	IplImage* source_image = cvLoadImage(input_filename,CV_LOAD_IMAGE_GRAYSCALE);
	if (!source_image)
	{
		cerr << "Couldn't open the image file" << endl;
		return NULL;
	}	

	IplImage *img_edge = cvCreateImage(cvSize(source_image->width,source_image->height),source_image->depth,source_image->nChannels);

	CvMemStorage* storage = cvCreateMemStorage(); 
	
	CvChain* first_contour = NULL;

	// Invert the image (cvFindContours expects white objects on black background)
	cvThreshold(source_image, img_edge, 100, 255, CV_THRESH_BINARY_INV);

	cvFindContours(
		img_edge,
		storage,
		(CvSeq**)&first_contour,
		sizeof(CvChain),
		CV_RETR_LIST,
		CV_CHAIN_CODE);
	
	// Convert each chain-coded contour into a ContourChain of points
	vector<ContourChain*> contour_list;
	contour_list.clear();

	for( CvChain* c=first_contour; c!=NULL; c=(CvChain*)c->h_next )
	{
		int i, total = c->total;
		if (total > 0)
		{
			// BUG FIX: allocate only for non-empty chains; the previous
			// version leaked the ContourChain when total == 0.
			ContourChain *contour = new ContourChain();
			CvChainPtReader reader;
			cvStartReadChainPoints( c, &reader);
			for( i = 0; i < total; i++ )
			{
				CvPoint point;
				CV_READ_CHAIN_POINT(point, reader);
				contour->insert(point);
			}
			contour_list.push_back(contour);
		}
	}
	// Release img_edge to save memory
	cvReleaseImage(&img_edge);

	// Clear memory storage
	for( CvChain* c=first_contour; c!=NULL; c=(CvChain*)c->h_next )
	{
		cvClearSeq((CvSeq*)c);
	}
	cvClearMemStorage(storage);
	cvReleaseMemStorage(&storage);

	// des: colour copy of the source image into which the zones are painted
	IplImage* des = cvCreateImage(cvSize(source_image->width, source_image->height),8,3);
	cvCvtColor(source_image, des,CV_GRAY2BGR);
	
	// Release source_image to save memory
	cvReleaseImage(&source_image);

	// For each element e in Sa
	for (int i = 0; i< (int) Sa.size(); i++)
	{
		// Initialize color index for each ambigous zone (used as fill colour and id)
		int colorindex = i;
		// Each group in Sa holds exactly one point: the zone's center
		CvPoint centerPoint = (*Sa[i].begin()).position;

		// Build Sc set:
		// Sc = {c | d(pc, c) < dr}
		// dr = 1.4 w
		double dr =  SE_THRESHOLD_DR * this->estimated_stroke_width;
		
		set<ContourPoint*> Sc;
		// Iterate all contour points and retrieve those close to the center
		for (int ci = 0; ci < (int) contour_list.size(); ci++)
		{
			ContourChain* currentContour = contour_list[ci];
			// Traverse all points in current contour (circular linked list)
			ContourPoint* currentPoint = currentContour->getHead();
			do
			{
				double d = Tools::distance(centerPoint, currentPoint->position);
				if (d < dr)
				{
					Sc.insert(currentPoint);
				}
				currentPoint = currentPoint->next;
			}
			while(currentPoint != currentContour->getHead());
		}
		
		// Sz is the set that contains polygon vertices that define the ambigous zone
		set<ContourPoint*> Sz;
		
		// Repeatedly pick the contour point closest to the center as a polygon
		// vertex, then remove its whole local contour segment from Sc
		while (!Sc.empty())
		{
			// Find pm
			// dm = 1.1w
			double dm = (double) SE_THRESHOLD_DM * this->estimated_stroke_width;
			double dmin = 10000.0;
			ContourPoint* pm = NULL;

			for (set<ContourPoint*>::iterator itSc = Sc.begin(); itSc != Sc.end(); itSc++)
			{
				ContourPoint* currentPoint = (*itSc);
				double d = Tools::distance(centerPoint, currentPoint->position);
				if (dmin > d)
				{
					dmin = d;
					pm = currentPoint;
				}
			}

			if (dmin < dm)
			{
				Sz.insert(pm);
				// Lth: half-length (in pixels) of the contour segment around pm
				// that is considered to belong to the same stroke side
				int Lth = 0;
				double sin_theta = 0.0;
				double cos_theta = 0.0;
				if (dmin <= this->estimated_stroke_width * 1.0 / 2)
				{
					sin_theta = 1.0;
					cos_theta = 0.0;
				}
				else
				{
					sin_theta = (this->estimated_stroke_width * 1.0) / (2 * dmin);
					cos_theta = -1 * sqrt(4.0 * dmin * dmin - this->estimated_stroke_width * this->estimated_stroke_width * 1.0) / (2 * dmin);
				}

				Lth = (int) floor(dmin * cos_theta + sqrt(dm * dm - dmin * dmin * sin_theta * sin_theta) + 0.5) ;

				// CS is the set of contour points that is a segment from Backward(pm,Lth) and Forward(pm, Lth)
				set<ContourPoint*> CS;
				pm->owner->getSegment(pm->owner->backward(pm,Lth), pm->owner->forward(pm,Lth),CS);

				// Erase all elements from Sc that contains in CS
				// Sc = Sc - CS
				for (set<ContourPoint*>::iterator itCS = CS.begin(); itCS != CS.end(); itCS++)
				{
					set<ContourPoint*>::iterator it;
					it = Sc.find(*itCS);
					if ( it != Sc.end())
					{
						Sc.erase(it);
					}
				}
			}
			else
			{
				// Closest remaining point is already too far: stop collecting vertices
				break;
			}
		}

		// Corner test: a zone with exactly 3 vertices may just be a rounded
		// corner of a single stroke; compare each contour segment length with
		// the length of the arc a corner of that chord/height would produce
		double Cth = SE_THRESHOLD_CTH;
		bool corner_condition = false;
		if (Sz.size() == 3)
		{
			set<ContourPoint*>::iterator itSz = Sz.begin();
			ContourPoint* v0 = (*itSz);
			itSz++;
			ContourPoint* v1 = (*itSz);
			itSz++;
			ContourPoint* v2 = (*itSz);

			set<ContourPoint*> CS;
			double LCS = 0.0;
			
			CS.clear();
			if (v0->owner == v1->owner)
			{
				if (!corner_condition)
				{
					// Retrieve contour segment between v0 and v1
					v0->owner->getSegment(v0, v1,CS);
					LCS = (double) CS.size();
					// Calculate d0: maximal distance from the segment to chord (v0,v1)
					double d0 = 0.0;
					for (set<ContourPoint*>::iterator itCS = CS.begin(); itCS != CS.end(); itCS++)
					{
						double distance = Tools::distance((*itCS)->position,v0->position,v1->position);
						if (d0 < distance)
						{
							d0 = distance;
						}
					}

					double d1 = Tools::distance(v0->position, v1->position) / 2;

					// BUG FIX: guard against d0 == 0 (flat segment); the arc-length
					// formula divides by d0 and previously produced NaN, which made
					// the comparison silently false — skip explicitly instead
					if (d0 > 0)
					{
						double Larc = 2*(d0 + (d1* d1) / d0) * atan(d0 / d1);
						corner_condition = (abs(LCS - Larc) / Larc) < Cth;
					}
				}
			}
			
			CS.clear();
			if (v0->owner == v2->owner)
			{
				if (!corner_condition)
				{
					// Retrieve contour segment between v0 and v2
					v0->owner->getSegment(v0, v2,CS);
					LCS = (double) CS.size();
					// Calculate d0
					double d0 = 0.0;
					for (set<ContourPoint*>::iterator itCS = CS.begin(); itCS != CS.end(); itCS++)
					{
						double distance = Tools::distance((*itCS)->position,v0->position,v2->position);
						if (d0 < distance)
						{
							d0 = distance;
						}
					}

					double d1 = Tools::distance(v0->position, v2->position) / 2;

					// Same d0 == 0 guard as above
					if (d0 > 0)
					{
						double Larc = 2*(d0 + (d1* d1) / d0) * atan(d0 / d1);
						corner_condition = (abs(LCS - Larc) / Larc) < Cth;
					}
				}
			}
			
			CS.clear();
			if (v1->owner == v2->owner)
			{
				if (!corner_condition)
				{
					// Retrieve contour segment between v1 and v2
					v1->owner->getSegment(v1, v2,CS);
					LCS = (double) CS.size();
					// Calculate d0
					double d0 = 0.0;
					for (set<ContourPoint*>::iterator itCS = CS.begin(); itCS != CS.end(); itCS++)
					{
						double distance = Tools::distance((*itCS)->position,v1->position,v2->position);
						if (d0 < distance)
						{
							d0 = distance;
						}
					}

					double d1 = Tools::distance(v1->position, v2->position) / 2;

					// Same d0 == 0 guard as above
					if (d0 > 0)
					{
						double Larc = 2*(d0 + (d1* d1) / d0) * atan(d0 / d1);
						corner_condition = (abs(LCS - Larc) / Larc) < Cth;
					}
				}
			}
		}

		// A real ambiguous zone needs at least 3 vertices and must not be a corner
		if ((Sz.size() >= 3) && !corner_condition)
		{
			// Derive a distinct BGR fill colour from the zone index by slicing
			// the packed integer into its three low-order bytes
			unsigned int packed_color = (unsigned int) ((colorindex + 1) * 50);
			unsigned char r;
			unsigned char g;
			unsigned char b;
			r = (unsigned char) (((unsigned int)(packed_color << 24)) >> 24);
			g = (unsigned char) (((unsigned int)(packed_color << 16)) >> 24);
			b = (unsigned char) (((unsigned int)(packed_color << 8)) >> 24);
			CvScalar zone_color = cvScalar(b * 1.0,g * 1.0,r* 1.0);
			
			vector<CvPoint> points;
			points.clear();
			for (set<ContourPoint*>::iterator itSz = Sz.begin(); itSz != Sz.end(); itSz++)
			{
				points.push_back((*itSz)->position);
			}
			
			// Compute the convex hull of the vertices and fill it into the map
			CvPoint* curves[1];
			CvPoint* result;
			vector<CvPoint> zone_polygon_points;
			int nCurvePts[1];
			GrahamAlgorithm graham_algo;
			result = graham_algo.compute(points, nCurvePts[0], zone_polygon_points);
			curves[0] = result;
			cvFillPoly(des,curves,nCurvePts,1,zone_color);

			// Record the zone as an ambiguous-zone node of the stroke graph
			StrokeNode* ambigous_zone_node = new StrokeNode();
			ambigous_zone_node->elements = Tools::get_substroke_elements(result[0],des,zone_color);
			ambigous_zone_node->polygon_vertex = zone_polygon_points;
			ambigous_zone_node->isAmbigousZone = true;
			ambigous_zone_node->id = colorindex;
			stroke_graph.push_back(ambigous_zone_node);
			delete [] result;
		}
	}	 

	// Release the contour chains built above
	for (int i = 0; i< (int) contour_list.size(); i++)
	{
		delete contour_list[i];
	} 
	return des;
}