import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.Console;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.Vector;

import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
import org.apache.lucene.document.Document;

import clustering_algorithms.DummyData;
import clustering_algorithms.k_means;
import clustering_algorithms.k_means_improved;
import clustering_algorithms.k_means_plus;

public class GoodSearchEngine {

	// Development-mode flag (kept as Boolean for compatibility with any external readers).
	static Boolean in_dev_mode = true;

	/**
	 * Prints purity and Rand-index scores for a clustering result against the
	 * per-document ground-truth classes.
	 *
	 * @param clustering_results map from cluster id to the set of document ids in that cluster
	 * @param classes_list       ground-truth class id per document, indexed by document id
	 */
	public static void EvaluateClassification(HashMap<Integer, HashSet<Integer>> clustering_results, int[] classes_list)
	{
		// Parallel lists in cluster-iteration order for the pairwise Rand-index
		// computation (position i describes the same document in both lists).
		ArrayList<Integer> cluster_actual = new ArrayList<Integer>();
		ArrayList<Integer> class_gt = new ArrayList<Integer>();

		for (int cluster_id : clustering_results.keySet())
		{
			for (int doc_id : clustering_results.get(cluster_id))
			{
				cluster_actual.add(cluster_id);
				class_gt.add(classes_list[doc_id]);
			}
		}

		// BUG FIX: CalculatePurity indexes its second argument by document id,
		// but the old code passed class_gt, which is ordered by cluster
		// iteration, not by document id. Build a doc-id-indexed list instead.
		ArrayList<Integer> class_by_doc_id = new ArrayList<Integer>(classes_list.length);
		for (int gt_class : classes_list)
		{
			class_by_doc_id.add(gt_class);
		}

		float purity = CalculatePurity(clustering_results, class_by_doc_id);
		float rand_index = CalcRandIndex(class_gt, cluster_actual);
		System.out.println("purity:" + purity + " rand_index:" + rand_index);
	}

	/**
	 * Sums, over all clusters, the fraction of each cluster's documents that
	 * belong to the cluster's most frequent ground-truth class.
	 *
	 * NOTE(review): this returns the SUM of per-cluster purities, so it can
	 * exceed 1.0; it is not size-weighted standard purity. Behavior kept as-is
	 * because callers print it directly.
	 *
	 * @param clustering_results map from cluster id to the document ids it holds
	 * @param class_gt           ground-truth class per document, indexed by document id
	 * @return sum of per-cluster dominant-class fractions
	 */
	public static float CalculatePurity(HashMap<Integer, HashSet<Integer>> clustering_results, ArrayList<Integer> class_gt)
	{
		float purity = 0;
		for (int cluster_id : clustering_results.keySet())
		{
			// Frequency of each ground-truth class inside this cluster.
			HashMap<Integer, Integer> class_freq_in_docs = new HashMap<Integer, Integer>();
			int max_freq_gt_class = 0;
			for (int doc_id : clustering_results.get(cluster_id))
			{
				Integer gt_class = class_gt.get(doc_id);
				Integer temp = class_freq_in_docs.get(gt_class);
				int current_freq = (temp == null) ? 0 : temp;
				++current_freq;
				class_freq_in_docs.put(gt_class, current_freq);

				if (max_freq_gt_class < current_freq)
				{
					max_freq_gt_class = current_freq;
				}
			}
			int number_item_in_cluster = clustering_results.get(cluster_id).size();
			float class_purity = (float) max_freq_gt_class / (float) number_item_in_cluster;
			System.out.println("cluster:" + cluster_id + " class_purity " + class_purity);
			purity += class_purity;
		}
		return purity;
	}

	/**
	 * Rand index over all item pairs: (TP + TN) / (TP + FP + FN + TN), where a
	 * pair counts as TP when the two items share both a cluster and a class,
	 * and TN when they share neither.
	 *
	 * @param class_gt       ground-truth class per position
	 * @param cluster_actual assigned cluster per position (parallel to class_gt)
	 * @return Rand index in [0, 1]; NaN when fewer than two items are given
	 */
	public static float CalcRandIndex(ArrayList<Integer> class_gt, ArrayList<Integer> cluster_actual)
	{
		float TP = 0, FP = 0, TN = 0, FN = 0;
		for (int i = 0; i < cluster_actual.size(); ++i)
		{
			for (int j = i + 1; j < cluster_actual.size(); ++j)
			{
				// BUG FIX: the original compared boxed Integers with '==',
				// which tests reference identity and silently misclassifies
				// pairs with ids outside the Integer cache (-128..127).
				boolean is_same_cluster = cluster_actual.get(i).equals(cluster_actual.get(j));
				boolean is_same_class = class_gt.get(i).equals(class_gt.get(j));

				if (is_same_cluster && is_same_class)
				{
					TP++;
				}
				else if (!is_same_cluster && is_same_class)
				{
					FN++;
				}
				else if (is_same_cluster && !is_same_class)
				{
					FP++;
				}
				else
				{
					TN++;
				}
			}
		}

		return (TP + TN) / (TP + FP + FN + TN);
	}

	/**
	 * Smoke test: runs basic k-means over a generated dummy data set.
	 * The result is discarded; this only verifies the pipeline runs end to end.
	 */
	public static void TestKMeans() throws IOException
	{
		DummyData dummy_data = new DummyData();
		ArrayList<HashMap<Integer, Float>> vectors_to_cluster = dummy_data.getDummyData(2);
		k_means clustering_algorithm = new k_means(vectors_to_cluster, 10, 0);
		clustering_algorithm.DoTheClustering();
	}

	/**
	 * Entry point. Reads a parameter file (first program argument), builds or
	 * loads a tf-idf document database, clusters it with the configured
	 * k-means variant, optionally writes the clustering to a CSV file, and
	 * prints evaluation metrics.
	 */
	public static void main(String[] args) {
		try {
			// get the parameter file name from the argument:
			String parameter_file_name = args[0];
			System.out.println("Working Directory = " + System.getProperty("user.dir"));
			ParametersHelper params = new ParametersHelper(parameter_file_name);
			Integer k = Integer.parseInt(params.getParametersMap().get("k"));
			String retrievalAlgorithm = params.getParametersMap().get("retrievalAlgorithm");

			boolean should_load_data = false;
			if (params.getParametersMap().containsKey("SerializedMethod"))
			{
				should_load_data = params.getParametersMap().get("SerializedMethod").equals("load");
			}

			ProcessedDb tfIdfAndGT = new ProcessedDb();
			if (should_load_data && params.getParametersMap().containsKey("SerializedFileName"))
			{
				// Load a previously serialized tf-idf database and dump its vocabulary.
				String serializedobjectFileName = params.getParametersMap().get("SerializedFileName");
				tfIdfAndGT.desriazlie(serializedobjectFileName);
				// try-with-resources so the file is closed even if a write fails
				// (the original leaked the stream on exception).
				try (BufferedWriter word_writer = new BufferedWriter(
						new OutputStreamWriter(new FileOutputStream("AllWords.csv"), "utf-8")))
				{
					for (String term : tfIdfAndGT.m_term_id_to_term.values())
					{
						word_writer.write(term + ",\n");
					}
				}
			}
			else
			{
				// Parse the raw document directory and build the tf-idf matrix from scratch.
				String output_docs_file_name = "files2.txt";
				ParserWithDocsId docs_reader = new ParserWithDocsId("data", "docIDs.txt", output_docs_file_name);
				docs_reader.ParseDirectoryFiles();
				String doc_file_name = output_docs_file_name;

				DocsDatabase db = new DocsDatabase(params.StopWordList, params.mIs_using_improved_algorithm);
				db.Init(doc_file_name);
				ArrayList<HashMap<Integer, Float>> tf_idf_matrix = db.createAndGetTfIdfMatrix();

				int clustr_list[] = db.getDocIdToGTClusterId().clone();
				int doc_ids[] = db.getDocsId().clone();

				tfIdfAndGT = new ProcessedDb(tf_idf_matrix, clustr_list, db.m_termid_to_frquenecy, db.m_term_to_id, db.m_term_id_to_term, doc_ids);
				db = null; // drop the large intermediate database so it can be collected
				if (!should_load_data && params.getParametersMap().containsKey("SerializedFileName"))
				{
					tfIdfAndGT.Serialize(params.getParametersMap().get("SerializedFileName"));
				}
			}

			// Select the clustering variant. BUG FIX: the original left
			// 'algorithm' null for an unrecognized value and later crashed
			// with an uninformative NullPointerException.
			k_means algorithm;
			if (retrievalAlgorithm.equals("basic"))
			{
				algorithm = new k_means(tfIdfAndGT.m_tfIdfList, k, 0);
			}
			else if (retrievalAlgorithm.equals("basic++"))
			{
				algorithm = new k_means_plus(tfIdfAndGT.m_tfIdfList, k, 0);
			}
			else if (retrievalAlgorithm.equals("improved"))
			{
				algorithm = new k_means_improved(tfIdfAndGT.m_tfIdfList, k, 0, tfIdfAndGT.m_termid_to_frquenecy);
			}
			else
			{
				throw new IllegalArgumentException("unknown retrievalAlgorithm: " + retrievalAlgorithm);
			}

			HashMap<Integer, HashSet<Integer>> res = algorithm.DoTheClustering();

			if (params.getParametersMap().containsKey("outputFile"))
			{
				params.in_comparing_mode = false;
				String outputFile = params.getParametersMap().get("outputFile");

				// Map external doc id -> assigned cluster id, sorted by external
				// id (TreeMap) so the output file is deterministic.
				TreeMap<Integer, Integer> cluster_list = new TreeMap<Integer, Integer>();
				for (Entry<Integer, HashSet<Integer>> cluster_entry : res.entrySet())
				{
					for (Integer doc_id : cluster_entry.getValue())
					{
						cluster_list.put(tfIdfAndGT.m_doc_ids_list[doc_id], cluster_entry.getKey());
					}
				}

				// try-with-resources: the original leaked the writer on exception.
				try (Writer writer = new BufferedWriter(
						new OutputStreamWriter(new FileOutputStream(outputFile), "utf-8")))
				{
					for (Entry<Integer, Integer> row : cluster_list.entrySet())
					{
						writer.write(row.getKey().toString() + "," + row.getValue().toString() + "\n");
					}
				}
			}

			EvaluateClassification(res, tfIdfAndGT.m_classes_list);
			System.out.println("done");

		} catch (Exception e) {
			// printStackTrace already includes the message; the original's extra
			// println(e.getMessage()) was redundant.
			e.printStackTrace();
		}
	}

}