/*37218898_306040569 Lihi_Dery_Luba_Golosman*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map.Entry;




/**
 * Clusters a collection of documents into Main.NUM_CLUSTERS clusters using the
 * EM algorithm over a mixture of unigram models, with Lidstone (lambda)
 * smoothing and the standard log-space / max-shift trick to avoid numeric
 * underflow when exponentiating log-probability terms.
 *
 * Typical usage: construct, call calculateExpectation() to run EM to
 * convergence, then query getHistogram() and getAccuracy().
 */
public class EMAlgorithm {

	private ArrayList<Document> documents;
	private Document corpus; // all words of all documents, filtered
	private int vocabularySize;
	// Underflow cutoff: exponents more than K below the max contribute ~0 and are dropped.
	private final int K = 10;
	// Lidstone smoothing constant used for every P(Wk|Xi) estimate.
	private final double LAMBDA = 0.6;
	// Floor for alpha_i = P(Xi) so that log(alpha_i) never becomes -infinity.
	private final double EPSILON = 0.0000001;

	// P(Wk|Xi): key is the word Wk; the Event holds one probability per cluster i.
	private HashMap<String, Event> wordProb_current = new HashMap<String, Event>();
	// P(Xi): prior probability of each cluster.
	private double[] clusterProb_current = new double[Main.NUM_CLUSTERS];

	// Wti computed in the E step: responsibility of cluster i for document t.
	private double[][] clusterProbInDocument;

	// Initial hard partition of the corpus, filled round-robin by document index.
	private Document[] clusters;

	// Hard assignment: index of the most probable cluster for each document.
	// (int[] instead of Integer[] so no entry can be left null.)
	private int[] documentCluster;

	private ArrayList<String> topics;
	// Confusion matrix M[cluster][topic]: label counts of the documents in each cluster.
	private int[][] m;

	// The dominant (most frequent) topic in each cluster; defaults to topic 0.
	private int[] clusterToTopic;

	/**
	 * @param documents the documents to cluster (word lists already filtered)
	 * @param corpus    a single Document holding every word of every document
	 * @param topics    the gold-standard topic labels, indexed by position
	 */
	public EMAlgorithm(ArrayList<Document> documents, Document corpus, ArrayList<String> topics) {
		this.documents = documents;
		this.corpus = corpus;
		clusterProbInDocument = new double[documents.size()][Main.NUM_CLUSTERS];
		clusters = new Document[Main.NUM_CLUSTERS];
		this.topics = topics;
		analyzeCorpus();
	}

	// Count word occurrences per document and over the whole corpus.
	private void analyzeCorpus() {
		for (Document doc : documents) {
			doc.InitializeEventMap();
		}
		corpus.InitializeEventMap();
		vocabularySize = corpus.getEventMap().size();
	}

	/**
	 * Runs EM until the log-likelihood improvement drops below 1e-5.
	 * Prints the log-likelihood after every iteration.
	 */
	public void calculateExpectation() throws Exception {
		initializeEM();
		double delta = 0;
		double previousLikelihood = 0;
		double currentLikelihood = 0;
		do {
			stageE();
			stageM();
			previousLikelihood = currentLikelihood;
			currentLikelihood = computeLikelihood();
			// On the very first iteration there is no previous value to diff against.
			if (previousLikelihood != 0)
				delta = currentLikelihood - previousLikelihood;
			else
				delta = Math.abs(currentLikelihood);
			System.out.println(currentLikelihood);
		} while (delta > 0.00001);
	}

	/**
	 * Initial split and initial parameter estimates: documents are dealt
	 * round-robin into the clusters, P(Xi) starts uniform, and P(Wk|Xi) is the
	 * lambda-smoothed relative frequency of word k inside cluster i.
	 */
	private void initializeEM() {
		// Pre-allocate every cluster so none is left null even when there are
		// fewer documents than clusters (lazy creation used to NPE below).
		for (int j = 0; j < clusters.length; j++) {
			clusters[j] = new Document();
		}
		// Round-robin assignment of documents to clusters.
		int i = 0;
		for (Document doc : documents) {
			clusters[i % Main.NUM_CLUSTERS].getWords().addAll(doc.getWords());
			i++;
		}
		// P(Xi) starts uniform: 1 / number of clusters.
		for (int j = 0; j < clusterProb_current.length; j++) {
			clusterProb_current[j] = 1D / Main.NUM_CLUSTERS;
		}
		// P(Wk|Xi): smoothed count of word k in cluster j over the cluster size.
		for (int j = 0; j < clusters.length; j++) {
			clusters[j].InitializeEventMap();
			for (Entry<String, Integer> entry : corpus.getEventMap().entrySet()) {
				double pki = (double) (clusters[j].getNumAccrued(entry.getKey()) + LAMBDA)
						/ ((double) clusters[j].getNumWords() + (double) vocabularySize * LAMBDA);
				Event event = wordProb_current.get(entry.getKey());
				if (event == null) {
					event = new Event();
					wordProb_current.put(entry.getKey(), event);
				}
				event.updateClusterProp(j, pki);
			}
		}
	}

	/** @return n_tk: number of occurrences of word k in document t (0 if absent). */
	private double get_ntk(int t, String k) {
		Integer count = documents.get(t).getEventMap().get(k);
		return count == null ? 0 : count;
	}

	/** @return n_t: total number of words in document t. */
	private double get_nt(int t) {
		return documents.get(t).getNumWords();
	}

	/**
	 * E step: computes Wti = P(cluster i | document t) for every pair using
	 * z_i = ln(alpha_i) + sum_k n_tk * ln(P(Wk|Xi)), with the max-shift trick:
	 * terms whose exponent is more than K below the max are treated as zero.
	 */
	private void stageE() {
		for (int t = 0; t < documents.size(); t++) {
			double[] z = new double[Main.NUM_CLUSTERS];
			for (int i = 0; i < z.length; i++) {
				z[i] = computeZi(t, i);
			}
			double max = findMax(z);
			// The denominator does not depend on i — compute it once per document.
			double denominator = 0;
			for (int j = 0; j < z.length; j++) {
				if (z[j] - max >= -K)
					denominator += Math.exp(z[j] - max);
			}
			for (int i = 0; i < z.length; i++) {
				if (z[i] - max < -K)
					clusterProbInDocument[t][i] = 0;
				else
					clusterProbInDocument[t][i] = Math.exp(z[i] - max) / denominator;
			}
		}
	}

	// z_i = ln(alpha_i) + sum over the words k of document t of n_tk * ln(P(Wk|Xi)).
	private double computeZi(int t, int i) {
		Document doc = documents.get(t);
		double zi = Math.log(clusterProb_current[i]);
		for (String word : doc.getEventMap().keySet()) {
			zi += get_ntk(t, word) * Math.log(wordProb_current.get(word).getClusterProb(i));
		}
		return zi;
	}

	/** @return the largest value in z (z always has NUM_CLUSTERS >= 1 entries). */
	private double findMax(double[] z) {
		double max = z[0];
		for (int i = 1; i < z.length; i++) {
			if (z[i] > max)
				max = z[i];
		}
		return max;
	}

	/**
	 * Log-likelihood of the corpus under the current parameters:
	 * sum over documents of ln(sum_i e^{z_i}), computed with the same
	 * max-shift underflow trick as the E step.
	 */
	private double computeLikelihood() {
		double likelihood = 0;
		for (int t = 0; t < documents.size(); t++) {
			double[] z = new double[Main.NUM_CLUSTERS];
			for (int i = 0; i < z.length; i++)
				z[i] = computeZi(t, i);
			double max = findMax(z);
			double sum = 0;
			for (int i = 0; i < z.length; i++) {
				if (z[i] - max >= -K)
					sum += Math.exp(z[i] - max);
			}
			likelihood += max + Math.log(sum);
		}
		return likelihood;
	}

	/**
	 * M step: re-estimates alpha_i = P(Xi) and Pik = P(Wk|Xi) from the
	 * responsibilities Wti computed in the E step.
	 */
	private void stageM() {
		updateAlpha();
		updatePik();
	}

	// alpha_i = (sum over documents of Wti) / number of documents, floored at EPSILON.
	private void updateAlpha() {
		for (int i = 0; i < clusterProb_current.length; i++) {
			double sumWti = 0;
			for (int t = 0; t < documents.size(); t++) {
				sumWti += clusterProbInDocument[t][i];
			}
			double alpha = sumWti / (double) documents.size();
			// Keep alpha strictly positive so ln(alpha) stays finite in the E step.
			if (alpha == 0)
				alpha = EPSILON;
			clusterProb_current[i] = alpha;
		}
	}

	// Pik = (sum_t Wti*n_tk + lambda) / (sum_t Wti*n_t + |V|*lambda).
	private void updatePik() {
		for (int i = 0; i < Main.NUM_CLUSTERS; i++) {
			// Hoisted: sum_t Wti * n_t does not depend on the word k.
			double denominatorSum = 0;
			for (int t = 0; t < documents.size(); t++) {
				denominatorSum += clusterProbInDocument[t][i] * get_nt(t);
			}
			for (Entry<String, Integer> entry : corpus.getEventMap().entrySet()) {
				String word = entry.getKey();
				double numeratorSum = 0;
				for (int t = 0; t < documents.size(); t++) {
					numeratorSum += clusterProbInDocument[t][i] * get_ntk(t, word);
				}
				double pik = (numeratorSum + LAMBDA) / (denominatorSum + (double) vocabularySize * LAMBDA);
				Event event = wordProb_current.get(word);
				if (event == null) {
					event = new Event();
					wordProb_current.put(word, event);
				}
				event.updateClusterProp(i, pik);
			}
		}
	}

	// Hard-assigns every document to its most probable cluster (argmax over Wti,
	// first maximum wins). Defaulting to cluster 0 also covers a degenerate
	// all-zero row, which previously left a null entry and caused an NPE later.
	private void calculateDocumentClusters() {
		documentCluster = new int[documents.size()];
		for (int t = 0; t < documents.size(); t++) {
			int best = 0;
			double max = clusterProbInDocument[t][0];
			for (int i = 1; i < Main.NUM_CLUSTERS; i++) {
				if (clusterProbInDocument[t][i] > max) {
					max = clusterProbInDocument[t][i];
					best = i;
				}
			}
			documentCluster[t] = best;
		}
	}

	/**
	 * Builds the cluster/topic confusion matrix M[cluster][topic]: how many
	 * documents assigned to each cluster carry each gold topic label.
	 * Also computes the dominant topic of every cluster as a side effect.
	 *
	 * @return the confusion matrix
	 */
	public int[][] getHistogram() {
		calculateDocumentClusters();
		// Map each topic name back to its index for O(1) lookup.
		HashMap<String, Integer> reverseTopics = new HashMap<String, Integer>();
		for (int i = 0; i < topics.size(); i++)
			reverseTopics.put(topics.get(i), i);
		m = new int[Main.NUM_CLUSTERS][topics.size()];
		for (int t = 0; t < documents.size(); t++) {
			int docCluster = documentCluster[t];
			for (String topic : documents.get(t).getTopics()) {
				m[docCluster][reverseTopics.get(topic)]++;
			}
		}
		calculateMostDominantClusterTopic();
		return m;
	}

	// For each cluster, records the topic with the highest count in M.
	// Empty clusters keep the default topic 0 (previously left null -> NPE
	// when getAccuracy() unboxed it).
	private void calculateMostDominantClusterTopic() {
		clusterToTopic = new int[Main.NUM_CLUSTERS];
		for (int i = 0; i < m.length; i++) {
			int max = 0;
			for (int j = 0; j < m[i].length; j++) {
				if (m[i][j] > max) {
					max = m[i][j];
					clusterToTopic[i] = j;
				}
			}
		}
	}

	/**
	 * Fraction of documents whose cluster's dominant topic appears among the
	 * document's gold topic labels. Must be called after getHistogram().
	 *
	 * @return accuracy in [0,1]; 0 for an empty document set (avoids 0/0 NaN)
	 */
	public double getAccuracy() {
		if (documents.isEmpty())
			return 0;
		int numCorrect = 0;
		for (int t = 0; t < documents.size(); t++) {
			int dominantTopic = clusterToTopic[documentCluster[t]];
			if (documents.get(t).getTopics().contains(topics.get(dominantTopic)))
				numCorrect++;
		}
		return (double) numCorrect / documents.size();
	}

}
