//	Aviv Charikar	Yael Tzirulnikov	305343600	307955609

package EM;

import java.io.*;
import java.util.*;
import java.util.Map.Entry;

public class EMAlg {
	/** Number of clusters; chosen to equal the number of gold-standard topics. */
	private static final int NUM_CLUSTERS = 9;

	/** Words whose total training count is at or below this threshold are discarded. */
	private static final int RARE_WORD_THOLD = 3;

	/** Underflow guard: exponents more than K below the per-document maximum are treated as 0. */
	private static final double K = 10;

	/** Minimum allowed cluster probability, enforced before re-normalization. */
	private static final double EPSILON = 0.00001;

	/** Lidstone smoothing constant for the per-cluster word probabilities. */
	private static final double LAMBDA = 0.39;

	/** Number of EM iterations to run. */
	private static final int NUM_ITERATIONS = 30;

	/** The nine gold-standard topics used for the confusion matrix and accuracy report. */
	private static final String[] topics = {
			"acq", "money-fx", "grain", "crude", "trade",
			"interest", "ship", "wheat", "corn"
	};

	/** Training documents, each paired with its currently assigned cluster. */
	private ArrayList<DocumentCluster> documents;

	/** Vocabulary: total count of each (non-rare) word over the whole training set. */
	private HashMap<String, Integer> wordCounts;

	/** alpha_i: prior probability of each cluster. */
	private double clusterProbs[];

	/** P_ik: per-cluster word probabilities; one map (word -> probability) per cluster. */
	private ArrayList<HashMap<String, Double>> wordsProbs;

	/** w[t][i]: posterior probability of cluster i given document t (E-step output). */
	private double weights[][];

	/**
	 * Builds the model from a development-set file.
	 *
	 * @param file path to the development set file
	 * @throws IOException if the file cannot be read
	 */
	public EMAlg(String file) throws IOException {
		readDevFile(file);
	}

	/**
	 * Parses the development file: collects the TRAIN documents and their topic
	 * labels, accumulates global word counts, prunes rare words, and allocates
	 * the EM parameter arrays.
	 *
	 * @param devFilename path to the development set file
	 * @throws IOException if the file cannot be read
	 */
	private void readDevFile(String devFilename) throws IOException {
		documents = new ArrayList<DocumentCluster>();
		wordCounts = new HashMap<String, Integer>();

		// try-with-resources guarantees the reader is closed even if parsing throws
		// (the original closed it only on the success path)
		try (BufferedReader reader = new BufferedReader(new FileReader(devFilename))) {
			String line;
			while ((line = reader.readLine()) != null) {
				// Skip empty lines; only TRAIN document headers are processed
				if (line.length() == 0 || !line.startsWith("<TRAIN"))
					continue;

				// Header format: "<TRAIN\t<id>\ttopic1\t...\ttopicN>"
				// (renamed from "topics" to avoid shadowing the static field)
				String[] docTopics = line.split("\t");
				docTopics = Arrays.copyOfRange(docTopics, 2, docTopics.length);
				docTopics[docTopics.length - 1] =
						docTopics[docTopics.length - 1].replace(">", "");

				// Skip the blank line between the header and the document text
				reader.readLine();

				// The document body is on the following line
				Document doc = new Document(reader.readLine(), docTopics);
				documents.add(new DocumentCluster(doc, -1));

				// Fold the document's word counts into the global vocabulary
				for (Entry<String, Integer> entry : doc.getWordCounts()) {
					Integer count = wordCounts.get(entry.getKey());
					if (count == null)
						wordCounts.put(entry.getKey(), entry.getValue());
					else
						wordCounts.put(entry.getKey(), count + entry.getValue());
				}
			}
		}

		removeRareWords();

		// Allocate the EM parameters
		clusterProbs = new double[NUM_CLUSTERS];
		wordsProbs = new ArrayList<HashMap<String, Double>>(NUM_CLUSTERS);
		for (int i = 0; i < NUM_CLUSTERS; i++)
			wordsProbs.add(new HashMap<String, Double>());
		weights = new double[documents.size()][NUM_CLUSTERS];
	}

	/**
	 * Removes words with total count at or below {@link #RARE_WORD_THOLD} from
	 * the vocabulary and from every document.
	 */
	private void removeRareWords() {
		ArrayList<String> rareWords = new ArrayList<String>();
		for (Entry<String, Integer> entry : wordCounts.entrySet()) {
			if (entry.getValue() <= RARE_WORD_THOLD)
				rareWords.add(entry.getKey());
		}
		// Removing via the key-set view avoids a second explicit loop
		wordCounts.keySet().removeAll(rareWords);
		for (DocumentCluster doc : documents)
			doc.document.removeWords(rareWords);
	}

	/**
	 * Runs the EM algorithm: initialization, then {@link #NUM_ITERATIONS}
	 * E/M iterations (printing likelihood and perplexity after each), and
	 * finally the confusion matrix and accuracy.
	 */
	public void runAlg() {
		initEM();
		for (int i = 0; i < NUM_ITERATIONS; i++) {
			EStep();
			MStep();
			reassignClusters();
			outputLikelihoodAndPerplexity();
		}
		outputConfusionMatrix();
	}

	/**
	 * Initializes EM by assigning document t to cluster (t mod NUM_CLUSTERS)
	 * with weight 1, then running one M-step to derive initial parameters.
	 */
	private void initEM() {
		for (int t = 0; t < documents.size(); t++) {
			int cluster = t % NUM_CLUSTERS;
			documents.get(t).cluster = cluster;
			for (int i = 0; i < NUM_CLUSTERS; i++)
				weights[t][i] = (i == cluster) ? 1 : 0;
		}
		MStep();
	}

	/**
	 * E-step: recomputes the posterior weights w[t][i] from the current
	 * parameters, working in log space. Exponents more than K below the
	 * per-document maximum are clamped to 0 to avoid numeric underflow.
	 */
	private void EStep() {
		for (int t = 0; t < documents.size(); t++) {
			// z[i] = log(alpha_i * P(doc_t | cluster i)); m = max_i z[i]
			double[] z = new double[NUM_CLUSTERS];
			double m = Double.NEGATIVE_INFINITY;
			for (int i = 0; i < NUM_CLUSTERS; i++) {
				z[i] = calcDocumentClusterProb(t, i);
				if (z[i] > m)
					m = z[i];
			}

			// Shared denominator of the softmax, with underflow cutoff
			double denom = 0;
			for (int j = 0; j < NUM_CLUSTERS; j++) {
				if (z[j] - m >= -K)
					denom += Math.exp(z[j] - m);
			}

			// w[t][i] = exp(z[i] - m) / denom, or 0 when below the cutoff
			for (int i = 0; i < NUM_CLUSTERS; i++) {
				if (z[i] - m < -K)
					weights[t][i] = 0;
				else
					weights[t][i] = Math.exp(z[i] - m) / denom;
			}
		}
	}

	/**
	 * Returns ln(alpha_i) + sum over words k of n_tk * ln(P_ik), i.e. the log
	 * of the joint probability of the document and the cluster.
	 *
	 * @param document index of the document
	 * @param cluster  index of the cluster
	 * @return the log joint probability
	 */
	private double calcDocumentClusterProb(int document, int cluster) {
		double sum = Math.log(clusterProbs[cluster]);
		for (Entry<String, Integer> entry : documents.get(document).document.getWordCounts())
			sum += entry.getValue() * Math.log(wordsProbs.get(cluster).get(entry.getKey()));
		return sum;
	}

	/**
	 * M-step: re-estimates the cluster priors and the Lidstone-smoothed
	 * per-cluster word probabilities from the current weights, then smooths
	 * the cluster priors.
	 */
	private void MStep() {
		for (int i = 0; i < NUM_CLUSTERS; i++) {
			// alpha_i = (sum_t w[t][i]) / N; denom = expected word mass of cluster i
			double sum = 0;
			double denom = 0;
			for (int t = 0; t < documents.size(); t++) {
				sum += weights[t][i];
				denom += weights[t][i] * documents.get(t).document.length();
			}
			clusterProbs[i] = sum / documents.size();

			// P_ik = (expected count of word k in cluster i + lambda)
			//        / (denom + |V| * lambda)   (Lidstone smoothing)
			for (String word : wordCounts.keySet()) {
				double sumNumerator = 0;
				for (int t = 0; t < documents.size(); t++)
					sumNumerator += documents.get(t).document.getNumAppearances(word) * weights[t][i];
				wordsProbs.get(i).put(word, (sumNumerator + LAMBDA) / (denom + wordCounts.size() * LAMBDA));
			}
		}

		smoothClusterProbs();
	}

	/**
	 * Clamps every cluster prior to at least EPSILON (so no log of zero is
	 * taken later) and re-normalizes so the priors sum to 1.
	 */
	private void smoothClusterProbs() {
		double sum = 0;
		for (int i = 0; i < NUM_CLUSTERS; i++) {
			if (clusterProbs[i] < EPSILON)
				clusterProbs[i] = EPSILON;
			sum += clusterProbs[i];
		}
		for (int i = 0; i < NUM_CLUSTERS; i++)
			clusterProbs[i] /= sum;
	}

	/** Hard-assigns every document to its most probable cluster (argmax over clusters). */
	private void reassignClusters() {
		for (int t = 0; t < documents.size(); t++) {
			int bestCluster = -1;
			double bestClusterProb = Double.NEGATIVE_INFINITY;
			for (int i = 0; i < NUM_CLUSTERS; i++) {
				double clusterProb = calcDocumentClusterProb(t, i);
				if (clusterProb > bestClusterProb) {
					bestClusterProb = clusterProb;
					bestCluster = i;
				}
			}
			documents.get(t).cluster = bestCluster;
		}
	}

	/**
	 * Prints the total log likelihood of the corpus and the per-vocabulary-word
	 * perplexity (e to the negative mean log word likelihood), tab-separated.
	 */
	private void outputLikelihoodAndPerplexity() {
		double likelihood = 0;
		for (int t = 0; t < documents.size(); t++)
			likelihood += calcDocumentLikelihood(t);

		double perplexity = 0;
		for (String word : wordCounts.keySet())
			perplexity += calcWordLikelihood(word);
		perplexity = Math.exp(-perplexity / wordCounts.keySet().size());

		System.out.println(likelihood + "\t" + perplexity);
	}

	/**
	 * Returns ln( sum_i P_ik * alpha_i ): the log marginal probability of a word.
	 *
	 * @param word the vocabulary word
	 * @return the log marginal word probability
	 */
	private double calcWordLikelihood(String word) {
		double prob = 0;
		for (int i = 0; i < NUM_CLUSTERS; i++)
			prob += wordsProbs.get(i).get(word) * clusterProbs[i];
		return Math.log(prob);
	}

	/**
	 * Returns the log likelihood of one document, marginalized over clusters
	 * in log space with the same -K underflow cutoff as the E-step.
	 *
	 * @param document index of the document
	 * @return the document's log likelihood
	 */
	private double calcDocumentLikelihood(int document) {
		// z[i] and m as in the E-step
		double[] z = new double[NUM_CLUSTERS];
		double m = Double.NEGATIVE_INFINITY;
		for (int i = 0; i < NUM_CLUSTERS; i++) {
			z[i] = calcDocumentClusterProb(document, i);
			if (z[i] > m)
				m = z[i];
		}

		// log sum_i exp(z[i]) = m + log sum_i exp(z[i] - m)
		double sum = 0;
		for (int i = 0; i < NUM_CLUSTERS; i++) {
			if (z[i] - m >= -K)
				sum += Math.exp(z[i] - m);
		}
		return m + Math.log(sum);
	}

	/**
	 * Prints a cluster-by-topic confusion matrix (with cluster sizes), then the
	 * accuracy: the fraction of documents whose gold topics include the
	 * dominant topic of their assigned cluster.
	 */
	private void outputConfusionMatrix() {
		// Per-cluster counters, one entry per evaluated topic
		ArrayList<HashMap<String, Integer>> clusterTopicCounts =
				new ArrayList<HashMap<String, Integer>>();
		for (int i = 0; i < NUM_CLUSTERS; i++) {
			HashMap<String, Integer> topicsMap = new HashMap<String, Integer>();
			for (int j = 0; j < NUM_CLUSTERS; j++)
				topicsMap.put(topics[j], 0);
			clusterTopicCounts.add(topicsMap);
		}
		int[] clusterSizes = new int[NUM_CLUSTERS];

		// Tally topics per cluster
		for (int t = 0; t < documents.size(); t++) {
			DocumentCluster dc = documents.get(t);
			HashMap<String, Integer> topicCounts = clusterTopicCounts.get(dc.cluster);
			for (String topic : dc.document.getTopics()) {
				Integer count = topicCounts.get(topic);
				// Topics outside the nine evaluated ones are skipped
				// (the original unboxed a null here and threw an NPE)
				if (count != null)
					topicCounts.put(topic, count + 1);
			}
			clusterSizes[dc.cluster]++;
		}

		// Print the matrix and record each cluster's dominant topic
		for (int i = 0; i < NUM_CLUSTERS; i++)
			System.out.print("\t" + topics[i]);
		System.out.println("\tcluster_size");
		String[] clusterDominantTopic = new String[NUM_CLUSTERS];
		for (int i = 0; i < NUM_CLUSTERS; i++) {
			System.out.print(i);
			String dominantTopic = null;
			int dominantTopicCount = -1;
			for (int j = 0; j < NUM_CLUSTERS; j++) {
				int count = clusterTopicCounts.get(i).get(topics[j]);
				if (count > dominantTopicCount) {
					dominantTopic = topics[j];
					dominantTopicCount = count;
				}
				System.out.print("\t" + count);
			}
			clusterDominantTopic[i] = dominantTopic;
			System.out.println("\t" + clusterSizes[i]);
		}

		// Accuracy: a document counts as correct if any of its gold topics
		// equals its cluster's dominant topic (equals, not identity via intern)
		int correct = 0;
		for (int t = 0; t < documents.size(); t++) {
			DocumentCluster dc = documents.get(t);
			for (String topic : dc.document.getTopics()) {
				if (topic.equals(clusterDominantTopic[dc.cluster])) {
					correct++;
					break;
				}
			}
		}
		System.out.println("Accuracy: " + ((double) correct / documents.size()) * 100 + "%");
	}

	/** Pairs a document with its currently assigned cluster index (-1 = unassigned). */
	private static class DocumentCluster {
		public Document document;
		public int cluster;

		public DocumentCluster(Document document, int cluster) {
			this.document = document;
			this.cluster = cluster;
		}
	}
}
