package datasource.review;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.StringTokenizer;

import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.util.Pair;

/**
 * Naive-Bayes-style multi-label topic classifier.
 *
 * <p>Training files consist of alternating lines: a sentence line followed by a
 * whitespace-separated line of integer label ids (0 encodes "no label" and is
 * skipped). Words are lower-cased and Porter-stemmed before counting; each
 * distinct stemmed word is counted at most once per sentence (Bernoulli-style
 * document frequency, not term frequency).
 */
public class TopicClassifier {
	// Training data: sentences.get(i) carries the labels in labels.get(i). ----
	ArrayList<String> sentences = new ArrayList<String>();
	ArrayList<ArrayList<Integer>> labels = new ArrayList<ArrayList<Integer>>();

	// Model parameters ------------------------------------------------------
	// P(word): fraction of training sentences containing the (stemmed) word.
	HashMap<String, Double> wordProbs = new HashMap<String, Double>();
	// P(label): label occurrences divided by the number of sentences.
	HashMap<Integer, Double> labelProbs = new HashMap<Integer, Double>();
	// P(word | label): per-label word document frequency over the label's count.
	HashMap<Integer, HashMap<String, Double>> condProb = new HashMap<Integer, HashMap<String, Double>>();

	/**
	 * Loads every training file in the given directory, then estimates the
	 * model. Unreadable files are reported and skipped; training proceeds with
	 * whatever data loaded successfully.
	 *
	 * @param dir path to a directory of training files
	 * @throws IllegalArgumentException if {@code dir} is not a readable directory
	 */
	public void train(String dir) {
		File[] files = new File(dir).listFiles();
		if (files == null) {
			// listFiles() returns null for non-directories and on I/O errors;
			// the original code would NPE here.
			throw new IllegalArgumentException("not a readable directory: " + dir);
		}
		for (File file : files) {
			try {
				this.readFile(file.getAbsolutePath());
			} catch (IOException e) {
				// Best-effort: report and continue with the remaining files.
				e.printStackTrace();
			}
		}
		calModel();
	}

	/**
	 * Estimates {@link #wordProbs}, {@link #labelProbs} and {@link #condProb}
	 * from the accumulated training sentences. A no-op when no sentences were
	 * loaded (avoids division by zero producing NaN probabilities).
	 */
	public void calModel() {
		if (sentences.isEmpty()) {
			return;
		}
		HashMap<Integer, HashMap<String, Integer>> labelWordCount =
				new HashMap<Integer, HashMap<String, Integer>>();
		HashMap<Integer, Integer> labelCount = new HashMap<Integer, Integer>();
		HashMap<String, Integer> wordCount = new HashMap<String, Integer>();
		for (int i = 0; i < sentences.size(); i++) {
			ArrayList<Integer> label = labels.get(i);
			for (Integer l : label) {
				increment(labelCount, l);
			}
			// Collect the distinct stemmed words of this sentence so each word
			// contributes at most once per sentence (document frequency).
			HashSet<String> set = new HashSet<String>();
			StringTokenizer st = new StringTokenizer(removeStop(sentences.get(i)));
			while (st.hasMoreTokens()) {
				set.add(st.nextToken());
			}
			for (String word : set) {
				increment(wordCount, word);
				// Conditional counts: word seen under each of the sentence's labels.
				for (Integer l : label) {
					HashMap<String, Integer> condC = labelWordCount.get(l);
					if (condC == null) {
						condC = new HashMap<String, Integer>();
						labelWordCount.put(l, condC);
					}
					increment(condC, word);
				}
			}
		}
		// Normalize the raw counts into probability estimates. --------------
		double total = sentences.size();
		for (Entry<String, Integer> entry : wordCount.entrySet()) {
			wordProbs.put(entry.getKey(), entry.getValue() / total);
		}
		for (Entry<Integer, Integer> entry : labelCount.entrySet()) {
			labelProbs.put(entry.getKey(), entry.getValue() / total);
		}
		for (Entry<Integer, HashMap<String, Integer>> entry : labelWordCount.entrySet()) {
			HashMap<String, Double> condP = new HashMap<String, Double>();
			// Every key of labelWordCount is also a key of labelCount, so the
			// lookup below cannot return null.
			double lCount = labelCount.get(entry.getKey());
			for (Entry<String, Integer> e : entry.getValue().entrySet()) {
				condP.put(e.getKey(), e.getValue() / lCount);
			}
			condProb.put(entry.getKey(), condP);
		}
	}

	/** Adds one to the counter stored under {@code key}; a missing entry counts as zero. */
	private static <K> void increment(HashMap<K, Integer> counts, K key) {
		Integer c = counts.get(key);
		counts.put(key, c == null ? 1 : c + 1);
	}

	/**
	 * Scores every known label against the sentence.
	 *
	 * <p>Score = P(label) * product over known words of P(word|label)/P(word).
	 * Words never seen in training are ignored; if no word of the sentence is
	 * known the score is 0.
	 *
	 * @param sentence raw input sentence
	 * @return one (label, score) pair per label seen in training, unsorted
	 */
	public ArrayList<Pair<Integer, Double>> predict(String sentence) {
		String senAfterStopWord = removeStop(sentence);
		StringTokenizer st = new StringTokenizer(senAfterStopWord, " ");
		ArrayList<String> words = new ArrayList<String>();
		while (st.hasMoreTokens()) {
			words.add(st.nextToken());
		}
		ArrayList<Pair<Integer, Double>> result = new ArrayList<Pair<Integer, Double>>();
		for (Integer label : condProb.keySet()) {
			HashMap<String, Double> cProbs = condProb.get(label);
			double labelP = labelProbs.get(label);
			double condP = 1;
			int count = 0;
			for (String word : words) {
				Double p = cProbs.get(word);
				if (p != null) {
					count++;
					// wordProbs always has an entry for any word counted under
					// a label, so this division is safe.
					condP *= p / wordProbs.get(word);
				}
			}
			if (count == 0) {
				// No evidence at all for this label: score it 0, not 1.
				condP = 0;
			}
			condP *= labelP;
			result.add(new Pair<Integer, Double>(label, condP));
		}
		return result;
	}

	/**
	 * Normalizes a sentence: lower-cases, trims and Porter-stems each
	 * whitespace-separated token, joining the results with single spaces
	 * (trailing space included, as downstream tokenizing ignores it).
	 *
	 * <p>NOTE(review): despite the name, no stop-word list is applied here —
	 * only stemming and lower-casing. Kept as-is to preserve the public API.
	 *
	 * @param sentence raw input sentence
	 * @return space-joined stemmed tokens
	 */
	public String removeStop(String sentence) {
		StringTokenizer st = new StringTokenizer(sentence, " ");
		StringBuilder result = new StringBuilder();
		while (st.hasMoreTokens()) {
			String word = st.nextToken().toLowerCase().trim();
			result.append(PorterStemmerTokenizerFactory.stem(word)).append(' ');
		}
		return result.toString();
	}

	/**
	 * Reads one training file of alternating sentence/label lines into
	 * {@link #sentences} and {@link #labels}. Label ids equal to 0 encode
	 * "no label" and are dropped. A trailing sentence line without a matching
	 * label line is skipped (the original code threw NPE on such files).
	 *
	 * @param fileName path of the training file
	 * @throws IOException if the file cannot be read
	 */
	public void readFile(String fileName) throws IOException {
		BufferedReader br = new BufferedReader(new FileReader(fileName));
		try {
			String line = br.readLine();
			while (line != null) {
				String labelLine = br.readLine();
				if (labelLine == null) {
					// Odd number of lines: sentence without labels — skip it.
					break;
				}
				sentences.add(line);
				ArrayList<Integer> label = new ArrayList<Integer>();
				StringTokenizer st = new StringTokenizer(labelLine, " ");
				while (st.hasMoreTokens()) {
					int l = Integer.parseInt(st.nextToken());
					if (l != 0) {
						label.add(l);
					}
				}
				labels.add(label);
				line = br.readLine();
			}
		} finally {
			// Always release the file handle, even when parsing throws.
			br.close();
		}
	}

	public static void main(String[] args) {
		String dir = "res/train_hotel_files";
		TopicClassifier model = new TopicClassifier();
		model.train(dir);
	}

}
