/**
 * 
 */
package langnstats.project.languagemodel.tree;

import java.io.BufferedReader;

import java.io.FileInputStream;
import java.io.FileNotFoundException;

import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import langnstats.project.lib.LanguageModel;
import langnstats.project.lib.WordType;
import langnstats.project.lib.crossvalidation.TrainTokens;
import edu.cmu.lti.lm.dt.BigramFeature;
import edu.cmu.lti.lm.dt.Entry;
import edu.cmu.lti.lm.dt.EntryCollection;
import edu.cmu.lti.lm.dt.Feature;
import edu.cmu.lti.lm.dt.History;
import edu.cmu.lti.lm.dt.Trainer;
import edu.cmu.lti.lm.dt.TreeNode;
import edu.cmu.lti.lm.dt.TrigramFeature;

/**
 * @author qing
 *
 */
public class TreeBasedLM implements LanguageModel {

	/** Feature templates (bigram + trigram); registered with {@link Entry}'s static state before use. */
	private Set<Feature> features = null;
	/** Root of the trained decision tree; stays null until one of the train methods has run. */
	private TreeNode node;
	/** Tree depth handed to {@link Trainer#train}; defaults to 3, adjustable via the accessor pair. */
	private int trainLevel = 3;
	/** Rolling word history fed to predictions; reset to null at every sentence boundary. */
	private History hist = null;

	/* (non-Javadoc)
	 * @see langnstats.project.lib.LanguageModel#getDescription()
	 */
	public String getDescription() {
		return "TreeBased";
	}

	/**
	 * Computes a normalized probability distribution over the whole vocabulary
	 * for the next word, then advances the internal history with the word that
	 * was actually observed.
	 *
	 * @param wordType the observed word; prepended to the history, or — when it
	 *                 is {@code <PERIOD>} — the history is reset for a new sentence
	 * @return per-vocabulary-index probabilities summing to 1, or {@code null}
	 *         if the model has not been trained yet
	 */
	public double[] predict(WordType wordType) {
		// Entry reads the feature set from static state, so re-register ours in
		// case another model instance replaced it since the last call.
		Entry.setFeat(features);
		if (node == null)
			return null;
		if (hist == null) {
			// Start a fresh sentence with the begin-of-sentence marker.
			hist = new History();
			hist.add("<s>");
		}
		double sum = 0;
		double[] res = new double[WordType.vocabularySize()];
		for (int i = 0; i < WordType.vocabularySize(); i++) {
			String pred = WordType.values()[i].getOriginalTag();
			Entry e = new Entry(pred, hist);
			// The tree stores base-2 log probabilities; convert back to linear space.
			res[i] = Math.pow(2, node.getLogProb(e));
			sum += res[i];
		}
		// Renormalize so the distribution sums to exactly 1.
		for (int i = 0; i < res.length; i++) {
			res[i] /= sum;
		}
		String token = wordType.getOriginalTag();
		// Let every feature observe the history before it is advanced.
		// NOTE(review): presumably this caches per-history feature values --
		// confirm against the Feature.addValue contract.
		Iterator<Feature> it = Entry.getFeat().iterator();
		while (it.hasNext()) {
			it.next().addValue(hist);
		}
		if (!token.equals("<PERIOD>"))
			hist.add(0, token); // most recent word goes to the front of the history
		else
			hist = null; // sentence boundary: restart the history on the next call
		return res;
	}

	/**
	 * Trains the decision tree from the given tokens using bigram and trigram
	 * features. On I/O failure the error is logged and the model is left
	 * untrained (the interface does not allow a checked exception here).
	 *
	 * @see langnstats.project.lib.LanguageModel#train(langnstats.project.lib.crossvalidation.TrainTokens)
	 */
	public void train(TrainTokens trainTokens) {
		try {
			features = new HashSet<Feature>();
			Entry.setFeat(features);
			features.add(new BigramFeature());
			features.add(new TrigramFeature());
			EntryCollection ec = buildFromToken(trainTokens);
			Trainer train = new Trainer(features, ec);
			node = train.train(trainLevel);
		} catch (IOException e) {
			e.printStackTrace();
			return;
		}
	}

	/**
	 * Converts the training tokens into the sentence-per-line text format that
	 * {@link EntryCollection#readAllEntry} expects: tokens space-separated, a
	 * newline appended after every {@code <PERIOD>}.
	 */
	private static EntryCollection buildFromToken(TrainTokens tok) throws IOException {
		StringBuffer buffer = new StringBuffer();
		WordType[] wd = tok.getTokenArray();
		for (int i = 0; i < wd.length; i++) {
			buffer.append(wd[i].getOriginalTag());
			buffer.append(" ");
			if (wd[i].getOriginalTag().equals("<PERIOD>")) {
				buffer.append("\n");
			}
		}
		BufferedReader rd = new BufferedReader(new StringReader(buffer.toString()));
		EntryCollection cl = new EntryCollection();
		cl.readAllEntry(rd);
		return cl;
	}

	/**
	 * Shallow copy sharing the trained tree, the feature set, and the train
	 * level with this instance. Fixed: the original built the copy but returned
	 * {@code null}, and never copied {@code features}, so a clone's
	 * {@code predict()} would have registered a null feature set.
	 */
	public TreeBasedLM clone() {
		TreeBasedLM tl = new TreeBasedLM();
		tl.node = node;
		tl.features = features; // share feature templates so predict() works on the clone
		tl.trainLevel = trainLevel;
		return tl;
	}

	/** @return the tree depth used when training. */
	public int getTrainLevel() {
		return trainLevel;
	}

	/** Sets the tree depth used when training; must be called before train(...). */
	public void setTrainLevel(int trainLevel) {
		this.trainLevel = trainLevel;
	}

	/**
	 * Standalone training helper for {@link #main}: reads one token per line
	 * from {@code source}, reshapes it into the sentence-per-line format, and
	 * trains the tree. Fixed: the reader is now closed, and an unused
	 * {@code EntryCollection} local has been removed.
	 */
	protected void testTrain(String source) throws IOException {
		features = new HashSet<Feature>();
		Entry.setFeat(features);
		features.add(new BigramFeature());
		features.add(new TrigramFeature());
		StringBuffer buffer = new StringBuffer();
		BufferedReader rd1 = new BufferedReader(new FileReader(source));
		try {
			String tok;
			while ((tok = rd1.readLine()) != null) {
				buffer.append(tok);
				buffer.append(" ");
				if (tok.equals("<PERIOD>")) {
					buffer.append("\n");
				}
			}
		} finally {
			rd1.close(); // fixed: the reader was previously leaked
		}
		BufferedReader rd = new BufferedReader(new StringReader(buffer.toString()));
		EntryCollection cl = new EntryCollection();
		cl.readAllEntry(rd);

		Trainer train = new Trainer(features, cl);
		node = train.train(trainLevel);
	}

	/**
	 * Standalone decoding helper for {@link #main}: predicts each token read
	 * from {@code source} (one per line), prints the full distribution, and
	 * reports perplexity at the end.
	 */
	protected void testDec(String source) throws IOException {
		BufferedReader rd1 = new BufferedReader(new FileReader(source));
		double sum = 0;
		int count = 0;
		try {
			String line;
			while ((line = rd1.readLine()) != null) {
				WordType curr = WordType.get(line);
				double[] res = predict(curr);
				for (int i = 0; i < res.length; i++) {
					System.out.print(WordType.values()[i].getOriginalTag());
					System.out.print(":");
					System.out.print(res[i]);
					System.out.print("|");
				}
				// Fixed: accumulate once per token, not once per vocabulary entry.
				// (The old code inflated sum and count by the same factor, so the
				// printed PPL value itself is unchanged.)
				sum += Math.log(res[curr.getIndex()]);
				count += 1;
				System.out.println(line);
			}
		} finally {
			rd1.close(); // fixed: the reader was previously leaked
		}
		System.out.println("PPL: " + 1 / (Math.exp(sum / count)));
	}

	/**
	 * Trains from {@code args[0]}, round-trips the model through Java
	 * serialization at {@code /tmp/test}, then decodes {@code args[1]}.
	 */
	public static void main(String[] args) throws IOException, ClassNotFoundException {
		TreeBasedLM lm = new TreeBasedLM();
		lm.testTrain(args[0]);
		// NOTE(review): this round trip only works if TreeBasedLM is Serializable
		// (directly or via LanguageModel) -- not visible from this file; confirm.
		ObjectOutputStream oup = new ObjectOutputStream(new FileOutputStream("/tmp/test"));
		try {
			oup.writeObject(lm);
		} finally {
			// fixed: the stream was left open, leaking the handle and risking an
			// unflushed, truncated file being read back below
			oup.close();
		}
		ObjectInputStream inp = new ObjectInputStream(new FileInputStream("/tmp/test"));
		try {
			lm = (TreeBasedLM) inp.readObject();
		} finally {
			inp.close(); // fixed: the stream was previously leaked
		}
		// Re-register the features: Entry's static state is lost across the round trip.
		Entry.setFeat(lm.features);
		lm.testDec(args[1]);
	}

	/* (non-Javadoc) no preparation needed for this model. */
	public void prepare(WordType[] allWordType) {
	}
}
