package edu.illinois.cs.mmak4.cs410;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import cc.mallet.classify.Classifier;
import cc.mallet.classify.NaiveBayes;
import cc.mallet.classify.NaiveBayesEMTrainer;
import cc.mallet.classify.NaiveBayesTrainer;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.FeatureSequence2FeatureVector;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.Target2Label;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.TokenSequenceLowercase;
import cc.mallet.pipe.TokenSequenceRemoveStopwords;
import cc.mallet.pipe.iterator.ArrayDataAndTargetIterator;
import cc.mallet.pipe.iterator.ArrayIterator;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Labeling;

public class TrainClassifier {
	/** When true, refine the supervised model with EM over unlabelled data. */
	private static final boolean EM = true;

	/** Input file of tab-separated "label\tsentence" lines; empty label = unlabelled. */
	private static final String INPUT_FILE = "sentence.toclassify";

	/** Output file the trained classifier is serialised to. */
	private static final String MODEL_FILE = "nbem.mallet";

	/** Weight given to each self-labelled (originally unlabelled) instance. */
	private static final double UNLABELED_DATA_WEIGHT = 0.01;

	/** Relative log-likelihood change below which EM is considered converged. */
	private static final double CONVERGENCE_THRESHOLD = 1e-7;

	/**
	 * Trains a Naive Bayes classifier on the labelled examples in
	 * {@code sentence.toclassify}, optionally refines it with EM over the
	 * unlabelled examples, and serialises the result to {@code nbem.mallet}.
	 *
	 * @param args unused
	 * @throws FileNotFoundException if the input file does not exist
	 */
	public static void main(String[] args) throws FileNotFoundException {
		Pipe instancePipe = buildInstancePipe();

		List<String> sentence = new ArrayList<String>();
		List<Integer> label = new ArrayList<Integer>();
		List<String> unlabelledSentence = new ArrayList<String>();
		loadExamples(INPUT_FILE, sentence, label, unlabelledSentence);

		// Pass the labelled examples through the pipe chain.
		InstanceList instances = new InstanceList(instancePipe);
		instances.addThruPipe(new ArrayDataAndTargetIterator(sentence, label));

		// Supervised baseline trained on the labelled data only.
		NaiveBayes c = new NaiveBayesTrainer().train(instances);

		if (EM) {
			c = trainWithEM(c, instances, unlabelledSentence);
		}

		serialize(c, MODEL_FILE);
	}

	/**
	 * Builds the pipe chain that turns a raw (text, label) pair into a
	 * labelled feature vector: label indexing, tokenisation, lowercasing,
	 * stop-word removal, dictionary mapping, then bag-of-words conversion.
	 */
	private static Pipe buildInstancePipe() {
		ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
		// Convert "target" object into a numeric index into a LabelAlphabet.
		pipeList.add(new Target2Label());
		// Convert the character sequence to tokens.
		pipeList.add(new CharSequence2TokenSequence());
		// Lower-case everything.
		pipeList.add(new TokenSequenceLowercase());
		// Remove stop words.
		pipeList.add(new TokenSequenceRemoveStopwords());
		// Map tokens to dictionary indices.
		pipeList.add(new TokenSequence2FeatureSequence());
		// Discard token order (bag of words).
		pipeList.add(new FeatureSequence2FeatureVector());
		return new SerialPipes(pipeList);
	}

	/**
	 * Reads tab-separated examples from {@code filename}. A line of the form
	 * "label\tsentence" is added to {@code sentence}/{@code label}; a line
	 * with an empty label field goes into {@code unlabelledSentence}.
	 * Malformed lines are skipped with a warning rather than silently
	 * aborting the whole read, and the reader is always closed.
	 *
	 * @throws FileNotFoundException if the file does not exist
	 */
	private static void loadExamples(String filename, List<String> sentence,
			List<Integer> label, List<String> unlabelledSentence)
			throws FileNotFoundException {
		BufferedReader inF = new BufferedReader(new FileReader(filename));
		try {
			String example;
			// Explicit EOF check: readLine() returns null at end of stream
			// (the original code relied on an NPE caught by a blanket catch).
			while ((example = inF.readLine()) != null) {
				// Split sample by '\t'; first field is the label.
				String[] entries = example.split("\t");
				if (entries.length < 2) {
					// No tab or no sentence text: nothing usable on this line.
					continue;
				}
				if (entries[0].equals("")) {
					unlabelledSentence.add(entries[1]);
				} else {
					try {
						label.add(Integer.parseInt(entries[0]));
						sentence.add(entries[1]);
					} catch (NumberFormatException e) {
						System.err.println("Skipping line with non-numeric label: "
								+ example);
					}
				}
			}
		} catch (IOException e) {
			// Keep whatever was read so far; report instead of swallowing.
			System.err.println("I/O error reading " + filename + ": " + e);
		} finally {
			try {
				inF.close();
			} catch (IOException ignored) {
				// Nothing useful to do if close fails.
			}
		}
	}

	/**
	 * Refines an initial classifier with a simple EM loop: repeatedly label
	 * the unlabelled sentences with the current model, add them to the
	 * training set at a low weight, and retrain until the relative change in
	 * data log-likelihood drops below {@link #CONVERGENCE_THRESHOLD}.
	 *
	 * @param c                  initial classifier trained on labelled data
	 * @param instances          the labelled training instances
	 * @param unlabelledSentence raw unlabelled sentences to self-label
	 * @return the retrained classifier
	 */
	private static NaiveBayes trainWithEM(NaiveBayes c, InstanceList instances,
			List<String> unlabelledSentence) {
		// Freeze the alphabets so unlabelled data cannot add unseen features.
		c.getInstancePipe().getDataAlphabet().stopGrowth();
		c.getInstancePipe().getTargetAlphabet().stopGrowth();

		double prevLogLikelihood = 0.0;
		// The unlabelled data could be phased in over maxParts chunks;
		// with maxParts == 1 all of it is used from the first iteration.
		int maxParts = 1;
		int currParts = 1;
		boolean converged = false;
		while (!converged) {
			// Start each round from a fresh copy of the labelled data.
			InstanceList trg2 = new InstanceList(instances.getPipe());
			for (int i = 0; i < instances.size(); i++) {
				trg2.add(instances.get(i));
			}

			// E-step: label the unlabelled sentences with the current model
			// and inject them at a reduced weight.
			Iterator<Instance> sentenceIterator = new ArrayIterator(
					unlabelledSentence);
			Iterator<Instance> iterator = c.getInstancePipe()
					.newIteratorFrom(sentenceIterator);
			int maxCount = currParts * unlabelledSentence.size() / maxParts;
			int count = 0;
			while (iterator.hasNext() && count < maxCount) {
				Instance inst2 = iterator.next().shallowCopy();
				Labeling labeling = c.classify(inst2).getLabeling();
				inst2.unLock();
				inst2.setLabeling(labeling);
				inst2.lock();
				trg2.add(inst2, UNLABELED_DATA_WEIGHT);
				count += 1;
			}

			// M-step: retrain on labelled + self-labelled data.
			c = new NaiveBayesTrainer().train(trg2);
			double logLikelihood = c.dataLogLikelihood(trg2);
			System.err.println("Unlabelled = " + maxCount
					+ "  Loglikelihood = " + logLikelihood);

			// Converged once the relative log-likelihood change falls below
			// the threshold for every chunk. Guard against a zero likelihood,
			// which would otherwise divide by zero and never converge.
			if (logLikelihood != 0.0
					&& Math.abs((logLikelihood - prevLogLikelihood)
							/ logLikelihood) < CONVERGENCE_THRESHOLD) {
				currParts += 1;
				if (currParts >= maxParts) {
					converged = true;
				}
			}
			prevLogLikelihood = logLikelihood;
		}
		return c;
	}

	/**
	 * Serialises the trained classifier to {@code filename}, closing the
	 * stream even on failure.
	 *
	 * @throws IllegalArgumentException (with cause preserved) if the file
	 *                                  cannot be written
	 */
	private static void serialize(Classifier classifier, String filename) {
		ObjectOutputStream oos = null;
		try {
			oos = new ObjectOutputStream(new FileOutputStream(filename));
			oos.writeObject(classifier);
		} catch (IOException e) {
			throw new IllegalArgumentException(
					"Couldn't write classifier to filename " + filename, e);
		} finally {
			if (oos != null) {
				try {
					oos.close();
				} catch (IOException ignored) {
					// Best effort; the object has already been written.
				}
			}
		}
	}
}
