package main.specific;

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;

import edu.washington.cs.knowitall.extractor.conf.BooleanFeatureSet;
import edu.washington.cs.knowitall.extractor.conf.LabeledBinaryExtraction;
import edu.washington.cs.knowitall.extractor.conf.LabeledBinaryExtractionReader;
import edu.washington.cs.knowitall.extractor.conf.ReVerbFeatures;
import edu.washington.cs.knowitall.extractor.conf.WekaDataSet;
import edu.washington.cs.knowitall.nlp.extraction.ChunkedBinaryExtraction;
import weka.classifiers.functions.Logistic;
import weka.core.SerializationHelper;
import weka.core.converters.ArffSaver;

/***
 * Used to train the ReVerb confidence function using the features described
 * by <code>ReVerbFeatures</code>. Given a set of <code>LabeledBinaryExtraction</code>
 * instances, this class featurizes them and trains a logistic regression classifier
 * using Weka's <code>Logistic</code> class. 
 * 
 * This class can be called from the command-line to train a classifier and save the
 * resulting model to a file.
 * 
 * @author afader
 *
 */
public class ReVerbClassifierTrainerSpecific {

	private BooleanFeatureSet<ChunkedBinaryExtraction> featureSet;
	private Logistic classifier;
	private WekaDataSet<ChunkedBinaryExtraction> dataSet;

	/**
	 * Constructs and trains a new Logistic classifier using the given examples.
	 * Each example is featurized with the ReVerb feature set and added to a
	 * Weka data set before the model is fit.
	 * @param examples the labeled extractions to featurize and train on
	 * @throws Exception if featurization or Weka model building fails
	 */
	public ReVerbClassifierTrainerSpecific(Iterable<LabeledBinaryExtraction> examples) throws Exception {
		ReVerbFeatures feats = new ReVerbFeatures();
		featureSet = feats.getFeatureSet();
		createDataSet(examples);
		train();
	}

	/**
	 * @return the data set used to train the classifier
	 */
	public WekaDataSet<ChunkedBinaryExtraction> getDataSet() {
		return dataSet;
	}

	/**
	 * @return the trained classifier.
	 */
	public Logistic getClassifier() {
		return classifier;
	}

	/**
	 * Featurizes each labeled example and adds it to the Weka data set,
	 * mapping positive examples to label 1 and negative examples to label 0.
	 */
	private void createDataSet(Iterable<LabeledBinaryExtraction> examples) {
		dataSet = new WekaDataSet<ChunkedBinaryExtraction>("train", featureSet);
		for (LabeledBinaryExtraction extr : examples) {
			int label = extr.isPositive() ? 1 : 0;
			dataSet.addInstance(extr, label);
		}
	}

	/**
	 * Fits a Weka logistic-regression model on the featurized data set.
	 */
	private void train() throws Exception {
		classifier = new Logistic();
		classifier.buildClassifier(dataSet.getWekaInstances());
	}

	/**
	 * Trains a logistic regression classifier for every
	 * (dataset, first-argument, second-argument, split, sub-split) combination
	 * produced by <code>ChunkingSpecific</code>, and writes each trained model
	 * to disk next to its training file. The training examples must be in the
	 * format described in <code>LabeledBinaryExtractionReader</code>.
	 *
	 * Note: unlike the generic trainer this class is based on, the
	 * command-line arguments are ignored; all file locations are derived from
	 * <code>ChunkingSpecific</code> and the hard-coded combinations file.
	 *
	 * @param args ignored
	 * @throws Exception if reading the examples or training a model fails
	 */
	public static void main(String[] args) throws Exception {

		String[] datasets = main.Chunking.Datasets;

		// NOTE(review): hard-coded machine-specific path — consider making this configurable.
		String fileCombinationsName = "/home/pjbarrio/Dataset/SGML-ACE/COMBINATIONS";

		ChunkingSpecific.loadFileForCombinations(new File(fileCombinationsName));

		for (int d = 0; d < datasets.length; d++) {

			String[] firstArgs = ChunkingSpecific.getFirstArgument(datasets[d]);

			for (int first = 0; first < firstArgs.length; first++) {

				String[] secondArgs = ChunkingSpecific.getSecondArgument(datasets[d], firstArgs[first]);

				for (int second = 0; second < secondArgs.length; second++) {

					int[] split = ChunkingSpecific.getSplits(datasets[d]);

					for (int i = 0; i < split.length; i++) {

						// Sub-split index: keep training as long as a "train" file exists
						// for the next index.
						int gspl = 0;

						String trainFile = ChunkingSpecific.getSplittedFormatedFile(
								ChunkingSpecific.getFormattedFile(firstArgs[first], secondArgs[second],
										datasets[d], split[i], false, "train", false), gspl);

						while (new File(trainFile).exists()) {

							String modelFile = ChunkingSpecific.getSplittedFormatedFile(
									ChunkingSpecific.getFormattedFile(firstArgs[first], secondArgs[second],
											datasets[d], split[i], false, "model", false), gspl);

							System.err.println(datasets[d] + " - " + split[i] + " - " + gspl);

							// try-with-resources: the original leaked this stream on every iteration.
							try (InputStream in = new FileInputStream(trainFile)) {
								LabeledBinaryExtractionReader reader = new LabeledBinaryExtractionReader(in);
								ReVerbClassifierTrainerSpecific trainer =
										new ReVerbClassifierTrainerSpecific(reader.readExtractions());
								SerializationHelper.write(modelFile, trainer.getClassifier());
							}

							gspl++;

							trainFile = ChunkingSpecific.getSplittedFormatedFile(
									ChunkingSpecific.getFormattedFile(firstArgs[first], secondArgs[second],
											datasets[d], split[i], false, "train", false), gspl);
						}
					}
				}
			}
		}
	}
}
