package utils;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.bayes.NaiveBayesMultinomial;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

import model.Collection;
import model.Document;

import edu.stanford.nlp.tagger.maxent.MaxentTagger;

/**
 * Utility methods for a text-classification pipeline: POS-tag a document
 * collection with the Stanford Maxent tagger, convert train/test listing
 * files into normalized ARFF document vectors, and classify them with Weka's
 * multinomial Naive Bayes.
 *
 * <p>Not thread-safe; all state is static. Not instantiable.
 */
public class WekaUtils {

	/** Path to the Stanford POS tagger model loaded by {@link #tagCollection(String)}. */
	public static final String TAGGER_MODEL = "./models/bidirectional-distsim-wsj-0-18.tagger";

	/** Path separator as reported by the project's Collection helper. */
	public static char sep = Collection.getSeparator();

	/** Utility class — prevent instantiation. */
	private WekaUtils() {
	}

	/**
	 * Stage 1: POS-tags every document found in the given collection directory
	 * and writes each result next to its source file with a ".tagged" suffix.
	 *
	 * <p>Errors loading the tagger model or reading/writing documents are
	 * logged to stderr and swallowed, matching the original best-effort style.
	 *
	 * @param collection directory containing the raw documents
	 */
	public static void tagCollection(String collection) {
		try {
			MaxentTagger tagger = new MaxentTagger(TAGGER_MODEL);
			// Scan the directory once; the original scanned it a second time
			// just to print the document count.
			java.util.Collection<String> docLocations = Collection.addDirectory(
					collection, false);
			System.out.println(docLocations.size() + " docs found at " + collection);
			for (String docLocation : docLocations) {
				System.out.println("Processing " + docLocation);
				Document d = new Document(tagger, new File(docLocation));
				d.saveTagged(docLocation + ".tagged");
			}
			System.out.println("Tagging done");
		} catch (ClassNotFoundException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Reads one CSV listing file (ARFF-style: lines beginning with '@' are
	 * headers) and records each referenced tagged document.
	 *
	 * <p>For every data line, field 0 is the document id and the last field is
	 * its class label; the label is stored in {@code rel} keyed by
	 * {@code id + ".tagged"}, and the full path to the tagged document is
	 * appended to the returned list.
	 *
	 * @param listFile   path to the train/test listing file
	 * @param collection directory holding the ".tagged" documents
	 * @param rel        receives id-to-label mappings (mutated in place)
	 * @return paths of the tagged documents named in the listing
	 * @throws IOException if the listing file cannot be read
	 */
	private static List<String> loadFileList(String listFile, String collection,
			HashMap<String, String> rel) throws IOException {
		final String EXTENSION = ".tagged";
		List<String> files = new ArrayList<String>();
		BufferedReader reader = new BufferedReader(new FileReader(listFile));
		try {
			String line;
			while ((line = reader.readLine()) != null) {
				// Skip blank lines (line.charAt(0) would throw on them) and
				// ARFF header lines such as "@relation"/"@attribute"/"@data".
				if (line.isEmpty() || line.charAt(0) == '@') {
					continue;
				}
				String[] fields = line.split(",");
				String id = fields[0];
				System.out.println(id);
				files.add(collection + sep + id + EXTENSION);
				// Last CSV field is the class label for this document.
				rel.put(id + EXTENSION, fields[fields.length - 1]);
			}
		} finally {
			// Close even when an IOException escapes the loop; the original
			// leaked the reader on that path.
			reader.close();
		}
		return files;
	}

	/**
	 * Stage 2: given train and test listing files, builds document vectors for
	 * both sets over their shared (intersected) vocabulary so the resulting
	 * ARFF files are attribute-compatible, and writes them as
	 * "&lt;base&gt;.normalized.arff" beside each listing file.
	 *
	 * @param trainFile  listing file of training documents (e.g. "train1.dat")
	 * @param testFile   listing file of test documents (e.g. "test1.dat")
	 * @param collection directory holding the ".tagged" documents
	 */
	public static void toArff(String trainFile, String testFile,
			String collection) {
		try {
			HashMap<String, String> rel = new HashMap<String, String>();
			System.out.println("====== Train files ======");
			List<String> trainFiles = loadFileList(trainFile, collection, rel);
			System.out.println("====== Test files ======");
			List<String> testFiles = loadFileList(testFile, collection, rel);
			System.out.println(trainFiles.size() + " train files and "
					+ testFiles.size() + " test files loaded.");

			// Build the collections
			Collection train = new Collection(trainFiles, false, false, false);
			Collection test = new Collection(testFiles, false, false, false);

			// Intersect the two vocabularies. Copy the key set first: keySet()
			// is a live view of the map, so calling retainAll on it directly
			// (as the original did) silently deleted entries from the training
			// collection's own vocabulary map.
			Set<String> sharedVocabulary = new HashSet<String>(train
					.getVocabulary().keySet());
			sharedVocabulary.retainAll(test.getVocabulary().keySet());

			// Strip the 3-char extension: "train1.dat" -> "train1.normalized.arff".
			train.toArff(
					sharedVocabulary,
					trainFile.substring(0, trainFile.length() - 3).concat(
							"normalized.arff"), rel);
			test.toArff(
					sharedVocabulary,
					testFile.substring(0, testFile.length() - 3).concat(
							"normalized.arff"), rel);

			System.out.println(sharedVocabulary.size());

		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Stage 3: trains a multinomial Naive Bayes classifier on the given ARFF
	 * train file and evaluates it on the ARFF test file.
	 *
	 * <p>If either file omits the class attribute index, the last attribute is
	 * assumed to be the class (standard ARFF convention).
	 *
	 * @param trainFile path to the training ARFF file
	 * @param testFile  path to the test ARFF file
	 * @return percentage of correctly classified test instances, or -1 on error
	 */
	public static double classify(String trainFile, String testFile) {
		DataSource train, test;
		System.out.println("Classifying from:");
		System.out.println("Train file: " + trainFile);
		System.out.println("Test file: " + testFile);
		try {
			train = new DataSource(trainFile);
			test = new DataSource(testFile);

			Instances trainData = train.getDataSet();
			Instances testData = test.getDataSet();
			// Set the class attribute if the data format does not provide it
			// (e.g. plain ARFF; the XRFF format saves class attribute info).
			if (trainData.classIndex() == -1) {
				trainData.setClassIndex(trainData.numAttributes() - 1);
			}
			if (testData.classIndex() == -1) {
				testData.setClassIndex(testData.numAttributes() - 1);
			}
			// Train the classifier on the training split.
			Classifier cls = new NaiveBayesMultinomial();
			cls.buildClassifier(trainData);
			// Evaluate on the held-out test split.
			Evaluation eval = new Evaluation(trainData);
			eval.evaluateModel(cls, testData);
			return eval.pctCorrect();
		} catch (Exception e) {
			// Weka's API declares bare Exception; log and fall through to the
			// -1 sentinel so a failed fold doesn't abort the whole run.
			e.printStackTrace();
		}
		return -1;
	}

	/**
	 * Runs the pipeline on a fixed 5-fold cross-validation layout. Stages 1
	 * (tagging) and 2 (ARFF conversion) are one-time preprocessing steps;
	 * uncomment them for a fresh dataset.
	 */
	public static void main(String args[]) {
		final String COLLECTION = "C:\\Users\\Amos\\workspace\\Data\\selected\\869\\doc";
		final String ARFF_LOC = "C:\\Users\\Amos\\workspace\\Data\\selected\\869\\cv";

		// Stage 1 (one-time): tag the raw documents.
		// WekaUtils.tagCollection(COLLECTION);
		// Stage 2 (one-time): build normalized ARFF files per fold.
		// for (int i = 1; i < 6; i++) {
		// WekaUtils.toArff(ARFF_LOC + Collection.getSeparator() + "train" + i
		// + ".dat", ARFF_LOC + Collection.getSeparator() + "test" + i
		// + ".dat", COLLECTION);
		// }
		// Stage 3: classify each of the 5 folds and print accuracy.
		for (int i = 1; i < 6; i++) {
			System.out.println(WekaUtils.classify(
					ARFF_LOC + Collection.getSeparator() + "train" + i
							+ ".normalized.arff",
					ARFF_LOC + Collection.getSeparator() + "test" + i
							+ ".normalized.arff"));
		}
	}
}
