package nlp;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;

/**
 * Builds Weka {@link Instances} for authorship classification from serialized
 * {@code TextDocument} files. The attribute schema is shared between the
 * training and test sets: attribute 0 is the nominal author class, followed by
 * one numeric attribute per distinct feature name (in sorted order) seen in
 * EITHER file, minus any features whose name prefix appears in the skip set.
 */
public class WekaFeatureManager {
	// Built once in the constructor and never reassigned afterwards.
	private final Instances trainInstances;
	private final Instances testInstances;

	/**
	 * Collects the distinct feature names used across {@code docs} into
	 * {@code featureNameSet}, skipping any feature whose first tab-separated
	 * name component appears in {@code featureSkipSet}.
	 *
	 * @param docs           documents whose feature names are harvested
	 * @param featureNameSet accumulator, mutated in place (also returned)
	 * @param featureSkipSet feature-name prefixes (text before the first tab)
	 *                       to exclude from the attribute schema
	 * @return the same {@code featureNameSet} instance, for call chaining
	 */
	private SortedSet<String> GetAllFeatureNames(List<TextDocument> docs,
			SortedSet<String> featureNameSet, HashSet<String> featureSkipSet) {
		for (TextDocument doc : docs) {
			for (String featureName : doc.GetAllFeatureNames()) {
				// Only the portion before the first tab is matched against
				// the skip set; the full name is what gets stored.
				String namePrefix = featureName.split("\t")[0];
				if (!featureSkipSet.contains(namePrefix)) {
					featureNameSet.add(featureName);
				}
			}
		}
		return featureNameSet;
	}

	/**
	 * Reads {@code fileName} and deserializes one {@link TextDocument} per
	 * line. Handles Windows CRLF line endings and skips empty records (e.g.
	 * the empty trailing record produced by a final newline), both of which
	 * the previous {@code split("\n")} implementation mishandled.
	 *
	 * @param fileName path of the serialized-document file
	 * @return the documents, in file order
	 */
	private List<TextDocument> ParseTextDocumentFromFile(String fileName) {
		String inputFileData = FileUtil.ReadFileContent(fileName);
		// Split on LF or CRLF so CRLF files do not leave a stray '\r'
		// appended to every record.
		String[] records = inputFileData.split("\r?\n");
		List<TextDocument> docs = new ArrayList<TextDocument>();
		for (String record : records) {
			// A blank line (or trailing newline) yields an empty record;
			// do not hand it to Deserialize.
			if (record.isEmpty()) {
				continue;
			}
			TextDocument doc = new TextDocument();
			doc.Deserialize(record);
			docs.add(doc);
		}
		return docs;
	}

	/**
	 * Converts {@code docs} into a Weka data set over the given attribute
	 * schema. Attribute 0 is the nominal class (author name); the remaining
	 * attributes are the entries of {@code featureNames} in iteration
	 * (sorted) order. Features a document lacks are marked as Weka "missing"
	 * values rather than defaulting to zero.
	 *
	 * @param docs         documents to convert, one Weka instance each
	 * @param featureNames full attribute schema shared by train and test sets
	 * @return the populated {@code Instances} with class index 0
	 */
	private Instances CreateWekaFeatureSetInstances(List<TextDocument> docs,
			SortedSet<String> featureNames) {
		int numFeatures = featureNames.size();

		// +1 accounts for the class attribute at position 0.
		FastVector wekaAttributes = new FastVector(numFeatures + 1);

		// Nominal class attribute whose values are the known author names.
		String[] authorNames = AuthorClassifier.authorNames;
		FastVector classValues = new FastVector(authorNames.length);
		for (String authorName : authorNames) {
			classValues.addElement(authorName);
		}
		Attribute classAttribute = new Attribute("authorClass", classValues);
		wekaAttributes.addElement(classAttribute);

		// Numeric feature attributes, in the sorted order of featureNames.
		for (String featureName : featureNames) {
			wekaAttributes.addElement(new Attribute(featureName));
		}

		Instances featureSet = new Instances("Features", wekaAttributes,
				docs.size());
		featureSet.setClassIndex(0);

		for (TextDocument doc : docs) {
			Instance anInstance = new Instance(numFeatures + 1);
			anInstance.setValue((Attribute) wekaAttributes.elementAt(0),
					doc.GetLabel());
			int idx = 1;
			for (String featureName : featureNames) {
				Feature feature = doc.GetFeature(featureName);
				if (feature != null) {
					Double value = (Double) feature.GetValue();
					anInstance.setValue(
							(Attribute) wekaAttributes.elementAt(idx++), value);
				} else {
					// Document has no value for this feature: mark it missing
					// so Weka does not treat it as an implicit zero.
					anInstance.setMissing(idx++);
				}
			}
			featureSet.add(anInstance);
		}
		return featureSet;
	}

	/**
	 * Loads both files, derives the shared attribute schema from the union of
	 * their feature names (minus skipped prefixes), and materializes the
	 * training and test {@code Instances}.
	 *
	 * NOTE(review): the schema intentionally includes feature names that occur
	 * only in the test file, so both sets have identical attributes — confirm
	 * this union behavior is what downstream evaluation expects.
	 *
	 * @param trainingFile   path to the serialized training documents
	 * @param testFile       path to the serialized test documents
	 * @param featureSkipSet feature-name prefixes to exclude from the schema
	 */
	public WekaFeatureManager(String trainingFile, String testFile,
			HashSet<String> featureSkipSet) {
		SortedSet<String> featureNameSet = new TreeSet<String>();
		List<TextDocument> trainingDocs = ParseTextDocumentFromFile(trainingFile);
		List<TextDocument> testDocs = ParseTextDocumentFromFile(testFile);
		GetAllFeatureNames(trainingDocs, featureNameSet, featureSkipSet);
		GetAllFeatureNames(testDocs, featureNameSet, featureSkipSet);
		trainInstances = CreateWekaFeatureSetInstances(trainingDocs,
				featureNameSet);
		testInstances = CreateWekaFeatureSetInstances(testDocs, featureNameSet);
	}

	/** @return the training set built by the constructor (never null) */
	public Instances GetTrainingInstances() {
		return trainInstances;
	}

	/** @return the test set built by the constructor (never null) */
	public Instances GetTestInstances() {
		return testInstances;
	}
}
