package DEEPERsource.DEEPERsource.source.machinelearning.wekawrapper;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import weka.classifiers.Classifier;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ArffLoader;

/**
 * Base class for wrappers around Weka classifiers. Subclasses supply the
 * concrete algorithm via {@link #buildClassifier()} / {@link #readClassifier(String)}
 * and option handling via {@link #getOptions()} / {@link #setOptions(Map)}.
 *
 * <p>Errors are logged and swallowed (best-effort); failing operations
 * return {@code null} rather than throwing.
 *
 * @deprecated superseded — TODO(review): point to the replacement class.
 */
@Deprecated
public abstract class WekaClassifier {
	/** Shared logger for all classifier wrappers. */
	private static Log _log = LogFactory.getLog(WekaClassifier.class);

	/** Underlying Weka model; assigned by subclasses in buildClassifier()/readClassifier(). */
	protected Classifier classifier = null;

	/** Training data; may be null until a subclass populates it. */
	protected Instances trainingSet = null;
	/** Held-out test data; may be null. */
	protected Instances testSet = null;

	/**
	 * Returns the class-probability distribution for the given instance.
	 *
	 * @param testInstance instance to classify; its dataset header must be
	 *            compatible with the data the classifier was built on
	 * @return per-class probabilities, or {@code null} if the classifier has
	 *         not been built yet or classification fails
	 */
	public double[] distributionForInstance(Instance testInstance) {
		if (classifier == null) {
			// Previously this surfaced as an NPE caught below; make the
			// diagnostic explicit while preserving the null-return contract.
			_log.error("Classifier has not been built; call buildClassifier() or readClassifier() first");
			return null;
		}
		try {
			return classifier.distributionForInstance(testInstance);
		} catch (Exception e) {
			// Log with the cause instead of printing to stderr.
			_log.error("Error during instance classification", e);
		}
		return null;
	}

	/**
	 * Serializes the current classifier model to the given file using Java
	 * object serialization. Failures are logged and swallowed.
	 *
	 * @param modelFile path of the model file to write
	 */
	public void serializeClassifier(String modelFile) {
		try {
			ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(modelFile));
			try {
				out.writeObject(classifier);
			} finally {
				out.close();
			}
		} catch (FileNotFoundException e) {
			_log.error("Can't find file " + modelFile, e);
		} catch (IOException e) {
			_log.error("Error writing model file " + modelFile, e);
		}
	}

	/**
	 * Returns the current algorithm options as a name-to-value map.
	 *
	 * @return option name/value pairs
	 */
	public abstract Map<String, String> getOptions();

	/**
	 * Applies algorithm options from a name-to-value map.
	 *
	 * @param opt option name/value pairs
	 */
	public abstract void setOptions(Map<String, String> opt);

	/**
	 * Loads a previously serialized classifier model.
	 *
	 * @param modelFile path to the model file
	 */
	public abstract void readClassifier(String modelFile);

	/**
	 * Builds the underlying classification algorithm, typically from
	 * {@link #trainingSet}.
	 */
	public abstract void buildClassifier();
}
