/**
 * 
 */
package inz.model.classification;

import inz.common.LoggingModule;
import inz.common.MyUtils;
import inz.model.patterns.DbSequence;
import inz.model.patterns.Sequence;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.bayes.NaiveBayes;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

/** Performs actions connected with classification: collects attribute sequences,
 * builds training/test examples, trains a Weka naive-Bayes classifier and
 * evaluates/uses both the Weka classifier and the DeEPs classifier. */
public class ClassificationHelper
{
	/** logger for classification events */
	private static final Logger logger = Logger.getLogger(LoggingModule.CLASSIFICATION_LOGGER);
	/** frequent sequences that serve as classification attributes; kept sorted by
	 * {@code Sequence.comp} (LinkedHashSet preserves the sorted insertion order) */
	private Set<Sequence> allFrequentSequences = new LinkedHashSet<Sequence>();
	/** sets of training examples grouped by their type */
	private Map<ExType, Set<Example>> trainExamples = new HashMap<ExType, Set<Example>>();
	/** sets of test examples grouped by their type */
	private Map<ExType, Set<Example>> testExamples = new HashMap<ExType, Set<Example>>();
	/** dataset of examples in Weka representation; built lazily by {@link #transformDataset()} */
	private Instances dataset;
	/** Weka classifier; built lazily by {@link #buildClassifier()} */
	private Classifier classifier;

	/** Adds frequent sequences that will be attributes for classification purposes.
	 * The accumulated attribute set is re-sorted after every addition so that the
	 * attribute order (and therefore the Weka attribute numbering) stays deterministic.
	 * @param sequences list of additional sequence attributes */
	public void addFrequentSequences(Collection<Sequence> sequences)
	{
		allFrequentSequences.addAll(sequences);
		// sort the whole accumulated set and rebuild it in sorted order
		List<Sequence> sortedList = new ArrayList<Sequence>(allFrequentSequences);
		Collections.sort(sortedList, Sequence.comp);
		allFrequentSequences.clear();
		allFrequentSequences.addAll(sortedList);
	}

	/** Transforms database document sequences into examples of the given type.
	 * @param sequences database sequences, each representing one document
	 * @param type type (class label) of the document sequences
	 * @param maxDist maximal distance between elements of a sequence
	 * @param purpose whether the examples are meant for training or testing */
	public void addExamples(List<DbSequence> sequences, ExType type, int maxDist, ExType purpose)
	{
		Set<Example> newExamples = new LinkedHashSet<Example>();
		for (DbSequence dbSequence : sequences)
		{
			newExamples.add(new Example(allFrequentSequences, dbSequence, maxDist));
		}
		if (newExamples.isEmpty())
		{
			return;
		}
		// pick the target map by purpose, then merge into the per-type bucket
		Map<ExType, Set<Example>> whichExamples =
				(purpose == ExType.TRAINING) ? trainExamples : testExamples;
		Set<Example> previousExamples = whichExamples.get(type);
		if (previousExamples == null)
		{
			previousExamples = new LinkedHashSet<Example>();
			whichExamples.put(type, previousExamples);
		}
		previousExamples.addAll(newExamples);
	}

	/** Builds the Weka classifier (naive Bayes) from the training examples and logs a
	 * leave-one-out cross-validation summary.
	 * @throws Exception if the classifier cannot be built or evaluated */
	public void buildClassifier() throws Exception
	{
		transformDataset();
		transformExamples();
		classifier = new NaiveBayes();
		classifier.buildClassifier(dataset);

		// evaluate a fresh model with leave-one-out CV (folds == number of instances);
		// a fixed seed keeps the logged summary reproducible between runs
		Classifier bayes = new NaiveBayes();
		Evaluation eval = new Evaluation(dataset);
		eval.crossValidateModel(bayes, dataset, dataset.numInstances(), new Random(1));
		logger.log(Level.INFO, eval.toSummaryString(), "wekaEval");
	}

	/** Transforms the list of attribute sequences into Weka's representation of a dataset.
	 * Attributes are numbered "1".."n" in the (sorted) order of {@code allFrequentSequences};
	 * the last attribute, "class", is nominal with one label per {@link ExType}. */
	public void transformDataset()
	{
		ArrayList<Attribute> attributes = new ArrayList<Attribute>();

		// one numeric attribute per frequent sequence, named by its 1-based position
		for (int number = 1; number <= allFrequentSequences.size(); number++)
		{
			attributes.add(new Attribute(Integer.toString(number)));
		}

		// nominal class attribute with one label per example type
		ArrayList<String> labels = new ArrayList<String>();
		for (ExType type : ExType.values())
		{
			labels.add(type.toString());
		}
		Attribute classAttr = new Attribute("class", labels);
		attributes.add(classAttr);
		dataset = new Instances("sequence-dataset", attributes, getExampleNr(ExType.TRAINING) + 1);
		dataset.setClass(classAttr);
	}

	/** Transforms all training examples into Weka instances and inserts them into the dataset.
	 * {@link #transformDataset()} must have been called first. */
	public void transformExamples()
	{
		for (Map.Entry<ExType, Set<Example>> entry : trainExamples.entrySet())
		{
			for (Example ex : entry.getValue())
			{
				dataset.add(buildInstance(ex, entry.getKey().toString()));
			}
		}
	}

	/** Builds the Weka representation of an instance (training example or example to classify).
	 * @param example internal representation of the example
	 * @param className type of the example, or {@code null} for an unlabeled instance
	 * @return Weka instance with 0/1 attribute values and weight 1 */
	private Instance buildInstance(Example example, String className)
	{
		double[] values = new double[dataset.numAttributes()];
		int number = 0;
		for (Sequence seq : allFrequentSequences)
		{
			// binary attribute: 1 when the example contains the sequence, 0 otherwise
			values[number] = example.getAttrVal(seq) ? 1 : 0;
			number++;
		}
		if (className != null)
		{
			// last slot is the class attribute, stored as the label's index
			values[dataset.numAttributes() - 1] = dataset.attribute("class").indexOfValue(className);
		}
		return new DenseInstance(1, values);
	}

	/** Classifies a database sequence of a document with the Weka classifier,
	 * building the classifier first if necessary.
	 * @param dbSequence sequence representing the document
	 * @param maxDist maximal distance between elements of a sequence
	 * @return predicted example type
	 * @throws Exception if the instance could not be classified */
	public ExType classify(DbSequence dbSequence, int maxDist) throws Exception
	{
		if (classifier == null)
		{
			buildClassifier();
		}

		Example example = new Example(allFrequentSequences, dbSequence, maxDist);
		logger.log(Level.INFO, example.toString(), "course");
		Instance instance = buildInstance(example, null);
		instance.setDataset(dataset);
		double result = classifier.classifyInstance(instance);
		// map the numeric class index back to the ExType label
		String classType = dataset.classAttribute().value((int) result);
		ExType predictedType = ExType.valueOf(classType);
		logger.log(Level.INFO, "Weka classification:" + predictedType, "course");
		return predictedType;
	}

	/** Classifies a database sequence of a document with the DeEPs classifier.
	 * @param dbSequence sequence representing the document
	 * @param maxDist maximal distance between elements of a sequence
	 * @return predicted example type, or {@code null} when DeEPs cannot decide */
	public ExType classifyWithDeEPs(DbSequence dbSequence, int maxDist)
	{
		Example testExample = new Example(allFrequentSequences, dbSequence, maxDist);
		return DeEPs.classify(testExample, trainExamples);
	}

	/** Checks the accuracy rate of the DeEPs classifier on the stored test examples.
	 * @return accuracy in percent, or 0 when there are no test examples */
	public double evaluateDeEPs()
	{
		int all = 0;
		int classifiedCorrectly = 0;
		int notClassified = 0;
		int testCount = getExampleNr(ExType.TESTING);

		logger.log(Level.INFO, "[TEST] Attributes: " + allFrequentSequences.size(), "deepsEval");

		// guard against 0/0 (NaN) in the percentage computations below
		if (testCount == 0)
		{
			logger.log(Level.INFO, "No test examples - nothing to evaluate", "deepsEval");
			return 0;
		}

		LoggingModule.startProgress("deepsEval", "Evaluating DeEPs", testCount);
		for (Map.Entry<ExType, Set<Example>> entry : testExamples.entrySet())
		{
			for (Example example : entry.getValue())
			{
				ExType type = DeEPs.classify(example, trainExamples);
				if (entry.getKey() == type)
				{
					classifiedCorrectly++;
					logger.log(Level.INFO, "GOOD", "course");
				}
				else if (type == null)
				{
					notClassified++;
				}
				else
				{
					logger.log(Level.INFO, "BAD", "course");
				}
				all++;
				LoggingModule.tickProgress();
			}
		}
		LoggingModule.endProgress();

		logger.log(Level.INFO, "classified examples: " + all, "deepsEval");
		logger.log(
				Level.INFO,
				"Fragment DeEPs accuracy: "
						+ MyUtils.doubleFormat.format((double) 100 * classifiedCorrectly / testCount) + " %",
				"testResult");
		logger.log(Level.INFO,
				"Fragment not classified : "
						+ MyUtils.doubleFormat.format((double) 100 * notClassified / testCount)
						+ " %", "testResult");

		return (double) 100 * classifiedCorrectly / testCount;
	}

	/** Logs the sequences used as attributes for classification. */
	public void printAttributes()
	{
		logger.log(Level.INFO, "attribute sequences: " + allFrequentSequences, "attributes");
	}

	/** Prints the training examples, grouped by type, to standard output. */
	public void printExamples()
	{
		System.out.println("---------classification examples------------");
		for (Map.Entry<ExType, Set<Example>> entry : trainExamples.entrySet())
		{
			System.out.println(entry.getKey() + "(size:" + entry.getValue().size() + ") -" + entry.getValue());
		}
	}

	/** Counts examples of the given purpose across all types.
	 * @param type {@code ExType.TRAINING} for training examples, anything else for test examples
	 * @return number of examples from the chosen set */
	public int getExampleNr(ExType type)
	{
		Map<ExType, Set<Example>> whichExamples =
				(type == ExType.TRAINING) ? trainExamples : testExamples;
		int sum = 0;
		for (Set<Example> examples : whichExamples.values())
		{
			sum += examples.size();
		}
		return sum;
	}

	/** Extracts one part of a set, split positionally by iteration order.
	 * NOTE: with {@code set.size() % allParts != 0}, the trailing remainder elements
	 * belong to no part (they are, however, always kept by {@link #removePartFromSet}).
	 * @param set set to divide
	 * @param whichPart 1-based index of the part to return
	 * @param allParts into how many parts the set is divided
	 * @return the requested part of the set, in the source's iteration order */
	public static <SomeType> Set<SomeType> extractPartOfSet(Set<SomeType> set, int whichPart, int allParts)
	{
		int partSize = set.size() / allParts;
		// LinkedHashSet keeps results deterministic and consistent with removePartFromSet
		Set<SomeType> resultSet = new LinkedHashSet<SomeType>();

		int k = 0;
		for (SomeType t : set)
		{
			// collect elements from position partSize*(whichPart-1) until the part is full
			if (k >= partSize * (whichPart - 1) && resultSet.size() < partSize)
			{
				resultSet.add(t);
			}
			k++;
		}
		return resultSet;
	}

	/** Returns the sets of a map without a given part of each, split positionally by
	 * iteration order; the complement of {@link #extractPartOfSet} per map entry.
	 * @param set map of sets to divide
	 * @param whichPart 1-based index of the part to remove from each set
	 * @param allParts into how many parts each set is divided
	 * @return map with the specified part removed from each value set */
	public static <Type1, Type2> Map<Type1, Set<Type2>> removePartFromSet(Map<Type1, Set<Type2>> set, int whichPart,
			int allParts)
	{
		Map<Type1, Set<Type2>> resultMap = new HashMap<Type1, Set<Type2>>();
		for (Map.Entry<Type1, Set<Type2>> entry : set.entrySet())
		{
			int partSize = entry.getValue().size() / allParts;
			Set<Type2> resultSet = new LinkedHashSet<Type2>();

			int k = 0;
			for (Type2 t : entry.getValue())
			{
				// keep everything outside the removed window [partSize*(whichPart-1), partSize*whichPart)
				if (k < partSize * (whichPart - 1) || k >= partSize * whichPart)
				{
					resultSet.add(t);
				}
				k++;
			}
			resultMap.put(entry.getKey(), resultSet);
		}
		return resultMap;
	}

}
