package cz.semjob.learning.classifier.benchmark;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;

import weka.classifiers.Classifier;
import weka.core.Instances;
import cz.semjob.document.AnnotatedDocument;
import cz.semjob.document.annotation.LabeledRelationWordsPair;
import cz.semjob.document.annotation.LabeledWords;
import cz.semjob.document.annotation.RelationWordsPair;
import cz.semjob.document.annotation.metadata.AnnotationTypeFactory;
import cz.semjob.document.annotation.metadata.Entity;
import cz.semjob.document.annotation.metadata.Relation;
import cz.semjob.document.processing.DocumentInstance;
import cz.semjob.document.processing.DocumentProcessingException;
import cz.semjob.document.processing.DocumentWord;
import cz.semjob.document.processing.batch.DocumentsBatchProcessor;
import cz.semjob.learning.classifier.ClassificationException;
import cz.semjob.learning.classifier.ClassifierExecutor;
import cz.semjob.learning.classifier.LearningException;
import cz.semjob.learning.data.DataSet;
import cz.semjob.learning.data.DataSetBatchCreator;
import cz.semjob.learning.data.IDataSet;
import cz.semjob.learning.data.RelationDataSet;
import cz.semjob.learning.data.TaxonomyLearningException;
import cz.semjob.learning.data.exporter.ARFFDataSetExporter;
import cz.semjob.learning.instances.creator.ITextInstanceCreator;
import cz.semjob.learning.instances.loader.ARFFDataSetLoader;
import cz.semjob.learning.instances.loader.InstancesCreator;
import cz.semjob.learning.learner.IDataSetLearner;
import cz.semjob.learning.relation.IRelationInstanceCreator;

@Component
public class ClassifierBenchmark {

	// Resolves relation metadata, e.g. all Relation instances of a given type.
	@Autowired
	@Qualifier("annotationTypeFactory")
	AnnotationTypeFactory annotationTypeFactory;

	// Loads exported ARFF files back into Weka Instances.
	@Autowired
	private ARFFDataSetLoader loader;

	// NOTE(review): injected but never referenced in this class (only its
	// setter exists) - candidate for removal; confirm no subclass relies on it.
	@Autowired
	private InstancesCreator instancesLoader;

	// Processes a directory of raw documents into AnnotatedDocuments.
	@Autowired
	private DocumentsBatchProcessor documentBatchProcessor;

	// Serializes an IDataSet to ARFF text for writing to disk.
	@Autowired
	private ARFFDataSetExporter dataSetExporter;

	// Runs a trained classifier over test documents.
	@Autowired
	private ClassifierExecutor classifierExecutor;

	// Persists benchmark results to disk.
	@Autowired
	private ClassifierBenchmarkResultsIO resultIO;

	private static Logger logger = Logger.getLogger(ClassifierBenchmark.class);

	/**
	 * Convenience overload that benchmarks a classifier for {@code entity}
	 * using an unseeded {@link Random} for the learn/test document split.
	 *
	 * @see #benchmarkClassifierForEntity(IDataSetLearner, Entity,
	 *      ITextInstanceCreator, String, String, double, Random, Object...)
	 */
	public ClassifierEntityBenchmarkResults benchmarkClassifierForEntity(
			IDataSetLearner learner, Entity entity,
			ITextInstanceCreator instanceCreator, String inputPath,
			String processedPath, double learnDocumentsRatio,
			Object... dataSources) throws TaxonomyLearningException,
			ClassificationException, LearningException {
		return benchmarkClassifierForEntity(learner, entity, instanceCreator,
				inputPath, processedPath, learnDocumentsRatio, new Random(),
				dataSources);
	}

	/**
	 * Benchmarks the given learner for a single entity type: randomly splits
	 * the documents under {@code inputPath} into learn/test sets, builds a
	 * classifier on the learn set, classifies the test documents and computes
	 * per-document and overall precision, recall, true-negative rate and
	 * accuracy. Results are also written to
	 * {@code src/test/resources/results/instances}.
	 *
	 * @param learner builds the Weka classifier from the learning instances
	 * @param entity the entity type being benchmarked
	 * @param instanceCreator converts annotated text to learning instances
	 * @param inputPath directory of input documents
	 * @param processedPath directory for processed documents
	 * @param learnDocumentsRatio fraction of documents used for learning
	 * @param random source of randomness for the document split (pass a
	 *        seeded instance for reproducible benchmarks)
	 * @param dataSources extra data sources forwarded to the classifier
	 *        executor
	 * @return the collected benchmark results
	 * @throws TaxonomyLearningException on document-processing or I/O failure
	 * @throws ClassificationException on classification failure
	 * @throws LearningException on classifier-building failure
	 */
	public ClassifierEntityBenchmarkResults benchmarkClassifierForEntity(
			IDataSetLearner learner, Entity entity,
			ITextInstanceCreator instanceCreator, String inputPath,
			String processedPath, double learnDocumentsRatio, Random random,
			Object... dataSources) throws TaxonomyLearningException,
			ClassificationException, LearningException {

		List<AnnotatedDocument> learnDocuments = new ArrayList<AnnotatedDocument>();
		List<AnnotatedDocument> testDocuments = new ArrayList<AnnotatedDocument>();
		List<DocumentInstance> testDocumentInstances = new ArrayList<DocumentInstance>();

		// Fills the three output lists in place with the random split.
		createDocumentsDistribution(inputPath, processedPath, random,
				learnDocumentsRatio, learnDocuments, testDocuments,
				testDocumentInstances);

		DataSetStatistics dataSetStatistics = new DataSetStatistics(
				learnDocuments.size() + testDocuments.size(),
				learnDocuments.size());

		// statistics for the learn documents ("none" counts the negative,
		// i.e. non-entity, annotations)
		for (AnnotatedDocument annotatedDocument : learnDocuments) {
			dataSetStatistics.addLearnAnnotationsCount(entity.getName(),
					annotatedDocument.getAnnotationCount(entity));
			dataSetStatistics.addLearnAnnotationsCount("none",
					annotatedDocument.getNegativeAnnotationCount(entity));
		}
		// statistics for the test documents
		for (AnnotatedDocument annotatedDocument : testDocuments) {
			dataSetStatistics.addTestAnnotationsCount(entity.getName(),
					annotatedDocument.getAnnotationCount(entity));
			dataSetStatistics.addTestAnnotationsCount("none",
					annotatedDocument.getNegativeAnnotationCount(entity));
		}

		// Single-entity benchmark, so the creators map has one entry.
		Map<Entity, ITextInstanceCreator> creatorsMap = new HashMap<Entity, ITextInstanceCreator>();
		creatorsMap.put(entity, instanceCreator);

		DataSetBatchCreator dataSetBatchCreator = new DataSetBatchCreator();
		dataSetBatchCreator.setTextInstanceCreators(creatorsMap);
		DataSet dataSet = dataSetBatchCreator.createDataSet(learnDocuments,
				entity);

		// Exports the data set to ARFF on disk and loads it back as Weka
		// Instances. NOTE(review): path is hard-coded to test resources.
		Instances learnIstances = createDataSet(dataSet,
				"src/test/resources/datasets/");

		Classifier classifier = learner.buildClassifier(learnIstances);

		// Classify every test document; keyed by its DocumentInstance.
		Map<DocumentInstance, List<LabeledWords>> foundAnnotations = classifierExecutor
				.executeClassificationForEntity(classifier, entity,
						instanceCreator, testDocumentInstances, dataSources);

		ClassifierEntityBenchmarkResults results = new ClassifierEntityBenchmarkResults(
				entity, classifier, instanceCreator, learner.getInfo());
		results.setName(entity.getName());
		results.setDataSetStatistics(dataSetStatistics);

		// Accumulate the confusion-matrix cells over all test documents.
		double overallFalsePositive = 0;
		double overallFalseNegative = 0;
		double overallTruePositive = 0;
		double overallTrueNegative = 0;
		for (AnnotatedDocument testDocument : testDocuments) {
			double truePositive = 0;

			// NOTE(review): assumes the executor returned an entry for every
			// test document; a missing entry would NPE below - confirm.
			List<LabeledWords> foundDocumentAnnotations = foundAnnotations
					.get(testDocument.getDocumentInstance());
			List<List<DocumentWord>> realDocumentAnnotations = testDocument
					.getPositiveAnnotations(entity);

			// A found annotation is a true positive when its exact word
			// sequence appears among the real (gold) annotations.
			for (LabeledWords foundAnnotation : foundDocumentAnnotations) {
				if (realDocumentAnnotations
						.contains(foundAnnotation.getWords())) {
					truePositive++;
				}
			}

			// Remaining found annotations are spurious.
			double falsePositive = foundDocumentAnnotations.size()
					- truePositive;
			// Gold annotations that were not found.
			double falseNegative = realDocumentAnnotations.size()
					- truePositive;
			// NOTE(review): trueNegative subtracts falseNegative from the
			// negative-annotation count - verify this accounting is intended.
			double trueNegative = testDocument.getNegativeAnnotations(entity)
					.size() - falseNegative;
			System.out
					.println(testDocument.getDocumentInstance().getFileName());
			printConfusionMatrix(truePositive, falsePositive, falseNegative,
					trueNegative);

			double precision = calculatePrecision(truePositive, falsePositive);
			double recall = calculateRecall(truePositive, falseNegative);
			double trueNegativeRate = calculateTrueNegativeRate(falsePositive,
					trueNegative);
			double accuracy = calculateAccuracy(truePositive, falsePositive,
					falseNegative, trueNegative);

			results.addResult(testDocument.getDocumentInstance().getFileName(),
					precision, recall, trueNegativeRate, accuracy,
					foundDocumentAnnotations);

			overallFalsePositive += falsePositive;
			overallTrueNegative += trueNegative;
			overallTruePositive += truePositive;
			overallFalseNegative += falseNegative;
		}
		// Overall metrics are micro-averaged: computed from the summed
		// confusion-matrix cells, not averaged per document.
		System.out.println("Overall:");
		printConfusionMatrix(overallTruePositive, overallFalsePositive,
				overallFalseNegative, overallTrueNegative);

		double overallPrecision = calculatePrecision(overallTruePositive,
				overallFalsePositive);
		double overallRecall = calculateRecall(overallTruePositive,
				overallFalseNegative);
		double overallTrueNegativeRate = calculateTrueNegativeRate(
				overallFalsePositive, overallTrueNegative);
		double overallAccuracy = calculateAccuracy(overallTruePositive,
				overallFalsePositive, overallFalseNegative, overallTrueNegative);
		results.setOverallResult(overallPrecision, overallRecall,
				overallTrueNegativeRate, overallAccuracy);

		try {
			resultIO.writeBenchmarkResults(results,
					"src/test/resources/results/instances");
		} catch (IOException e) {
			logger.error(e);
			throw new TaxonomyLearningException(e);
		}
		return results;
	}

	/**
	 * Prints the 2x2 confusion matrix to standard output, one row per line:
	 * (TP, FN) on the first line and (FP, TN) on the second.
	 */
	private void printConfusionMatrix(double truePositive,
			double falsePositive, double falseNegative, double trueNegative) {
		String positiveRow = "True positive: " + truePositive
				+ " False negative: " + falseNegative;
		String negativeRow = "False positive: " + falsePositive
				+ " True negative: " + trueNegative;
		System.out.println(positiveRow);
		System.out.println(negativeRow);
	}

	/**
	 * Convenience overload that benchmarks a relation classifier using an
	 * unseeded {@link Random} for the learn/test document split.
	 *
	 * @see #benchmarkClassifierForRelations(IDataSetLearner, String,
	 *      IRelationInstanceCreator, String, String, double, Random)
	 */
	public ClassifierRelationBenchmarkResults benchmarkClassifierForRelations(
			IDataSetLearner learner,
			IRelationInstanceCreator instanceCreator, String inputPath,
			String processedPath, double learnDocumentsRatio, String name)
			throws TaxonomyLearningException, ClassificationException,
			LearningException {
		return benchmarkClassifierForRelations(learner, name, instanceCreator,
				inputPath, processedPath, learnDocumentsRatio, new Random());
	}

	/**
	 * Benchmarks the given learner for a relation type: randomly splits the
	 * documents under {@code inputPath} into learn/test sets, builds a
	 * classifier on the learn set, classifies candidate relation word pairs
	 * in the test set and computes per-document and overall precision,
	 * recall, true-negative rate and accuracy. Results are also written to
	 * {@code src/test/resources/results/relations}.
	 *
	 * @param learner builds the Weka classifier from the learning instances
	 * @param relationType the relation type being benchmarked
	 * @param instanceCreator converts relation word pairs to instances
	 * @param inputPath directory of input documents
	 * @param processedPath directory for processed documents
	 * @param learnDocumentsRatio fraction of documents used for learning
	 * @param random source of randomness for the document split (pass a
	 *        seeded instance for reproducible benchmarks)
	 * @return the collected benchmark results
	 * @throws TaxonomyLearningException on document-processing or I/O failure
	 * @throws ClassificationException on classification failure
	 * @throws LearningException on classifier-building failure
	 */
	public ClassifierRelationBenchmarkResults benchmarkClassifierForRelations(
			IDataSetLearner learner, String relationType,
			IRelationInstanceCreator instanceCreator, String inputPath,
			String processedPath, double learnDocumentsRatio, Random random)
			throws TaxonomyLearningException, ClassificationException,
			LearningException {

		List<AnnotatedDocument> learnDocuments = new ArrayList<AnnotatedDocument>();
		List<AnnotatedDocument> testDocuments = new ArrayList<AnnotatedDocument>();
		List<DocumentInstance> testDocumentInstances = new ArrayList<DocumentInstance>();

		// Fills the three output lists in place with the random split.
		createDocumentsDistribution(inputPath, processedPath, random,
				learnDocumentsRatio, learnDocuments, testDocuments,
				testDocumentInstances);

		DataSetStatistics dataSetStatistics = new DataSetStatistics(
				learnDocuments.size() + testDocuments.size(),
				learnDocuments.size());

		// Collect, per test document, the candidate word pairs the
		// classifier will label.
		Map<DocumentInstance, List<RelationWordsPair>> candidateRelationWordsPairs = new HashMap<DocumentInstance, List<RelationWordsPair>>();
		for (AnnotatedDocument annotatedDocument : testDocuments) {
			List<RelationWordsPair> documentCandidateRelationWords = annotatedDocument
					.getCandidateRelations(relationType);
			candidateRelationWordsPairs.put(
					annotatedDocument.getDocumentInstance(),
					documentCandidateRelationWords);
			// now we create the statistics for the test documents
			Map<String, Integer> relationTypesCount = annotatedDocument
					.getRelationsCount(relationType);
			for (String relation : relationTypesCount.keySet()) {
				dataSetStatistics.addTestAnnotationsCount(relation,
						relationTypesCount.get(relation));
			}
		}
		// statistics for the learn documents
		for (AnnotatedDocument annotatedDocument : learnDocuments) {
			Map<String, Integer> relationTypesCount = annotatedDocument
					.getRelationsCount(relationType);
			for (String relation : relationTypesCount.keySet()) {
				dataSetStatistics.addLearnAnnotationsCount(relation,
						relationTypesCount.get(relation));
			}
		}

		// All concrete relations belonging to the benchmarked type.
		List<Relation> relations = annotationTypeFactory
				.getRelationsByType(relationType);

		Map<String, IRelationInstanceCreator> creatorsMap = new HashMap<String, IRelationInstanceCreator>();

		creatorsMap.put(relationType, instanceCreator);

		DataSetBatchCreator dataSetBatchCreator = new DataSetBatchCreator();
		dataSetBatchCreator.setRelationInstanceCreators(creatorsMap);
		RelationDataSet dataSet = dataSetBatchCreator.createRelationDataSet(
				learnDocuments, relationType, relations);
		// Exports the data set to ARFF on disk and loads it back as Weka
		// Instances. NOTE(review): path is hard-coded to test resources.
		Instances learnIstances = createDataSet(dataSet,
				"src/test/resources/datasets/relations/");

		Classifier classifier = learner.buildClassifier(learnIstances);

		// Per document: the labeled pairs found for each relation.
		Map<DocumentInstance, Map<Relation, List<LabeledRelationWordsPair>>> foundAnnotations = classifierExecutor
				.executeClassificationForRelations(classifier, relationType,
						instanceCreator, candidateRelationWordsPairs);

		ClassifierRelationBenchmarkResults results = new ClassifierRelationBenchmarkResults(
				relations, classifier, instanceCreator, learner.getInfo());
		results.setName(relationType);
		results.setDataSetStatistics(dataSetStatistics);

		// Accumulate the confusion-matrix cells over all test documents.
		double overallFalsePositive = 0;
		double overallFalseNegative = 0;
		double overallTruePositive = 0;
		double overallTrueNegative = 0;
		for (AnnotatedDocument testDocument : testDocuments) {
			double truePositive = 0;

			// NOTE(review): assumes the executor returned an entry for every
			// test document; a missing entry would NPE below - confirm.
			Map<Relation, List<LabeledRelationWordsPair>> foundDocumentAnnotations = foundAnnotations
					.get(testDocument.getDocumentInstance());
			Map<Relation, List<LabeledRelationWordsPair>> realDocumentAnnotations = testDocument
					.getRelationsMap(relationType);

			for (Relation relation : realDocumentAnnotations.keySet()) {
				// The artificial "none" relation is excluded from true
				// positives; only real relation matches count.
				if (relation.equals(Relation.getNoneRelation(relationType))) {

				} else {
					List<LabeledRelationWordsPair> foundRelationWordsPairs = foundDocumentAnnotations
							.get(relation);
					if (foundRelationWordsPairs != null) {
						List<LabeledRelationWordsPair> realRelationWordsPairs = realDocumentAnnotations
								.get(relation);
						for (RelationWordsPair foundAnnotation : foundRelationWordsPairs) {
							if (realRelationWordsPairs
									.contains(foundAnnotation)) {
								truePositive++;
							}
						}
					}
				}
			}

			// Totals used to derive the remaining confusion-matrix cells.
			double foundRelationsCount = countRelations(foundDocumentAnnotations);
			double realRelationsCount = testDocument
					.getTotalRelationsCount(relationType);
			double realNonEmptyRelationsCount = testDocument
					.getRelationTypeCount(relationType);
			double falsePositive = foundRelationsCount - truePositive;
			double falseNegative = realNonEmptyRelationsCount - truePositive;
			// Everything that is neither TP, FP nor FN is a true negative.
			double trueNegative = realRelationsCount - falsePositive
					- falseNegative - truePositive;
			System.out
					.println(testDocument.getDocumentInstance().getFileName());
			System.out.println("Found annotations count: "
					+ foundRelationsCount);
			System.out.println("Real annotations count: " + realRelationsCount
					+ ", " + "positives:" + realNonEmptyRelationsCount);
			printConfusionMatrix(truePositive, falsePositive, falseNegative,
					trueNegative);

			double precision = calculatePrecision(truePositive, falsePositive);
			double recall = calculateRecall(truePositive, falseNegative);
			double trueNegativeRate = calculateTrueNegativeRate(falsePositive,
					trueNegative);
			double accuracy = calculateAccuracy(truePositive, falsePositive,
					falseNegative, trueNegative);

			results.addResult(testDocument.getDocumentInstance().getFileName(),
					precision, recall, trueNegativeRate, accuracy,
					foundDocumentAnnotations);

			overallFalsePositive += falsePositive;
			overallTrueNegative += trueNegative;
			overallTruePositive += truePositive;
			overallFalseNegative += falseNegative;
		}
		// Overall metrics are micro-averaged: computed from the summed
		// confusion-matrix cells, not averaged per document.
		System.out.println("Overall:");
		printConfusionMatrix(overallTruePositive, overallFalsePositive,
				overallFalseNegative, overallTrueNegative);
		double overallPrecision = calculatePrecision(overallTruePositive,
				overallFalsePositive);
		double overallRecall = calculateRecall(overallTruePositive,
				overallFalseNegative);
		double overallTrueNegativeRate = calculateTrueNegativeRate(
				overallFalsePositive, overallTrueNegative);
		double overallAccuracy = calculateAccuracy(overallTruePositive,
				overallFalsePositive, overallFalseNegative, overallTrueNegative);
		results.setOverallResult(overallPrecision, overallRecall,
				overallTrueNegativeRate, overallAccuracy);

		try {
			resultIO.writeBenchmarkResults(results,
					"src/test/resources/results/relations");
		} catch (IOException e) {
			logger.error(e);
			throw new TaxonomyLearningException(e);
		}
		return results;
	}

	/**
	 * Counts the total number of labeled relation word pairs across all
	 * relations found in one document.
	 *
	 * @param foundDocumentAnnotations classifier output keyed by relation
	 * @return the total pair count (as a double for ratio arithmetic)
	 */
	private double countRelations(
			Map<Relation, List<LabeledRelationWordsPair>> foundDocumentAnnotations) {
		int count = 0;
		// Iterate values() directly instead of keySet() + get() lookups.
		for (List<LabeledRelationWordsPair> pairs : foundDocumentAnnotations
				.values()) {
			count += pairs.size();
		}
		return count;
	}

	/**
	 * Exports the given data set as ARFF under {@code path} and loads it back
	 * as Weka {@link Instances} ready for learning.
	 *
	 * @param dataSet data set to export; its name becomes the file name
	 * @param path directory prefix (expected to end with a slash)
	 * @return the loaded learning instances
	 * @throws TaxonomyLearningException when the file cannot be written or
	 *         loaded
	 */
	private Instances createDataSet(IDataSet dataSet, String path)
			throws TaxonomyLearningException {
		String exportedDataSet = dataSetExporter.exportDataSet(dataSet);
		String dataSetPath = path + dataSet.getName();

		try {
			// NOTE(review): writes with the platform default charset; prefer
			// an explicit charset (e.g. UTF-8) once the loader's expected
			// encoding is confirmed.
			FileUtils.writeStringToFile(new File(dataSetPath), exportedDataSet);
			return loader.loadDataSet(dataSetPath);
		} catch (IOException e) {
			logger.error(e);
			throw new TaxonomyLearningException(e);
		}
	}

	/**
	 * Computes accuracy: (TP + TN) / (TP + FP + FN + TN).
	 *
	 * @return the accuracy, or 0 for an empty confusion matrix (previously
	 *         0/0 yielded NaN; the guard matches calculatePrecision's style)
	 */
	private double calculateAccuracy(double truePositive, double falsePositive,
			double falseNegative, double trueNegative) {
		double total = truePositive + falsePositive + trueNegative
				+ falseNegative;
		if (total == 0) {
			return 0;
		}
		return (truePositive + trueNegative) / total;
	}

	/**
	 * Computes the true-negative rate (specificity): TN / (TN + FP).
	 *
	 * @return the rate, or 0 when there are neither true negatives nor false
	 *         positives (previously 0/0 yielded NaN; the guard matches
	 *         calculatePrecision's style)
	 */
	private double calculateTrueNegativeRate(double falsePositive,
			double trueNegative) {
		if (trueNegative == 0 && falsePositive == 0) {
			return 0;
		}
		return trueNegative / (trueNegative + falsePositive);
	}

	/**
	 * Computes recall: TP / (TP + FN).
	 *
	 * @return the recall, or 0 when there are neither true positives nor
	 *         false negatives (previously 0/0 yielded NaN, which slipped
	 *         past the sanity check below because NaN &gt; 1 is false)
	 * @throws AssertionError if the computed recall exceeds 1, which would
	 *         indicate inconsistent confusion-matrix accounting upstream
	 */
	private double calculateRecall(double truePositive, double falseNegative) {
		if (truePositive == 0 && falseNegative == 0) {
			return 0;
		}
		double recall = truePositive / (truePositive + falseNegative);
		if (recall > 1) {
			throw new AssertionError("Recall could not be greater than 1");
		}
		return recall;
	}

	/**
	 * Computes precision: TP / (TP + FP). Returns 0 instead of NaN when the
	 * classifier made no positive predictions at all.
	 */
	private double calculatePrecision(double truePositive, double falsePositive) {
		return (truePositive == 0 && falsePositive == 0) ? 0
				: truePositive / (truePositive + falsePositive);
	}

	// Setter for tests / manual wiring; normally injected by Spring.
	public void setLoader(ARFFDataSetLoader loader) {
		this.loader = loader;
	}

	// Setter for tests / manual wiring; normally injected by Spring.
	public void setInstancesLoader(InstancesCreator instancesLoader) {
		this.instancesLoader = instancesLoader;
	}

	// Setter for tests / manual wiring; normally injected by Spring.
	public void setDocumentBatchProcessor(
			DocumentsBatchProcessor documentBatchProcessor) {
		this.documentBatchProcessor = documentBatchProcessor;
	}

	// Setter for tests / manual wiring; normally injected by Spring.
	public void setDataSetExporter(ARFFDataSetExporter dataSetExporter) {
		this.dataSetExporter = dataSetExporter;
	}

	// Setter for tests / manual wiring; normally injected by Spring.
	public void setClassifierExecutor(ClassifierExecutor classifierExecutor) {
		this.classifierExecutor = classifierExecutor;
	}

	// Setter for tests / manual wiring; normally injected by Spring.
	public void setResultIO(ClassifierBenchmarkResultsIO resultIO) {
		this.resultIO = resultIO;
	}

	/**
	 * Processes all documents under {@code inputPath} and randomly partitions
	 * them into a learning set and a test set, filling the three output lists
	 * in place. Exactly {@code floor(total * learnDocumentsRatio)} documents
	 * go to the learning set; the rest become test documents (whose
	 * DocumentInstances are also collected). One {@code random.nextDouble()}
	 * is drawn per document, so a seeded Random reproduces the same split.
	 *
	 * @throws TaxonomyLearningException when document processing fails
	 */
	public void createDocumentsDistribution(String inputPath,
			String processedPath, Random random, double learnDocumentsRatio,
			List<AnnotatedDocument> learnDocuments,
			List<AnnotatedDocument> testDocuments,
			List<DocumentInstance> testDocumentInstances)
			throws TaxonomyLearningException {
		List<AnnotatedDocument> annotatedDocuments;
		try {
			annotatedDocuments = documentBatchProcessor
					.processAnnotatedDocumentBatch(inputPath, processedPath);
		} catch (DocumentProcessingException e) {
			logger.error(e);
			throw new TaxonomyLearningException(e);
		}

		int totalCount = annotatedDocuments.size();
		int learnCount = (int) (totalCount * learnDocumentsRatio);

		// Pool of document indexes not yet assigned to either set.
		List<Integer> unassignedIndexes = new ArrayList<Integer>();
		for (int i = 0; i < totalCount; i++) {
			unassignedIndexes.add(i);
		}

		for (int drawn = 0; drawn < totalCount; drawn++) {
			// Pick a random position in the shrinking pool; the first
			// learnCount draws go to the learning set.
			int poolPosition = (int) (random.nextDouble() * unassignedIndexes
					.size());
			int documentIndex = unassignedIndexes.remove(poolPosition);
			AnnotatedDocument document = annotatedDocuments.get(documentIndex);
			if (drawn < learnCount) {
				learnDocuments.add(document);
			} else {
				testDocuments.add(document);
				testDocumentInstances.add(document.getDocumentInstance());
			}
		}

		logger.info("Learn documents size: " + learnDocuments.size());
		logger.info("Learn documents:");
		for (AnnotatedDocument learnDocument : learnDocuments) {
			logger.info(learnDocument.getDocumentInstance().getFileName());
		}
		logger.info("Test documents size: " + testDocuments.size());
		logger.info("Test documents:");
		for (AnnotatedDocument testDocument : testDocuments) {
			logger.info(testDocument.getDocumentInstance().getFileName());
		}
	}

}
