/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package org.simba.evaluation;

import java.io.BufferedReader;
import java.io.FileReader;
import java.util.HashMap;
import java.util.TreeSet;
import java.util.regex.Pattern;
import org.apache.log4j.Logger;
import org.simba.controller.GenericController;
import org.simba.utils.Configuration;
import org.simba.utils.EntityUriMap;
import org.simba.utils.NamedEntity;

/**
 *
 * @author ngonga
 */
public class Evaluator {

    static Logger logger = Logger.getLogger("AEON");
    // sentence (normalized key) -> set of expected entity URIs, loaded from the gold-standard corpus
    HashMap<String, TreeSet<String>> referenceCorpus;
    // sentence (normalized key) -> set of entity URIs produced by the tool under evaluation
    HashMap<String, TreeSet<String>> toolResults;

    /**
     * Canonical replacements for abbreviated/alternate surface forms found in the
     * annotated corpora, applied before URIs are expanded to DBpedia resources.
     */
    private static final HashMap<String, String> CANONICAL_URIS = new HashMap<String, String>();

    static {
        CANONICAL_URIS.put("US", "United_States");
        CANONICAL_URIS.put("UN", "United_Nations");
        CANONICAL_URIS.put("Xinhua", "Xinhua_News_Agency");
        CANONICAL_URIS.put("US_State_Department", "United_States_Department_of_State");
        CANONICAL_URIS.put("IHOP", "International_House_of_Pancakes");
        CANONICAL_URIS.put("UK", "United_Kingdom");
        CANONICAL_URIS.put("New_York_Fire_Department", "New_York_City_Fire_Department");
    }

    /**
     * Adds {@code base} to {@code set}, suffixed with the smallest non-negative
     * integer that makes the entry unique. This lets multi-sets of URIs be stored
     * in a {@link TreeSet}: the same URI occurring k times yields base0..base(k-1).
     *
     * @param set  target set, modified in place
     * @param base string to add (URI or placeholder)
     */
    private static void addWithSuffix(TreeSet<String> set, String base) {
        int counter = 0;
        while (set.contains(base + counter)) {
            counter++;
        }
        set.add(base + counter);
    }

    /**
     * Runs the configured disambiguation pipeline over every sentence of the
     * annotated corpus and compares the returned URIs with the reference annotations.
     *
     * @param config          pipeline configuration; {@code config.inputText} is
     *                        overwritten for each sentence
     * @param annotatedCorpus path to the annotated reference corpus (one sentence per line)
     * @return micro-averaged precision and recall as a formatted string
     *         (NaN when the tool retrieves nothing or the reference is empty)
     */
    public String evaluate(Configuration config, String annotatedCorpus) {
        readReferenceCorpus(annotatedCorpus);
        logger.info(referenceCorpus);
        toolResults = new HashMap<String, TreeSet<String>>();
        EntityUriMap map;
        double overlap = 0, retrieved = 0, total = 0;
        TreeSet<String> entry;
        for (String sentence : referenceCorpus.keySet()) {
            config.inputText = sentence;
            map = GenericController.run(config);
            HashMap<NamedEntity, String> bestMapping = map.getBestMapping();
            System.out.println(bestMapping);
            entry = new TreeSet<String>();
            // Collect the tool's URIs; duplicate URIs are kept apart via numeric suffixes
            for (NamedEntity e : bestMapping.keySet()) {
                addWithSuffix(entry, bestMapping.get(e));
            }
            toolResults.put(sentence, entry);
            total = total + referenceCorpus.get(sentence).size();
            retrieved = retrieved + toolResults.get(sentence).size();
            overlap = overlap + Eval.getOverlap(referenceCorpus.get(sentence), toolResults.get(sentence));
        }

        logger.info("Results:\n" + toolResults);
        logger.info("Reference:\n" + referenceCorpus);
        return "P = " + (overlap / retrieved) + "; R = " + (overlap / total);
    }

    /**
     * Loads the reference annotations from the given corpus file into
     * {@link #referenceCorpus}.
     *
     * @param dataset path to the annotated corpus (one sentence per line)
     */
    public void readReferenceCorpus(String dataset) {
        referenceCorpus = readAnnotatedCorpus(dataset);
    }

    /**
     * Reads an annotated corpus and maps each sentence (normalized key) to the set
     * of entity URIs annotated in it. Assumes one sentence per line; lines without
     * annotations are skipped. Read failures are logged and yield the entries
     * collected so far (possibly an empty map).
     *
     * @param dataset path to the input corpus
     * @return map from normalized sentence key to its annotated URI set
     */
    public static HashMap<String, TreeSet<String>> readAnnotatedCorpus(String dataset) {
        HashMap<String, TreeSet<String>> annotations = new HashMap<String, TreeSet<String>>();
        BufferedReader rd = null;
        try {
            rd = new BufferedReader(new FileReader(dataset));
            String line = rd.readLine();
            HashMap<String, TreeSet<String>> entities;
            String text;
            int count = 0;
            while (line != null) {
                entities = getNamedEntities(line);
                if (!entities.isEmpty()) {
                    // getNamedEntities returns exactly one key: the normalized sentence
                    text = entities.keySet().iterator().next();
                    annotations.put(text, entities.get(text));
                    count = count + entities.get(text).size();
                }
                line = rd.readLine();
            }
            logger.info("Read corpus from " + dataset);
            logger.info("Got " + annotations.size() + " annotated sentences");
            logger.info("Got " + count + " entities");
        } catch (Exception e) {
            // Best-effort loading: log (with stack trace) instead of crashing the evaluation
            logger.error("Error while reading annotated corpus " + dataset, e);
        } finally {
            // FIX: the reader was previously never closed (resource leak)
            if (rd != null) {
                try {
                    rd.close();
                } catch (Exception ignored) {
                    // closing is best-effort; nothing sensible to do here
                }
            }
        }
        return annotations;
    }

    /**
     * Parses one annotated sentence of the form
     * {@code "... [surface, TYPE, Uri] ..."} and extracts its entity URIs.
     * Each annotation is replaced in the text by its surface form; the stripped
     * sentence is then normalized (lowercased, whitespace/commas/periods removed)
     * and truncated to at most 20 characters to serve as the map key.
     *
     * <p>URIs equal to {@code NULL} or starting with {@code *} are stored as
     * {@code *surface} placeholders; non-http URIs are expanded to DBpedia
     * resource URIs. Duplicates are disambiguated with numeric suffixes.
     *
     * @param input one line of the annotated corpus
     * @return singleton map from the normalized sentence key to its URI set
     */
    public static HashMap<String, TreeSet<String>> getNamedEntities(String input) {
        HashMap<String, TreeSet<String>> map = new HashMap<String, TreeSet<String>>();
        TreeSet<String> entities = new TreeSet<String>();
        String annotation, entity, uri;
        // hard bound on annotations per line; guards against malformed markup looping forever
        int count = 100;
        while (input.contains("[") && input.contains("]") && count > 0) {
            count--;
            // NOTE(review): assumes '[' precedes ']' and the annotation contains a comma;
            // malformed lines still throw here (caught by readAnnotatedCorpus's handler)
            annotation = input.substring(input.indexOf("[") + 1, input.indexOf("]"));
            entity = annotation.substring(0, annotation.indexOf(","));
            input = input.replaceFirst(Pattern.quote("[" + annotation + "]"), entity);
            uri = annotation.substring(annotation.lastIndexOf(",") + 1).trim();
            // canonical replacement of known abbreviations
            if (CANONICAL_URIS.containsKey(uri)) {
                uri = CANONICAL_URIS.get(uri);
            }
            if (uri.equals("NULL") || uri.startsWith("*")) {
                // unlinked entity: store a placeholder keyed on the surface form
                addWithSuffix(entities, "*" + entity);
            } else {
                if (!uri.startsWith("http")) {
                    uri = "http://dbpedia.org/resource/" + uri;
                }
                addWithSuffix(entities, uri);
            }
        }
        // normalize the stripped sentence into a compact lookup key
        input = input.toLowerCase();
        input = input.trim();
        input = input.replaceAll(" ", "");
        input = input.replaceAll(Pattern.quote(","), "");
        input = input.replaceAll(Pattern.quote("."), "");
        // FIX: original used substring(0, 20) unconditionally, which threw
        // StringIndexOutOfBoundsException for normalized sentences shorter than 20 chars
        map.put(input.substring(0, Math.min(20, input.length())), entities);
        return map;
    }

    public static void main(String args[]) {
        Evaluator eval = new Evaluator();
        eval.readReferenceCorpus("D:/Work/Data/WWW_Corpus/Reference Data/annotated_all.txt");
        HashMap<String, TreeSet<String>> result = Evaluator.readAnnotatedCorpus("D:/Work/Data/WWW_Corpus/corpus_annotated_apriori.txt");

        Eval.getDiff(eval.referenceCorpus, result);
        System.out.println(Eval.getMicroAveragePrecision(eval.referenceCorpus, result));
        System.out.println(Eval.getMicroAverageRecall(eval.referenceCorpus, result));
    }
}
