package edu.hit.irlab.coref.evaluation;

import static nlpeap.util.Strings.getNTimes;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.HashMap;

import nlpeap.io.XMl;
import nlpeap.model.eval.PRScore;
import nlpeap.model.eval.PRScores;
import nlpeap.util.algorithm.Unoin_Find_Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.dom4j.Document;
import org.dom4j.DocumentException;

import com.sun.org.apache.bcel.internal.generic.RETURN;

import edu.hit.irlab.coref.CRConfigProperties;
import edu.hit.irlab.coref.evaluation.methds.Bcubed;
import edu.hit.irlab.coref.evaluation.methds.Ceaf;
import edu.hit.irlab.coref.evaluation.methds.Valain;
import edu.hit.irlab.coref.evaluation.methds.Bcubed.BcubedScore;
import edu.hit.irlab.coref.evaluation.methds.Ceaf.CeafScore;
import edu.hit.irlab.coref.evaluation.methds.Valain.ValainScore;
import edu.hit.irlab.coref.mentionfinder.Mention;
import edu.hit.irlab.coref.mentionfinder.MentionFinder;
import edu.hit.irlab.coref.postprocess.Postprocessor;
import edu.hit.irlab.coref.resolve.Entity;
import edu.hit.irlab.coref.resolve.algorithm.ResolveAlgorithm;

/**
 * <p>
 * Evaluates the <i>Coreference Resolution System's</i> performance
 * using all three of the typical evaluation algorithms: <i>B-CUBED</i>,
 * <i>Valain</i> and <i>CEAF</i>.
 * </p>
 * 
 * @author chuter
 *
 */
public class Evaluation {
	public static final Log _log = LogFactory.getLog(Evaluation.class.getName());
	//finds the gold-standard (key) Mentions in a corpus (XML) file
	private final MentionFinder<Document> _corpusMentionFinder;
	//the coreference resolution algorithm under evaluation
	private final ResolveAlgorithm _resolveAlgorithm;
	private final static boolean _debug = CRConfigProperties.getInstance().isDebug();
	//post-processes the entities produced by the resolve algorithm
	private final Postprocessor _postProcessor;
	//Mention recognition / resolution log writers (opened only when _debug is true)
	private OutputStreamWriter _wrong_log; //mentions recognized but absent from the key
	private OutputStreamWriter _lost_log;  //key mentions the recognizer missed
	private OutputStreamWriter _cr_log;    //coreference cluster log
	private static final String SEPARATOR = getNTimes('-',72) + "\n";
	//characters irrelevant to mention-content comparison: any Unicode
	//punctuation (\pP) or whitespace (\s). The SAME pattern must be applied
	//to both sides of a comparison (see isMentionContMatch).
	private static final String IGNORED_CHARS_REGEX = "[\\pP\\s]";
	
	public Evaluation(
			MentionFinder<Document> corpusMentionFinder, 
			ResolveAlgorithm resolveAlgorithm,
			Postprocessor postProcessor
		) {
		_corpusMentionFinder = corpusMentionFinder;
		_resolveAlgorithm = resolveAlgorithm;
		_postProcessor = postProcessor;
	}
	
	/** Groups the <i>Mentions</i> into clusters according to their
	 *  <i>set_id</i>: mentions sharing a <i>set_id</i> land in the same
	 *  cluster. Each cluster holds the mentions' {@code mention_index}
	 *  values; clusters appear in order of first occurrence. */
	private static ArrayList<ArrayList<Integer>> getEntitySet(Mention[] mention_arr) {
		//maps a set_id to the index (within clusters) of the cluster
		//collecting the mentions carrying that set_id
		HashMap<Integer, Integer> id_map = new HashMap<Integer, Integer>();
		ArrayList<ArrayList<Integer>> clusters = new ArrayList<ArrayList<Integer>>();
		for (Mention mention : mention_arr) {
			int set_id = mention.getSetId();
			Integer cluster_id = id_map.get(set_id);
			if (cluster_id != null) {
				clusters.get(cluster_id).add(mention.mention_index);
			} else {
				//first time we see this set_id: open a new cluster
				ArrayList<Integer> cluster = new ArrayList<Integer>();
				cluster.add(mention.mention_index);
				id_map.put(set_id, clusters.size());
				clusters.add(cluster);
			}
		}
		return clusters;
	}
	
	/** Groups the <i>Mentions</i> into clusters according to their
	 *  <i>refMen_index</i> (antecedent) links: mentions connected through
	 *  a chain of antecedents end up in the same cluster. Uses a
	 *  union-find structure to build the clusters.
	 *  Assumes {@code mention_arr[i].mention_index == i}. */
	private static ArrayList<ArrayList<Integer>> getEntityChainRef(Mention[] mention_arr) {
		int[] father_arr = new int[mention_arr.length];
		for (int i = 0; i < mention_arr.length; i++) {
			assert (i == mention_arr[i].mention_index);
			father_arr[i] = mention_arr[i].getRefMention_index();
		}
		return new Unoin_Find_Set(father_arr).getUnoinSet();
	}
	
	/** Groups the <i>Mentions</i> into coreference clusters, picking the
	 *  strategy by the information available (probed on the first mention):
	 *  if <i>set_id</i> is filled in, cluster by set_id; otherwise the
	 *  <i>refMen_index</i> links must have been set and union-find is used.
	 *  @return an empty cluster list for an empty mention array */
	private static ArrayList<ArrayList<Integer>> getEntityChain(Mention[] mention_arr) {
		if (mention_arr.length == 0)
			//plain empty list (the original built a needless anonymous subclass)
			return new ArrayList<ArrayList<Integer>>();
		
		if (mention_arr[0].getSetId() != -1)
			return getEntitySet(mention_arr);
		return getEntityChainRef(mention_arr);
	}
	
	/** Converts the resolved {@link Entity} list into clusters of
	 *  {@code mention_index} values, one cluster per entity. */
	private static ArrayList<ArrayList<Integer>> getEntityChain(
			ArrayList<Entity> entity_lis) {
		ArrayList<ArrayList<Integer>> clusters = new ArrayList<ArrayList<Integer>>();
		for (Entity entity : entity_lis) {
			ArrayList<Integer> cluster = new ArrayList<Integer>();
			for (Mention mention : entity.getmentions())
				cluster.add(mention.mention_index);
			clusters.add(cluster);
		}
		return clusters;
	}
	
	/** Checks whether the contents of two Mentions match after removing
	 *  the characters that have nothing to do with the comparison
	 *  (punctuation and whitespace). The comparison is case-insensitive.
	 *  BUGFIX: the original stripped the two strings with DIFFERENT
	 *  patterns ("\\pP|\\s*" vs "\\pP\\s*"), so whitespace not following
	 *  punctuation survived in the second string and equal contents could
	 *  spuriously mismatch; both sides now use the same pattern. */
	protected boolean isMentionContMatch(String menStrA, String menStrB) {
		String strippedA = menStrA.replaceAll(IGNORED_CHARS_REGEX, "");
		String strippedB = menStrB.replaceAll(IGNORED_CHARS_REGEX, "");
		return strippedA.equalsIgnoreCase(strippedB);
	}
	
	/** Checks whether two <i>Mentions</i> denote the same mention: their
	 *  start and end positions in the document must each differ by at most
	 *  one character, AND their <i>extent</i> strings must match
	 *  (see {@link #isMentionContMatch}). */
	protected boolean isMentionMatch(Mention keyMention, Mention resMention) {
		boolean closeStart = Math.abs(
				keyMention.startPositionInDoc() - resMention.startPositionInDoc()) <= 1;
		boolean closeEnd = Math.abs(
				keyMention.endPositionInDoc() - resMention.endPositionInDoc()) <= 1;
		return closeStart && closeEnd
				&& isMentionContMatch(keyMention.getExtent(), resMention.getExtent());
	}
	
	/** Writes one log line for every mention of {@code mentions_res} that
	 *  has no match in {@code mentions_key}. Called as (key, res) it logs
	 *  wrongly recognized mentions; called with the arrays swapped,
	 *  (res, key), it logs the key mentions the recognizer lost.
	 *  @throws IOException if writing to the log fails */
	protected void writeMentionRecLog(
			Mention[] mentions_key, //the reference mentions
			Mention[] mentions_res, //the mentions checked against the reference
			OutputStreamWriter writer, //the log writer
			String filePath //the corpus file path (prefixes each log line)
		) throws IOException {
		for (Mention res : mentions_res) {
			boolean isMatch = false;
			for (Mention key : mentions_key) {
				if (isMentionMatch(res, key)) {
					isMatch = true;
					break;
				}
			}
			if (!isMatch)
				writer.write(String.format("%s: %s\n", filePath, res.toString()));
		}
	}
	
	/** Counts the mentions of {@code Mentions_res} that match some key
	 *  mention (the number of correctly identified <i>Mentions</i>).
	 *  NOTE(review): two result mentions matching the same key mention are
	 *  both counted — assumed acceptable here; confirm against the corpus. */
	private int getRightRecogNum(
			Mention[] Mentions_key,
			Mention[] Mentions_res
		) {
		int right = 0;
		for (Mention resMention : Mentions_res) {
			for (Mention keyMention : Mentions_key) {
				if (isMentionMatch(resMention, keyMention)) {
					right++;
					break;
				}
			}
		}
		return right;
	}
	
	/** Extracts all <i>Mentions</i> from a corpus file (XML).
	 *  @return an empty array (never null) if the file cannot be loaded */
	private Mention[] findCorpusMentions(String corpus_file) {
		try {
			Document dom = XMl.loadXmlFile(corpus_file);
			return _corpusMentionFinder.findmentions(dom);
		} catch (DocumentException e) {
			//BUGFIX: the original concatenation "please"+"check" produced
			//the word "pleasecheck" in the log message
			_log.fatal(String.format("error in loading the file: %s, please "+
					"check whether the file exists", corpus_file));
			return new Mention[]{};
		}
	}
	
	/** Evaluates the <i>Mention</i> recognition; in addition, when
	 *  <i>_debug</i> is true, writes the <i>wrong recognition</i> and the
	 *  <i>lost recognition</i> information into the log files
	 *  "./logs/wrong" and "./logs/lost" respectively.
	 *  @param testCorpusPath the test corpus directory path
	 *  @param _docMentionFinder the MentionFinder that identifies the
	 *         Mentions from plain text
	 *  @throws IOException if a log file cannot be written */
	public void evaluatMenRecog(
			String testCorpusPath,
			MentionFinder<nlpeap.tms.Document> _docMentionFinder
		) throws IOException {
		File corpusDir = new File(testCorpusPath);
		File[] corpusFiles = corpusDir.listFiles();
		if (corpusFiles == null) {
			//listFiles() returns null for a missing or non-directory path;
			//the original code would have thrown a NullPointerException here
			_log.fatal(String.format("not a readable directory: %s", testCorpusPath));
			return;
		}
		if (_debug) {
			_wrong_log = new OutputStreamWriter(
					new FileOutputStream("./logs/wrong"));
			_lost_log = new OutputStreamWriter(
					new FileOutputStream("./logs/lost"));
		}
		try {
			int getRight = 0, total = 0, getSum = 0;
			for (File file : corpusFiles) {
				//evaluate each .xml corpus file
				String filePath = file.getAbsolutePath();
				if (!filePath.endsWith(".xml"))
					continue;
				_log.info(String.format("evaluate the file: %s", filePath));
				
				Mention[] mentions_key = findCorpusMentions(filePath);
				if (mentions_key.length <= 0)
					continue;
				//run the recognizer on the document the key mentions came from
				nlpeap.tms.Document doc = mentions_key[0].getdoc();
				Mention[] mentions_res = _docMentionFinder.findmentions(doc);
				
				if (_debug) {
					//res as reference: key mentions absent from res are "lost"
					writeMentionRecLog(mentions_res, mentions_key, _lost_log, filePath);
					//key as reference: res mentions absent from key are "wrong"
					writeMentionRecLog(mentions_key, mentions_res, _wrong_log, filePath);
				}
				
				getRight += getRightRecogNum(mentions_key, mentions_res);
				total += mentions_key.length;
				getSum += mentions_res.length;
			}
			
			_log.info(String.format("Rec_Right: %d Total:%d Rec_Total:%d", 
					getRight, total, getSum));
			System.out.println(getRight+" "+total+" "+getSum);
		} finally {
			//close the log writers even if recognition throws
			if (_debug) {
				_wrong_log.close();
				_lost_log.close();
			}
		}
	}
	
	/** Writes the coreference-resolution log for one corpus file:
	 *  the key clusters followed by the system (res) clusters.
	 *  @throws IOException if writing to the log fails */
	protected void writeCRLog(
			Mention[] mentions, //all the mentions of the file
			ArrayList<ArrayList<Integer>> result_key, //the key Mention clusters
			ArrayList<ArrayList<Integer>> result_res, //the resolution result
			OutputStreamWriter writer, //the log writer
			String filePath //the corpus file path
		) throws IOException {
		writer.write(String.format("Mention clusters in: %s\n", filePath));
		writer.write("The Key Result:\n");
		writeClusters(mentions, result_key, writer);
		writer.write("The Res Result:\n");
		writeClusters(mentions, result_res, writer);
		writer.write("\n");
	}
	
	/** Writes each cluster's mentions between SEPARATOR lines. */
	private static void writeClusters(
			Mention[] mentions,
			ArrayList<ArrayList<Integer>> clusters,
			OutputStreamWriter writer
		) throws IOException {
		for (ArrayList<Integer> cluster : clusters) {
			writer.write(SEPARATOR);
			for (Integer men_id : cluster)
				writer.write(mentions[men_id].toString()+"\n");
			writer.write(SEPARATOR);
		}
	}
	
	/** Evaluates the <i>Coreference Resolution System's</i> performance
	 *  on the test corpus under {@code testCorpusPath}. The results are
	 *  organized into {@link nlpeap.model.eval.PRScores PRScores}; when
	 *  <i>_debug</i> is true the resolution log is written to
	 *  "./logs/crlog".
	 *  @throws IOException if the log file cannot be written */
	public PRScores evaluateCR(String testCorpusPath) throws IOException {
		File corpusDir = new File(testCorpusPath);
		File[] corpusFiles = corpusDir.listFiles();
		if (corpusFiles == null) {
			//guard against a missing/non-directory path (listFiles() -> null)
			_log.fatal(String.format("not a readable directory: %s", testCorpusPath));
			return new PRScores();
		}
		if (_debug) {
			_cr_log = new OutputStreamWriter(
					new FileOutputStream("./logs/crlog"));
		}
		
		int mention_num = 0;
		int file_nm = 0;
		//running totals over all corpus files, one per evaluation metric
		ValainScore valainScore = new ValainScore(0.0, 0.0, 0.0, 0.0);
		BcubedScore bcubedScore = new BcubedScore(0.0, 0.0, 0.0);
		CeafScore ceafScore1    = new CeafScore(0.0, 0.0, 0.0);
		CeafScore ceafScore2    = new CeafScore(0.0, 0.0, 0.0);
		try {
			for (File file : corpusFiles) {
				String filePath = file.getAbsolutePath();
				if (!filePath.endsWith(".xml"))
					continue;
				_log.info(String.format("evaluate the file: %s", filePath));
				
				Mention[] mentions = findCorpusMentions(filePath);
				if (mentions.length <= 0)
					continue;
				
				ArrayList<ArrayList<Integer>> result_key = getEntityChain(mentions);
				ArrayList<Entity> entityRes = _resolveAlgorithm.resolve(mentions);
				entityRes = _postProcessor.postProcess(entityRes);
				ArrayList<ArrayList<Integer>> result_res = getEntityChain(entityRes);
				if (_debug)
					//BUGFIX: log the per-file path; the original passed the
					//corpus directory path (testCorpusPath) for every file
					writeCRLog(mentions, result_key, result_res, _cr_log, filePath);
				
				valainScore.add(Valain.scorePart(result_key, result_res));
				bcubedScore.add(Bcubed.scorePart(result_key, result_res));
				ceafScore1.add(Ceaf.scorePart(1, result_key, result_res));
				ceafScore2.add(Ceaf.scorePart(2, result_key, result_res));
				
				mention_num += mentions.length;
				file_nm++;
			}
		} finally {
			//close the log writer even if resolution throws
			if (_debug)
				_cr_log.close();
		}
		
		//collect the precision/recall scores of the four metrics
		PRScores scores = new PRScores();
		PRScoreStruct valainPR = valainScore.toPRScore();
		PRScoreStruct bcubedPR = bcubedScore.toPRScore();
		PRScoreStruct ceafPR1  = ceafScore1.toPRScore();
		PRScoreStruct ceafPR2  = ceafScore2.toPRScore();
		scores.add(new PRScore(valainPR.getPScore(), valainPR.getRScore(), "Valain"));
		scores.add(new PRScore(bcubedPR.getPScore(), bcubedPR.getRScore(), "B-CUBED"));
		scores.add(new PRScore(ceafPR1.getPScore(), ceafPR1.getRScore(), "CEAF_1"));
		scores.add(new PRScore(ceafPR2.getPScore(), ceafPR2.getRScore(), "CEAF_2"));
		
		_log.info(String.format("there are %d mentions total in the corpus %s[%d total files]", 
				mention_num, testCorpusPath, file_nm));
		return scores;
	}
	
}
