#!/usr/bin/python

"""
Candidate Selection scoring script

version 0.1:
Oct 3, 2012
by Zhaochen Guo (zhaochen@ualberta.ca)

This script is mainly used for evaluating the results of candidate selection.
The main metrics that have been implemented here are
	- AR = Average Rank: 1/N * \sum {rank(a_i, Q_i)}
	- AR@10 = 1/N * \sum{rank@10(a_i,Q_i)}

"""

import re
import sys
import os
import weakref
import xml.etree.ElementTree as ET

#reload(sys)
#sys.setdefaultencoding("UTF-8")

class SystemOutputException(Exception):
	"""Raised when a system output (result) file is malformed or inconsistent."""
	pass

def readFile(truthFile, tagname):
	"""
	Read an XML query file and print/extract the text of a given child tag.

	Expected format (one <query> element per query):
	<query id="EL_00024">
	  <name>ADF</name>
	  <docid>eng-NG-31-107936-12000474</docid>
	  <entity>Australian Defence Force</entity>
	  <candidate>Australian Defence Force</candidate>
	</query>

	Do NOT change IDs in any way.
	Echoes each query id and matching tag text to stdout (debug aid; this
	function is only referenced by the commented-out alternate main below).
	Returns a dictionary query_id -> text of the <tagname> child
	(if several children match, the last one wins).
	"""

	diction = dict()

	tree = ET.parse(truthFile)
	# iter() replaces getiterator(), which was deprecated in 2.7/3.2 and
	# removed in Python 3.9; both spellings behave identically here.
	for query in tree.iter('query'):
		# 'query_id' instead of 'id' to avoid shadowing the builtin.
		query_id = query.get('id')
		# Parenthesized single-argument print is valid Python 2 and 3.
		print(query_id)
		for cand in query.iter(tagname):
			print("\t%s" % (cand.text))
			diction[query_id] = cand.text

	return diction

def readGroundTruth(truthFile):
	"""
	Read the ground truth according to the following format.
	<query id="EL_00024">
	  <name>ADF</name>
	  <docid>eng-NG-31-107936-12000474</docid>
	  <entity>Australian Defence Force</entity>
	</query>

	Do NOT change IDs in any way.
	Returns a dictionary query_id -> text of the <entity> child
	(if several <entity> children exist, the last one wins).
	"""

	groundTruth = dict()

	tree = ET.parse(truthFile)
	# iter() replaces getiterator(), removed in Python 3.9.
	for query in tree.iter('query'):
		# 'query_id' instead of 'id' to avoid shadowing the builtin.
		query_id = query.get('id')
		for entity in query.iter('entity'):
			groundTruth[query_id] = entity.text

	return groundTruth

def readResult(resultFile):
	"""
	Read the candidate selection result file in the following format:
	<query id="EL_00024">
	  <name>ADF</name>
	  <candidate>Advanced Format</candidate>
	  <candidate>Amiga Disk File</candidate>
	  <candidate>Amsterdam Density Functional</candidate>
	  <candidate>Australian Defence Force</candidate>
	  <docid>eng-NG-31-107936-12000474</docid>
	</query>

	Returns a dictionary query_id -> ordered LIST of candidates.
	(The original docstring said "set", but document order is preserved
	and is significant: eval() uses list position as the candidate rank.)
	"""

	results = dict()

	tree = ET.parse(resultFile)
	# iter() replaces getiterator(), removed in Python 3.9.
	for query in tree.iter('query'):
		# Candidates are collected in document order; position == rank - 1.
		results[query.get('id')] = [cand.text for cand in query.iter('candidate')]

	return results

def eval(ground_truth_file, result_file, N):
	"""
	Compare the results with the ground truth and print three metrics:

	- AR     : average rank of the true entity over all queries; when the
	           true entity is absent from a query's candidate list, its
	           rank defaults to the list length.
	- AR@N   : average rank capped at N (absent -> N).
	- Recall : fraction of queries whose candidate list contains the truth.

	Parameters:
		ground_truth_file -- XML file parsed by readGroundTruth()
		result_file       -- XML file parsed by readResult()
		N                 -- rank cap for the AR@N metric

	Raises SystemOutputException if the result file has no queries or
	contains a query id that is missing from the ground truth (the
	original code crashed with ZeroDivisionError / KeyError respectively).

	NOTE: the name shadows the builtin eval(); kept for backward
	compatibility with existing callers.
	"""

	groundTruth = readGroundTruth(ground_truth_file)
	results = readResult(result_file)

	# Guard against an empty result file (would otherwise divide by zero).
	if not results:
		raise SystemOutputException("result file contains no queries")

	ar = 0
	ar_n = 0
	correct = 0

	for key, cands in results.items():
		if key not in groundTruth:
			raise SystemOutputException(
				"query id %s not found in ground truth" % key)
		truth = groundTruth[key]

		# Defaults used when the true entity is not among the candidates.
		rank = len(cands)
		rank_n = N

		for i, cand in enumerate(cands):
			if cand == truth:
				correct += 1
				rank = i + 1
				# Cap the rank contribution for the AR@N metric.
				rank_n = min(rank, N)
				break

		ar += rank
		ar_n += rank_n

	total = len(results)
	ar = ar * 1.0 / total
	ar_n = ar_n * 1.0 / total
	recall = correct * 1.0 / total

	# Parenthesized single-argument print is valid Python 2 and 3.
	print("AR\tAR@%d" % (N))
	print("%.3f\t%.3f" % (ar, ar_n))
	print("Recall: %.3f" % (recall))


if __name__ == "__main__":
	# Accept 1-3 arguments: result file, optional gold-standard file,
	# optional rank cap N.
	if len(sys.argv) < 2 or len(sys.argv) > 4:
		print("-----------------------------------------")
		print("KBP Candidate Selection evaluation script")
		print("-----------------------------------------")
		print("USAGE: ./cs_scorer [result_file] [gold_standard_file (optional if gold_standard is in the result file)] [N (optional, default=10)]")
		print(" - result_file	The output of a candidate selection system in the specified format")
		print(" - gold_standard_file	The ground truth for the test data")
		print(" - N  check the true entity in the top N candidates. ")
	else:
		result_file = sys.argv[1]
		# The last argument is either N (an integer) or a file path; if it
		# does not parse as an int, treat it as the ground-truth file and
		# fall back to the default N.
		try:
			N = int(sys.argv[-1])
			ground_truth_file = sys.argv[-2]
		except ValueError:
			N = 10
			ground_truth_file = sys.argv[-1]

		eval(ground_truth_file, result_file, N)


#if __name__ == "__main__":
#	readFile(sys.argv[1], 'entity')





