from MAError import *
import xml.etree.ElementTree as ET

class statsTest:
	"""Base class for inter-annotator agreement statistics.

	Holds the annotator list, a mapping from text hashes to subjects,
	per-text partial scores, and the overall score (computed by subclasses).
	"""
	def __init__(self, subjects, annots, attrib, strict):
		# Fall back to the first subject's annotator list when no explicit
		# annotator list is supplied (empty/None annots).
		try:
			self.annots = annots or subjects[0].getAnnotators()
		except IndexError:
			print("Empty subject list.")
			raise
		self.subjectD = {}       # text hash -> list of subjects (filled by subclasses)
		self.partialScores = {}  # text hash -> per-text score
		self.score = None        # overall score, set by subclasses
		self.strict = strict
		self.attrib = attrib

	def _textKey(self, text):
		"""Return the lookup key for a text: the hash of the root's first child."""
		# list(root)[0] replaces the deprecated Element.getchildren()[0].
		return hash(list(ET.parse(text).getroot())[0])

	def __getitem__(self, text):
		"""Returns a list of subjects corresponding to the index text or subject"""
		try:
			return self.subjectD[self._textKey(text)]
		except Exception:
			# Unparsable file, empty document, or unknown text.
			print("Invalid input text: %s" % (text,))
			raise

	def getScore(self, text = None):
		"""Return the overall score, or the partial score for *text* when given."""
		if text:
			try:
				return self.partialScores[self._textKey(text)]
			except Exception:
				print("Invalid input text: %s" % (text,))
				raise
		return self.score

	def deviance(self):
		# Hook for subclasses; intentionally a no-op here.
		pass
	
class fleissKappaTest(statsTest):
	"""Fleiss' kappa agreement test over a list of annotation subjects.

	Requires at least two subjects and at least three annotators; for two
	annotators, Cohen's kappa would be the appropriate test.
	"""
	def __init__(self, subjects, attrib, strict, annots = []):
		statsTest.__init__(self, subjects, annots, attrib, strict)
		# Group subjects by their text hash so per-text scoring stays possible.
		for subj in subjects:
			self.subjectD.setdefault(subj.thash, []).append(subj)
		# Overall kappa over every subject from every text.
		self.score = self._kappa([subj for li in self.subjectD.values() for subj in li], self.annots)

	def _kappa(self, subjects, annots):
		"""
		Calculates the kappa value for a list of extents or links.

		Raises StatsError when there are fewer than two subjects or fewer
		than three annotators. Returns 1.0 when expected agreement is
		perfect (degenerate single-tag case).
		"""
		try:
			if len(subjects) < 2 or len(annots) < 3: #alter? perform Cohen in former case?
				raise StatsError('Fleiss')
		except StatsError as SE:
			print("Inappropriate statistical test: %s" % (SE,))
			raise
		nAnnots = len(annots)
		nSubjects = len(subjects)
		# Cache each subject's comparanda values once -- the original
		# recomputed getComparanda() three times per subject.
		perSubject = [list(subj.getComparanda(self.attrib, self.strict).values()) for subj in subjects]
		# Overall frequency of each tag across all subjects and annotators.
		tags = {}
		for vals in perSubject:
			for cmprnd in vals:
				tags[cmprnd] = tags.get(cmprnd, 0) + 1
		# Pc: proportion of all assignments that went to each tag.
		Pc = [float(val)/(nAnnots * nSubjects) for val in tags.values()]
		# Pi: per-subject agreement among annotators.
		Pi = []
		for vals in perSubject:
			counts = {}
			for v in vals:
				counts[v] = counts.get(v, 0) + 1
			Pi.append((sum(float(c**2) for c in counts.values()) - nAnnots)/(nAnnots * (nAnnots - 1)))
		P = sum(Pi)/len(Pi)
		Pe = sum([i**2 for i in Pc])
		try:
			kappa = (P - Pe)/(1 - Pe)
		except ZeroDivisionError:
			# Pe == 1: expected agreement is total; report full agreement.
			return 1.0
		return kappa
	
class cohenKappaTest(statsTest):
	"""Placeholder for Cohen's kappa (two-annotator agreement); not yet implemented."""
	pass

class krippendorffAlphaTest(statsTest):
	"""Placeholder for Krippendorff's alpha agreement test; not yet implemented."""
	pass
	
def annotator_deviance(subjects, test):
	"""
	Returns a list of (annotator, deviation score) pairs. In the absence of any
	better metric, this method subtracts the kappa score for all the annotators
	from the kappa score given by removal of individual annotators, then assigns
	each annotator a deviation score equal to the inverse of the difference.
	Higher deviation values, then, mean more disagreement with the other annotators.

	`test` must be a statsTest factory callable as test(subjects) and
	test(subjects, annots), whose result exposes getScore().
	"""
	# Bug fix: `annots` was an undefined name; derive the annotator list from
	# the first subject, mirroring statsTest.__init__'s fallback.
	annots = subjects[0].getAnnotators()
	# Bug fix: the original subtracted test *instances*; compare scores instead.
	base = test(subjects).getScore()
	deviances = []
	for annot in annots:
		others = list(set(annots).difference(set([annot])))
		reduced = test(subjects, others).getScore()
		try:
			deviances.append((annot, 1.0/(reduced - base)))
		except ZeroDivisionError:
			# No score change when this annotator is removed: unbounded agreement.
			deviances.append((annot, float('inf')))
	return deviances
	
