import sys, util.gzopen, sets

# Lexical-type tag prefixes used to bucket tags into coarse subtype
# classes (noun, verb, ...) for the per-subtype precision/recall report.
jacy_hdrs = ['n','na','v','i','adv']
erg_hdrs = ['n_','av_','aj_','v_']

# Select which grammar's tag prefixes apply; flip ERG to switch.
ERG = False
JACY = not ERG

# Minimum best-tag probability for a prediction to count towards the
# new-lexical-type true/false-positive sets.
CUTOFF=0.0
# Beta thresholds: a tag is kept while its probability is at least
# best_prob * beta (used for the parse-reduction beta graphs).
BETA=[0.00001,0.00005,0.0001,0.0005,0.001,0.005,0.01,0.05,0.1,0.5,1.0]

class TagStat:
	"""Occurrence count plus the set of distinct tags seen for one word."""

	def __init__(self):
		# number of times the word has been observed
		self.count = 0
		# distinct tag ids observed with the word; the built-in set
		# replaces the deprecated sets.Set (same interface, 2.4+)
		self.tags = set()

	def inc(self, tag):
		"""Record one more occurrence of the word carrying `tag`."""
		self.count += 1
		self.tags.add(tag)

class FeatureMap:
	"""Bidirectional mapping between feature/label strings and integer ids.

	Features and labels (tags) are numbered independently, starting at 0
	in order of first appearance.  rev_labels inverts the label mapping
	so a predicted id can be turned back into its tag string.
	"""

	def __init__(self):
		self.features = {}      # feature string -> int id
		self.num_features = 0   # next free feature id
		self.labels = {}        # tag string -> int id
		self.rev_labels = {}    # int id -> tag string
		self.num_labels = 0     # next free label id

	def detect(self, feature_string, add_unseen=False):
		"""Return the id of feature_string, or -1 if unknown.

		With add_unseen, an unknown feature is assigned the next free id
		(and that id is returned) instead of -1.
		"""
		# has_key is deprecated (gone in Python 3); `in` is equivalent
		if feature_string in self.features:
			return self.features[feature_string]
		if add_unseen:
			new_id = self.num_features
			self.features[feature_string] = new_id
			self.num_features += 1
			return new_id
		return -1

	def detect_tag(self, tag, add_unseen=False):
		"""Return the id of tag, or -1 if unknown.

		With add_unseen, an unknown tag is assigned the next free id and
		the reverse mapping in rev_labels is kept in sync.
		"""
		if tag in self.labels:
			return self.labels[tag]
		if add_unseen:
			new_id = self.num_labels
			self.labels[tag] = new_id
			self.rev_labels[new_id] = tag
			self.num_labels += 1
			return new_id
		return -1

	def serialise(self, out_file="feature_map.gz"):
		"""Write the map to a gzip file: label count, then "name id" lines
		for every label followed by every feature."""
		out = util.gzopen.gzopen(out_file, 'w')
		# explicit write() calls produce the same bytes as the old
		# `print >>out` statements but work on Python 2 and 3 alike
		out.write('%d\n' % len(self.labels))
		for label, label_id in self.labels.items():
			out.write('%s %d\n' % (label, label_id))
		for feature, feature_id in self.features.items():
			out.write('%s %d\n' % (feature, feature_id))
		out.close()

	def deserialise(self, in_file="feature_map.gz"):
		"""Load a map previously written by serialise()."""
		# renamed from `input`, which shadowed the builtin
		map_in = util.gzopen.gzopen(in_file, 'r')
		self.num_labels = int(map_in.readline())
		loaded = 0
		# the first num_labels lines are label entries ...
		for line in map_in:
			tag, id_str = line.split()
			tag_id = int(id_str)
			self.labels[tag] = tag_id
			self.rev_labels[tag_id] = tag
			loaded += 1
			if loaded >= self.num_labels:
				break
		# ... and every remaining line is a feature entry
		for line in map_in:
			feature, id_str = line.split()
			feature_id = int(id_str)
			self.features[feature] = feature_id
			# num_features must exceed the highest id on file
			if feature_id + 1 > self.num_features:
				self.num_features = feature_id + 1
		map_in.close()

# open the file with the test data (gold-tagged tokens, gzip-compressed)
gold_file = util.gzopen.gzopen(sys.argv[1])

# load the feature map (string feature/label <-> integer id tables)
feature_map = FeatureMap()
feature_map.deserialise(sys.argv[2])
# optional cap on how many sentences to evaluate (argv[4]); -1 means all
num_sentences = -1
if len(sys.argv) > 4:
	num_sentences = int(sys.argv[4])

# create a lexicon from the training lexemes and their tags
# (optional argv[6]: lines of "word tag"; builds word -> set of tag ids)
lexicon={}
if len(sys.argv) > 6:
	lexicon_file=util.gzopen.gzopen(sys.argv[6])
	for line in lexicon_file:
		tokens=line.split()
		if len(tokens)==2:
			if tokens[0] not in lexicon: lexicon[tokens[0]] = sets.Set()
			# NOTE(review): tags unknown to the map come back as -1 and
			# are stored as-is -- confirm that is intended
			tag_id = feature_map.detect_tag(tokens[1])
			lexicon[tokens[0]].add(tag_id)

# load tag dict (optional argv[5]: lines of "word count tag_id tag_id ...")
tag_dict={}
if len(sys.argv) > 5:
	tag_dict_in = util.gzopen.gzopen(sys.argv[5],'r')
	# header line: number of words (read here but not otherwise used)
	num_words = int(tag_dict_in.readline())
	for line in tag_dict_in:
		tokens = line.split()
		tag_dict[tokens[0]] = TagStat()
		tag_dict[tokens[0]].count = int(tokens[1])
		tag_dict[tokens[0]].tags = sets.Set()
		for token in tokens[2:]:
			tag_dict[tokens[0]].tags.add(int(token))
	tag_dict_in.close()

def next_sentence(file):
	"""Yield each sentence in `file` as a list of its raw lines.

	Sentences are runs of lines delimited by blank (whitespace-only)
	lines; the lines are yielded untouched, newline included.  A final
	(possibly empty) sentence is always yielded at end of input.
	"""
	buffered = []
	for raw_line in file:
		if raw_line.strip():
			buffered.append(raw_line)
		else:
			# blank separator: emit the accumulated sentence, start anew
			yield buffered
			buffered = []
	yield buffered
	return
		
# per-token tag distributions from the tagger, aligned 1:1 with the gold
# tokens: one whitespace-separated "tag:prob tag:prob ..." line per token
predictions = util.gzopen.gzopen(sys.argv[3])
# token-level tallies: all tokens / unseen words / new lexical types
total_predicted = 0
total_unseen = 0
total_new= 0
correct_predicted = 0
correct_unseen = 0
correct_new= 0

# for parse reduction beta graphs
correct_beta = [0 for x in BETA]        # tokens whose gold tag survives beta[i]
sent_correct_beta = [0 for x in BETA]   # sentences fully covered at beta[i]
total_word_tags= [0 for x in BETA]      # tags kept in total at beta[i]

# for DLA results
fp_new=sets.Set()   # (word, tag) wrongly proposed as new lexical entries
fn_new=sets.Set()   # (word, tag) new entries that were missed
tp_new=sets.Set()   # (word, tag) new entries predicted correctly
subtype_rec={}      # gold header -> [correct, gold-new total]
subtype_prec={}     # gold header -> [correct, predicted-new total]

# side output: one "word gold_tag predicted_tag" line per token,
# blank line between sentences
pred_file = sys.argv[3]+".predictions"
pred_fd = open(pred_file,'w')
for i,sentence in enumerate(next_sentence(gold_file)):
	if num_sentences > 0 and i >= num_sentences:
		break
	sent_correct_flags = [True for x in BETA]
	for line in sentence:
		tokens = line.split()
		word = tokens[0]
		gold_tag = tokens[-1]
		# unseen = the word never fired as a WORD feature in training
		unseen = (feature_map.detect('WORD=%s'%word) < 0)

		gold = feature_map.detect_tag(gold_tag)
		distribution = []
		for pair in predictions.next().strip().split():
			tag,prob = pair.split(':')
			distribution.append((int(tag),float(prob)))
		p = distribution[0][0]      # best tag id (list is best-first)
		prob = distribution[0][1]   # probability of the best tag

		# create a set of tags selected by beta
		# (a tag survives while its prob >= best_prob * beta; relies on
		# the distribution being sorted by decreasing probability)
		beta_tags=[]
		for beta in BETA:
			tags = sets.Set()
			for tag,pr in distribution:
				if pr < prob*beta: break
				else: tags.add(tag)
			beta_tags.append(tags)

		p_tag = feature_map.rev_labels[p]
#	new_lexical_type = (word not in tag_dict or gold not in tag_dict[word].tags)
#	pred_new_lexical_type = (word not in tag_dict or p not in tag_dict[word].tags)
		# "new" = this (word, tag) pairing is absent from the lexicon
		new_lexical_type = (word not in lexicon or gold not in lexicon[word])
		pred_new_lexical_type = (word not in lexicon or p not in lexicon[word])

		if JACY: hdrs = jacy_hdrs
		else:    hdrs = erg_hdrs
		gold_hdr = pred_hdr = 'other'

		# bucket gold and predicted tags by their coarse type prefix
		for hdr in hdrs:
			if gold_tag[:len(hdr)] == hdr: gold_hdr = hdr
			if p_tag[:len(hdr)] == hdr: pred_hdr = hdr
		if gold_hdr not in subtype_rec: 
			subtype_rec[gold_hdr]=[0,0]
			subtype_prec[gold_hdr]=[0,0]
		if new_lexical_type:
			subtype_rec[gold_hdr][1] += 1
		if pred_new_lexical_type:
			subtype_prec[gold_hdr][1] += 1

#		 print word, gold_tag, feature_map.detect_tag(gold_tag),\
#			 p_tag, p, (p_tag==gold_tag)
		print >>pred_fd,word, gold_tag, p_tag
		if gold_tag == p_tag: 
			correct_predicted += 1
			if unseen: correct_unseen += 1
			if new_lexical_type:
				subtype_rec[gold_hdr][0] += 1
				subtype_prec[gold_hdr][0] += 1

				if prob >= CUTOFF: 
					correct_new += 1
					tp_new.add((word,p_tag))
				else:
					fn_new.add((word,p_tag))
		else:
			if pred_new_lexical_type and prob >= CUTOFF: fp_new.add((word,p_tag))
			if new_lexical_type:      fn_new.add((word,gold_tag))

		# NOTE(review): this `i` shadows the sentence index from the
		# outer enumerate; harmless (enumerate reassigns it each
		# sentence) but fragile if code is ever added after this loop
		for i,x in enumerate(BETA):
			if gold in beta_tags[i]: correct_beta[i] += 1
			else: sent_correct_flags[i] = False
			total_word_tags[i] += len(beta_tags[i])

		total_predicted += 1
		if unseen: total_unseen += 1
		if new_lexical_type: total_new += 1
	print >>pred_fd
	for i,x in enumerate(BETA):
		if sent_correct_flags[i]: sent_correct_beta[i] += 1

pred_fd.close()

# write the summary report next to the predictions file
report_file = sys.argv[3]+".result"
report_fd = open(report_file,'w')
print >>report_fd, "Total tags predicted/correct:", total_predicted, correct_predicted
print >>report_fd, "Total unseen words/correct:", total_unseen, correct_unseen
print >>report_fd, "Total accuracy:", float(correct_predicted) / total_predicted 
if total_unseen: print >>report_fd, "Total unseen accuracy:", float(correct_unseen) / total_unseen
print >>report_fd

# per-beta line: tag accuracy, average tags kept per token, sentence acc.
# NOTE(review): sent_acc divides by num_sentences, which stays -1 when no
# argv[4] limit was supplied -- the figure is only meaningful with a limit
print >>report_fd, "Beta accuracy:"
for i,x in enumerate(BETA):
	print >>report_fd, x, '%.5f'%(float(correct_beta[i])/total_predicted), '%.2f'%(float(total_word_tags[i])/total_predicted), 'sent_acc=%.3f'%(float(sent_correct_beta[i])/num_sentences)
print >>report_fd

# debug echo to stdout: was a tag dict loaded, and how many new types seen
print len(tag_dict)>0, total_new
if len(tag_dict)>0 and total_new:
	# accuracy/precision/recall/F over (word, tag) new-lexical-type entries
	num_types = len(tp_new.union(fn_new))
	num_pred = len(tp_new.union(fp_new))
	num_missed = len(fn_new)
	print >>report_fd, "Total unseen types/correct:", total_new, correct_new
	print >>report_fd, "Total unseen entries/correct/pred:", num_types , len(tp_new), num_pred
	new_acc = float(correct_new) / total_new
	if num_pred>0: new_pre = float(len(tp_new)) / num_pred
	else: new_pre=0
	new_rec = float(len(tp_new)) / num_types
	if new_pre>0 or new_rec>0: new_f = 2*new_pre*new_rec / (new_pre+new_rec)
	else: new_f=0
	print >>report_fd, "New Types a=%.3f, p=%.3f, r=%.3f, f=%.3f"%(new_acc,new_pre,new_rec,new_f)
	print >>report_fd, '\nSubtype stats'
	# per coarse-subtype P/R/F from the [correct, total] tallies
	for key in subtype_prec:
		if subtype_prec[key][1]>0: prec=float(subtype_prec[key][0])/float(subtype_prec[key][1])
		else: prec=0
		if subtype_rec[key][1]>0: rec=float(subtype_rec[key][0])/float(subtype_rec[key][1])
		else: rec=0
		if prec>0 or rec>0: f = 2*prec*rec / (prec+rec)
		else: f=0
		print >>report_fd, key, ':', 'p=%.3f r=%.3f f=%.3f'%(prec,rec,f), subtype_rec[key][1]
report_fd.close()

#for word,type in tp_new.union(fn_new): 
#	if (word,type) in tp_new: print '+',
#	else: print '-',
#	print word, type
