'''
prerequisite:
1) allocate the term-id and doc-id;
2) build a language model for each passage (fixed window)
3) get an initialized translation model
'''
from ML import ML
from QueryManager import QueryManager
from Query import Query
from Lexicon import Lexicon
from TranslationModel import TranslationModel
from time import clock
from DictUtils import *

def printModel(dic):
	"""Print a two-level translation dict, one line per outer key.

	Each line is "key k2:value k2:value ...", matching the format
	ATM.writeModel() writes to disk.

	Fixes two defects of the original: the print sat inside the inner
	loop (every partial line was printed repeatedly), and entries were
	concatenated with no separator.
	"""
	for key in dic.keys():
		line = str(key) + ' '
		for k2 in dic[key].keys():
			line = line + str(k2) + ':' + str(dic[key][k2]) + ' '
		# print once per key, after the whole line is assembled
		print(line)
class ATM: # aligned translation model
	'''
	EM algorithm for training:
	qd_reader: a reader for query-doc pair;
	init_tm:
	'''
	def train(self, init_tm, iterate_num, model_diff):
		prev_global_translation = init_tm
		qm = QueryManager()
		qm.load()
		query_count = 100 #test for 1000 queries
		lexicon = Lexicon()
		lexicon.load()
		for i in xrange(iterate_num):
			t1 = clock()
			print 'Iterate %d model :' % (i+1)
#			printModel(prev_global_translation)
			global_translation = {}
			for query_index in xrange(query_count):
				query = qm.getQuery(query_index)
				q = []
				for term in query.getQuery().split():
					q.append(lexicon.getIdByTerm(term))
				doc = ML(str(query.getAnsdoc()))
				passage_models = doc.getPassageModels()
				passage_scores = []
				passage_translations = []
				for lm in passage_models:
#					t3 = clock()
					passage_score, passage_translation = self.passage_score(q, lm, prev_global_translation)
					passage_scores.append(passage_score)
					passage_translations.append(passage_translation)
#					t4 = clock()
#					print 'Query pair cost %f s' % (t4-t3)
				self.passage_norm(passage_scores)
				doc_translation = self.doc_translation_norm(passage_translations, passage_scores)
				self.update_translation(global_translation, doc_translation)
			self.translation_norm(global_translation)
			error = self.compare(prev_global_translation, global_translation)
			print 'Iterate %d error %f .' % (i+1, error)
			if(error < model_diff):
				break;
			prev_global_translation = global_translation;
			t2 = clock()
			print 'Iterate %d cost %f s' % (i+1, t2-t1)
		self.model = global_translation

	def writeModel(self):
		f = open('EM_TM_path', 'w')#test path
		for td in self.model.keys():
			line = str(td) + ' '
			for tq in self.model[td].keys():
				line = line + str(tq) + ':' + str(self.model[td][tq]) + ' '
			line = line + '\n'
			f.write(line)
		f.close()
		
	def load(self):
		f = open(EM_TM_path, 'r')
		self.model = {}
		lines = f.readlines()
		f.close()
		for line in lines:
			items = line.split()
			td = int(items[0])
			for item in items[1:]:
				tq = int(item.split(':')[0])
				value = float(item.split(':')[1])
				addElement_twolevel(self.model, td, tq ,value)
	def getProb(self, td, tq):
		return getElement_twolevel(self.model, td, tq)

	def passage_score(self, q, lm, ptm):
		score = 1.0
		translation = {}
		for td in lm.keys():
			translation[td] = {}
		for tq in q:
			k_score = 0.0
			for td in lm.keys():
				if td == 0:
					continue
				p1 = getElement_twolevel(ptm, td, tq)
				p2 = getElement_onelevel(lm, td)
				if p1 * p2 == 0:
					continue
				translation[td][tq] = p1 * p2
				k_score = k_score + p1 * p2
			score = score * k_score
		return (score, translation)

	def passage_norm(self, passage_scores):
		denominator = 0.0
		for score in passage_scores:
			denominator = denominator + score
		if denominator == 0:
			return
		for i in xrange(len(passage_scores)):
			passage_scores[i] = passage_scores[i] / denominator

	def doc_translation_norm(self, passage_translations, passage_scores):
		doc_translation = {}
		for k in xrange(len(passage_scores)):
			if passage_scores[k] == 0:
				continue
			for td in passage_translations[k].keys():
				for tq in passage_translations[k][td].keys():
					addElement_twolevel(doc_translation, tq, td, passage_scores[k] * passage_translations[k][td][tq])
		for tq in doc_translation.keys():
			for td in doc_translation[tq].keys():
				if td == 0:
					continue #Remember not do normalization to 0th element
				doc_translation[tq][td] = doc_translation[tq][td] / doc_translation[tq][0]
			doc_translation[tq][0] = 1.0
		return doc_translation

	def update_translation(self, global_translation, doc_translation):
		for tq in doc_translation.keys():
			for td in doc_translation[tq].keys():
				if td == 0:
					continue
				addElement_twolevel(global_translation, td, tq, doc_translation[tq][td])

	def translation_norm(self, global_translation):
		for td in global_translation.keys():
			for tq in global_translation[td].keys():
				if tq == 0:
					continue
				global_translation[td][tq] = global_translation[td][tq] / global_translation[td][0]
			global_translation[td][0] = 1.0

	def	compare(self, pt, gt):
		diff = 0.0
		td_list = set(pt.keys()) | set(gt.keys()) - set([0])
		row = len(td_list)
		col = 0 
		for td in td_list:
			tq_list = set()
			if pt.has_key(td):
				tq_list = tq_list | set(pt[td].keys())
			if gt.has_key(td):
				tq_list = tq_list | set(gt[td].keys())
			col += len(tq_list)
			tq_list = tq_list - set([0])
			for tq in tq_list:
				diff = diff + abs(getElement_twolevel(pt, td, tq) - getElement_twolevel(gt, td, tq))
		print 'row: %d col: %d' % (row, col/row)
		return diff

if __name__ == '__main__':
	# Seed EM with the baseline translation table, train, then persist.
	seed_table = TranslationModel().getTM_dict()
	trainer = ATM()
	trainer.train(seed_table, 500, 1e-5)
	trainer.writeModel()
