
'''
prerequisite:
1) allocate the term-id and doc-id;
2) build a language model for each passage (fixed window)
3) get an initialized translation model
'''
from ML import ML
from QueryManager import QueryManager
from Query import Query
from Lexicon import Lexicon
from TranslationModel import TranslationModel
from time import time
from DictUtils import *
from multiprocessing import Process, Pipe
from config import EM_TM_path

def printModel(dic):
	'''
	Debug helper: print a two-level dict, one line per outer key:
	"<key> <k2>:<v> <k2>:<v> ...".
	The original omitted the separator between pairs, so entries ran
	together unreadably; a space is added to match WriteDict's format.
	'''
	for key in dic.keys():
		parts = [str(key)]
		for k2 in dic[key].keys():
			parts.append(str(k2) + ':' + str(dic[key][k2]))
		print(' '.join(parts))
#Write a two-level dict to a file named after the worker-process number.
def WriteDict(d, num):
	'''
	Serialize the two-level dict `d` to a file named str(num), one line
	per outer key: "<key> <k2>:<v> <k2>:<v> \n" (trailing space kept for
	byte-compatibility with the original format).  Parsed by ReadDict().
	Uses a with-block so the handle is closed even on write errors.
	'''
	with open(str(num), 'w') as wf:
		for key in d.keys():
			parts = [str(key)]
			for k2 in d[key].keys():
				parts.append(str(k2) + ':' + str(d[key][k2]))
			wf.write(' '.join(parts) + ' \n')
#Read a two-level dict back from a file, by worker-process number.
def ReadDict(num):
	'''
	Parse the file named str(num) written by WriteDict() back into a
	two-level dict: outer/inner keys are ints, values are floats.
	Streams the file line by line inside a with-block instead of
	readlines(); blank lines are skipped defensively.
	'''
	d = {}
	with open(str(num), 'r') as rf:
		for line in rf:
			fields = line.split()
			if not fields:
				continue
			key = int(fields[0])
			inner = {}
			for item in fields[1:]:
				k, _, v = item.partition(':')
				inner[int(k)] = float(v)
			d[key] = inner
	return d
			
class ATM: # aligned translation model
	'''
	EM training of an aligned translation model over query / answer-document
	pairs.

	train() drives the EM loop: each iteration fans the query range out to
	worker processes (train_mul), merges the per-process partial counts
	they dumped to disk, normalizes, and stops early once the model change
	(compare) drops below model_diff.

	Dictionary convention (from the original author's notes): term ids
	start at 1, and the DictUtils helpers maintain slot 0 of each inner
	dict as the running sum of the other entries.  Therefore every
	traversal skips key 0, and normalization divides by the slot-0 sum.
	'''
	#Worker subtask: [begin, end) is the query-index range handled by this
	#process, num is the process number, prev_global_translation is the
	#ATM from the previous iteration.
	def train_mul(self, begin, end, num, prev_global_translation):
		doc_translation_local = {}
		for query_index in range(begin, end):
			query = self.qm.getQuery(query_index)  # one query object
			# Map the query text (whitespace-separated terms) to term ids.
			q = []
			for term in query.getQuery().split():
				q.append(self.lexicon.getIdByTerm(term))
			# ML object for the answer document.  Only the passage models
			# are needed here, so the document-level load() is not called.
			doc = ML(str(query.getAnsdoc()))
			passage_models = doc.getPassageModels()
			passage_scores = []
			passage_translations = []
			for lm in passage_models:
				passage_score, passage_translation = self.passage_score(q, lm, prev_global_translation)
				passage_scores.append(passage_score)
				passage_translations.append(passage_translation)
			self.passage_norm(passage_scores)
			doc_translation = self.doc_translation_norm(passage_translations, passage_scores)
			self.update_translation(doc_translation_local, doc_translation)
		# Dump the partial counts to a file named after the process number;
		# the parent merges all partials after join() in train().
		WriteDict(doc_translation_local, num)

	def train(self, init_tm, iterate_num, model_diff):
		'''
		Run at most iterate_num EM iterations starting from init_tm,
		stopping early when compare() reports a change below model_diff.
		Leaves the final model in self.model.
		'''
		prev_global_translation = init_tm
		self.qm = QueryManager()
		self.qm.load()
		self.collection = ML('collection')
		self.collection.load()
		# TODO(review): hard-coded to the first 10000 queries for testing;
		# a full run should use self.qm.getSize().
		query_count = 10000
		self.lexicon = Lexicon()
		self.lexicon.load()
		for i in range(iterate_num):
			t1 = time()
			print('Iterate %d model :' % (i + 1))
			global_translation = {}
			pool = []
			kernelnum = 16  # number of worker processes
			# Split the query range evenly; // keeps the py2 int-division
			# semantics the original relied on.
			for j in range(kernelnum):
				pool.append(Process(target=self.train_mul, args=(query_count * j // kernelnum, query_count * (j + 1) // kernelnum, j, prev_global_translation)))
			for j in range(kernelnum):
				pool[j].start()
			for j in range(kernelnum):
				pool[j].join()
			# Merge the per-process partial counts written by train_mul.
			for j in range(kernelnum):
				doc_translation = ReadDict(j)
				self.update_translation(global_translation, doc_translation)
			self.translation_norm(global_translation)
			error = self.compare(prev_global_translation, global_translation)
			print('Iterate %d error %f .' % (i + 1, error))
			if error < model_diff:
				break
			prev_global_translation = global_translation
			t2 = time()
			print('Iterate %d cost %f s' % (i + 1, t2 - t1))
		self.model = global_translation

	def writeModel(self):
		'''
		Write self.model to disk, one doc-term per line:
		"<td> <tq>:<prob> ... \n".  Slot 0 (the normalization sum) is
		skipped; trailing space kept for byte-compatibility with load().
		'''
		# NOTE(review): writes to the literal file name 'EM_TM_path', not
		# the imported config constant EM_TM_path (original comment said
		# "test path") -- confirm which is intended before a real run.
		with open('EM_TM_path', 'w') as f:
			for td in self.model.keys():
				parts = [str(td)]
				for tq in self.model[td].keys():
					if tq == 0:
						continue
					parts.append(str(tq) + ':' + str(self.model[td][tq]))
				f.write(' '.join(parts) + ' \n')

	def load(self):
		'''
		Load the model file written by writeModel() into self.model.
		addElement_twolevel rebuilds the slot-0 sums as it inserts.
		'''
		self.model = {}
		with open('EM_TM_path', 'r') as f:
			for line in f:
				items = line.split()
				td = int(items[0])
				for item in items[1:]:
					tq = int(item.split(':')[0])
					value = float(item.split(':')[1])
					addElement_twolevel(self.model, td, tq, value)

	def getProb(self, td, tq):
		'''Return the translation probability of doc term td -> query term tq.'''
		return getElement_twolevel(self.model, td, tq)

	def passage_score(self, q, lm, ptm):
		'''
		E-step for one passage: score the query q (list of term ids)
		against the passage language model lm under the previous
		translation model ptm.  Returns (score, translation) where
		translation[td][tq] is the unnormalized contribution of each
		(doc term, query term) pair.  Key 0 (the stored sum) is skipped.

		Fix: the original built and load()ed a second ML('collection')
		here on every call but never used it -- self.collection is what
		is actually read below -- so that dead, expensive load is removed.
		'''
		score = 1.0
		translation = {}
		for td in lm.keys():
			if td == 0:
				continue
			translation[td] = {}
		for tq in q:
			k_score = 0.0
			for td in lm.keys():
				if td == 0:
					continue
				p1 = getElement_twolevel(ptm, td, tq)
				p2 = getElement_onelevel(lm, td)
				# Jelinek-Mercer-style smoothing with the collection model.
				tmp_alpha = 1e-5
				tmp_score = p1 * ((1 - tmp_alpha) * p2 + self.collection.getProb(td) * tmp_alpha)
				if tmp_score == 0:
					continue
				translation[td][tq] = tmp_score
				k_score = k_score + tmp_score
			score = score * k_score
		return (score, translation)

	def passage_norm(self, passage_scores):
		'''Normalize passage_scores in place to sum to 1 (no-op when the sum is 0).'''
		denominator = sum(passage_scores)
		if denominator == 0:
			return
		for i in range(len(passage_scores)):
			passage_scores[i] = passage_scores[i] / denominator

	def doc_translation_norm(self, passage_translations, passage_scores):
		'''
		M-step aggregation for one document: combine the per-passage
		counts weighted by the normalized passage scores (note the
		key order flips to translation[tq][td]), then normalize per
		query term.  addElement_twolevel maintains the slot-0 running
		sum that serves as the denominator.
		'''
		doc_translation = {}
		for k in range(len(passage_scores)):
			if passage_scores[k] == 0:
				continue
			for td in passage_translations[k].keys():
				for tq in passage_translations[k][td].keys():
					addElement_twolevel(doc_translation, tq, td, passage_scores[k] * passage_translations[k][td][tq])
		for tq in doc_translation.keys():
			for td in doc_translation[tq].keys():
				if td == 0:
					continue  # never normalize the slot-0 sum itself
				doc_translation[tq][td] = doc_translation[tq][td] / doc_translation[tq][0]
			doc_translation[tq][0] = 1.0
		return doc_translation

	def update_translation(self, global_translation, doc_translation):
		'''
		Accumulate doc_translation (keyed [tq][td]) into
		global_translation (keyed [td][tq]), skipping the slot-0 sums;
		addElement_twolevel rebuilds the sums on the target side.
		'''
		for tq in doc_translation.keys():
			for td in doc_translation[tq].keys():
				if td == 0:
					continue
				addElement_twolevel(global_translation, td, tq, doc_translation[tq][td])

	def translation_norm(self, global_translation):
		'''Normalize each inner dict in place by its slot-0 sum, then reset the sum to 1.'''
		for td in global_translation.keys():
			for tq in global_translation[td].keys():
				if tq == 0:
					continue
				global_translation[td][tq] = global_translation[td][tq] / global_translation[td][0]
			global_translation[td][0] = 1.0

	def compare(self, pt, gt):
		'''
		Return the L1 distance between the previous (pt) and current (gt)
		models over the union of their (td, tq) pairs, excluding the
		slot-0 sums.

		Fix: the union is now parenthesized.  The original evaluated
		set(pt.keys()) | (set(gt.keys()) - set([0])) because '-' binds
		tighter than '|', so a 0 key contributed by pt was NOT removed.
		'''
		diff = 0.0
		td_list = (set(pt.keys()) | set(gt.keys())) - set([0])
		word_num = 0
		for td in td_list:
			tq_list = set()
			if td in pt:
				tq_list = tq_list | set(pt[td].keys())
			if td in gt:
				tq_list = tq_list | set(gt[td].keys())
			tq_list = tq_list - set([0])
			for tq in tq_list:
				word_num = word_num + 1
				diff = diff + abs(getElement_twolevel(pt, td, tq) - getElement_twolevel(gt, td, tq))
		print('word_num: %d' % (word_num))
		return diff

if __name__ == '__main__':
	atm = ATM()
	tm_model = TranslationModel()
	# Initial translation model to seed EM.
	i_tm = tm_model.getTM_dict(True)
	atm.train(i_tm, 10, 100)
	# Fix: persist the trained model BEFORE load().  The original called
	# load() first, which overwrote the freshly trained self.model with
	# whatever was already on disk and then wrote that stale model back,
	# silently discarding the training result.
	atm.writeModel()
	atm.load()  # round-trip through the file to verify the written model
	print(atm.getProb(65542, 71749))
