from os.path import join, exists
from Document import Document
from Lexicon import Lexicon
from QueryManager import QueryManager
from TranslationModel import TranslationModel
import math
from OfflineTranslationModel_multiprocess import OfflineTranslationModel
from multiprocessing import Process, Pipe
import sys
from ML import ML
from config import *
from OfflineTMSingle import OfflineTMSingle
from ATM_mul import ATM
from DictUtils import *

class Retrieval:
	'''
	Scores and ranks documents for queries using either a language model
	(LM) or a translation model (TM), with optional passage-level
	reranking through an aligned translation model (ATM).
	'''

	def __init__(self, alpha, beta, model='LM'):
		'''
		alpha: interpolation weight of the collection model (smoothing).
		beta:  interpolation weight between document model and
		       translation model.
		model: 'LM' selects language-model scoring; any other value
		       selects translation-model scoring (dispatched through
		       self.computeModel in computeScoreList).
		'''
		self.alpha = alpha
		self.beta = beta

		self.lexicon = Lexicon()
		self.lexicon.load()

		self.queries = QueryManager()
		self.queries.load()

		self.collection = ML('collection')
		self.collection.load()

		self.ATM_mul = ATM()
		self.ATM_mul.load()

		# computeScoreList calls self.computeModel, which picks the
		# scoring function chosen here.
		# BUGFIX: this explanation used to be a triple-quoted string at
		# column 0 inside this method body, which broke the indentation
		# and made the selector below unreachable.
		if model == 'LM':
			self.computeModel = self.computeLMscore
		else:
			self.computeModel = self.computeTMscore

	def computeLMscore(self, query, docId):
		'''
		Language-model score between one query and one document:
		sum over query terms of log of the collection-smoothed term
		probability under the document model.
		'''
		ml = ML(str(docId))
		ml.load()
		score = 0.0
		for word in query.split():
			word_id = self.lexicon.getIdByTerm(word)
			prob = ml.getProb(word_id)
			# Jelinek-Mercer style smoothing with the collection model
			# keeps prob > 0 so math.log never sees zero.
			prob = self.alpha * self.collection.getProb(word_id) + (1 - self.alpha) * prob
			score += math.log(prob)
		return score

	def computeTMscore(self, query, docId):
		'''
		Global translation-model score between one query and one
		document: the offline translation model is interpolated with
		the document model (weight beta) and then smoothed with the
		collection model (weight alpha).
		'''
		ml = ML(str(docId))
		ml.load()
		otm = OfflineTMSingle(str(docId))
		otm.load()
		score = 0.0
		for word in query.split():
			word_id = self.lexicon.getIdByTerm(word)
			prob = otm.getProb(word_id)
			prob = self.beta * ml.getProb(word_id) + (1 - self.beta) * prob
			prob = self.alpha * self.collection.getProb(word_id) + (1 - self.alpha) * prob
			score += math.log(prob)
		return score

	def computeScoreList(self, queryNo):
		'''
		Score every document in the collection against one query.
		Returns a list of (score, docId) pairs, best score first.
		'''
		query = self.queries.getQuery(queryNo).getQuery()
		scorelist = []
		for docId in xrange(doccount):
			# BUGFIX: dispatch through the model selected in __init__;
			# previously this always called computeLMscore, making the
			# 'model' constructor argument dead.
			scorelist.append((self.computeModel(query, docId), docId))
		return sorted(scorelist, key=lambda d: d[0], reverse=True)

	def computeRerankingScore(self, queryNo, docId):
		'''
		Scoring function for reranking with the Aligned Translation
		Model: each passage is scored as the product over query terms
		of a smoothed translation probability; the best passage score
		is returned.
		'''
		# NOTE(review): unlike the other scorers this does not call
		# ml.load() before getPassageModels() — presumably that method
		# loads on demand; confirm against ML's implementation.
		ml = ML(str(docId))
		passage_models = ml.getPassageModels()
		query = self.queries.getQuery(queryNo).getQuery().split()
		queryid_list = [self.lexicon.getIdByTerm(term) for term in query]
		passage_scores = []
		for lm in passage_models:
			score = 1.0
			for tq in queryid_list:
				halfscore = 0.0
				for td in lm.keys():
					if td == 0:
						continue
					halfscore += self.ATM_mul.getProb(td, tq) * getElement_onelevel(lm, td)
				halfscore = self.beta * getElement_onelevel(lm, tq) + (1 - self.beta) * halfscore
				halfscore = self.alpha * self.collection.getProb(tq) + (1 - self.alpha) * halfscore
				# BUGFIX: accumulate inside the per-term loop; this line
				# used to sit one level out, so only the LAST query term
				# contributed to the passage score.
				score = score * halfscore
			passage_scores.append(score)
		return max(passage_scores)

	def rerank(self, queryNo, reranking_list):
		'''
		Rescore the given (score, docId) candidates with the ATM
		reranker and return them sorted best-first.
		'''
		reranked_list = [(self.computeRerankingScore(queryNo, docId), docId)
				for (score, docId) in reranking_list]
		return sorted(reranked_list, key=lambda d: d[0], reverse=True)

	def computeMRRscore_rerank(self, queryNo):
		'''
		MRR contribution for one query with reranking: if the answer
		document appears in the reranked top list, use that rank;
		otherwise fall back to its rank in the base score list.
		'''
		ansdoc = self.queries.getQuery(queryNo).getAnsdoc()
		scorelist = self.computeScoreList(queryNo)
		reranked_list = self.rerank(queryNo, scorelist[:reranking_num])
		for i in xrange(reranking_num):
			if reranked_list[i][1] == ansdoc:
				return 1.0 / (i + 1)
		for i in xrange(reranking_num, len(scorelist)):
			if scorelist[i][1] == ansdoc:
				return 1.0 / (i + 1)
		# BUGFIX: previously fell through returning None, which made
		# the summation in computeMRRscore_mulprocessing raise.
		return 0.0

	def computeMRRscore(self, queryNo):
		'''
		MRR contribution for one query without reranking: reciprocal
		rank of the answer document in the base score list.
		'''
		ansdoc = self.queries.getQuery(queryNo).getAnsdoc()
		scorelist = self.computeScoreList(queryNo)
		for i in xrange(len(scorelist)):
			if scorelist[i][1] == ansdoc:
				return 1.0 / (i + 1)
		# BUGFIX: explicit fallback instead of an implicit None.
		return 0.0

	def computeMRRscore_mulprocessing(self, conn, begin, end):
		'''
		Subtask of the per-query work split: accumulates MRR scores
		for queryNo in [begin, end) and sends the sum over the pipe.
		'''
		total = 0.0
		for i in xrange(begin, end):
			total += self.computeMRRscore_rerank(i)
		conn.send(total)
		conn.close()

	def computeFinalScore(self, query_num=200, kernelnum=16):
		'''
		Entry point of the whole scoring process: fans the queries out
		over `kernelnum` worker processes and returns the mean MRR.

		query_num: number of queries to evaluate.
		kernelnum: number of worker processes (was hard-coded at 16).
		'''
		pool = []
		connectPairs = []
		for i in xrange(kernelnum):
			connectPairs.append(Pipe())
			pool.append(Process(target=self.computeMRRscore_mulprocessing,
				args=(connectPairs[i][0], query_num * i / kernelnum, query_num * (i + 1) / kernelnum)))
		for worker in pool:
			worker.start()
		final_score = 0.0
		# Drain every pipe before joining so no worker blocks on send.
		for i in xrange(kernelnum):
			final_score += connectPairs[i][1].recv()
		for worker in pool:
			worker.join()
		return final_score / query_num

	def temp(self):
		'''Mean non-reranked MRR over the first 100 queries.'''
		result = 0.0
		for i in xrange(100):
			result += self.computeMRRscore(i)
		return result / 100

if __name__ == '__main__':
	for beta in xrange(1, 10, 1):
		ret = Retrieval(1e-5, 0.1*beta)
		print 'for beta = %f score = %f.' % (0.1*beta, ret.computeFinalScore())

