from os.path import exists, join
from Document import Document
from Lexicon import Lexicon
from config import *

class TranslationModel:
	"""Word-to-word translation model built from document abstracts and comments.

	The model lives in ``self.tm``: a list indexed by source-term id
	(1-based; slot 0 is unused).  Each entry is a dict mapping a target
	term id to its co-occurrence count; the special key ``0`` holds the
	total number of comment words seen with that source term and is used
	as the probability normalizer in getProb().

	Relies on names imported at module level from ``config``:
	``lexicon_size``, ``TM_path``, ``doccount``, ``filter_num``.
	"""

	def load(self, is_filt=False):
		"""Load the translation model from TM_path into self.tm.

		Line i of the file describes source term id i+1 and has the form
		``<total> <id>:<count> <id>:<count> ...``.  When is_filt is true,
		at most ``filter_num`` pairs per line are kept (create() writes
		pairs sorted by descending count, so these are the largest).
		"""
		self.tm = [{} for _ in range(lexicon_size + 1)]
		with open(TM_path, 'r') as tm_file:
			lines = tm_file.readlines()

		for i in range(lexicon_size):
			fields = lines[i].split()
			row = self.tm[i + 1]
			row[0] = int(fields[0])
			pair_list = fields[1:]
			length = len(pair_list)
			if is_filt and filter_num < length:
				length = filter_num
			for pair in pair_list[:length]:
				kv = pair.split(':')
				row[int(kv[0])] = int(kv[1])

	def getTM_dict(self, is_filt=False):
		"""Return the global translation model as a dict of dicts.

		Called from ATM to obtain the second-order dict representation of
		the global translation model: source term id -> {target id: count},
		with key 0 holding the normalizer total.  Rows whose total is zero
		are omitted entirely.  When is_filt is true, at most ``filter_num``
		pairs are kept per row.
		"""
		tm = {}
		with open(TM_path, 'r') as tm_file:
			lines = tm_file.readlines()

		for i in range(lexicon_size):
			fields = lines[i].split()
			sum_num = int(fields[0])
			if sum_num == 0:
				continue  # skip source terms never seen with any comment word
			row = {0: sum_num}
			pair_list = fields[1:]
			length = len(pair_list)
			if is_filt and filter_num < length:
				length = filter_num
			for pair in pair_list[:length]:
				kv = pair.split(':')
				row[int(kv[0])] = int(kv[1])
			tm[i + 1] = row
		return tm

	def create(self):
		"""Build the translation model from all documents and write it to TM_path.

		For every document that has both an abstract and comments, counts
		co-occurrences of (abstract word, comment word) pairs, then writes
		one line per source term: the normalizer total followed by
		``id:count`` pairs sorted by descending count.
		"""
		abstracts = []
		comments = []
		for i in range(doccount):
			paper = Document(i)
			abstracts.append(paper.getAbstract())
			comments.append(paper.getComments())

		self.lexicon = Lexicon()
		self.lexicon.load()

		# One dict per term id; key 0 accumulates the normalizer total.
		self.tm = [{0: 0} for _ in range(lexicon_size + 1)]

		for i in range(doccount):
			print('Processing doc %d' % i)
			if len(abstracts[i]) != 0 and len(comments[i]) != 0:
				self.__UniCalculate(abstracts[i], comments[i])

		with open(TM_path, 'w') as tm_file:
			for i in range(1, lexicon_size + 1):
				parts = [str(self.tm[i][0])]
				# Sort pairs by descending count and drop the first entry:
				# key 0 holds the sum of all counts, so it sorts first.
				pairs = sorted(self.tm[i].items(),
						key=lambda kv: kv[1], reverse=True)[1:]
				for key, value in pairs:
					parts.append(str(key) + ':' + str(value))
				tm_file.write(' '.join(parts) + ' \n')

	def __UniCalculate(self, abstract, comment_list):
		"""Accumulate counts, treating each comment of a document separately.

		Words are deduplicated within the abstract and within each comment
		(bug fix: the original called __unify but discarded its return
		value, so no deduplication actually happened).
		"""
		abs_words = self.__unify(abstract.split())
		for comment in comment_list:
			comment_words = self.__unify(comment.split())
			for aw in abs_words:
				aw_id = self.lexicon.getIdByTerm(aw)
				row = self.tm[aw_id]
				for cw in comment_words:
					cw_id = self.lexicon.getIdByTerm(cw)
					row[cw_id] = row.get(cw_id, 0) + 1
				row[0] += len(comment_words)

	def __MultiCalculate(self, abstract, comment_list):
		"""Accumulate counts, treating all comments of a document as one.

		Currently unused alternative to __UniCalculate.  Bug fixes: the
		original iterated ``comment_words()`` (calling an empty list)
		instead of ``comment_list``, and extended with ``comment.strip()``
		— individual characters — instead of ``comment.split()`` words;
		it also discarded __unify's return value.
		"""
		abs_words = self.__unify(abstract.split())
		comment_words = []
		for comment in comment_list:
			comment_words.extend(comment.split())
		comment_words = self.__unify(comment_words)

		for aw in abs_words:
			aw_id = self.lexicon.getIdByTerm(aw)
			row = self.tm[aw_id]
			for cw in comment_words:
				cw_id = self.lexicon.getIdByTerm(cw)
				row[cw_id] = row.get(cw_id, 0) + 1
			row[0] += len(comment_words)

	def __unify(self, word_list):
		"""Return word_list with duplicates removed (order not preserved)."""
		return list(set(word_list))

	def getProb(self, orig_id, dest_id):
		"""Return P(dest_id | orig_id) = count / total, or 0.0 if unseen."""
		row = self.tm[orig_id]
		if dest_id in row:
			return 1.0 * row[dest_id] / row[0]
		return 0.0

	def getExtensionList(self, word_id):
		"""Return the sorted target term ids for word_id, excluding key 0."""
		# Key 0 (the normalizer) is always the smallest key, so [1:] drops it.
		return sorted(self.tm[word_id].keys())[1:]

if __name__ == '__main__':
	# Build the model from the corpus, reload it from disk, and
	# sanity-check one translation probability.
	tm = TranslationModel()
	tm.create()
	tm.load(False)
	# print() with a single argument behaves identically under Python 2
	# and Python 3, unlike the bare print statement used before.
	print(tm.getProb(206, 37107))
