import os.path
import shutil
from os import mkdir, rmdir
from os.path import exists

from config import *
from Document import Document
from Lexicon import Lexicon

class ML:
	'''Maximum-likelihood language models: P(w|C) and P(w|D).

	Generates, stores, and loads collection-wise and document-wise
	term statistics.  ``doc`` is a string: either a document number or
	the literal ``'collection'``; the former selects a per-document
	model, the latter the whole collection's model.
	'''

	def __init__(self, doc):
		'''
		doc can be a document number or 'collection' for the whole
		collection; doc must be a str.
		'''
		self.doc = doc

	def getML(self, lexicon, word_list):
		'''Return raw term counts as a dense list indexed by term id.

		Term ids are 1-indexed, so the list has
		``lexicon.getSize() + 1`` slots; index 0 holds the total token
		count and unseen terms stay 0.
		'''
		counts = [0] * (lexicon.getSize() + 1)
		for word in word_list:
			term_id = lexicon.getIdByTerm(word)
			counts[term_id] += 1
			counts[0] += 1  # running total of tokens
		return counts

	def getML_dict(self, lexicon, word_list):
		'''Same information as getML(), but as a sparse dict
		{term_id: count}; key 0 is the total token count.'''
		model = {0: len(word_list)}
		for word in word_list:
			term_id = lexicon.getIdByTerm(word)
			# dict.get replaces the Python-2-only dict.has_key
			model[term_id] = model.get(term_id, 0) + 1
		return model

	def create(self):
		'''Build and write per-document and collection count files.

		Each output file's first line is the total token count;
		subsequent lines are "term_id count" pairs for non-zero
		entries only.
		'''
		if exists(ML_path):
			# os.rmdir only removes EMPTY directories and would crash
			# on a re-run; rmtree also clears stale model files
			shutil.rmtree(ML_path)
		mkdir(ML_path)

		lexicon = Lexicon()
		lexicon.load()
		# accumulate pieces and join once -- repeated string
		# concatenation is quadratic, and the old code also glued
		# consecutive documents together without a separator
		collection_parts = []

		for doc_index in range(doccount):
			print('Processing ' + str(doc_index))
			paper = Document(doc_index)
			content = paper.getFulltext()
			word_list = content.split()
			doc_ML = self.getML(lexicon, word_list)
			f = open(os.path.join(ML_path, str(doc_index)), 'w')
			try:
				f.write(str(doc_ML[0]) + '\n')
				for i in range(1, len(doc_ML)):
					if doc_ML[i] != 0:
						f.write(str(i) + ' ' + str(doc_ML[i]) + '\n')
			finally:
				f.close()

			collection_parts.append(content)
			collection_parts.append(' '.join(paper.getComments()))

		collection_list = ' '.join(collection_parts).split()
		collection_ML = self.getML(lexicon, collection_list)
		f = open(collection_path, 'w')
		try:
			f.write(str(collection_ML[0]) + '\n')
			for i in range(1, len(collection_ML)):
				if collection_ML[i] != 0:
					f.write(str(i) + ' ' + str(collection_ML[i]) + '\n')
		finally:
			f.close()

	def load(self):
		'''Load the model written by create() for self.doc into
		self.ML, a sparse dict {term_id: count}; key 0 is the total
		token count.'''
		self.ML = {}
		f = open(os.path.join(ML_path, self.doc), 'r')
		try:
			lines = f.readlines()
		finally:
			f.close()
		self.ML[0] = int(lines[0].strip())
		for line in lines[1:]:
			fields = line.split()
			self.ML[int(fields[0])] = int(fields[1])

	def getProb(self, word_id):
		'''Return the ML estimate P(word_id | self.doc); 0.0 for terms
		absent from the model.  Requires load() (or a populated
		self.ML) first.'''
		if word_id in self.ML:
			return 1.0 * self.ML[word_id] / self.ML[0]
		return 0.0

	def getWordsList(self):
		'''Return the sorted term ids present in the model, dropping
		the smallest key (the bookkeeping total at key 0).'''
		return sorted(self.ML.keys())[1:]

	def getPassageModels(self):
		'''Split this document into fixed-length passages and return a
		list of sparse passage models (one getML_dict() per passage).

		Because the tail is usually shorter than passage_length, the
		last passage is merged into the one before it (only when there
		are more than two passages -- preserving the original rule).
		'''
		paper = Document(self.doc)
		self.models = []
		items = paper.getFulltext().split()
		doc_len = len(items)
		lexicon = Lexicon()
		lexicon.load()
		left = []
		right = []
		for i in range(0, doc_len, passage_length):
			left.append(i)
			right.append(min(doc_len, i + passage_length))
		length = len(left)
		# merge the (usually short) last passage into the previous one
		if len(right) > 2:
			right[-2] = right[-1]
			length -= 1
		for i in range(length):
			self.models.append(self.getML_dict(lexicon, items[left[i]:right[i]]))
		return self.models

if __name__ == '__main__':
	# Smoke test: build all models, then load and query document '2'.
	# Single-argument parenthesized print works in both Python 2 and 3,
	# unlike the bare print statement.
	ml = ML('2')
	ml.create()
	ml.load()
	print(ml.getProb(717))
	#print(ml.getWordsList())
	print(collection_path)
	print('------------------------')
	#print(ml.getPassageModels())