# Language-model retrieval: score each query against every document in the
# target collection using a smoothed (Jelinek-Mercer-style) unigram model.
from gensim import corpora,models,similarities
from gensim.models import LdaModel
import os
import sys
import collections

# Build statistics over the target collection: per-file term counts,
# per-file token totals, and collection-wide term counts.
root='target collections/en/'
target_file=os.listdir(root)

# Walk every file in the collection, accumulating per-document and
# collection-wide statistics.
all_file=[]       # every token in the whole collection, concatenated
dict_count={}     # filename -> Counter of that file's tokens
dict_nums={}      # filename -> number of tokens in that file
all_len=0         # total token count of the whole collection
for filename in target_file:
	with open(root+filename,'r',encoding='utf-8') as rf:
		text=rf.read()
		text_sp=text.split()
	# BUG FIX: the original rebuilt one-entry dicts ({filename: ...}) on every
	# iteration, so only the LAST file's statistics survived the loop and the
	# scoring code below would KeyError on every other filename. Accumulate
	# into the shared dicts instead.
	dict_count[filename]=collections.Counter(text_sp)
	dict_nums[filename]=len(text_sp)
	all_file.extend(text_sp)
	all_len=all_len+len(text_sp)       # running total of tokens in the collection

m=collections.Counter(all_file)            # collection-wide term frequencies

# Read the query file; each line is one query (possibly multiple terms).
query_items=[]
with open('query.txt','r',encoding='utf-8') as pf:
	for query_item in pf:
		# BUG FIX: the original appended to the undefined name `queryitems`
		# (NameError); the list defined above is `query_items`.
		query_items.append(query_item.strip('\n'))

# Score every document against every query with a smoothed unigram language
# model. For each query term:
#   p(term|doc) = lam * tf/N + (1-lam) * cf/|C|,  lam = N/(N+u)
# where N is the document length, cf the collection frequency, |C| the
# collection size; the per-term probabilities are multiplied per document.
# BUG FIX: the smoothing parameter `u` was never defined (NameError).
u=2000      # Dirichlet-style prior; 2000 is a conventional default — tune as needed
for query in query_items:
	query_sp=query.split()           # a query may contain several terms (hoisted: loop-invariant)
	with open(query+'lm_evolution.txt','w',encoding='utf-8') as wf:
		for file in target_file:
			p=1
			n=dict_nums[file]
			for query_spone in query_sp:
				# Collection back-off probability for this term.
				p_coll=m[query_spone]/all_len if all_len else 0
				if n:
					lam=n/(n+u)
					p=p*(lam*dict_count[file][query_spone]/n+(1-lam)*p_coll)
				else:
					# Empty document: fall back to the collection model
					# (the original divided by zero here).
					p=p*p_coll
			# BUG FIX: the write was indented outside the document loop, so
			# only the LAST document's score was ever written per query;
			# write one line per (document, query) pair.
			wf.write(file+'-'+query+'-'+str(p)+'\n')









