# Check how many OOV (out-of-vocabulary) words appear in the test + validation sets.
# Tricky point: when the vocabulary was originally built, the test set was included too.
'''
Correction to the above: when using pre-trained word vectors, the embedding model is
fine-tuned on the training set only, so OOV words do exist — some words in the test
set cannot be found in the word-embedding model's vocabulary.
However, when using TensorFlow's built-in lookup, its vocabulary table includes the
test set as well, so comparing the two setups this way is not entirely fair.
'''
from gensim.models import Word2Vec
import os

from util.PATH import PATH
from src.data_helper import DataHelper

# Directory holding the trained Word2Vec model files loaded by read_embedding_model().
save_path = '/document/Bug_msg/data'

def read_embedding_model(model_name):
	'''
	Load a trained gensim Word2Vec model from disk.

	:param model_name: file name of the saved model under ``save_path``
	:return: (vocabulary, lookup_table) — the index-ordered word list
	         and the aligned ndarray of word vectors
	'''
	model_file = os.path.join(save_path, model_name)
	keyed_vectors = Word2Vec.load(model_file).wv
	# index2word maps row index -> token (this is effectively the vocabulary);
	# vectors is the ndarray lookup table whose rows align with index2word.
	return keyed_vectors.index2word, keyed_vectors.vectors

def read_dataset(name, vocabulary):
	'''
	Count in-vocabulary vs. out-of-vocabulary (OOV) words over the
	validation + test splits of a dataset and write the statistics
	to ``<name>.txt`` in the current directory.

	:param name: dataset name understood by PATH (e.g. 'Eclipse')
	:param vocabulary: iterable of known words (e.g. Word2Vec index2word)
	'''
	path = PATH(name)
	data_helper = DataHelper(path)
	bug_msg_all, _ = data_helper.get_msg_all()
	# Assumes time_windows[0] = train, [1] = validation, [2] = test
	# from the 8:1:1 split — TODO confirm against DataHelper.
	time_windows = data_helper.split_dataset_by_eight_one_one(bug_msg_all)

	# Fix: build a set once so each membership test is O(1). The original
	# tested ``word in vocabulary`` against a list (index2word), making the
	# hot loop O(len(vocabulary)) per word.
	vocab_set = set(vocabulary)

	in_words = set()
	out_words = set()
	in_frequency = 0
	out_frequency = 0
	total = len(time_windows[1]) + len(time_windows[2])
	count = 0
	for split in (1, 2):  # validation and test splits only
		for idx in time_windows[split]:
			print('{}/{}'.format(count, total))  # simple progress indicator
			count += 1
			# Each corpus file appears to hold one token per line.
			with open(path.path_corpus + str(idx), 'r') as reader:
				# Iterate the file object directly instead of readlines():
				# no need to materialize the whole file in memory.
				for line in reader:
					word = line.strip()
					if word in vocab_set:
						in_words.add(word)
						in_frequency += 1
					else:
						out_words.add(word)
						out_frequency += 1
	with open('{}.txt'.format(name), 'w') as writer:
		writer.write('包含单词数：{}，包含单词频数：{}\n'.format(len(in_words), in_frequency))
		writer.write('oov单词数：{}，oov单词频数：{}\n'.format(len(out_words), out_frequency))

if __name__ == '__main__':
	# Run the OOV analysis for one dataset end to end.
	dataset_name = 'Eclipse'
	embedding_model = '{}_new_corpus_300_cbow_hs'.format(dataset_name)
	vocab, _lookup = read_embedding_model(embedding_model)
	read_dataset(dataset_name, vocab)