import os
import copy
'''Analyze each keyword file and build the vocabulary.
NOTE: this module is deprecated (abandoned by the author)!'''

# key = word, value = number of documents in the corpus that contain the word
vocabulary = {}
# root = '../data/Bug_msg_eclipse/'
# path = root + 'keywords_stemed'
# new_path = root + 'filtered_keywords_stemed'         # directory for the filtered files; NOTE(review): these globals are commented out but still referenced by the functions below
# noise_path = root + 'noise_words.txt'           # records the removed noise words
# vocabulary_path = root + 'vocabulary.txt'
# vocabulary_nums_path = root + 'vocabulary_nums.txt'     # records each vocabulary word's document count, used for later filtering


def get_words_from_file(file_path):
	"""Return the set of distinct words in file_path (one word per line,
	whitespace stripped); duplicates collapse, order is not preserved."""
	with open(file_path, 'r') as reader:
		return {line.strip() for line in reader}

# Write the vocabulary together with document-occurrence counts (testing only).
def write_vocabulary_with_nums(file):
	"""Write each vocabulary word and its document count to `file`,
	tab-separated, one entry per line.

	Depends on the module-level `vocabulary` dict.
	"""
	with open(file, 'w') as writer:
		for word, count in vocabulary.items():
			writer.write(f"{word}\t{count}\n")

# Write the final vocabulary (words only).
def write_vocabulary(file):
	"""Write every word of the module-level `vocabulary` to `file`,
	one word per line."""
	with open(file, 'w') as writer:
		writer.writelines(word + '\n' for word in vocabulary)

def read_file(file):
	"""Return every line of `file` as a stripped word, preserving both
	the original order and any duplicates."""
	with open(file, 'r') as reader:
		return [line.strip() for line in reader]

# Remove the noise words from the given file and write the result to a new file.
def clean_keywords_in_file(file_name, noise_words):
	"""Filter every occurrence of a noise word out of path/file_name,
	preserving the order of the remaining words, and write them to
	new_path/file_name, one word per line.

	NOTE(review): `path` and `new_path` are commented out at module
	level, so calling this raises NameError until they are defined.
	"""
	old_file = path + '/' + file_name
	words = read_file(old_file)
	# A set gives O(1) membership tests; the original deep-copied the
	# list and called list.remove once per hit, which is O(n^2) overall
	# and needlessly convoluted. The filtered result is identical:
	# every occurrence of a noise word is dropped, order is preserved.
	noise = set(noise_words)
	kept = [w for w in words if w not in noise]
	new_file = new_path + '/' + file_name
	with open(new_file, 'w') as writer:
		for w in kept:
			writer.write(w + '\n')

# docs_len: number of documents in the corpus
def write_noise_word_file(docs_len):
	"""Collect noise words from the module-level `vocabulary`, write
	them to `noise_path` (one per line), and return them as a list.

	A word is considered noise when it appears in more than half of all
	documents (too common) or in fewer than 10 documents (too rare).

	NOTE(review): `noise_path` is commented out at module level, so the
	write raises NameError until it is defined.
	"""
	# Iterate items() directly instead of keys() + an index lookup.
	noise_words = [
		word for word, times in vocabulary.items()
		if times > (docs_len / 2) or times < 10
	]

	# Persist the noise words; iterate the list itself rather than
	# range(len(...)).
	with open(noise_path, 'w') as writer:
		for word in noise_words:
			writer.write(word + '\n')
	return noise_words

# Clean the vocabulary by deleting its noise words, then re-filter
# each document's words against the cleaned vocabulary.
def clean_vocabulary_and_docs(noise_words, doc_names):
	"""Remove every noise word from the module-level `vocabulary`
	(raises KeyError if one is absent), then rewrite each named
	document file with its noise words stripped out.
	"""
	# Drop the noise words from the vocabulary.
	for noise in noise_words:
		del vocabulary[noise]
	print(len(vocabulary))
	# write_vocabulary_with_nums(vocabulary_nums_path)
	# write_vocabulary(vocabulary_path)

	# Rewrite every keywords_stemed file: any word not in the cleaned
	# vocabulary is removed, producing the new keywords folder while
	# keeping the original word order.
	for doc in doc_names:
		print(doc)
		clean_keywords_in_file(doc, noise_words)

def read_files_to_get_vocabulary():
	"""Scan every file under `path`, count for each word the number of
	documents it occurs in, write the vocabulary files, and return the
	vocabulary's keys view.

	NOTE(review): `path`, `vocabulary_nums_path` and `vocabulary_path`
	are commented out at module level; this raises NameError until they
	are defined.
	"""
	names = os.listdir(path)
	print("file nums:", len(names))
	for name in names:
		file = path + '/' + name
		# get_words_from_file returns a set, so each word is counted at
		# most once per document.
		words = get_words_from_file(file)
		for word in words:
			# dict.get replaces the `word in vocabulary.keys()` check,
			# which performed two lookups per word.
			vocabulary[word] = vocabulary.get(word, 0) + 1
	write_vocabulary_with_nums(vocabulary_nums_path)
	write_vocabulary(vocabulary_path)

	# noise_words = write_noise_word_file(len(names))
	# clean_vocabulary_and_docs(noise_words, names)
	return vocabulary.keys()

if __name__ == '__main__':
    # NOTE(review): currently fails with NameError because `path` (and the
    # other config globals) are commented out at the top of the file.
    read_files_to_get_vocabulary()