import os
# Absolute path of the directory this module lives in; all data files
# below are resolved relative to it so loading works from any CWD.
root_path = os.path.dirname(os.path.abspath(__file__))

# Load the IDF vocabulary (a comma-separated list of terms).
with open(os.path.join(root_path, "idf_vocabulary.txt"), "r", encoding="utf-8") as vocab_file:
    vocabulary = vocab_file.read().split(",")

# Load the IDF values (comma-separated floats; presumably positionally
# aligned with `vocabulary` — verify against the data files).
with open(os.path.join(root_path, "idf_module.txt"), "r", encoding="utf-8") as idf_file:
    idf_value = [float(v) for v in idf_file.read().split(",")]
# Mean IDF over the whole table.
idf_value_avg = sum(idf_value) / len(idf_value)

# Load stop words, one per line. A trailing newline in the file yields
# an empty-string entry; kept as-is to match the original behavior.
with open(os.path.join(root_path, "stop_word.txt"), "r", encoding="utf-8") as stop_file:
    stop_word = stop_file.read().split("\n")

# Load profanity terms (one per line) and join them into a regex
# alternation pattern. Empty lines are skipped: an empty alternation
# branch (e.g. from a trailing newline in the file, producing "a|b|")
# would make the pattern match the empty string at every position.
# NOTE(review): terms are not regex-escaped — any regex metacharacter
# in bad_word.txt is interpreted as regex syntax; confirm that is
# intended before escaping.
with open(os.path.join(root_path, "bad_word.txt"), "r", encoding="utf-8") as bad_file:
    bad_word = "|".join(w for w in bad_file.read().split("\n") if w)

# Load local synonym groups: each line of the file is a comma-separated
# group of words; map every word to the other words of its group.
# If a word appears in more than one group, the last group read wins
# (same dict semantics as the original zip-based construction).
# Built directly into the dict — avoids the original's quadratic
# copy-and-delete per word and the leaked list1/list2 temporaries.
with open(os.path.join(root_path, "similarity_word.txt"), "r", encoding="utf-8") as sim_file:
    similarity_word = {}
    for line in sim_file.read().split("\n"):
        group = line.split(",")
        for i, word in enumerate(group):
            # All group members except the word itself.
            similarity_word[word] = group[:i] + group[i + 1:]

# Load locally-declared NON-similar word groups: each line is a
# comma-separated group; map every word to the other words of its group.
# Last group read wins on duplicates (same dict semantics as the
# original zip-based construction). Built directly into the dict —
# avoids the original's quadratic copy-and-delete per word and the
# leaked list1/list2 temporaries.
with open(os.path.join(root_path, "not_similarity_word.txt"), "r", encoding="utf-8") as dissim_file:
    not_similarity_word = {}
    for line in dissim_file.read().split("\n"):
        group = line.split(",")
        for i, word in enumerate(group):
            # All group members except the word itself.
            not_similarity_word[word] = group[:i] + group[i + 1:]


with open(os.path.join(root_path,"local_vocabulary.txt"), "r", encoding="utf-8") as f7:
    local_vocabularys = f7.read(-1).split("\n")