import re
from collections import Counter

import nltk
from nltk.metrics.distance import masi_distance


def proprocess_text(filename):
    """Read a text file and return its preprocessed word list.

    Pipeline: whitespace tokenization -> lowercasing -> splitting on
    punctuation/non-word characters -> stopword removal (from
    ``stopwords.txt`` in the working directory) -> dropping words of
    length <= 2. Prints the intermediate results and the 50 most
    frequent words with their frequencies (term-frequency weights).

    Parameters
    ----------
    filename : str
        Path to the input text file.

    Returns
    -------
    list[str]
        The cleaned, lowercased word list (duplicates preserved).
    """
    # BUG FIX: readline() only consumed the first line of the file;
    # read() takes the whole text so multi-line inputs are processed.
    with open(filename, "r", encoding="utf-8") as file:
        text = file.read()

    # Tokenize on whitespace.
    words = text.split()
    # Normalize case.
    words = [word.lower() for word in words]
    print("排除大小写后", words[:20])
    # Split further on punctuation / non-word characters. Raw string
    # avoids the invalid-escape warning of "\W+" in modern Python.
    words = [piece for word in words for piece in re.split(r"\W+", word)]
    # Drop the empty strings re.split leaves at token edges (e.g. "cat."
    # -> ["cat", ""]). This replaces the original redundant re-split.
    words = [word for word in words if word]

    # Load stopwords, one per line.
    with open("stopwords.txt", "r", encoding="utf-8") as stopword_file:
        stopword_lines = stopword_file.readlines()
    stopword_list = [stopword.replace("\n", "") for stopword in stopword_lines]
    print("停用词", stopword_list[:20])
    # Set membership is O(1) per lookup vs O(n) against a list.
    stopwords = set(stopword_list)

    # Remove stopwords.
    words = [word for word in words if word not in stopwords]
    print("停用词过滤后", words[:20])

    # Drop very short words (length <= 2), e.g. "a", "an", "of".
    words = [word for word in words if len(word) > 2]
    print("过滤掉长度小于2的词", words[:20])

    # Frequency distribution. Counter.most_common is equivalent to
    # nltk.FreqDist.most_common (FreqDist subclasses Counter) but needs
    # no third-party dependency here.
    fdist = Counter(words)
    features = fdist.most_common(50)
    print("特征词", features[:20])

    # Word weight = raw term frequency.
    word_weights = dict(features)
    print("特征词的权重", word_weights)

    # Return the preprocessed word list.
    return words


# Read and preprocess the three texts.
text1 = proprocess_text("text1.txt")
text2 = proprocess_text("text2.txt")
text3 = proprocess_text("text3.txt")
# Compare the texts as sets of distinct words.
set1 = set(text1)
set2 = set(text2)
set3 = set(text3)

# https://www.zhiu.cn/56293.html
# Jaccard distance measures the similarity of two sets as
# |A intersect B| / |A union B|.
# MASI distance is a weighted variant of Jaccard similarity: when the
# sets only partially overlap, the score is adjusted downward, giving a
# value below the plain Jaccard distance.
print("文本1和文本2的相似度")
print(1 - masi_distance(set1, set2))

print("文本1和文本3的相似度")
print(1 - masi_distance(set1, set3))

print("文本2和文本3的相似度")
# BUG FIX: originally compared set2 with itself, which always prints a
# similarity of 1.0 regardless of the actual texts.
print(1 - masi_distance(set2, set3))
