# Author: Nanyu
# Date: 2022/3/16 15:36
import re
import math
# Import the stop-word corpus from the NLTK natural-language toolkit
from nltk.corpus import stopwords


def loadText():
    """Return the demo corpus as a list of raw English sentences.

    :return: list[str], e.g. ["i love you", "you loving me", "just a case"]
    """
    return [
        "I love you,you loving me.love love love test test work",
        "I am claimed exchangeable like water,and by contrast,my clarity loving keeps consistent exchangeable like water,and by contrast",
        "There exists an army of companies love coming from the distance,and loving exists an army of companies coming from consequently i feel more than delight.",
    ]


def tokenizer(text):
    """Split each sentence into lowercase word tokens.

    Punctuation and other special characters are replaced with spaces
    before splitting, so tokens never contain punctuation.

    :param text: list[str] of raw sentences, e.g. ["I love you", ...]
    :return: list[list[str]] of tokens, e.g. [["i", "love", "you"], ...]
    """
    # Characters to strip.  Building a character class via re.escape fixes
    # two escaping bugs in the original hand-written alternation: the lone
    # backslash '\\' fused with the following '|' (so '\' was never
    # removed), and the unescaped '^' matched the empty string instead of
    # a literal caret (so '^' was never removed either).
    strip_chars = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\x97\x96”“'
    pattern = re.compile("[" + re.escape(strip_chars) + "]")

    wordlist = []
    for sentence in text:
        cleaned = pattern.sub(" ", sentence.lower()).strip()
        # str.split() with no argument collapses runs of whitespace, so
        # consecutive separators no longer produce empty-string tokens
        # (the original split(" ") did).
        wordlist.append(cleaned.split())
    return wordlist


def stopWords(wordlist):
    """Remove stop words from each tokenized sentence.

    :param wordlist: list[list[str]], e.g. [['i', 'love', 'you'], ['i', 'am', 'claimed']]
    :return: list[list[str]] with stop words filtered out, e.g. [['love'], ['claimed']]
    """
    # NLTK's English stop words plus a few custom filter tokens; a set
    # gives O(1) membership tests with identical filtering results.
    stop_words = set(stopwords.words("english"))
    stop_words.update(['-s', '-ly', '</s>', 's'])

    return [[word for word in sentence if word not in stop_words]
            for sentence in wordlist]


def countTFIDfF(words_list):
    """Compute per-document TF-IDF scores for the whole corpus.

    :param words_list: tokenized, stop-word-free documents
    :return: (unique_words_list, tf_idf_list) — parallel lists where
             tf_idf_list[i][j] is the TF-IDF score (rounded to 4 decimal
             places) of the word unique_words_list[i][j] in document i
    """
    unique_words_list = []
    tf_idf_list = []
    for document in words_list:
        # Deduplicate so each distinct word is scored exactly once.
        vocab = set(document)
        unique_words_list.append(list(vocab))
        scores = [round(countTf(word, document) * countIdf(word, words_list), 4)
                  for word in vocab]
        tf_idf_list.append(scores)
    return unique_words_list, tf_idf_list


def countTf(word, sentence_list):
    """Compute term frequency: occurrences of *word* / document length.

    :param word: the token whose frequency is measured
    :param sentence_list: the tokenized document (list of words)
    :return: float in [0, 1]; 0.0 for an empty document — the original
             raised ZeroDivisionError on empty input
    """
    if not sentence_list:
        return 0.0
    # list.count does the occurrence tally in C instead of a manual loop.
    return sentence_list.count(word) / len(sentence_list)


def countIdf(word, word_list):
    """Compute smoothed inverse document frequency.

    IDF = log10(total documents / (documents containing word + 1)).
    The +1 keeps the denominator non-zero for words absent from every
    document; as a side effect the IDF can be negative for a word that
    appears in all documents.

    :param word: the token to score
    :param word_list: all tokenized documents
    :return: float IDF value
    """
    doc_hits = sum(1 for sentence in word_list if word in sentence)
    return math.log10(len(word_list) / (doc_hits + 1))


if __name__ == "__main__":
    # Pipeline: raw corpus -> tokens -> stop-word filtering -> TF-IDF.
    corpus = loadText()
    tokenized = tokenizer(corpus)
    filtered = stopWords(tokenized)
    vocab, scores = countTFIDfF(filtered)
    print(vocab)
    print(scores)
