from collections import Counter
import math
from sklearn.feature_extraction.text import TfidfVectorizer


# Shared corpus used by both the manual and the sklearn implementation.
corpus = [
    'this is the first document',
    'this is the second second document',
    'and the third one',
    'is this the first document',
]

"""
python手动实现tfidf计算
"""
def tf_idf(corpus,splitchar):
    # 单词列表，将所有文档的单词切分之后的单词表
    word_list = list()
    for i in range(len(corpus)):
        word_list.append(corpus[i].split(splitchar))
    # 统计每个单词在各个文档中的出现次数
    count_list = list()
    for i in range(len(word_list)):
        count = Counter(word_list[i])
        count_list.append(count)

    #计算各个单词的tf-idf的值
    for i, count in enumerate(count_list):
        print("第 {} 个文档 TF-IDF 统计信息".format(i + 1))
        scores = {word: tf_idf_count(word, count, count_list) for word in count}
        sorted_word = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        for word, score in sorted_word:
            print("\tword: {}, TF-IDF: {}".format(word, round(score, 5)))

# 计算单词的tf值
# Term frequency: the word's count divided by the document's total word count.
def tf(word, count):
    """Return the term frequency of *word* within one document's Counter."""
    total_words = sum(count.values())
    return count[word] / total_words

# 计算单词的idf值
# Inverse document frequency of a word over the whole corpus.
def idf(word, count_list):
    """Return the smoothed IDF of *word*.

    :param word: the term to score
    :param count_list: one Counter (word -> count) per document
    :return: ``log(N / (1 + n_contain))`` where N is the number of
             documents and n_contain the number containing *word*.
             The +1 avoids division by zero when no document contains
             the word; as a side effect the result is negative for a
             word present in every document.
    """
    # Generator (not a list) — no need to materialize the flags.
    n_contain = sum(1 for count in count_list if word in count)
    return math.log(len(count_list) / (1 + n_contain))

# 计算单词的tf-idf值
# TF-IDF: product of the term frequency and the inverse document frequency.
def tf_idf_count(word, count, count_list):
    """Return the TF-IDF score of *word* for the document described by *count*."""
    term_freq = tf(word, count)
    inv_doc_freq = idf(word, count_list)
    return term_freq * inv_doc_freq

"""
使用sklearn库函数计算tfidf值
"""
def sklearn_tfidf(corpus):
    tfidf_vec = TfidfVectorizer()
    tfidf_matrix = tfidf_vec.fit_transform(corpus)
    #得到语料库所有不重复的词
    print(tfidf_vec.get_feature_names_out())
    #得到每个单词对应的id值
    print(tfidf_vec.vocabulary_)
    #得到每个句子所对应的向量，向量里数字的顺序是按照词语的id顺序
    print(tfidf_matrix.toarray())


# Demo entry point: run both implementations on the shared corpus.
if __name__ == '__main__':
    print('----------python手动实现tf-idf计算-------------')
    tf_idf(corpus, ' ')
    print('----------使用sklearn库函数计算tf-idf-------------')
    sklearn_tfidf(corpus)