import jieba
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer

# Load the stop-word list used for token filtering (one stop word per line).
# NOTE(review): sep='aaa' looks like a deliberate "never matches" separator so
# pandas reads each full line into the single column "w" — confirm the stop-word
# file really is one word per line with no 'aaa' substrings.
stop_list = list(pd.read_table("config/stop_words.txt", names=["w"], sep='aaa', encoding="utf-8", engine="python").w)


def m_cut(in_txt):
    """Segment *in_txt* with jieba, dropping stop words and single-character tokens."""
    tokens = jieba.lcut(in_txt)
    return [tok for tok in tokens if len(tok) > 1 and tok not in stop_list]


def get_keywords_through_tfidf(txt, limit=20):
    """Return the top *limit* keywords of *txt* ranked by TF-IDF weight.

    The text is segmented with m_cut, vectorized as a single document, and
    each term's TF-IDF weight is computed over that one-document corpus.

    Args:
        txt: Raw input text (a single document).
        limit: Maximum number of (word, weight) pairs to return (default 20).

    Returns:
        List of (word, tfidf_weight) tuples sorted by weight, descending.
    """
    # CountVectorizer expects whitespace-separated tokens, so join the
    # segmented words back into one space-delimited string.
    txt_list = [" ".join(m_cut(txt))]
    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform(txt_list)  # sparse term-frequency matrix
    tfidf = TfidfTransformer().fit_transform(counts)  # TF-IDF from term counts
    words = vectorizer.get_feature_names_out()  # vocabulary, aligned with columns
    # Only one document was vectorized, so the matrix has exactly one row;
    # pair each vocabulary word with its weight directly (the original nested
    # loop over rows re-wrote every key once per row, which was redundant).
    weights = tfidf.toarray()[0]
    data_dict = dict(zip(words, weights))
    # Sort by weight, highest first, and keep the top `limit` entries.
    return sorted(data_dict.items(), key=lambda x: x[1], reverse=True)[:limit]
