import sys, codecs
import pandas as pd
import numpy as np
import jieba.posseg
import jieba.analyse
from tqdm import tqdm
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer

"""
       TF-IDF权重：
           1、CountVectorizer 构建词频矩阵
           2、TfidfTransformer 构建tfidf权值计算
           3、文本的关键字
           4、对应的tfidf矩阵
"""


# 数据预处理操作：分词，去停用词，词性筛选
def filterStopWords(text, stop_words):
    """Tokenize *text* with jieba, keeping only words whose POS tag is in the
    allowed set and which are not stop words.

    Args:
        text: raw input string to segment.
        stop_words: container of words to drop (membership-tested).

    Returns:
        A single space-joined string of the surviving tokens.
    """
    # POS tags to keep: nouns, other nouns, verbs, adverb-verbs, noun-verbs,
    # idioms, adjectives, adverbs.
    kept_pos = {'n', 'nz', 'v', 'vd', 'vn', 'l', 'a', 'd'}
    tokens = jieba.posseg.cut(text)  # segment with POS tagging
    kept = [tok.word for tok in tokens
            if tok.flag in kept_pos and tok.word not in stop_words]
    return " ".join(kept)


# tf-idf获取文本topk关键词
# tf-idf获取文本topk关键词
def extract_keywords_by_tfidf(corpus, topk=5, stop_words=None, return_score=False):
    """Extract the top-k TF-IDF keywords for each document in *corpus*.

    Args:
        corpus: List[str] — one string per document (whitespace-tokenizable;
            pre-segment Chinese text first, e.g. via ``filterStopWords``).
        topk: int — number of keywords per document (clamped to vocab size).
        stop_words: optional container of stop words; when non-empty, each
            document is re-segmented/filtered with ``filterStopWords``.
            (Default changed from a mutable ``[]`` to ``None`` — behavior
            for callers is unchanged.)
        return_score: when True, each keyword is rendered as
            ``"word,score"`` with the score rounded to 4 decimals.

    Returns:
        List[str] — for each document, the top keywords joined by ``"|"``.
    """
    if stop_words:
        corpus = [filterStopWords(text, stop_words) for text in corpus]

    # 1. Build the term-frequency (doc-term) matrix: X[i][j] = count of word j in doc i.
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # 2. Convert counts to TF-IDF weights.
    tfidf = TfidfTransformer().fit_transform(X)
    # 3. Vocabulary: column j of the matrices corresponds to words[j].
    words = vectorizer.get_feature_names_out()
    # 4. Dense TF-IDF matrix, shape [n_docs, n_words].
    weights = tfidf.toarray()

    # Clamp so that topk > vocabulary size no longer raises IndexError.
    k = min(topk, len(words))

    # 5. Per document, pick the k columns with the largest weights.
    #    np.argsort replaces the original per-document DataFrame sort:
    #    no per-loop DataFrame construction, and the invariant word list
    #    is no longer rebuilt for every document.
    results = []
    for row in tqdm(weights):
        top_idx = np.argsort(row)[::-1][:k]  # indices of largest weights, descending
        if return_score:
            parts = [f"{words[j]},{round(row[j], 4)}" for j in top_idx]
        else:
            parts = [words[j] for j in top_idx]
        results.append("|".join(parts))

    return results
