# -*- coding:utf-8 -*-

import sys
import os
sys.path.append(os.getcwd())

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

from scripts.stop_word.stop_words import load_stopwords
from scripts.cut_word.jieba_cut_word import JiebaCutWord
from scripts.cut_word.bert_cut_word import ChineseBERTTokenizerWrapper

# Module-level tokenizer singletons shared by all extractor instances;
# instantiated once at import time to avoid re-loading models per call.
jieba_cut_word = JiebaCutWord()
# BERT-based tokenizer is kept ready as a drop-in alternative (see the
# commented-out call in preprocess_text) — switching is a one-line change.
bert_cut_word = ChineseBERTTokenizerWrapper()

class IDFKeywordRxtractor:
    """Extract keywords from Chinese documents, ranked by TF-IDF score.

    Usage: construct, call fit(documents), then extract_keywords(doc_index).

    NOTE(review): the class name keeps the original spelling ("Rxtractor",
    presumably a typo for "Extractor") because callers reference it by
    this name.
    """

    def __init__(self):
        # Raw stopword list, kept under the original attribute name for
        # backward compatibility with any external readers.
        self.stopwords = load_stopwords()
        # Precomputed set for O(1) membership tests.  The original code
        # evaluated `self.stopwords + ['\n','\t','\r']` inside the filter
        # comprehension, rebuilding that list and scanning it linearly for
        # EVERY token — this does the work once instead.
        self._stopword_set = set(self.stopwords) | {'\n', '\t', '\r'}
        self.vectorizer = TfidfVectorizer()

    def preprocess_text(self, text):
        """Tokenize *text* and drop stopwords; return a space-joined string
        suitable as input for TfidfVectorizer."""
        # 1. Tokenize (jieba by default; a BERT tokenizer is available via
        #    the module-level `bert_cut_word` singleton).
        words = jieba_cut_word.segment(text)
        # words = bert_cut_word.tokenize(text)
        # 2. Remove stopwords and whitespace tokens.
        return " ".join(word for word in words if word not in self._stopword_set)

    def fit(self, documents):
        """Compute the TF-IDF matrix for *documents* (iterable of raw strings).

        Sets `self.tfidf_matrix` (sparse, docs x terms) and
        `self.feature_names` (term for each matrix column).
        """
        # 3. Preprocess every document, then fit the vectorizer, which
        #    computes inverse document frequencies (IDF) across the corpus.
        processed_docs = [self.preprocess_text(doc) for doc in documents]
        self.tfidf_matrix = self.vectorizer.fit_transform(processed_docs)
        self.feature_names = self.vectorizer.get_feature_names_out()

    def extract_keywords(self, doc_index, top_n=20):
        """Return up to *top_n* (term, score) pairs for document *doc_index*,
        best score first.  Requires a prior call to fit()."""
        # 4. Densify the single document row into a flat score vector.
        scores = np.asarray(self.tfidf_matrix[doc_index].todense()).flatten()
        # 5. Keep only terms that actually occur (score > 0), sorted by
        #    descending TF-IDF score, truncated to top_n.
        keywords = [(self.feature_names[i], scores[i])
                    for i in range(len(scores)) if scores[i] > 0]
        return sorted(keywords, key=lambda x: x[1], reverse=True)[:top_n]
    
if __name__ == "__main__":
    # Sample corpus for a quick smoke run.
    documents = [
        "我爱自然语言处理和机器学习。",
        "机器学习是一种数据分析方法，它使用算法来从数据中学习。",
        "自然语言处理让计算机能够理解和生成人类语言。",
    ]

    # Build the extractor and fit the TF-IDF model on the corpus.
    extractor = IDFKeywordRxtractor()
    extractor.fit(documents)

    # Print the ranked keywords of every document (1-based labels).
    for doc_no, _ in enumerate(documents, start=1):
        print(f"文档 {doc_no + 1 - 1} 的关键词：", extractor.extract_keywords(doc_no - 1))












