# -*- coding: utf-8 -*-

'''
基于语义分析筛选关键词
算法过程：
    1、先分词-（拓展分词方法：jieba、hanlp、Bert、GPT等方法）
    2、过滤停用词
    3、根据主题词筛选关键词

'''

import re
import jieba.posseg as pseg
from collections import Counter
from sentence_transformers import SentenceTransformer, util

from stop_word.stop_words import load_stopwords
stop_words = load_stopwords()

class SemanticKeywordFilter:
    """Filter keywords from Chinese text by semantic similarity to theme words.

    Pipeline: clean text -> POS-tagged segmentation (jieba) -> stopword/POS
    filtering -> frequency filtering -> semantic filtering against the
    embeddings of the supplied theme words.
    """

    def __init__(self, model_name='paraphrase-MiniLM-L6-v2', threshold=0.7, min_freq=2, max_freq=50, allowed_pos=None):
        """
        Initialize the semantic keyword filter.

        :param model_name: pretrained SentenceTransformer model name
        :param threshold: cosine-similarity threshold for keeping a keyword
        :param min_freq: minimum in-text frequency for a candidate keyword
        :param max_freq: maximum in-text frequency (drops overly common words)
        :param allowed_pos: set of jieba POS tags to keep; None keeps all tags
        """
        self.model = SentenceTransformer(model_name)
        self.threshold = threshold
        self.min_freq = min_freq
        self.max_freq = max_freq
        self.allowed_pos = allowed_pos
        self.stopwords = {line.strip() for line in stop_words}

    def load_stopwords(self):
        """(Re)build the stopword set from the module-level stopword list."""
        self.stopwords = {line.strip() for line in stop_words}

    def clean_text(self, text):
        """Return `text` with every non-CJK character removed."""
        return re.sub(r'[^\u4e00-\u9fa5]', '', text)

    def tokenize(self, text):
        """Segment `text`, dropping stopwords and disallowed POS tags."""
        return [
            word for word, flag in pseg.cut(text)
            if word not in self.stopwords
            and (self.allowed_pos is None or flag in self.allowed_pos)
        ]

    def filter_by_frequency(self, words):
        """Keep words whose total frequency lies within [min_freq, max_freq]."""
        word_counts = Counter(words)
        return [word for word in words if self.min_freq <= word_counts[word] <= self.max_freq]

    def filter_keywords_by_theme(self, theme_words, text, top_k=20):
        """
        Extract keywords from `text` that are semantically close to a theme word.

        :param theme_words: list of theme words to compare against
        :param text: raw input text to extract keywords from
        :param top_k: maximum number of keywords to return
        :return: deduplicated keyword list, in theme-major first-match order
        """
        cleaned_text = self.clean_text(text)
        words = self.tokenize(cleaned_text)
        keywords = self.filter_by_frequency(words)

        # Nothing survived the frequency filter — avoid encoding empty input.
        if not keywords:
            return []

        # Deduplicate while preserving first-occurrence order: duplicates can
        # never be selected twice, so encoding each surface form once is both
        # equivalent and much cheaper.
        unique_keywords = list(dict.fromkeys(keywords))

        # Batch-encode both sides once instead of re-running the model for
        # every (theme, keyword) pair as the original nested loop did.
        theme_embeddings = self.model.encode(theme_words)
        keyword_embeddings = self.model.encode(unique_keywords)
        similarities = util.cos_sim(theme_embeddings, keyword_embeddings)

        # Same selection order as the original: for each theme in turn,
        # append the keywords it matches that were not already selected.
        filtered_keywords = []
        selected = set()  # O(1) membership instead of list scans
        for theme_row in similarities:
            for keyword, similarity in zip(unique_keywords, theme_row):
                if similarity.item() > self.threshold and keyword not in selected:
                    selected.add(keyword)
                    filtered_keywords.append(keyword)

        return filtered_keywords[:top_k]

if __name__ == '__main__':

    # NOTE(review): hard-coded local path — parameterize for reuse.
    path = "/home/ubuntu/code/git/subject-word-extraction/scripts/clean_data/000001_2023_平安银行_2023年年度报告_2024-03-15.txt"
    # Explicit UTF-8: the default encoding is locale-dependent and would
    # break on this Chinese text under a non-UTF-8 locale.
    with open(path, "r", encoding="utf-8") as f:
        content = f.read()  # single read; no readlines + join round-trip

    extractor = SemanticKeywordFilter(
        threshold=0.7,
        min_freq=5,
        max_freq=50,
        allowed_pos={'n', 'nv', 'v'}
    )
    theme_words = ["智能制造"]  # example theme word
    filtered_keywords = extractor.filter_keywords_by_theme(theme_words, content)

    print("与主题词语义相似的关键词：", filtered_keywords)