# -*- coding: utf-8 -*-
# @Function: 分词处理器
# @Description: 提供文本分词、关键词提取和水库ID识别功能，是NLP处理的核心组件
# @Usage: 被importer.py、query_tester.py和llm_query_tester.py导入使用，处理文本分词
# @Dependencies: nlp_dict.py, config.py

import re
from typing import List, Optional, Tuple

import jieba
import jieba.posseg as pseg

from .config import RESERVOIR_MAPPING
from .nlp_dict import STOP_WORDS, KEYWORD_WEIGHTS

class Tokenizer:
    """Chinese text tokenizer with keyword weighting and reservoir-ID lookup.

    Wraps jieba segmentation, filters stop words/punctuation, scores
    keywords by part of speech plus a predefined weight table, and maps
    reservoir names found in text to their category ids.
    """

    # Matches tokens consisting solely of whitespace/punctuation.
    # Compiled once at class level instead of per tokenize() call.
    _NON_WORD_RE = re.compile(r'^[\s\W]+$')

    def __init__(self):
        """Initialize the tokenizer with the shared stop-word and weight tables."""
        self.stop_words = STOP_WORDS
        self.keyword_weights = KEYWORD_WEIGHTS

    def tokenize(self, text: str) -> List[str]:
        """Segment text with jieba and drop stop words and punctuation.

        Args:
            text (str): Text to segment.

        Returns:
            List[str]: Tokens that are not stop words, not empty, and not
            pure whitespace/punctuation.
        """
        words = jieba.lcut(text)
        return [
            word for word in words
            if word not in self.stop_words
            and word.strip()
            and not self._NON_WORD_RE.match(word)
        ]

    def extract_keywords(self, text: str) -> List[Tuple[str, float]]:
        """Extract keywords and their weights from text.

        Args:
            text (str): Text to analyse.

        Returns:
            List[Tuple[str, float]]: (word, weight) pairs for
            multi-character tokens, sorted by weight descending.
        """
        keywords = []
        for word, pos in pseg.cut(text):
            if word in self.stop_words:
                continue

            weight = 1.0

            # Boost by part of speech: nouns > verbs > adjectives.
            if pos in ('n', 'nr', 'ns', 'nt', 'nz'):
                weight *= 1.5
            elif pos in ('v', 'vd', 'vn'):
                weight *= 1.2
            elif pos in ('a', 'ad', 'an'):
                weight *= 1.1

            # BUG FIX: the original applied the exact-match weight AND then
            # re-applied it via the substring scan (an exact match also
            # satisfies `key in word`), squaring the boost for exact
            # matches. The two branches are now mutually exclusive.
            if word in self.keyword_weights:
                weight *= self.keyword_weights[word]
            else:
                # Partial overlap with a predefined keyword also boosts
                # the token; only the first matching entry is applied.
                for key, key_weight in self.keyword_weights.items():
                    if key in word or word in key:
                        weight *= key_weight
                        break

            if len(word) > 1:  # drop single-character tokens
                keywords.append((word, weight))

        keywords.sort(key=lambda kw: kw[1], reverse=True)
        return keywords

    def extract_reservoir_id(self, text: str) -> Optional[str]:
        """Return the category_id of the first reservoir name found in text.

        Args:
            text (str): Text to scan for known reservoir names.

        Returns:
            Optional[str]: The matching category_id, or None when no known
            reservoir name occurs in the text. (The original annotated
            this as plain ``str`` despite returning None on no match.)
        """
        for reservoir_name, category_id in RESERVOIR_MAPPING.items():
            if reservoir_name in text:
                print(f"找到水库: {reservoir_name}, category_id: {category_id}")
                return category_id
        return None

    def tokenize_question(self, question: str) -> dict:
        """Tokenize a question and bundle tokens, keywords and reservoir id.

        Args:
            question (str): Question text.

        Returns:
            dict: Keys "tokens" (unique tokens plus keyword words),
            "keywords" (top 10 as {"word", "weight"} dicts),
            "token_count" (number of raw tokens) and "category_id"
            (reservoir id or None). On any failure an empty result dict
            is returned instead of raising.
        """
        try:
            tokens = self.tokenize(question)
            keywords = self.extract_keywords(question)
            category_id = self.extract_reservoir_id(question)

            token_data = {
                "tokens": list(set(tokens + [kw[0] for kw in keywords])),
                "keywords": [
                    {"word": kw[0], "weight": kw[1]}
                    for kw in keywords[:10]  # keep the 10 highest-weighted
                ],
                "token_count": len(tokens),
                "category_id": category_id,
            }

            print(f"问题: {question}")
            print(f"分词结果: {token_data}")

            return token_data

        except Exception as e:
            # Deliberate best-effort boundary: callers expect a dict even
            # when segmentation fails, so log and return an empty result.
            print(f"分词失败: {e}")
            return {"tokens": [], "keywords": [], "token_count": 0, "category_id": None}