"""
敏感词检测算法
"""
import os
import re
from typing import List, Set, Tuple
from config.settings import settings
from utils.logger import audit_logger
from models.schemas import SensitiveWordResult


class SensitiveWordDetector:
    """Sensitive-word detector backed by a plain-text word list.

    Words are matched case-insensitively, both verbatim and after
    normalizing common obfuscations (leet-speak digit/symbol
    substitutions and Chinese numerals).
    """

    # Normalization pass 1: Chinese numerals -> Arabic digits.
    _CN_DIGIT_TABLE = str.maketrans('一二三四五', '12345')
    # Normalization pass 2: common leet/symbol substitutions -> letters
    # ('0'->'o', '1'->'i', '3'->'e', '4'->'a', '5'->'s', '7'->'t',
    #  '@'->'a', '$'->'s', '!'->'i').
    _LEET_TABLE = str.maketrans('013457@$!', 'oieastasi')

    def __init__(self):
        # All known sensitive words, stored lower-cased.
        self.sensitive_words: Set[str] = set()
        self.load_sensitive_words()

    def load_sensitive_words(self):
        """Load the sensitive-word dictionary from the configured file.

        A missing file or read error is logged and leaves the current
        word set unchanged (best-effort loading; the detector still works,
        just with an empty/old dictionary).
        """
        try:
            words_file = settings.sensitive_words_file
            if not os.path.exists(words_file):
                audit_logger.warning(f"敏感词文件不存在: {words_file}")
                return

            with open(words_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    # Skip blank lines and '#' comment lines.
                    if line and not line.startswith('#'):
                        self.sensitive_words.add(line.lower())

            audit_logger.info(f"加载敏感词 {len(self.sensitive_words)} 个")

        except Exception as e:
            audit_logger.error(f"加载敏感词失败: {e}")

    def detect(self, text: str) -> SensitiveWordResult:
        """Detect sensitive words in *text*.

        Returns a SensitiveWordResult carrying the sorted, de-duplicated
        matched words and a heuristic confidence score. Detection errors
        are logged and reported as a non-sensitive result with
        confidence 0.0 rather than raised, so callers never crash here.
        """
        try:
            if not text:
                # Empty input is confidently clean.
                return SensitiveWordResult(
                    has_sensitive=False,
                    sensitive_words=[],
                    confidence=1.0
                )

            # Match case-insensitively.
            text_lower = text.lower()
            found_words = []

            # Pass 1: verbatim substring match.
            for word in self.sensitive_words:
                if word in text_lower:
                    found_words.append(word)

            # Pass 2: fuzzy match against normalized text (handles
            # spacing, punctuation and character-substitution tricks).
            found_words.extend(self._fuzzy_match(text_lower))

            # De-duplicate; sort so the result order is deterministic
            # (set iteration order is not).
            found_words = sorted(set(found_words))

            confidence = self._calculate_confidence(text, found_words)

            result = SensitiveWordResult(
                has_sensitive=len(found_words) > 0,
                sensitive_words=found_words,
                confidence=confidence
            )

            if found_words:
                audit_logger.info(f"检测到敏感词: {found_words}")

            return result

        except Exception as e:
            audit_logger.error(f"敏感词检测失败: {e}")
            return SensitiveWordResult(
                has_sensitive=False,
                sensitive_words=[],
                confidence=0.0
            )

    def _normalize(self, s: str) -> str:
        """Map obfuscation characters to a canonical form (e.g. '4'->'a', '一'->'1'->'i')."""
        return s.translate(self._CN_DIGIT_TABLE).translate(self._LEET_TABLE)

    def _fuzzy_match(self, text: str) -> List[str]:
        """Fuzzy matching that catches obfuscated variants of known words.

        Rather than enumerating every variant of every word (the naive
        approach is exponential in the number of substitution rules:
        ~3^16 strings per word with the old rule set), both the text and
        each word are mapped to one canonical form and compared with a
        plain substring test — O(len) per word, same matching intent.
        """
        # Normalize substitutions first, THEN strip separators, so that
        # e.g. 'b@d' becomes 'bad' instead of losing the '@' entirely.
        cleaned_text = re.sub(r'[^\w\u4e00-\u9fff]', '', self._normalize(text))

        found_words = []
        for word in self.sensitive_words:
            if self._normalize(word) in cleaned_text:
                found_words.append(word)

        return found_words

    def _calculate_confidence(self, text: str, found_words: List[str]) -> float:
        """Heuristic confidence for a detection result.

        Returns 1.0 when nothing was found ("confidently clean");
        otherwise a value in [0.8, 1.0] that grows with the number of
        matches and the fraction of the text they cover.
        """
        if not found_words:
            return 1.0

        # Base confidence for any hit at all.
        base_confidence = 0.8

        # Small boost per matched word, capped at +0.2.
        word_count_factor = min(len(found_words) * 0.1, 0.2)

        # Boost by the share of text covered by matches, capped at +0.2.
        total_chars = len(text)
        sensitive_chars = sum(len(word) for word in found_words)
        ratio_factor = min(sensitive_chars / total_chars * 0.5, 0.2) if total_chars > 0 else 0

        confidence = base_confidence + word_count_factor + ratio_factor
        return min(confidence, 1.0)

    def add_sensitive_word(self, word: str):
        """Add a sensitive word (stored lower-cased, whitespace stripped)."""
        word = word.strip()
        # Guard: an empty word would match every text ('' in s is always True).
        if not word:
            return
        self.sensitive_words.add(word.lower())
        audit_logger.info(f"添加敏感词: {word}")

    def remove_sensitive_word(self, word: str):
        """Remove a sensitive word; silently a no-op if it is absent."""
        self.sensitive_words.discard(word.strip().lower())
        audit_logger.info(f"移除敏感词: {word}")

    def get_word_count(self) -> int:
        """Number of currently loaded sensitive words."""
        return len(self.sensitive_words)


# Module-level singleton detector instance.
# NOTE: constructing it here reads the word-list file at import time.
sensitive_word_detector = SensitiveWordDetector()
