import os
import time
import jieba
import logging
import re
from typing import List, Tuple, Dict, Any
from app.db import get_cursor
from app.config import settings

# Module-level logger named after this module, per standard logging convention.
logger = logging.getLogger(__name__)

class SensitiveWordService:
    """Text-moderation service combining several lightweight NLP checks.

    Provides:
      * sensitive-word detection against a file-loaded lexicon,
      * regex-pattern spam/advertising scoring,
      * simple lexicon-based sentiment scoring,
      * a heuristic content-quality score based on length and lexical diversity.
    """

    def __init__(self):
        # In-memory sensitive-word lexicon and the timestamp of its last reload.
        self._sensitive_words = set()
        self._last_refresh_time = 0

        # Load the sensitive-word lexicon from disk (best-effort; errors are logged).
        self._load_from_file()

        # Regex patterns used to flag advertising / spam content.
        # NOTE: patterns are runtime behavior and are kept exactly as authored.
        self.spam_patterns = [
            r'(推广|促销|广告|优惠|打折).*(链接|地址|网址|网站)',
            r'(免费|特价|限时|抢购).*(活动|机会)',
            r'微信.*加.*好友',
            r'联系.*电话',
            r'赚.*钱',
            r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        ]
        # Compile once at construction time; check_text reuses the compiled objects.
        self.compiled_patterns = [re.compile(pattern) for pattern in self.spam_patterns]

        # Try to enable the minimal lexicon-based sentiment analysis.
        self.sentiment_enabled = False
        try:
            # Tiny positive / negative word lexicons used by check_text.
            self.positive_words = set(['喜欢', '好', '棒', '赞', '优秀', '满意', '开心', '高兴',
                                      '快乐', '幸福', '欣赏', '支持', '爱', '推荐', '感谢'])
            self.negative_words = set(['讨厌', '差', '烂', '糟糕', '失望', '不满', '生气', '愤怒',
                                      '痛苦', '难过', '反对', '批评', '恨', '垃圾', '废物'])
            self.sentiment_enabled = True
            logger.info("情感分析功能已启用")
        except Exception as e:
            # Defensive: leaves sentiment_enabled False so check_text skips step 3.
            logger.warning(f"情感分析初始化失败: {str(e)}")

    def _load_from_file(self):
        """Load the sensitive-word lexicon from data/sensitive_words/default.txt.

        Missing file is silently tolerated; read errors are logged. On success
        the lexicon and the refresh timestamp are replaced atomically enough
        for this single-threaded initialization path.
        """
        try:
            default_file = os.path.join('data', 'sensitive_words', 'default.txt')
            if os.path.exists(default_file):
                with open(default_file, 'r', encoding='utf-8') as f:
                    # One word per line; skip blank lines.
                    words = [line.strip() for line in f if line.strip()]
                    self._sensitive_words = set(words)
                    self._last_refresh_time = time.time()
                    logger.info(f"从文件加载了 {len(self._sensitive_words)} 个敏感词")
        except Exception as e:
            logger.error(f"从文件加载敏感词失败: {str(e)}")

    def check_text(self, text: str) -> Dict[str, Any]:
        """Run all moderation checks on *text*.

        Args:
            text: The text to inspect. Empty/None returns the neutral defaults.

        Returns:
            Dict with keys:
              - "sensitive_words": list of (word, count) tuples found in text
              - "spam_score": spam likelihood in [0, 1]
              - "sentiment_score": sentiment in [-1, 1] (negative to positive)
              - "content_quality": quality estimate in [0, 1]
        """
        result = {
            "sensitive_words": [],  # sensitive-word hits
            "spam_score": 0.0,      # spam score (0-1)
            "sentiment_score": 0.0, # sentiment (-1 to 1, negative to positive)
            "content_quality": 1.0  # content quality (0-1)
        }

        if not text:
            return result

        # Tokenize exactly once; the token list is shared by sensitive-word
        # detection, repetition analysis, and sentiment scoring.
        # BUG FIX: previously `words` was only bound inside the sensitive-word
        # branch, so step 3 raised NameError whenever the lexicon was empty.
        words = jieba.lcut(text)

        # 1. Sensitive-word detection.
        if self._sensitive_words:
            word_count = {}

            # Count segmented tokens that are themselves sensitive words.
            for word in words:
                if word in self._sensitive_words:
                    word_count[word] = word_count.get(word, 0) + 1

            # Also catch sensitive words the segmenter split apart, by raw
            # substring search over the original text.
            for sensitive_word in self._sensitive_words:
                if sensitive_word in text and sensitive_word not in word_count:
                    word_count[sensitive_word] = text.count(sensitive_word)

            result["sensitive_words"] = list(word_count.items())

        # 2. Spam detection: count how many precompiled patterns match.
        spam_matches = sum(1 for pattern in self.compiled_patterns
                           if pattern.search(text))

        # Repeated-word frequency table (single characters are ignored).
        word_freq = {}
        for word in words:
            if len(word) > 1:
                word_freq[word] = word_freq.get(word, 0) + 1

        # Repetition score: most frequent multi-char token, normalized to [0, 1].
        repetition_score = 0
        if word_freq:
            repetition_score = min(max(word_freq.values()) / 10.0, 1.0)

        # Combined spam score: 70% pattern matches, 30% repetition.
        if self.compiled_patterns:
            spam_ratio = spam_matches / len(self.compiled_patterns)
        else:
            spam_ratio = 0
        result["spam_score"] = min((spam_ratio * 0.7) + (repetition_score * 0.3), 1.0)

        # 3. Sentiment analysis: (pos - neg) / (pos + neg), in [-1, 1].
        if self.sentiment_enabled:
            try:
                pos_count = 0
                neg_count = 0
                for word in words:
                    if word in self.positive_words:
                        pos_count += 1
                    elif word in self.negative_words:
                        neg_count += 1

                total = max(pos_count + neg_count, 1)  # avoid division by zero
                result["sentiment_score"] = (pos_count - neg_count) / total
            except Exception as e:
                logger.warning(f"情感分析失败: {str(e)}")

        # 4. Content-quality heuristic based on length and lexical diversity.
        if len(text) < 10:
            # BUG FIX: the original assigned a dead local (`quality_score`),
            # leaving short texts at the default 1.0 instead of 0.3.
            result["content_quality"] = 0.3  # text too short
        else:
            unique_words = len(word_freq)
            total_words = sum(word_freq.values())

            if total_words > 0:
                diversity = unique_words / total_words
                # Longer, more lexically diverse text scores higher, capped at 1.0.
                result["content_quality"] = min(0.3 + (len(text) / 500) * 0.3 + diversity * 0.4, 1.0)
            else:
                # No multi-character tokens at all: neutral mid score.
                result["content_quality"] = 0.5

        return result

# Module-level singleton instance shared by importers of this module.
sensitive_word_service = SensitiveWordService() 