"""
内容审核引擎
"""
import time
from typing import Dict, Any, List
from datetime import datetime
from config.settings import settings
from utils.logger import audit_logger
from monitoring.metrics import metrics_collector, PerformanceMonitor
from models.schemas import (
    AuditRequest, AuditResponse, AuditResult,
    SensitiveWordResult, TextClassificationResult, 
    SentimentAnalysisResult, ImageAuditResult
)
from algorithms import (
    sensitive_word_detector,
    text_classifier,
    sentiment_analyzer,
    image_auditor
)


class AuditEngine:
    """Content moderation engine.

    Runs up to four checks over an audit request -- sensitive-word
    detection, text classification, sentiment analysis and image
    auditing -- collects per-check details and violation reasons, then
    maps the combined result onto a final AuditResult (PASS / REJECT /
    MANUAL).
    """

    # Keywords that mark a detected sensitive word as a *serious*
    # violation (immediate REJECT). Matching is substring-based in
    # either direction, which also covers exact equality.
    _SERIOUS_KEYWORDS = [
        '政治', '色情', '暴力', '毒品', '诈骗',
        '杀害', '杀人', '抢劫', '爆炸', '吸毒', '海洛因', '大麻',
        '血腥', '自杀', '跳楼', '传销', '骗钱'
    ]

    # Text-classification categories treated as serious violations.
    _SERIOUS_CATEGORIES = ('spam', 'adult', 'violence')

    # Image-audit violation labels treated as serious (substring match).
    _SERIOUS_IMAGE_VIOLATIONS = ('疑似血腥内容', '成人内容')

    def __init__(self):
        # Confidence threshold used by _determine_audit_result when
        # deciding whether multiple violations warrant a REJECT
        # (read once from application settings).
        self.confidence_threshold = settings.confidence_threshold

    def audit_content(self, request: "AuditRequest") -> "AuditResponse":
        """Audit a post and return the aggregated moderation decision.

        Args:
            request: Post to audit (optional title/content text and
                optional list of image URLs).

        Returns:
            AuditResponse carrying the decision, the mean confidence of
            the checks that ran (0.5 when none were applicable),
            human-readable violation reasons and per-check details.
            Never raises: any unexpected exception routes the post to
            MANUAL review with the error recorded in the details.
        """
        start_time = time.time()

        try:
            audit_logger.info(f"开始审核帖子 {request.post_id}")

            audit_details: Dict[str, Any] = {}
            violations: List[str] = []
            # One confidence value per executed check.
            confidences: List[float] = []

            confidences += self._check_sensitive_words(request, audit_details, violations)
            confidences += self._check_text_classification(request, audit_details, violations)
            confidences += self._check_sentiment(request, audit_details, violations)
            confidences += self._check_images(request, audit_details, violations)

            # Mean confidence over the checks that actually ran;
            # neutral 0.5 when no check was applicable.
            overall_confidence = sum(confidences) / len(confidences) if confidences else 0.5

            audit_result = self._determine_audit_result(violations, overall_confidence, audit_details)
            audit_logger.info(f"审核决策: 违规数量={len(violations)}, 置信度={overall_confidence:.2f}, 结果={audit_result}")
            if violations:
                audit_logger.info(f"违规详情: {violations}")

            response = AuditResponse(
                post_id=request.post_id,
                audit_result=audit_result,
                confidence=overall_confidence,
                reasons=violations,
                details=audit_details,
                processed_at=datetime.now()
            )

            # Record decision + latency metrics.
            duration = time.time() - start_time
            metrics_collector.record_audit_result(audit_result.value, duration)

            audit_logger.info(f"帖子 {request.post_id} 审核完成: {audit_result}, 置信度: {overall_confidence:.2f}")

            return response

        except Exception as e:
            # Fail safe: any unexpected error routes the post to manual
            # review instead of propagating.
            audit_logger.error(f"内容审核失败: {e}")

            duration = time.time() - start_time
            metrics_collector.record_audit_result("ERROR", duration)
            metrics_collector.record_error("audit_exception")

            return AuditResponse(
                post_id=request.post_id,
                audit_result=AuditResult.MANUAL,
                confidence=0.0,
                reasons=["审核异常，需要人工处理"],
                details={"error": str(e)},
                processed_at=datetime.now()
            )

    def _check_sensitive_words(self, request: "AuditRequest",
                               audit_details: Dict[str, Any],
                               violations: List[str]) -> List[float]:
        """Sensitive-word check over title + content.

        Mutates audit_details / violations in place; returns the
        confidence contributions (empty when there is no text to check).
        """
        if not (request.content or request.title):
            return []
        full_text = f"{request.title or ''} {request.content or ''}".strip()

        with PerformanceMonitor(metrics_collector).set_algorithm("sensitive_words"):
            result = sensitive_word_detector.detect(full_text)
        audit_details['sensitive_words'] = {
            'has_sensitive': result.has_sensitive,
            'words': result.sensitive_words,
            'confidence': result.confidence
        }

        if result.has_sensitive:
            # Surface at most the first three words in the reason text.
            violations.append(f"包含敏感词: {', '.join(result.sensitive_words[:3])}")
            metrics_collector.record_violation("sensitive_words")
            audit_logger.info(f"检测到敏感词: {result.sensitive_words}")
        else:
            audit_logger.info(f"未检测到敏感词，检查文本: '{full_text}'")

        return [result.confidence]

    def _check_text_classification(self, request: "AuditRequest",
                                   audit_details: Dict[str, Any],
                                   violations: List[str]) -> List[float]:
        """Text-classification check over the post content.

        Mutates audit_details / violations in place; returns the
        confidence contributions (empty when there is no content).
        """
        if not request.content:
            return []

        with PerformanceMonitor(metrics_collector).set_algorithm("text_classification"):
            result = text_classifier.classify(request.content)
        audit_details['text_classification'] = {
            'category': result.category,
            'is_normal': result.is_normal,
            'confidence': result.confidence
        }

        if not result.is_normal:
            category_name = text_classifier.get_category_name(result.category)
            violations.append(f"内容分类: {category_name}")
            metrics_collector.record_violation(result.category)

        return [result.confidence]

    def _check_sentiment(self, request: "AuditRequest",
                         audit_details: Dict[str, Any],
                         violations: List[str]) -> List[float]:
        """Sentiment-analysis check over the post content.

        Mutates audit_details / violations in place; returns the
        confidence contributions (empty when there is no content).
        """
        if not request.content:
            return []

        with PerformanceMonitor(metrics_collector).set_algorithm("sentiment_analysis"):
            result = sentiment_analyzer.analyze(request.content)
        audit_details['sentiment_analysis'] = {
            'sentiment': result.sentiment,
            'score': result.score,
            'confidence': result.confidence
        }

        # Only extreme negativity (score below -0.7) is flagged.
        if result.sentiment == 'negative' and result.score < -0.7:
            violations.append("内容情感过于负面")
            metrics_collector.record_violation("negative_sentiment")

        return [result.confidence]

    def _check_images(self, request: "AuditRequest",
                      audit_details: Dict[str, Any],
                      violations: List[str]) -> List[float]:
        """Image audit over every attached image URL.

        Mutates audit_details / violations in place; returns one
        confidence contribution per audited image.
        """
        if not request.images:
            return []

        confidences: List[float] = []
        image_results = []
        for image_url in request.images:
            with PerformanceMonitor(metrics_collector).set_algorithm("image_audit"):
                result = image_auditor.audit_image(image_url)
            image_results.append({
                'url': image_url,
                'is_safe': result.is_safe,
                'violations': result.violations,
                'confidence': result.confidence
            })

            if not result.is_safe:
                violations.extend(f"图片违规: {v}" for v in result.violations)
                # One metric event per unsafe image, regardless of how
                # many individual violations it carries.
                metrics_collector.record_violation("image_violation")

            confidences.append(result.confidence)

        audit_details['image_audit'] = image_results
        return confidences

    def _determine_audit_result(self, violations: List[str], confidence: float,
                                details: Dict[str, Any]) -> "AuditResult":
        """Map violations + overall confidence to the final decision.

        Decision ladder (first match wins):
          1. no violations                                -> PASS
          2. any serious violation                        -> REJECT
          3. >=3 violations & confidence >= threshold     -> REJECT
          4. confidence >= 0.9                            -> REJECT
          5. confidence < 0.6                             -> MANUAL
          6. >=2 violations                               -> REJECT
          7. exactly one violation                        -> MANUAL
        """
        if not violations:
            return AuditResult.PASS

        if self._check_serious_violations(violations, details):
            return AuditResult.REJECT

        violation_count = len(violations)

        # Multiple violations with high confidence: reject outright.
        if violation_count >= 3 and confidence >= self.confidence_threshold:
            return AuditResult.REJECT

        # At least one violation is guaranteed past the early return, so
        # very high confidence alone justifies rejection.
        if confidence >= 0.9:
            return AuditResult.REJECT

        # Low confidence: defer to a human reviewer.
        if confidence < 0.6:
            return AuditResult.MANUAL

        if violation_count >= 2:
            return AuditResult.REJECT
        # Exactly one violation remains. (The original trailing
        # `else: PASS` branch was unreachable and has been removed.)
        return AuditResult.MANUAL

    def _check_serious_violations(self, violations: List[str], details: Dict[str, Any]) -> bool:
        """Return True when the audit details contain a serious violation.

        NOTE: the decision is based entirely on `details`; `violations`
        is unused but kept for signature compatibility.
        """
        # Serious sensitive words: substring match in either direction
        # (this also covers exact equality, so no separate exact check
        # is needed).
        sensitive = details.get('sensitive_words', {})
        if sensitive.get('has_sensitive', False):
            for word in sensitive.get('words', []):
                if any(kw in word or word in kw for kw in self._SERIOUS_KEYWORDS):
                    return True

        # Serious text-classification categories.
        classification = details.get('text_classification', {})
        if not classification.get('is_normal', True):
            if classification.get('category', '') in self._SERIOUS_CATEGORIES:
                return True

        # Serious image violations (label substring match).
        for image_result in details.get('image_audit', []):
            if image_result.get('is_safe', True):
                continue
            for violation in image_result.get('violations', []):
                if any(s in violation for s in self._SERIOUS_IMAGE_VIOLATIONS):
                    return True

        return False

    def get_audit_statistics(self) -> Dict[str, Any]:
        """Return a snapshot of audit configuration and algorithm status."""
        return {
            'sensitive_words_count': sensitive_word_detector.get_word_count(),
            'confidence_threshold': self.confidence_threshold,
            'algorithms_status': {
                'sensitive_words': True,
                # The classifier is the only algorithm with a loadable
                # model attribute to report on.
                'text_classifier': text_classifier.model is not None,
                'sentiment_analyzer': True,
                'image_auditor': True
            }
        }


# Module-level singleton: importers share this one engine instance
# rather than constructing their own.
audit_engine = AuditEngine()
