import asyncio
from typing import Dict, Any, Optional, List, Union
from datetime import datetime
from enum import Enum
import uuid
import copy

from ..models.audit_models import (
    AuditRequest, AuditResult, AuditDecision, AuditorType, 
    AIAnalysisResult, RuleResult, AuditLog
)
from ..utils.logger import get_logger
from .simple_classroom_rules import SimpleClassroomRuleEngine
from .custom_standards import StandardManager, CustomRule
from .ai_analyzer import AIAnalyzer, AIAnalyzerFactory

logger = get_logger(__name__)


class AuditStrategy(str, Enum):
    """Audit strategies controlling which pipeline stages run."""
    FAST = "fast"          # Fast audit: rule checks only
    STANDARD = "standard"  # Standard audit: rule checks + AI analysis
    STRICT = "strict"      # Strict audit: rules + AI analysis + manual-review flagging
    AI_ONLY = "ai_only"    # AI-only audit: AI analysis, no rule engine


class AuditStatus(str, Enum):
    """Lifecycle states of a single audit request."""
    PENDING = "pending"        # queued, not yet started
    PROCESSING = "processing"  # currently being evaluated
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # aborted due to an error
    CANCELLED = "cancelled"    # cancelled before completion


class AuditWorkflowConfig:
    """Configuration for the audit workflow.

    Reads the AI provider API key from the environment (optionally loading
    a ``.env`` file when ``python-dotenv`` is installed) and exposes
    feature toggles, decision thresholds, and retry settings with
    sensible defaults.
    """
    
    def __init__(self):
        import os
        
        # Load environment variables from a .env file when python-dotenv is
        # available; degrade gracefully to the plain process environment
        # instead of crashing on the optional dependency.
        try:
            from dotenv import load_dotenv
            load_dotenv()
        except ImportError:
            logger.warning("python-dotenv 未安装，跳过 .env 文件加载")
        
        # Feature toggles
        self.enable_ai_analysis = True
        self.enable_rule_engine = True
        self.ai_fallback_enabled = True  # fall back to the rule engine if AI fails
        self.max_processing_time_seconds = 300  # hard cap on processing time
        self.parallel_processing = True  # run rule checks and AI analysis concurrently
        
        # Score thresholds driving the final decision
        self.auto_approve_threshold = 80.0
        self.auto_reject_threshold = 30.0
        self.manual_review_threshold = 50.0
        
        # AI configuration - read the API key from the environment.
        # SECURITY: never log any portion of the key itself; a key prefix
        # is enough to identify (and partially expose) the credential.
        api_key = os.getenv("OPENAI_API_KEY")
        if api_key:
            logger.info("成功加载API密钥")
        else:
            logger.warning("未找到OPENAI_API_KEY环境变量")
            
        self.ai_provider_config = {
            "provider": "openai",
            "api_key": api_key,
            "model": "qwen-max-latest"  # latest Qwen (Tongyi Qianwen) model
        }
        
        # Retry policy
        self.max_retries = 3
        self.retry_delay_seconds = 1


class AuditWorkflow:
    """Main audit workflow engine.

    Orchestrates rule-engine checks and AI analysis according to an
    :class:`AuditStrategy`, combines their scores into a decision, and
    tracks per-request state in ``self.active_audits``.
    """
    
    def __init__(self, config: Optional[AuditWorkflowConfig] = None):
        """Initialize the engine; a fresh default config is built when *config* is None."""
        self.config = config or AuditWorkflowConfig()
        self.rule_engine = SimpleClassroomRuleEngine()
        self.standard_manager = StandardManager()
        self.ai_analyzer = self._initialize_ai_analyzer()
        
        # Per-request bookkeeping (status, timings, errors) keyed by request_id.
        self.active_audits: Dict[str, Dict[str, Any]] = {}
        
        logger.info("审核工作流引擎初始化完成")
    
    def _initialize_ai_analyzer(self) -> Optional[AIAnalyzer]:
        """Create the AI analyzer; return None when disabled or creation fails."""
        try:
            if self.config.enable_ai_analysis:
                return AIAnalyzerFactory.create_analyzer(self.config.ai_provider_config)
            return None
        except Exception as e:
            # Degrade gracefully: the workflow can still run rules-only.
            logger.error(f"AI分析器初始化失败: {str(e)}")
            return None
    
    async def audit_request(self, request: AuditRequest, 
                          strategy: AuditStrategy = AuditStrategy.STANDARD,
                          standard_id: Optional[str] = None,
                          original_request: Any = None) -> AuditResult:
        """Run a single audit request under the given strategy.

        Args:
            request: Normalized audit request.
            strategy: Pipeline to run (FAST / STANDARD / STRICT / AI_ONLY).
            standard_id: Optional custom-standard id recorded on the result.
            original_request: Raw request object handed to the rule engine
                when available (falls back to *request*).

        Returns:
            The AuditResult. On any internal failure an error result with
            decision PENDING is returned rather than raising.
        """
        start_time = datetime.now()
        request_id = request.request_id
        
        # Record that this audit has started.
        self.active_audits[request_id] = {
            "status": AuditStatus.PROCESSING,
            "strategy": strategy,
            "start_time": start_time
        }
        
        try:
            logger.info(f"开始审核请求 {request_id}, 策略: {strategy}")
            
            # Dispatch to the strategy-specific pipeline.
            if strategy == AuditStrategy.FAST:
                result = await self._fast_audit(request, standard_id, original_request)
            elif strategy == AuditStrategy.STANDARD:
                result = await self._standard_audit(request, standard_id, original_request)
            elif strategy == AuditStrategy.STRICT:
                result = await self._strict_audit(request, standard_id, original_request)
            elif strategy == AuditStrategy.AI_ONLY:
                result = await self._ai_only_audit(request)
            else:
                raise ValueError(f"不支持的审核策略: {strategy}")
            
            # Mark completion.
            self.active_audits[request_id]["status"] = AuditStatus.COMPLETED
            self.active_audits[request_id]["end_time"] = datetime.now()
            
            logger.info(f"审核完成 {request_id}, 决策: {result.decision}, 得分: {result.final_score}")
            return result
            
        except Exception as e:
            # Mark failure; never propagate — callers always get a result.
            self.active_audits[request_id]["status"] = AuditStatus.FAILED
            self.active_audits[request_id]["error"] = str(e)
            
            logger.error(f"审核失败 {request_id}: {str(e)}")
            
            # Return a PENDING error result for manual handling.
            return self._create_error_result(request, str(e), start_time)
        
        finally:
            # Entries are intentionally kept for later status queries
            # (see cleanup_completed_audits); record wall-clock time.
            processing_time = (datetime.now() - start_time).total_seconds() * 1000
            self.active_audits[request_id]["processing_time_ms"] = processing_time
    
    async def _fast_audit(self, request: AuditRequest, 
                         standard_id: Optional[str] = None,
                         original_request: Any = None) -> AuditResult:
        """Fast audit: rule engine only, no AI analysis."""
        start_time = datetime.now()
        
        # Run the rule checks (prefer the raw request object when given).
        rule_results = self.rule_engine.execute_rules(original_request or request)
        
        # Score from rules alone.
        final_score = self.rule_engine.calculate_total_score(rule_results)
        
        # Decide based on score only (no AI input).
        decision = self._make_decision(final_score, None)
        
        # Human-readable reasons from rule outcomes.
        reasons = self._generate_reasons(rule_results, None)
        
        processing_time = (datetime.now() - start_time).total_seconds() * 1000
        
        return AuditResult(
            request_id=request.request_id,
            decision=decision,
            final_score=final_score,
            confidence_level=0.8,  # rule-engine results are considered fairly reliable
            reasons=reasons,
            rule_results=rule_results,
            ai_analysis=None,
            auditor_type=AuditorType.RULE_ENGINE,
            standard_used=standard_id,
            processing_time_ms=processing_time
        )
    
    async def _standard_audit(self, request: AuditRequest, 
                            standard_id: Optional[str] = None,
                            original_request: Any = None) -> AuditResult:
        """Standard audit: rule engine + AI analysis, combined score."""
        start_time = datetime.now()
        
        # Run rule checks and AI analysis concurrently when enabled.
        if self.config.parallel_processing and self.ai_analyzer:
            rule_task = asyncio.create_task(self._execute_rules_async(original_request or request))
            ai_task = asyncio.create_task(self._execute_ai_analysis_async(request))
            
            rule_results, ai_analysis = await asyncio.gather(
                rule_task, ai_task, return_exceptions=True
            )
            
            # gather(return_exceptions=True) yields exceptions in-band;
            # neutralize each side independently so one failure doesn't
            # sink the whole audit.
            if isinstance(rule_results, Exception):
                logger.error(f"规则检查异常: {str(rule_results)}")
                rule_results = []
            
            if isinstance(ai_analysis, Exception):
                logger.error(f"AI分析异常: {str(ai_analysis)}")
                ai_analysis = None
        else:
            # Sequential fallback.
            rule_results = self.rule_engine.execute_rules(original_request or request)
            
            ai_analysis = None
            if self.ai_analyzer:
                try:
                    ai_analysis = await self.ai_analyzer.analyze_request(request)
                except Exception as e:
                    logger.error(f"AI分析失败: {str(e)}")
        
        # Weighted combination of rule and AI scores.
        final_score = self._calculate_combined_score(rule_results, ai_analysis)
        
        # Decision considers both the score and high-confidence AI advice.
        decision = self._make_decision(final_score, ai_analysis)
        
        # Human-readable reasons from both sources.
        reasons = self._generate_reasons(rule_results, ai_analysis)
        
        # Blend rule and AI confidence into one level.
        confidence_level = self._calculate_confidence(rule_results, ai_analysis)
        
        processing_time = (datetime.now() - start_time).total_seconds() * 1000
        
        return AuditResult(
            request_id=request.request_id,
            decision=decision,
            final_score=final_score,
            confidence_level=confidence_level,
            reasons=reasons,
            rule_results=rule_results,
            ai_analysis=ai_analysis,
            auditor_type=AuditorType.HYBRID,
            standard_used=standard_id,
            processing_time_ms=processing_time
        )
    
    async def _strict_audit(self, request: AuditRequest, 
                          standard_id: Optional[str] = None,
                          original_request: Any = None) -> AuditResult:
        """Strict audit: standard audit plus manual-review escalation."""
        # Run the standard pipeline first.
        result = await self._standard_audit(request, standard_id, original_request)
        
        # Escalate to manual review based on score/risk heuristics.
        needs_manual_review = self._should_require_manual_review(result)
        
        if needs_manual_review:
            # Override the decision: a human must review this request.
            result.decision = AuditDecision.PENDING
            result.reasons.append("根据严格审核策略，需要人工复核")
            
            # Attach guidance for the human reviewer.
            manual_review_suggestions = self._generate_manual_review_suggestions(result)
            if result.suggestions is None:
                result.suggestions = []
            result.suggestions.extend(manual_review_suggestions)
        
        return result
    
    async def _ai_only_audit(self, request: AuditRequest) -> AuditResult:
        """AI-only audit: no rule engine; raises if the analyzer is unavailable."""
        start_time = datetime.now()
        
        if not self.ai_analyzer:
            raise ValueError("AI分析器未初始化，无法执行AI审核")
        
        try:
            ai_analysis = await self.ai_analyzer.analyze_request(request)
        except Exception as e:
            logger.error(f"AI审核失败: {str(e)}")
            return self._create_error_result(request, f"AI审核失败: {str(e)}", start_time)
        
        # Derive score and decision purely from the AI analysis.
        final_score = self._ai_score_to_audit_score(ai_analysis)
        decision = self._ai_recommendation_to_decision(ai_analysis.ai_recommendation)
        
        reasons = [ai_analysis.ai_reasoning]
        if ai_analysis.risk_assessment:
            risk_factors = ai_analysis.risk_assessment.get("risk_factors", [])
            reasons.extend([f"风险因素: {factor}" for factor in risk_factors])
        
        processing_time = (datetime.now() - start_time).total_seconds() * 1000
        
        return AuditResult(
            request_id=request.request_id,
            decision=decision,
            final_score=final_score,
            confidence_level=ai_analysis.confidence_scores.get("overall_confidence", 0.5),
            reasons=reasons,
            rule_results=[],
            ai_analysis=ai_analysis,
            auditor_type=AuditorType.AI_SYSTEM,
            processing_time_ms=processing_time
        )
    
    async def _execute_rules_async(self, request: Any) -> List:
        """Run the (synchronous) rule engine in a worker thread."""
        return await asyncio.to_thread(self.rule_engine.execute_rules, request)
    
    async def _execute_ai_analysis_async(self, request: AuditRequest) -> Optional[AIAnalysisResult]:
        """Run AI analysis; returns None when no analyzer is configured."""
        if not self.ai_analyzer:
            return None
        return await self.ai_analyzer.analyze_request(request)
    
    def _calculate_combined_score(self, rule_results: List[RuleResult], 
                                ai_analysis: Optional[AIAnalysisResult]) -> float:
        """Combine rule and AI scores into one 0-100 value."""
        # Rule-engine score.
        rule_score = self.rule_engine.calculate_total_score(rule_results)
        
        # AI score; neutral midpoint when no analysis is available.
        ai_score = 50.0  # neutral default
        if ai_analysis:
            ai_score = self._ai_score_to_audit_score(ai_analysis)
        
        # Weighting (could be made configurable).
        rule_weight = 0.6
        ai_weight = 0.4
        
        # If AI analysis failed, rules carry the full weight.
        if not ai_analysis:
            rule_weight = 1.0
            ai_weight = 0.0
        
        combined_score = rule_score * rule_weight + ai_score * ai_weight
        
        # Clamp to the 0-100 range.
        return max(0, min(100, combined_score))
    
    def _ai_score_to_audit_score(self, ai_analysis: AIAnalysisResult) -> float:
        """Map an AI recommendation (plus confidence) to a numeric audit score."""
        recommendation = ai_analysis.ai_recommendation.lower()
        
        score_map = {
            "approve": 85.0,
            "conditional": 65.0,
            "reject": 25.0,
            "manual_review": 50.0
        }
        
        base_score = score_map.get(recommendation, 50.0)
        
        # Low-confidence recommendations are pulled toward the neutral 50.
        confidence = ai_analysis.confidence_scores.get("overall_confidence", 0.5)
        if confidence < 0.5:
            # 70/30 blend toward neutral.
            base_score = base_score * 0.7 + 50.0 * 0.3
        
        return base_score
    
    def _make_decision(self, final_score: float, 
                      ai_analysis: Optional[AIAnalysisResult]) -> AuditDecision:
        """Derive the audit decision from the combined score and AI advice."""
        
        # High-confidence AI approve/reject recommendations take precedence
        # over the score thresholds.
        if ai_analysis and hasattr(ai_analysis, 'ai_recommendation'):
            ai_rec = ai_analysis.ai_recommendation.lower()
            confidence = ai_analysis.confidence_scores.get("overall_confidence", 0.5)
            
            # Only trust the AI outright above 0.8 confidence.
            if confidence > 0.8:
                if ai_rec == "approve":
                    return AuditDecision.APPROVED
                elif ai_rec == "reject":
                    return AuditDecision.REJECTED
        
        # Otherwise decide by threshold bands:
        # >= approve threshold -> APPROVED; <= reject threshold -> REJECTED;
        # >= manual-review threshold -> CONDITIONAL; else PENDING.
        if final_score >= self.config.auto_approve_threshold:
            return AuditDecision.APPROVED
        elif final_score <= self.config.auto_reject_threshold:
            return AuditDecision.REJECTED
        elif final_score >= self.config.manual_review_threshold:
            return AuditDecision.CONDITIONAL
        else:
            return AuditDecision.PENDING
    
    def _generate_reasons(self, rule_results: List[RuleResult], 
                         ai_analysis: Optional[AIAnalysisResult]) -> List[str]:
        """Build the human-readable reason list from rule and AI outcomes."""
        reasons = []
        
        # One line per rule: ✓ for matched, ✗ for not matched.
        for rule_result in rule_results:
            if rule_result.matched:
                reasons.append(f"✓ {rule_result.message}")
            else:
                reasons.append(f"✗ {rule_result.message}")
        
        # AI reasoning and top risk factors, when available.
        if ai_analysis:
            if ai_analysis.ai_reasoning:
                reasons.append(f"AI分析: {ai_analysis.ai_reasoning}")
            
            # Cap risk factors to keep the list readable.
            if ai_analysis.risk_assessment:
                risk_factors = ai_analysis.risk_assessment.get("risk_factors", [])
                for factor in risk_factors[:3]:  # show at most 3 risk factors
                    reasons.append(f"风险: {factor}")
        
        return reasons
    
    def _calculate_confidence(self, rule_results: List[RuleResult], 
                            ai_analysis: Optional[AIAnalysisResult]) -> float:
        """Average rule-engine and AI confidence into one level (0.5 when neither exists)."""
        confidences = []
        
        # Rule confidence heuristic: fraction of rules with a positive
        # execution time, scaled by a 0.9 baseline.
        # NOTE(review): execution_time_ms > 0 is used as a proxy for
        # "rule executed successfully" — confirm against the rule engine.
        if rule_results:
            successful_rules = sum(1 for r in rule_results if r.execution_time_ms > 0)
            rule_confidence = successful_rules / len(rule_results) * 0.9  # 0.9 baseline for rules
            confidences.append(rule_confidence)
        
        # AI-reported overall confidence.
        if ai_analysis:
            ai_confidence = ai_analysis.confidence_scores.get("overall_confidence", 0.5)
            confidences.append(ai_confidence)
        
        if not confidences:
            return 0.5
        
        return sum(confidences) / len(confidences)
    
    def _should_require_manual_review(self, result: AuditResult) -> bool:
        """Return True when the strict strategy should escalate to a human."""
        # Low overall confidence.
        if result.confidence_level < 0.6:
            return True
        
        # Score in the ambiguous middle band.
        if 40 <= result.final_score <= 70:
            return True
        
        # AI flagged overall risk as high.
        if result.ai_analysis and result.ai_analysis.risk_assessment:
            overall_risk = result.ai_analysis.risk_assessment.get("overall_risk_level", "medium")
            if overall_risk == "high":
                return True
        
        # Conflicting rule outcomes (some matched, some not).
        passed_rules = sum(1 for r in result.rule_results if r.matched)
        failed_rules = sum(1 for r in result.rule_results if not r.matched)
        if passed_rules > 0 and failed_rules > 0:
            return True
        
        return False
    
    def _generate_manual_review_suggestions(self, result: AuditResult) -> List[str]:
        """Build guidance bullet points for the human reviewer."""
        suggestions = []
        
        suggestions.append("请重点关注以下方面：")
        
        # Score-band guidance.
        if result.final_score < 50:
            suggestions.append("- 申请存在多个问题，建议详细核查")
        elif result.final_score > 70:
            suggestions.append("- 申请基本符合要求，可考虑批准")
        else:
            suggestions.append("- 申请存在争议，需要综合判断")
        
        # Highlight the top AI-identified risk factors.
        if result.ai_analysis and result.ai_analysis.risk_assessment:
            risk_factors = result.ai_analysis.risk_assessment.get("risk_factors", [])
            if risk_factors:
                suggestions.append(f"- 注意风险因素: {', '.join(risk_factors[:2])}")
        
        # Call out failed rules whose name marks them as critical.
        failed_critical_rules = [
            r for r in result.rule_results 
            if not r.matched and "critical" in r.rule_name.lower()
        ]
        if failed_critical_rules:
            suggestions.append("- 关键规则未通过，请特别注意")
        
        return suggestions
    
    def _ai_recommendation_to_decision(self, ai_recommendation: str) -> AuditDecision:
        """Map an AI recommendation string to an AuditDecision (PENDING when unknown)."""
        recommendation_map = {
            "approve": AuditDecision.APPROVED,
            "reject": AuditDecision.REJECTED,
            "conditional": AuditDecision.CONDITIONAL,
            "manual_review": AuditDecision.PENDING
        }
        return recommendation_map.get(ai_recommendation.lower(), AuditDecision.PENDING)
    
    def _create_error_result(self, request: AuditRequest, error_message: str, 
                           start_time: datetime) -> AuditResult:
        """Build a zero-score PENDING result describing an audit failure."""
        processing_time = (datetime.now() - start_time).total_seconds() * 1000
        
        return AuditResult(
            request_id=request.request_id,
            decision=AuditDecision.PENDING,
            final_score=0.0,
            confidence_level=0.0,
            reasons=[f"审核异常: {error_message}", "需要人工处理"],
            rule_results=[],
            auditor_type=AuditorType.AI_SYSTEM,
            processing_time_ms=processing_time,
            metadata={"error": error_message}
        )
    
    async def batch_audit(self, requests: List[AuditRequest], 
                         strategy: AuditStrategy = AuditStrategy.STANDARD,
                         standard_id: Optional[str] = None,
                         max_concurrent: int = 5) -> List[AuditResult]:
        """Audit many requests concurrently, capped at *max_concurrent* in flight.

        Exceptions from individual audits are converted to error results so
        the returned list always matches *requests* in length and order.
        """
        logger.info(f"开始批量审核，共 {len(requests)} 个请求")
        
        # Bound concurrency with a semaphore.
        semaphore = asyncio.Semaphore(max_concurrent)
        
        async def audit_with_semaphore(request: AuditRequest) -> AuditResult:
            async with semaphore:
                return await self.audit_request(request, strategy, standard_id)
        
        # Fan out all audits; collect exceptions in-band.
        tasks = [audit_with_semaphore(request) for request in requests]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # Replace exceptions with error results.
        # NOTE(review): start_time here is datetime.now(), so the error
        # result's processing_time_ms will be ~0 — confirm if the real
        # elapsed time should be threaded through instead.
        final_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error(f"批量审核第 {i} 个请求失败: {str(result)}")
                error_result = self._create_error_result(
                    requests[i], str(result), datetime.now()
                )
                final_results.append(error_result)
            else:
                final_results.append(result)
        
        logger.info(f"批量审核完成，成功: {len([r for r in final_results if r.decision != AuditDecision.PENDING])}")
        return final_results
    
    def get_audit_status(self, request_id: str) -> Optional[Dict[str, Any]]:
        """Return the tracking entry for *request_id*, or None if unknown."""
        return self.active_audits.get(request_id)
    
    def get_active_audits_count(self) -> int:
        """Return how many audits are currently in the PROCESSING state."""
        return len([
            audit for audit in self.active_audits.values() 
            if audit.get("status") == AuditStatus.PROCESSING
        ])
    
    async def cancel_audit(self, request_id: str) -> bool:
        """Mark an audit as cancelled; returns False if the id is unknown.

        NOTE(review): this only flips the tracked status — it does not
        interrupt a pipeline that is already running.
        """
        if request_id in self.active_audits:
            self.active_audits[request_id]["status"] = AuditStatus.CANCELLED
            self.active_audits[request_id]["cancelled_at"] = datetime.now()
            logger.info(f"审核已取消: {request_id}")
            return True
        return False
    
    def cleanup_completed_audits(self, max_age_hours: int = 24):
        """Drop COMPLETED/FAILED tracking entries older than *max_age_hours*."""
        cutoff_time = datetime.now().timestamp() - (max_age_hours * 3600)
        
        to_remove = []
        for request_id, audit_info in self.active_audits.items():
            if audit_info.get("status") in [AuditStatus.COMPLETED, AuditStatus.FAILED]:
                # Fall back to start_time when end_time was never recorded.
                end_time = audit_info.get("end_time", audit_info.get("start_time"))
                if end_time and end_time.timestamp() < cutoff_time:
                    to_remove.append(request_id)
        
        for request_id in to_remove:
            del self.active_audits[request_id]
        
        if to_remove:
            logger.info(f"清理了 {len(to_remove)} 个已完成的审核记录")


class AuditWorkflowManager:
    """Singleton manager providing a shared AuditWorkflow instance.

    All constructions of this class return the same object; the managed
    workflow is created lazily on first use and can be swapped out via
    :meth:`reconfigure_workflow`.
    """
    
    _instance = None   # the sole AuditWorkflowManager instance
    _workflow = None   # lazily created shared AuditWorkflow
    
    def __new__(cls):
        # Classic singleton: create once, then always return the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    
    def get_workflow(self, config: Optional["AuditWorkflowConfig"] = None) -> "AuditWorkflow":
        """Return the shared workflow, creating it on first call.

        NOTE: *config* is honored only when the workflow does not exist
        yet; later calls return the existing workflow unchanged. Use
        :meth:`reconfigure_workflow` to apply a new configuration.
        """
        if self._workflow is None:
            self._workflow = AuditWorkflow(config)
        return self._workflow
    
    def reconfigure_workflow(self, config: "AuditWorkflowConfig") -> "AuditWorkflow":
        """Replace the shared workflow with one built from *config*."""
        self._workflow = AuditWorkflow(config)
        return self._workflow