from typing import List, Dict, Any, Tuple
import numpy as np
import time
from datetime import datetime
import traceback

from app.config.config import settings
from app.tools.redis_client import redis_client
from app.schemas.drpa_base_schema import (
    AlgorithmResponse, OutputParams, TaskStatus, 
    AlgorithmMiddleResponse, RiskLevel, WindowAssessment,
    DRPAOutputParams
)
from app.utils.logger import logger

class DRPAService:
    """Dynamic Risk Pattern Analysis (DRPA) service.

    Scores data flows for risk across fixed-size time windows, flags flows
    whose score exceeds a caller-supplied threshold, and reports intermediate
    task progress to Redis.  ``run_drpa`` is the entry point: it partitions
    the input flows into windows of ``WINDOW_SIZE``, scores each window via
    ``analyze_window`` / ``calculate_risk_score``, and aggregates everything
    into a ``DRPAOutputParams`` payload.
    """

    # Number of flows grouped into a single analysis window in run_drpa().
    WINDOW_SIZE = 10
    # Fallback for any sensitivity weight the caller did not supply; equal
    # weighting over the four factors keeps the combined score within [0, 1].
    DEFAULT_WEIGHT = 0.25

    def __init__(self):
        # Lower-bound score for each risk level; get_risk_level() checks these
        # from CRITICAL down to MEDIUM — anything below MEDIUM maps to LOW.
        self.risk_thresholds = {
            RiskLevel.LOW: 0.3,
            RiskLevel.MEDIUM: 0.5,
            RiskLevel.HIGH: 0.7,
            RiskLevel.CRITICAL: 0.9
        }

    async def update_task_status(self, task_id: str, task_data: dict, progress: int, logs: str):
        """Persist an intermediate RUNNING status for *task_id* to Redis.

        Args:
            task_id: Identifier of the task being updated.
            task_data: Original task record; only ``task_callback_url`` and
                ``input_params`` are read from it.
            progress: Completion percentage (0-100).
            logs: Human-readable progress message.

        Raises:
            Exception: Re-raises whatever the Redis update raised, after
                logging it.
        """
        try:
            response = AlgorithmMiddleResponse(
                task_id=task_id,
                task_callback_url=task_data.get("task_callback_url"),
                task_status=TaskStatus.RUNNING,
                task_progress=progress,
                task_logs=logs,
                input_params=task_data.get("input_params"),
                metrics=[]
            )
            await redis_client.update_data(f'{task_id}_result', response.model_dump())
            logger.info(f"任务状态更新成功: {task_id}, 进度: {progress}%, 日志: {logs}")
        except Exception as e:
            logger.error(f"任务状态更新失败: {task_id}, 错误: {str(e)}")
            raise

    def calculate_risk_score(self, flow: Dict[str, Any], weights: Dict[str, float]) -> float:
        """Compute the weighted risk score of a single data flow.

        Four factors are each normalised to [0, 1] and combined with the
        caller-supplied weights.  Unknown categorical values fall back to the
        lowest score; missing weights fall back to ``DEFAULT_WEIGHT``.  (The
        previous implementation used bare ``weights[...]`` lookups and raised
        ``KeyError`` whenever the caller passed a partial — or, via
        ``run_drpa``'s default, empty — weights mapping.)

        Args:
            flow: Data-flow record; reads ``data_type``, ``access_frequency``,
                ``user_privilege`` and ``historical_incidents``.
            weights: Mapping of factor name to weight.

        Returns:
            Weighted risk score; lies in [0, 1] when the weights sum to 1.
        """
        # Sensitivity of the data being moved.
        data_type_score = {
            "public": 0.1,
            "internal": 0.4,
            "confidential": 0.7,
            "secret": 1.0
        }.get(flow.get("data_type", "public"), 0.1)

        # Access frequency, normalised against 100 accesses.
        freq_score = min(1.0, flow.get("access_frequency", 0) / 100)

        # Privilege level of the accessing user.
        privilege_score = {
            "guest": 0.1,
            "user": 0.4,
            "admin": 0.7,
            "super_admin": 1.0
        }.get(flow.get("user_privilege", "guest"), 0.1)

        # Historical incident count, normalised against 10 events.
        incident_score = min(1.0, flow.get("historical_incidents", 0) / 10)

        def weight_of(name: str) -> float:
            # Tolerate partial/empty weight mappings instead of raising.
            return weights.get(name, self.DEFAULT_WEIGHT)

        return (
            weight_of("data_type") * data_type_score +
            weight_of("access_frequency") * freq_score +
            weight_of("user_privilege") * privilege_score +
            weight_of("historical_incidents") * incident_score
        )

    def get_risk_level(self, risk_score: float) -> RiskLevel:
        """Map a numeric risk score to a RiskLevel via ``risk_thresholds``."""
        if risk_score >= self.risk_thresholds[RiskLevel.CRITICAL]:
            return RiskLevel.CRITICAL
        elif risk_score >= self.risk_thresholds[RiskLevel.HIGH]:
            return RiskLevel.HIGH
        elif risk_score >= self.risk_thresholds[RiskLevel.MEDIUM]:
            return RiskLevel.MEDIUM
        else:
            return RiskLevel.LOW

    def analyze_window(
        self,
        flows: List[Dict[str, Any]],
        window_id: int,
        start_time: int,
        end_time: int,
        weights: Dict[str, float],
        risk_threshold: float
    ) -> WindowAssessment:
        """Score one time window's flows and build its assessment.

        Args:
            flows: Data flows belonging to this window.
            window_id: Sequential index of the window.
            start_time: Window start as a Unix timestamp (seconds).
            end_time: Window end as a Unix timestamp (seconds).
            weights: Sensitivity weights forwarded to calculate_risk_score().
            risk_threshold: Flows scoring at or above this value are flagged.

        Returns:
            WindowAssessment with the window's mean risk score, its derived
            level, the flagged flow ids and suggested mitigation actions.
        """
        flagged_flows = []
        window_scores = []

        for flow in flows:
            risk_score = self.calculate_risk_score(flow, weights)
            window_scores.append(risk_score)

            if risk_score >= risk_threshold:
                flagged_flows.append(flow.get("flow_id"))

        # float() keeps a plain Python float (not np.float64) in the model.
        avg_risk_score = float(np.mean(window_scores)) if window_scores else 0.0
        risk_level = self.get_risk_level(avg_risk_score)

        mitigation_actions = self.generate_mitigation_actions(risk_level, len(flagged_flows))

        return WindowAssessment(
            window_id=window_id,
            start_time=start_time,
            end_time=end_time,
            risk_score=avg_risk_score,
            risk_level=risk_level,
            flagged_flows=flagged_flows,
            mitigation_actions=mitigation_actions
        )

    def generate_mitigation_actions(self, risk_level: RiskLevel, num_flagged: int) -> List[str]:
        """Return mitigation suggestions for a risk level and flagged count.

        Args:
            risk_level: Aggregated risk level of the window (or overall run).
            num_flagged: Number of flagged flows; above 10 an extra model
                re-tuning suggestion is appended.
        """
        actions = []

        if risk_level == RiskLevel.CRITICAL:
            actions.extend([
                "立即阻断可疑数据流",
                "触发安全团队紧急响应",
                "开启全系统日志审计"
            ])
        elif risk_level == RiskLevel.HIGH:
            actions.extend([
                "增加数据流监控频率",
                "对可疑流量进行深度包检测",
                "通知安全团队进行评估"
            ])
        elif risk_level == RiskLevel.MEDIUM:
            actions.extend([
                "标记可疑数据流进行追踪",
                "增加采样检测频率"
            ])
        else:
            actions.append("保持常规监控")

        if num_flagged > 10:
            actions.append("建议更新风险评估模型参数")

        return actions

    async def run_drpa(self, input_params: dict, task_id: str) -> AlgorithmResponse:
        """Run the DRPA algorithm end to end for one task.

        Partitions ``data_flows`` into windows of ``WINDOW_SIZE``, assesses
        each window, aggregates the results, and publishes progress to Redis
        along the way.  On failure a FAILED status is written to Redis and
        the original exception is re-raised.

        Args:
            input_params: Request payload; ``drpa_params`` holds the flows,
                window length, threshold and sensitivity weights.
            task_id: Identifier under which status/results are stored.

        Returns:
            AlgorithmResponse with COMPLETED status and DRPA output params.
        """
        task_data = await redis_client.get_data(task_id)

        try:
            drpa_params = input_params.get("drpa_params", {})
            data_flows = drpa_params.get("data_flows", [])
            time_window = drpa_params.get("time_window", 60)
            risk_threshold = drpa_params.get("risk_threshold", 0.7)
            sensitivity_weights = drpa_params.get("sensitivity_weights", {})

            await self.update_task_status(task_id, task_data, 10, "开始运行DRPA算法...")

            # Windows are anchored at the current time; each subsequent window
            # is shifted by one `time_window` length.
            current_time = int(time.time())
            window_assessments = []

            num_windows = len(data_flows) // self.WINDOW_SIZE + 1
            for i in range(num_windows):
                start_idx = i * self.WINDOW_SIZE
                end_idx = min((i + 1) * self.WINDOW_SIZE, len(data_flows))
                window_flows = data_flows[start_idx:end_idx]

                if not window_flows:
                    continue

                window_start = current_time + i * time_window
                window_end = window_start + time_window

                assessment = self.analyze_window(
                    window_flows,
                    i,
                    window_start,
                    window_end,
                    sensitivity_weights,
                    risk_threshold
                )
                window_assessments.append(assessment)

                # Progress spans 30% -> 90% across the windows.  The original
                # divided by len(data_flows), which made the reported progress
                # stall just above 30% for any non-trivial input.
                await self.update_task_status(
                    task_id,
                    task_data,
                    30 + ((i + 1) * 60 // num_windows),
                    f"完成第{i+1}个时间窗口分析"
                )

            # Guard against empty input: np.mean([]) returns NaN and emits a
            # RuntimeWarning, which would otherwise leak into the response.
            overall_risk_score = (
                float(np.mean([wa.risk_score for wa in window_assessments]))
                if window_assessments else 0.0
            )

            # Windows at HIGH or CRITICAL level are surfaced as patterns.
            high_risk_patterns = [
                {
                    "window_id": wa.window_id,
                    "risk_score": wa.risk_score,
                    "num_flagged": len(wa.flagged_flows)
                }
                for wa in window_assessments
                if wa.risk_level in [RiskLevel.HIGH, RiskLevel.CRITICAL]
            ]

            # Overall recommendations use the aggregate score and the total
            # flagged-flow count across all windows.
            recommended_actions = self.generate_mitigation_actions(
                self.get_risk_level(overall_risk_score),
                sum(len(wa.flagged_flows) for wa in window_assessments)
            )

            drpa_output = DRPAOutputParams(
                assessments=window_assessments,
                overall_risk_score=overall_risk_score,
                high_risk_patterns=high_risk_patterns,
                recommended_actions=recommended_actions
            )

            return AlgorithmResponse(
                task_id=task_id,
                task_callback_url=task_data.get("task_callback_url"),
                task_status=TaskStatus.COMPLETED,
                task_progress=100,
                output_params=OutputParams(drpa_results=drpa_output),
                metrics=[]
            )

        except Exception as e:
            error_msg = f"错误: {str(e)}\n{traceback.format_exc()}"
            logger.error(error_msg)

            # Best-effort: record the failure in Redis, then re-raise so the
            # caller still sees the original exception.
            error_response = AlgorithmMiddleResponse(
                task_id=task_id,
                task_callback_url=task_data.get("task_callback_url"),
                task_status=TaskStatus.FAILED,
                task_progress=0,
                task_logs=error_msg,
                input_params=task_data.get("input_params"),
                metrics=[]
            )
            await redis_client.update_data(f'{task_id}_result', error_response.model_dump())
            raise