"""
分析器访问者

实现AST的分析功能，用于收集工作流的统计信息和性能分析。
"""

import time
from typing import Any, Dict, List, Set, Optional
from collections import defaultdict, Counter
from .base import BaseAstVisitor

# 前向声明，避免循环导入
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..ast.base import Ast, AstContext
    from ..ast.nodes import *


class AnalysisResult:
    """Aggregated results of a workflow AST analysis.

    Plain data holder populated by ``AnalyzerVisitor``: every field starts
    empty/zero and is filled in during traversal and post-processing.
    """

    def __init__(self):
        # --- node statistics ---
        self.node_count: int = 0
        self.node_types: Dict[str, int] = defaultdict(int)
        self.max_depth: int = 0
        self.parallel_nodes: int = 0
        # --- performance & resource estimates ---
        self.estimated_execution_time: float = 0.0
        self.resource_requirements: Dict[str, Any] = {}
        # --- structural information ---
        self.dependencies: Dict[str, List[str]] = defaultdict(list)
        self.critical_path: List[str] = []
        self.bottlenecks: List[Dict[str, Any]] = []
        # --- derived output ---
        self.recommendations: List[str] = []
        self.complexity_score: float = 0.0
        self.metadata: Dict[str, Any] = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain ``dict`` (defaultdicts become ordinary dicts)."""
        serialized: Dict[str, Any] = {}
        serialized["node_count"] = self.node_count
        serialized["node_types"] = dict(self.node_types)  # drop defaultdict factory
        serialized["max_depth"] = self.max_depth
        serialized["parallel_nodes"] = self.parallel_nodes
        serialized["estimated_execution_time"] = self.estimated_execution_time
        serialized["resource_requirements"] = self.resource_requirements
        serialized["dependencies"] = dict(self.dependencies)  # drop defaultdict factory
        serialized["critical_path"] = self.critical_path
        serialized["bottlenecks"] = self.bottlenecks
        serialized["recommendations"] = self.recommendations
        serialized["complexity_score"] = self.complexity_score
        serialized["metadata"] = self.metadata
        return serialized


class AnalyzerVisitor(BaseAstVisitor):
    """
    Analyzer visitor.

    Traverses a workflow AST and collects:
    - node statistics (count, per-type counts, maximum depth)
    - complexity scoring
    - execution-time estimation
    - resource-requirement analysis
    - data/file dependency analysis
    - optimization recommendations
    """

    # Base execution-time estimate per node type, in seconds.
    # Hoisted to class level so the table is built once, not per call.
    _TIME_ESTIMATES: Dict[str, float] = {
        'sequence': 0.1,
        'parallel': 0.2,
        'conditional': 0.1,
        'loop': 1.0,  # base cost; scaled by the iteration count in _node_base_time
        'ffmpeg_slice': 30.0,  # FFmpeg processing is typically slow
        'extract_metadata': 2.0,
        'scan_directory': 1.0,
        'gemini_classify': 5.0,  # AI classification requires a network request
        'output_draft': 1.0,
        'organize_files': 0.5,
        'generate_report': 0.3,
        'file_operation': 0.2,
        'data_transform': 0.1,
    }
    _DEFAULT_NODE_TIME: float = 1.0  # fallback for node types not in the table

    def __init__(self):
        super().__init__()
        self.analysis_result = AnalysisResult()
        self.current_depth = 0
        self.visited_nodes: Set[str] = set()
        # node_id -> *intrinsic* time estimate in seconds (children excluded);
        # summing these values yields the total workflow estimate without
        # counting any subtree more than once.
        self.node_execution_times: Dict[str, float] = {}
        self.node_dependencies: Dict[str, Set[str]] = defaultdict(set)

    def analyze(self, ast: 'Ast', context: 'AstContext') -> AnalysisResult:
        """Analyze *ast* and return a fresh :class:`AnalysisResult`.

        Internal state is reset first, so a single visitor instance can be
        reused across several analyses.
        """
        self.analysis_result = AnalysisResult()
        self.current_depth = 0
        self.visited_nodes.clear()
        self.node_execution_times.clear()
        self.node_dependencies.clear()

        # Walk the tree, then derive totals/recommendations from what we saw.
        self.visit(ast, context)
        self._post_process_analysis()

        return self.analysis_result

    def visit(self, ast: 'Ast', context: 'AstContext') -> Any:
        """Override of the base dispatch that records statistics per node."""
        # Each node is analyzed at most once, even if it is reachable through
        # several parents (shared subtrees).
        if ast.node_id in self.visited_nodes:
            return None

        self.visited_nodes.add(ast.node_id)

        # Global counters.
        self.analysis_result.node_count += 1
        self.analysis_result.node_types[ast.node_type.value] += 1
        self.analysis_result.max_depth = max(self.analysis_result.max_depth, self.current_depth)

        # Per-node analysis (time, resources, dependencies, issues).
        self._analyze_node(ast, context)

        # Children sit one level deeper; restore the depth even if a
        # visit_* handler raises.
        self.current_depth += 1
        try:
            return super().visit(ast, context)
        finally:
            self.current_depth -= 1

    def _analyze_node(self, ast: 'Ast', context: 'AstContext') -> None:
        """Run all per-node analyses for *ast*."""
        # BUG FIX: store only the node's own (intrinsic) cost here.  The old
        # code stored the full recursive subtree estimate for every node, so
        # summing node_execution_times in _post_process_analysis counted each
        # subtree once per ancestor and grossly inflated the total (it also
        # re-walked every subtree once per ancestor).
        self.node_execution_times[ast.node_id] = self._node_base_time(ast)

        self._analyze_resource_requirements(ast)
        self._analyze_dependencies(ast, context)
        self._check_potential_issues(ast)

    def _node_base_time(self, ast: 'Ast') -> float:
        """Estimate the node's own execution time in seconds (children excluded)."""
        base_time = self._TIME_ESTIMATES.get(ast.node_type.value, self._DEFAULT_NODE_TIME)
        # A loop repeats its body, so scale by the configured iteration count.
        if getattr(ast, 'max_iterations', None):
            base_time *= ast.max_iterations
        return base_time

    def _estimate_execution_time(self, ast: 'Ast') -> float:
        """Estimate the total execution time of the subtree rooted at *ast* (seconds).

        Parallel containers contribute only their slowest branch; every other
        container contributes the sum of its children.
        """
        total = self._node_base_time(ast)
        children = getattr(ast, 'children', None)
        if children:
            child_times = [self._estimate_execution_time(child) for child in children]
            if ast.node_type.value == 'parallel':
                total += max(child_times)
            else:
                total += sum(child_times)
        return total

    def _analyze_resource_requirements(self, ast: 'Ast') -> None:
        """Accumulate CPU/memory/network/storage needs implied by *ast*."""
        node_type = ast.node_type.value
        requirements = self.analysis_result.resource_requirements

        # CPU-heavy node types each claim one core.
        if node_type in ('ffmpeg_slice', 'extract_metadata', 'gemini_classify'):
            requirements['cpu_cores'] = requirements.get('cpu_cores', 0) + 1

        # Memory-heavy node types: 512 MB each.
        if node_type in ('ffmpeg_slice', 'extract_metadata'):
            requirements['memory_mb'] = requirements.get('memory_mb', 0) + 512

        # Network access is recorded as a boolean flag.
        if node_type == 'gemini_classify':
            requirements['network'] = True

        # Storage-heavy node types: 1 GB each.
        if node_type in ('ffmpeg_slice', 'output_draft', 'organize_files'):
            requirements['storage_mb'] = requirements.get('storage_mb', 0) + 1024

    def _analyze_dependencies(self, ast: 'Ast', context: 'AstContext') -> None:
        """Record the variables *ast* depends on (``$name`` references)."""
        # Data dependencies: any input value of the form "$var".
        if hasattr(ast, 'inputs'):
            for input_name, input_value in ast.inputs.items():
                if isinstance(input_value, str) and input_value.startswith('$'):
                    self.node_dependencies[ast.node_id].add(input_value[1:])

        # File dependency: an input path of the form "$var".
        if hasattr(ast, 'input_path') and ast.input_path:
            if ast.input_path.startswith('$'):
                self.node_dependencies[ast.node_id].add(ast.input_path[1:])

    def _check_potential_issues(self, ast: 'Ast') -> None:
        """Flag configurations likely to cause performance problems."""
        node_type = ast.node_type.value

        # Over-parallelized nodes.
        if node_type == 'parallel':
            self.analysis_result.parallel_nodes += 1
            if hasattr(ast, 'max_workers') and ast.max_workers > 8:
                self.analysis_result.bottlenecks.append({
                    "node_id": ast.node_id,
                    "type": "high_parallelism",
                    "description": f"并行度过高: {ast.max_workers}",
                    "severity": "warning"
                })

        # Loops with very high iteration counts.
        if node_type == 'loop':
            if hasattr(ast, 'max_iterations') and ast.max_iterations > 100:
                self.analysis_result.bottlenecks.append({
                    "node_id": ast.node_id,
                    "type": "high_iteration",
                    "description": f"循环次数过多: {ast.max_iterations}",
                    "severity": "warning"
                })

        # Inherently expensive node types (informational only).
        if node_type in ('ffmpeg_slice', 'gemini_classify'):
            self.analysis_result.bottlenecks.append({
                "node_id": ast.node_id,
                "type": "resource_intensive",
                "description": f"资源密集型节点: {node_type}",
                "severity": "info"
            })

    def _post_process_analysis(self) -> None:
        """Derive totals, scores, recommendations and the critical path."""
        # Sum of per-node intrinsic costs.  This is a sequential upper bound;
        # parallel speed-ups are deliberately not modelled here.
        self.analysis_result.estimated_execution_time = sum(self.node_execution_times.values())

        self._calculate_complexity_score()
        self._generate_recommendations()
        self._analyze_critical_path()

    def _calculate_complexity_score(self) -> None:
        """Compute a weighted complexity score from the collected statistics."""
        node_types = self.analysis_result.node_types

        score = self.analysis_result.node_count * 0.1        # workflow size
        score += self.analysis_result.max_depth * 0.5        # nesting depth
        score += self.analysis_result.parallel_nodes * 0.3   # parallelism
        score += node_types.get('loop', 0) * 1.0             # loop nodes

        # Resource-intensive node types weigh extra.
        for node_type in ('ffmpeg_slice', 'gemini_classify'):
            score += node_types.get(node_type, 0) * 0.8

        self.analysis_result.complexity_score = score

    def _generate_recommendations(self) -> None:
        """Turn the analysis results into human-readable optimization advice."""
        result = self.analysis_result
        recommendations: List[str] = []

        if result.complexity_score > 10:
            recommendations.append("工作流复杂度较高，建议拆分为多个子工作流")

        if result.parallel_nodes > 3:
            recommendations.append("并行节点较多，注意系统资源限制")

        if result.estimated_execution_time > 300:  # longer than 5 minutes
            recommendations.append("预估执行时间较长，建议优化或添加进度监控")

        cpu_cores = result.resource_requirements.get('cpu_cores', 0)
        if cpu_cores > 4:
            recommendations.append(f"需要 {cpu_cores} 个CPU核心，确保系统资源充足")

        memory_mb = result.resource_requirements.get('memory_mb', 0)
        if memory_mb > 2048:  # more than 2 GB
            recommendations.append(f"需要 {memory_mb}MB 内存，注意内存使用")

        # One message per severity level actually present among bottlenecks.
        severities = {b['severity'] for b in result.bottlenecks}
        if 'error' in severities:
            recommendations.append("发现严重瓶颈，需要修复后才能执行")
        if 'warning' in severities:
            recommendations.append("发现性能警告，建议优化相关配置")

        result.recommendations = recommendations

    def _analyze_critical_path(self) -> None:
        """Pick the most expensive nodes as a (simplified) critical path.

        A real critical-path computation would follow dependency edges and
        account for parallel execution; here we simply report the five
        costliest individual nodes.
        """
        costliest = sorted(
            self.node_execution_times.items(),
            key=lambda item: item[1],
            reverse=True,
        )
        self.analysis_result.critical_path = [node_id for node_id, _ in costliest[:5]]

    # ========================================================================
    # Node-specific analysis handlers
    # ========================================================================

    def visit_sequence(self, ast: 'Sequence', context: 'AstContext') -> Any:
        """Analyze a sequential container by visiting each child in order."""
        return [self.visit(child, context) for child in ast.children]

    def visit_parallel(self, ast: 'Parallel', context: 'AstContext') -> Any:
        """Analyze a parallel container, recording its worker configuration."""
        self.analysis_result.metadata.setdefault('parallel_configs', []).append({
            'node_id': ast.node_id,
            'max_workers': getattr(ast, 'max_workers', 4)
        })
        return [self.visit(child, context) for child in ast.children]

    def visit_conditional(self, ast: 'Conditional', context: 'AstContext') -> Any:
        """Analyze both branches of a conditional node."""
        results: Dict[str, Any] = {}
        if ast.true_branch:
            results['true_branch'] = self.visit(ast.true_branch, context)
        if ast.false_branch:
            results['false_branch'] = self.visit(ast.false_branch, context)
        return results

    def visit_loop(self, ast: 'Loop', context: 'AstContext') -> Any:
        """Analyze a loop node; its body is analyzed once, not per iteration."""
        self.analysis_result.metadata.setdefault('loop_configs', []).append({
            'node_id': ast.node_id,
            'max_iterations': getattr(ast, 'max_iterations', None),
            'condition': getattr(ast, 'condition', None)
        })
        return [self.visit(child, context) for child in ast.children]

    def visit_default(self, ast: 'Ast', context: 'AstContext') -> Any:
        """Leaf nodes need no handling beyond the shared visit logic."""
        return None
