"""
核心监控器模块，整合所有功能并提供统一API。
"""
import json
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union

import torch

from ..distributed.parallel import ParallelMonitor, ParallelType
from ..distributed.zero import ZeROMonitor
from ..hooks.hook_manager import HookManager
from ..metrics.anomaly import AnomalyDetector, AnomalyResult, AnomalyType
from ..metrics.performance import MemoryMonitor, PerformanceTimer, ThroughputMonitor, TimerEvent
from ..metrics.stats import calculate_statistics
from ..utils.io import csv_to_tensorboard, ensure_directory, save_csv, save_json
from ..utils.logging import LogLevel, MonitorLogger
from ..visualization.tensorboard import MetricData, MetricType, TensorBoardVisualizer

class MonitorConfig:
    """Configuration for the training monitor.

    Holds every tunable setting, creates the output directory on
    construction, and immediately persists itself to
    ``<output_dir>/config.json``.
    """

    def __init__(self,
                 output_dir: str,
                 log_interval: int = 1,
                 anomaly_detection: bool = True,
                 tensorboard: bool = True,
                 csv: bool = True,
                 api: bool = False,
                 grad_norm_threshold: float = 10.0,
                 grad_min_threshold: float = 1e-6,
                 outlier_std_threshold: float = 3.0,
                 history_size: int = 100,
                 monitor_memory: bool = True,
                 memory_interval: float = 1.0,
                 monitor_performance: bool = True,
                 targets: Optional[List[str]] = None,
                 log_level: LogLevel = LogLevel.INFO):
        """
        Build the configuration.

        Args:
            output_dir: Directory all monitor output is written to.
            log_interval: Record metrics every this many steps.
            anomaly_detection: Enable gradient anomaly detection.
            tensorboard: Enable TensorBoard output.
            csv: Enable CSV output.
            api: Enable API output.
            grad_norm_threshold: Upper threshold on gradient norms.
            grad_min_threshold: Lower threshold on gradient magnitudes.
            outlier_std_threshold: Std-deviation threshold for outlier detection.
            history_size: Number of historical values kept for detection.
            monitor_memory: Enable memory monitoring.
            memory_interval: Memory sampling interval (seconds).
            monitor_performance: Enable performance monitoring.
            targets: Model layer names to monitor (None monitors everything).
            log_level: Logging verbosity.
        """
        # Bulk-assign all settings; keyword order matches the parameter
        # order so the attribute (and saved JSON) ordering stays stable.
        self.__dict__.update(
            output_dir=output_dir,
            log_interval=log_interval,
            anomaly_detection=anomaly_detection,
            tensorboard=tensorboard,
            csv=csv,
            api=api,
            grad_norm_threshold=grad_norm_threshold,
            grad_min_threshold=grad_min_threshold,
            outlier_std_threshold=outlier_std_threshold,
            history_size=history_size,
            monitor_memory=monitor_memory,
            memory_interval=memory_interval,
            monitor_performance=monitor_performance,
            targets=targets,
            log_level=log_level,
        )

        # Make sure the output directory exists before anything is written.
        ensure_directory(output_dir)

        # Persist the configuration right away.
        self._save_config()

    def _save_config(self) -> None:
        """Write the configuration to ``<output_dir>/config.json``.

        Enum members are stored by ``.name`` so they can be restored with
        ``EnumClass[name]`` on load.
        """
        serializable = {}
        for key, val in self.__dict__.items():
            serializable[key] = val.name if isinstance(val, Enum) else val

        target = os.path.join(self.output_dir, "config.json")
        with open(target, "w") as fh:
            json.dump(serializable, fh, indent=4)

class TrainerMon:
    """训练监控器"""
    
    def __init__(self, config_path: str = None, **kwargs):
        """
        初始化监控器
        
        Args:
            config_path: 配置文件路径
            **kwargs: 配置参数
        """
        # 加载配置
        if config_path and os.path.exists(config_path):
            with open(config_path, "r") as f:
                config_dict = json.load(f)
                
            # 将字符串转换为枚举
            if "log_level" in config_dict:
                config_dict["log_level"] = LogLevel[config_dict["log_level"]]
                
            self.config = MonitorConfig(**config_dict)
        else:
            self.config = MonitorConfig(**kwargs)
        
        # 初始化状态
        self.step = 0
        self.model = None
        self.optimizer = None
        self.metrics: Dict[str, Dict[str, List[float]]] = {}
        
        # 初始化日志记录器
        self.logger = MonitorLogger(
            name="colo_monitor",
            level=self.config.log_level,
            log_dir=os.path.join(self.config.output_dir, "logs")
        )
        
        # 初始化组件
        if self.config.anomaly_detection:
            self.anomaly_detector = AnomalyDetector(
                grad_norm_threshold=self.config.grad_norm_threshold,
                grad_min_threshold=self.config.grad_min_threshold,
                outlier_std_threshold=self.config.outlier_std_threshold,
                history_size=self.config.history_size
            )
        else:
            self.anomaly_detector = None
        
        if self.config.tensorboard:
            self.visualizer = TensorBoardVisualizer(
                log_dir=os.path.join(self.config.output_dir, "tensorboard")
            )
        else:
            self.visualizer = None
        
        # 初始化性能监控
        if self.config.monitor_performance:
            self.timer = PerformanceTimer()
            self.throughput_monitor = ThroughputMonitor()
        else:
            self.timer = None
            self.throughput_monitor = None
        
        # 初始化内存监控
        if self.config.monitor_memory:
            self.memory_monitor = MemoryMonitor(interval=self.config.memory_interval)
            self.memory_monitor.start()
        else:
            self.memory_monitor = None
        
        # 初始化钩子管理器
        self.hook_manager = HookManager()
        
        # 初始化分布式组件
        self.zero_monitor = None
        self.parallel_monitor = None
        
        self.logger.info(f"TrainerMon initialized with output directory: {self.config.output_dir}")
    
    def set_model(self, model: torch.nn.Module) -> None:
        """
        设置要监控的模型
        
        Args:
            model: PyTorch模型
        """
        self.model = model
        self.logger.info(f"Model set: {type(model).__name__}")
        self.logger.log_model_info(model)
    
    def set_optimizer(self, optimizer: Any) -> None:
        """
        设置要监控的优化器
        
        Args:
            optimizer: 优化器实例
        """
        self.optimizer = optimizer
        self.logger.info(f"Optimizer set: {type(optimizer).__name__}")
        
        # 检查是否是ZeRO优化器
        if hasattr(optimizer, "stage"):
            self.zero_monitor = ZeROMonitor(optimizer)
            self.logger.info(f"ZeRO optimizer detected with stage: {self.zero_monitor.state.stage.value}")
    
    def set_parallel(self, 
                    parallel_type: ParallelType,
                    world_size: int,
                    rank: int,
                    group_size: int,
                    group_rank: int,
                    stage_id: Optional[int] = None,
                    num_stages: Optional[int] = None) -> None:
        """
        设置并行配置
        
        Args:
            parallel_type: 并行类型
            world_size: 总进程数
            rank: 当前进程rank
            group_size: 当前组大小
            group_rank: 当前组内rank
            stage_id: 流水线并行阶段ID
            num_stages: 流水线并行总阶段数
        """
        self.parallel_monitor = ParallelMonitor(
            parallel_type=parallel_type,
            world_size=world_size,
            rank=rank,
            group_size=group_size,
            group_rank=group_rank,
            stage_id=stage_id,
            num_stages=num_stages
        )
        self.logger.info(f"Parallel mode set: {parallel_type.value}")
        self.logger.info(f"World size: {world_size}, Rank: {rank}")
        self.logger.info(f"Group size: {group_size}, Group rank: {group_rank}")
        
        if stage_id is not None:
            self.logger.info(f"Pipeline stage: {stage_id}/{num_stages}")
    
    def register_hooks(self) -> None:
        """注册钩子"""
        if self.model is None:
            self.logger.warning("Cannot register hooks: model not set")
            return
        
        # 注册梯度钩子
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                # 如果有指定目标，只监控目标参数
                if self.config.targets and not any(target in name for target in self.config.targets):
                    continue
                
                self.hook_manager.register_gradient_hook(
                    name=name,
                    module=param,
                    callback=self._gradient_callback
                )
        
        # 注册激活值钩子
        for name, module in self.model.named_modules():
            # 如果有指定目标，只监控目标模块
            if self.config.targets and not any(target in name for target in self.config.targets):
                continue
            
            # 排除容器模块
            if len(list(module.children())) == 0:
                self.hook_manager.register_activation_hook(
                    name=name,
                    module=module,
                    callback=self._activation_callback
                )
        
        # 注册优化器钩子
        if self.optimizer:
            self.hook_manager.register_optimizer_hook(
                name="optimizer",
                optimizer=self.optimizer,
                callback=self._optimizer_callback
            )
        
        self.logger.info("Hooks registered")
    
    def _gradient_callback(self, name: str, grad: torch.Tensor, is_training: bool) -> None:
        """
        梯度回调函数
        
        Args:
            name: 参数名称
            grad: 梯度张量
            is_training: 是否在训练模式
        """
        if not is_training or self.step % self.config.log_interval != 0:
            return
        
        # 计算统计信息
        stats = calculate_statistics(grad)
        
        # 记录指标
        self._record_metric(f"grad/{name}", stats)
        
        # 异常检测
        if self.anomaly_detector:
            anomalies = self.anomaly_detector.check_gradient(name, grad, self.step)
            for anomaly in anomalies:
                self._handle_anomaly(anomaly)
        
        # TensorBoard可视化
        if self.visualizer:
            # 获取分片信息
            shard_info = {}
            if self.parallel_monitor:
                shard_info = self.parallel_monitor.get_grad_shard_info(name, grad.shape)
            
            if self.zero_monitor:
                grad_rank = self.zero_monitor.get_grad_rank(name)
                if grad_rank is not None:
                    shard_info["grad_rank"] = grad_rank
            
            # 记录指标
            self.visualizer.add_gradient_metrics(
                name=name,
                value=stats["norm"],
                step=self.step,
                details={**stats, **shard_info}
            )
    
    def _activation_callback(self, name: str, activation: torch.Tensor, is_training: bool) -> None:
        """
        激活值回调函数
        
        Args:
            name: 层名称
            activation: 激活值张量
            is_training: 是否在训练模式
        """
        if not is_training or self.step % self.config.log_interval != 0:
            return
        
        # 计算统计信息
        stats = calculate_statistics(activation)
        
        # 记录指标
        self._record_metric(f"activation/{name}", stats)
        
        # TensorBoard可视化
        if self.visualizer:
            self.visualizer.add_activation_metrics(
                name=name,
                value=stats["norm"],
                step=self.step,
                details=stats
            )
    
    def _optimizer_callback(self, name: str, state: Dict[str, Any], lr: float) -> None:
        """
        优化器回调函数
        
        Args:
            name: 优化器名称
            state: 优化器状态
            lr: 学习率
        """
        if self.step % self.config.log_interval != 0:
            return
        
        # 记录学习率
        self._record_metric("optimizer/lr", {"value": lr})
        
        # TensorBoard可视化
        if self.visualizer:
            self.visualizer.add_optimizer_metrics(
                name="learning_rate",
                value=lr,
                step=self.step
            )
    
    def _handle_anomaly(self, anomaly: AnomalyResult) -> None:
        """
        处理异常
        
        Args:
            anomaly: 异常检测结果
        """
        # 记录异常
        self.logger.log_anomaly(
            anomaly_type=anomaly.anomaly_type.value,
            parameter_name=anomaly.parameter_name,
            value=anomaly.value,
            threshold=anomaly.threshold,
            step=self.step
        )
        
        # TensorBoard可视化
        if self.visualizer:
            self.visualizer.add_anomaly_metrics(
                name=f"{anomaly.anomaly_type.value}/{anomaly.parameter_name}",
                value=anomaly.value,
                step=self.step,
                details={
                    "threshold": anomaly.threshold,
                    **(anomaly.details or {})
                }
            )
        
        # 保存异常信息
        anomaly_path = os.path.join(self.config.output_dir, "anomalies.json")
        anomaly_info = {
            "step": self.step,
            "parameter": anomaly.parameter_name,
            "type": anomaly.anomaly_type.value,
            "value": anomaly.value,
            "threshold": anomaly.threshold,
            "details": anomaly.details
        }
        
        if os.path.exists(anomaly_path):
            with open(anomaly_path, "r") as f:
                anomalies = json.load(f)
        else:
            anomalies = []
        
        anomalies.append(anomaly_info)
        
        with open(anomaly_path, "w") as f:
            json.dump(anomalies, f, indent=4)
    
    def _record_metric(self, name: str, value: Dict[str, float]) -> None:
        """
        记录指标
        
        Args:
            name: 指标名称
            value: 指标值字典
        """
        if name not in self.metrics:
            self.metrics[name] = {k: [] for k in value.keys()}
        
        for k, v in value.items():
            if k in self.metrics[name]:
                self.metrics[name][k].append(v)
        
        # CSV输出
        if self.config.csv:
            csv_dir = os.path.join(self.config.output_dir, "csv")
            ensure_directory(csv_dir)
            
            # 替换/为_，避免路径问题
            file_name = name.replace("/", "_")
            csv_path = os.path.join(csv_dir, f"{file_name}.csv")
            
            # 创建CSV数据
            data = []
            for i, _ in enumerate(self.metrics[name][list(self.metrics[name].keys())[0]]):
                row = {"step": i + 1}
                for k in self.metrics[name].keys():
                    if i < len(self.metrics[name][k]):
                        row[k] = self.metrics[name][k][i]
                data.append(row)
            
            save_csv(data, csv_path)
    
    def forward_start(self) -> str:
        """
        开始前向传播计时
        
        Returns:
            计时器ID
        """
        if self.timer:
            return self.timer.start(TimerEvent.FORWARD, step=self.step)
        return ""
    
    def forward_end(self, timer_id: str) -> Optional[float]:
        """
        结束前向传播计时
        
        Args:
            timer_id: 计时器ID
        
        Returns:
            持续时间（秒）
        """
        if self.timer:
            return self.timer.stop(timer_id)
        return None
    
    def backward_start(self) -> str:
        """
        开始反向传播计时
        
        Returns:
            计时器ID
        """
        if self.timer:
            return self.timer.start(TimerEvent.BACKWARD, step=self.step)
        return ""
    
    def backward_end(self, timer_id: str) -> Optional[float]:
        """
        结束反向传播计时
        
        Args:
            timer_id: 计时器ID
        
        Returns:
            持续时间（秒）
        """
        if self.timer:
            return self.timer.stop(timer_id)
        return None
    
    def optimizer_step_start(self) -> str:
        """
        开始优化器步骤计时
        
        Returns:
            计时器ID
        """
        if self.timer:
            return self.timer.start(TimerEvent.OPTIMIZER_STEP, step=self.step)
        return ""
    
    def optimizer_step_end(self, timer_id: str) -> Optional[float]:
        """
        结束优化器步骤计时
        
        Args:
            timer_id: 计时器ID
        
        Returns:
            持续时间（秒）
        """
        if self.timer:
            return self.timer.stop(timer_id)
        return None
    
    def step_start(self) -> str:
        """
        开始整体步骤计时
        
        Returns:
            计时器ID
        """
        if self.timer:
            return self.timer.start(TimerEvent.TOTAL_STEP, step=self.step)
        return ""
    
    def step_end(self, timer_id: str, samples: int = 0, tokens: int = 0) -> Optional[float]:
        """
        结束整体步骤计时
        
        Args:
            timer_id: 计时器ID
            samples: 处理的样本数
            tokens: 处理的token数
        
        Returns:
            持续时间（秒）
        """
        duration = None
        if self.timer:
            duration = self.timer.stop(timer_id)
        
        # 更新吞吐量
        if self.throughput_monitor:
            self.throughput_monitor.update(samples=samples, tokens=tokens)
        
        return duration
    
    def update_step(self) -> None:
        """更新步数"""
        self.step += 1
    
    def log_step_stats(self) -> None:
        """记录步骤统计信息"""
        if self.step % self.config.log_interval != 0:
            return
        
        # 记录性能统计
        if self.throughput_monitor:
            stats = self.throughput_monitor.get_statistics()
            self.logger.info(f"Step {self.step} - Samples/sec: {stats['samples_per_second']:.2f}, Tokens/sec: {stats['tokens_per_second']:.2f}")
            
            # TensorBoard可视化
            if self.visualizer:
                self.visualizer.add_metric(
                    MetricData(
                        type=MetricType.OPTIMIZER,
                        name="throughput/samples_per_second",
                        value=stats["samples_per_second"],
                        step=self.step
                    )
                )
                self.visualizer.add_metric(
                    MetricData(
                        type=MetricType.OPTIMIZER,
                        name="throughput/tokens_per_second",
                        value=stats["tokens_per_second"],
                        step=self.step
                    )
                )
        
        # 记录内存统计
        if self.memory_monitor and len(self.memory_monitor.memory_records) > 0:
            latest_stats = self.memory_monitor.memory_records[-1]
            self.logger.log_memory_stats(latest_stats)
            
            # TensorBoard可视化
            if self.visualizer and "cuda" in latest_stats:
                for gpu_id, gpu_stats in latest_stats["cuda"].items():
                    allocated_mb = gpu_stats["allocated"] / (1024 ** 2)
                    self.visualizer.add_metric(
                        MetricData(
                            type=MetricType.OPTIMIZER,
                            name=f"memory/{gpu_id}/allocated_mb",
                            value=allocated_mb,
                            step=self.step
                        )
                    )
    
    def get_metrics(self) -> Dict[str, Dict[str, List[float]]]:
        """
        获取所有指标
        
        Returns:
            指标字典
        """
        return self.metrics
    
    def get_metric(self, name: str) -> Optional[Dict[str, List[float]]]:
        """
        获取特定指标
        
        Args:
            name: 指标名称
        
        Returns:
            指标字典
        """
        return self.metrics.get(name)
    
    def get_performance_stats(self) -> Dict[str, float]:
        """
        获取性能统计信息
        
        Returns:
            性能统计字典
        """
        if self.throughput_monitor:
            return self.throughput_monitor.get_statistics()
        return {}
    
    def get_memory_stats(self) -> Dict[str, Any]:
        """
        获取内存统计信息
        
        Returns:
            内存统计字典
        """
        if self.memory_monitor and len(self.memory_monitor.memory_records) > 0:
            return self.memory_monitor.memory_records[-1]
        return {}
    
    def get_peak_memory(self, memory_type: str = "cuda") -> Dict[str, Any]:
        """
        获取峰值内存使用
        
        Args:
            memory_type: 内存类型
        
        Returns:
            峰值内存字典
        """
        if self.memory_monitor:
            return self.memory_monitor.get_peak_memory(memory_type)
        return {}
    
    def close(self) -> None:
        """关闭监控器"""
        # 停止内存监控
        if self.memory_monitor:
            self.memory_monitor.stop()
        
        # 移除钩子
        self.hook_manager.remove_all_hooks()
        
        # 关闭可视化器
        if self.visualizer:
            self.visualizer.close()
        
        # 将CSV转换为TensorBoard（如果需要）
        if self.config.csv and self.config.tensorboard:
            csv_dir = os.path.join(self.config.output_dir, "csv")
            tb_dir = os.path.join(self.config.output_dir, "tensorboard", "from_csv")
            
            # 导入函数以避免循环导入
            from ..utils.io import csv_to_tensorboard
            csv_to_tensorboard(csv_dir, tb_dir)
        
        # 保存性能统计
        if self.throughput_monitor:
            stats = self.throughput_monitor.get_statistics()
            save_json(stats, os.path.join(self.config.output_dir, "performance.json"))
        
        # 保存内存峰值统计
        if self.memory_monitor:
            cuda_peak = self.memory_monitor.get_peak_memory("cuda")
            system_peak = self.memory_monitor.get_peak_memory("system")
            process_peak = self.memory_monitor.get_peak_memory("process")
            
            memory_stats = {
                "cuda": cuda_peak,
                "system": system_peak,
                "process": process_peak
            }
            save_json(memory_stats, os.path.join(self.config.output_dir, "memory_peak.json"))
        
        # 记录关闭信息
        self.logger.info(f"TrainerMon closed after {self.step} steps")
    
    def __enter__(self):
        """上下文管理器入口"""
        return self
    
    def __exit__(self, exc_type, exc_val, exc_tb):
        """上下文管理器出口"""
        self.close() 