import torch
import torch.distributed as dist
from typing import Dict, Optional, Set, List, Union
from enum import Enum
import numpy as np
from collections import defaultdict
import logging
import time
from contextlib import contextmanager
from .parallel_utils import ParallelInfo

class GradientStage(Enum):
    """Stage of the gradient life-cycle at which statistics are captured."""

    PRE_REDUCE = "pre_reduce"    # gradient as produced locally, before aggregation
    POST_REDUCE = "post_reduce"  # gradient after cross-rank reduction/aggregation

class GradientMonitor:
    """Collects per-parameter gradient statistics during training.

    Gradients can be observed both before aggregation (``PRE_REDUCE``, via
    per-parameter backward hooks) and after aggregation (``POST_REDUCE``, via
    :meth:`after_reduce_callback`).  Parallel-training concerns (ZeRO / TP /
    PP) are delegated to the project-local ``ParallelInfo`` helper, which
    decides which parameters to monitor and how to reduce their gradients.

    Statistics are stored as ``self.stats[param_name][stage] -> dict`` and
    retrieved with :meth:`get_stats`.
    """

    def __init__(self,
                 model: torch.nn.Module,
                 monitor_stages: "Optional[Set[GradientStage]]" = None,
                 log_dir: str = "./gradient_logs",
                 max_items_per_tensor: int = 1000,   # max sampled elements per tensor
                 use_async_stats: bool = True,       # compute stats on a side CUDA stream
                 rank: int = 0,                      # rank of the current process
                 memory_efficient: bool = True):     # stream large tensors chunk-by-chunk
        """Create the monitor and register gradient hooks on *model*.

        Args:
            model: module whose parameter gradients are monitored.
            monitor_stages: which stages to record; defaults to both
                ``PRE_REDUCE`` and ``POST_REDUCE``.  ``None`` (not a shared
                mutable default) is used as the sentinel.
            log_dir: directory intended for log output (currently only stored).
            max_items_per_tensor: sampling / chunking threshold for statistics.
            use_async_stats: if True *and* CUDA is available, compute
                statistics on a dedicated side stream.
            rank: process rank, used only to name the logger.
            memory_efficient: if True, tensors larger than
                ``max_items_per_tensor`` are processed in chunks instead of
                being sampled.
        """
        self.model = model
        # Resolve the default here so each instance gets its own set
        # (avoids the shared-mutable-default-argument pitfall).
        self.monitor_stages = (monitor_stages if monitor_stages is not None
                               else {GradientStage.PRE_REDUCE, GradientStage.POST_REDUCE})
        self.log_dir = log_dir
        self.max_items_per_tensor = max_items_per_tensor
        self.use_async_stats = use_async_stats
        self.rank = rank
        self.memory_efficient = memory_efficient

        # Parallel-strategy helper (project-local).
        self.parallel_info = ParallelInfo(model)

        # stats[param_name][stage] -> dict of scalar statistics
        self.stats = defaultdict(lambda: defaultdict(dict))
        self.hooks = []
        self.logger = logging.getLogger(f"GradientMonitor-rank{rank}")
        self.monitoring_time = 0.0  # cumulative wall-clock time spent monitoring

        # Configure root logging (no-op if the application already did so).
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )

        # Side stream for asynchronous statistics.  Only create one when CUDA
        # actually exists; torch.cuda.Stream() raises on CPU-only hosts.
        if use_async_stats and torch.cuda.is_available():
            self.compute_stream = torch.cuda.Stream()
        else:
            self.compute_stream = None

        # Record the parallel configuration once at start-up.
        self.parallel_config = self.parallel_info.get_parallel_config()
        self.logger.info(f"Parallel config: {self.parallel_config}")

        self._register_hooks()

    @contextmanager
    def _stats_timing(self):
        """Context manager that accumulates elapsed time into ``monitoring_time``."""
        start = time.perf_counter()
        yield
        self.monitoring_time += time.perf_counter() - start

    def _register_hooks(self):
        """Attach a gradient hook to every monitored, trainable parameter."""
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.parallel_info.should_monitor_grad(name):
                # Bind the name as a default argument to avoid the
                # late-binding-closure pitfall.
                hook = param.register_hook(
                    lambda grad, param_name=name: self._grad_hook(grad, param_name)
                )
                self.hooks.append(hook)

    def _compute_stats(self, tensor: torch.Tensor, param_name: str = None) -> Dict[str, float]:
        """Compute summary statistics for *tensor*.

        Dispatches to streaming computation for large tensors in
        memory-efficient mode, otherwise samples large tensors down to
        ``max_items_per_tensor`` elements before computing.
        """
        if tensor.numel() == 0:
            # Degenerate case: report all-zero statistics.
            return {
                "norm": 0.0,
                "max": 0.0,
                "min": 0.0,
                "mean": 0.0,
                "std": 0.0,
                "sparsity": 0.0,
                "zero_count": 0,
                "inf_count": 0,
                "nan_count": 0
            }

        with torch.no_grad():
            # Memory-efficient mode: process big tensors chunk by chunk.
            if self.memory_efficient and tensor.numel() > self.max_items_per_tensor:
                return self._compute_stats_streaming(tensor)

            # Otherwise sample down large tensors (statistics become estimates).
            if tensor.numel() > self.max_items_per_tensor:
                indices = torch.randperm(tensor.numel(), device=tensor.device)[:self.max_items_per_tensor]
                tensor = tensor.view(-1)[indices]

            if self.compute_stream is not None:
                # Make the side stream wait for the producer stream so it
                # never reads the gradient before it is fully written.
                self.compute_stream.wait_stream(torch.cuda.current_stream())
                with torch.cuda.stream(self.compute_stream):
                    stats = self._compute_tensor_stats(tensor)
            else:
                stats = self._compute_tensor_stats(tensor)

            # Annotate with per-parameter parallel information when available.
            if param_name is not None:
                stats.update(self.parallel_info.get_param_parallel_info(param_name))

            return stats

    def _compute_stats_streaming(self, tensor: torch.Tensor) -> Dict[str, float]:
        """Compute exact statistics for a large tensor in fixed-size chunks.

        Keeps only O(1) running aggregates so peak memory stays bounded by
        ``max_items_per_tensor`` elements per chunk.
        """
        chunk_size = self.max_items_per_tensor
        n_chunks = (tensor.numel() + chunk_size - 1) // chunk_size

        running_stats = {
            "sum": 0.0,
            "sum_sq": 0.0,
            "max": float('-inf'),
            "min": float('inf'),
            "zero_count": 0,
            "inf_count": 0,
            "nan_count": 0
        }

        # One pass over the flattened tensor, chunk by chunk.
        for i in range(n_chunks):
            start_idx = i * chunk_size
            end_idx = min((i + 1) * chunk_size, tensor.numel())
            chunk = tensor.view(-1)[start_idx:end_idx]

            running_stats["sum"] += chunk.sum().item()
            running_stats["sum_sq"] += (chunk ** 2).sum().item()
            running_stats["max"] = max(running_stats["max"], chunk.max().item())
            running_stats["min"] = min(running_stats["min"], chunk.min().item())
            running_stats["zero_count"] += (chunk == 0).sum().item()
            running_stats["inf_count"] += torch.isinf(chunk).sum().item()
            running_stats["nan_count"] += torch.isnan(chunk).sum().item()

        # Derive the final statistics from the running aggregates.
        # NOTE: std here is the population std (ddof=0), whereas
        # _compute_tensor_stats uses torch.std (ddof=1); the small-tensor and
        # streaming paths therefore differ slightly by design of the original.
        n = tensor.numel()
        mean = running_stats["sum"] / n
        var = (running_stats["sum_sq"] / n) - (mean ** 2)
        std = np.sqrt(max(var, 0))  # clamp: float error can make var slightly negative

        return {
            "norm": np.sqrt(running_stats["sum_sq"]),
            "max": running_stats["max"],
            "min": running_stats["min"],
            "mean": mean,
            "std": std,
            "sparsity": running_stats["zero_count"] / n,
            "zero_count": running_stats["zero_count"],
            "inf_count": running_stats["inf_count"],
            "nan_count": running_stats["nan_count"]
        }

    def _compute_tensor_stats(self, tensor: torch.Tensor) -> Dict[str, float]:
        """Compute statistics for a (small) tensor in one shot on-device."""
        # Basic moments and extremes.
        norm = torch.norm(tensor).item()
        max_val = tensor.max().item()
        min_val = tensor.min().item()
        mean = tensor.mean().item()
        std = tensor.std().item()

        # Sparsity and special-value counts.
        zero_mask = tensor == 0
        zero_count = zero_mask.sum().item()
        sparsity = zero_count / tensor.numel()

        inf_count = torch.isinf(tensor).sum().item()
        nan_count = torch.isnan(tensor).sum().item()

        return {
            "norm": norm,
            "max": max_val,
            "min": min_val,
            "mean": mean,
            "std": std,
            "sparsity": sparsity,
            "zero_count": zero_count,
            "inf_count": inf_count,
            "nan_count": nan_count
        }

    def _grad_hook(self, grad: torch.Tensor, param_name: str):
        """Backward hook: record pre-reduce stats and optionally reduce *grad*.

        Returning the (possibly reduced) gradient replaces the parameter's
        gradient; returning ``None`` leaves it unchanged.
        """
        if grad is None:
            return

        with self._stats_timing():
            # Record statistics of the local (pre-aggregation) gradient.
            if GradientStage.PRE_REDUCE in self.monitor_stages:
                self.stats[param_name][GradientStage.PRE_REDUCE] = self._compute_stats(grad, param_name)

            # Delegate any required cross-rank reduction to the parallel helper.
            grad = self.parallel_info.reduce_grad_if_needed(grad, param_name)

        return grad

    def after_reduce_callback(self):
        """Record post-aggregation gradient statistics.

        Call this after the optimizer/engine has reduced gradients
        (e.g. after ``backward()`` with DDP/ZeRO has completed).
        """
        if GradientStage.POST_REDUCE not in self.monitor_stages:
            return

        with self._stats_timing():
            for name, param in self.model.named_parameters():
                if param.grad is not None and self.parallel_info.should_monitor_grad(name):
                    self.stats[name][GradientStage.POST_REDUCE] = self._compute_stats(param.grad, name)

            # Drain the side stream (only exists when CUDA + async is enabled).
            if self.compute_stream is not None:
                torch.cuda.current_stream().wait_stream(self.compute_stream)

    def get_stats(self) -> Dict:
        """Return collected statistics plus the parallel configuration."""
        # Ensure any in-flight asynchronous statistics are finished first.
        if self.compute_stream is not None:
            torch.cuda.current_stream().wait_stream(self.compute_stream)

        stats = dict(self.stats)
        stats["__parallel_config__"] = self.parallel_config
        return stats

    def get_monitoring_time(self) -> float:
        """Return total time (seconds) spent inside monitoring code."""
        return self.monitoring_time

    def clear_stats(self):
        """Drop all collected statistics and reset the timing counter."""
        self.stats.clear()
        self.monitoring_time = 0.0

    def remove_hooks(self):
        """Remove every registered gradient hook."""
        for hook in self.hooks:
            hook.remove()
        self.hooks.clear()

    def __del__(self):
        """Best-effort hook cleanup; never raise during interpreter shutdown."""
        try:
            self.remove_hooks()
        except Exception:
            # Attributes/modules may already be torn down at exit.
            pass