import torch
import torch.distributed as dist
from typing import Optional, Dict, Set, Union, List
from .grad_monitor import GradientMonitor, GradientStage
from colossalai.zero import GeminiDDP
from colossalai.booster import Booster
from colossalai.tensor import ColoParameter
from colossalai.context.parallel_mode import ParallelMode
import os
import json
from datetime import datetime
import time
import logging

class ColossalGradientMonitor:
    """
    Gradient monitor tailored to Colossal AI, supporting its parallel
    training strategies.

    Wraps ``booster.backward`` so that gradient statistics are collected
    around every backward pass on the ranks selected for monitoring, and
    periodically dumps those statistics (plus monitoring-overhead metrics)
    to per-rank JSON files.
    """

    def __init__(self,
                 booster: Booster,
                 monitor_stages: Optional[Set[GradientStage]] = None,
                 log_dir: str = "./gradient_logs",
                 save_interval: int = 100,
                 max_items_per_tensor: int = 1000,
                 use_async_stats: bool = True,
                 monitor_ranks: Union[str, List[int]] = "all",
                 memory_efficient: bool = True,
                 log_level: str = "INFO"):
        """
        Args:
            booster: Colossal AI booster instance whose ``backward`` will be wrapped.
            monitor_stages: Stages to monitor (pre-/post-reduce). Defaults to
                both ``PRE_REDUCE`` and ``POST_REDUCE`` when ``None``.
            log_dir: Directory where statistics files are written.
            save_interval: Save statistics every this many steps.
            max_items_per_tensor: Maximum number of elements sampled per tensor.
            use_async_stats: Whether to compute statistics asynchronously.
            monitor_ranks: Ranks to monitor — the string ``"all"`` or a list of ranks.
            memory_efficient: Whether to use the memory-efficient mode.
            log_level: Logging level name (e.g. ``"INFO"``, ``"DEBUG"``).
        """
        # None sentinel instead of a mutable default argument: a shared set
        # literal in the signature would be aliased across instances.
        if monitor_stages is None:
            monitor_stages = {GradientStage.PRE_REDUCE, GradientStage.POST_REDUCE}

        self.booster = booster
        self.model = booster.model
        self.monitor_stages = monitor_stages
        self.log_dir = log_dir
        self.save_interval = save_interval
        self.step = 0

        # Parallel configuration of the current process.
        self.rank = dist.get_rank()
        self.world_size = dist.get_world_size()

        # Decide whether this rank participates in monitoring.
        if isinstance(monitor_ranks, str) and monitor_ranks.lower() == "all":
            self.should_monitor = True
        else:
            self.should_monitor = self.rank in monitor_ranks

        # Per-rank logger; do not shadow the `log_level` parameter.
        self.logger = logging.getLogger(f"ColossalGradientMonitor-rank{self.rank}")
        level = getattr(logging, log_level.upper())
        self.logger.setLevel(level)

        # Create the per-rank log directory and the underlying monitor only
        # on ranks that actually monitor.
        if self.should_monitor:
            rank_log_dir = os.path.join(log_dir, f"rank_{self.rank}")
            os.makedirs(rank_log_dir, exist_ok=True)
            self.log_dir = rank_log_dir

            # Underlying gradient monitor doing the actual collection.
            self.monitor = GradientMonitor(
                model=self.model,
                monitor_stages=monitor_stages,
                log_dir=rank_log_dir,
                max_items_per_tensor=max_items_per_tensor,
                use_async_stats=use_async_stats,
                rank=self.rank,
                memory_efficient=memory_efficient
            )

            # Performance bookkeeping for the monitoring overhead.
            # NOTE(review): torch.cuda.* calls assume a CUDA build/device —
            # confirm before using on CPU-only setups.
            self.performance_stats = {
                "monitoring_time": 0.0,
                "total_time": 0.0,
                "memory_usage": {
                    "initial": torch.cuda.memory_allocated(),
                    "peak": torch.cuda.max_memory_allocated(),
                    "reserved": torch.cuda.memory_reserved()
                },
                "monitoring_overhead": 0.0
            }

            self.logger.info(f"Initialized gradient monitor on rank {self.rank}")
            self.logger.info(f"Parallel config: {self.monitor.parallel_config}")

        # Monkey-patch the booster's backward so monitoring happens transparently.
        self._wrap_backward()

    def _wrap_backward(self):
        """Wrap the booster's ``backward`` method to collect gradient statistics."""
        original_backward = self.booster.backward

        def wrapped_backward(loss: torch.Tensor, optimizer: torch.optim.Optimizer):
            if not self.should_monitor:
                return original_backward(loss, optimizer)

            start_time = time.perf_counter()

            # Drop statistics from the previous step.
            self.monitor.clear_stats()

            # Run the original backward, preserving its return value so the
            # wrapped API behaves identically to the unwrapped one.
            result = original_backward(loss, optimizer)

            # Collect post-reduce gradient information.
            self.monitor.after_reduce_callback()

            # Update performance statistics.
            end_time = time.perf_counter()
            step_time = end_time - start_time
            monitoring_time = self.monitor.get_monitoring_time()

            self.performance_stats["monitoring_time"] += monitoring_time
            self.performance_stats["total_time"] += step_time
            total_time = self.performance_stats["total_time"]
            # Guard against division by zero on pathological timer readings.
            self.performance_stats["monitoring_overhead"] = (
                self.performance_stats["monitoring_time"] / total_time
                if total_time > 0 else 0.0
            )
            self.performance_stats["memory_usage"].update({
                "peak": max(
                    self.performance_stats["memory_usage"]["peak"],
                    torch.cuda.max_memory_allocated()
                ),
                "current": torch.cuda.memory_allocated(),
                "reserved": torch.cuda.memory_reserved()
            })

            self.step += 1

            # Periodically persist statistics and report overhead.
            if self.step % self.save_interval == 0:
                self.save_stats()

                self.logger.info(
                    f"Step {self.step} - "
                    f"Monitoring overhead: {self.performance_stats['monitoring_overhead']*100:.2f}% - "
                    f"Peak memory: {self.performance_stats['memory_usage']['peak']/1024/1024:.1f}MB"
                )

            return result

        self.booster.backward = wrapped_backward

    def save_stats(self):
        """Save the collected statistics to a timestamped per-rank JSON file."""
        if not self.should_monitor:
            return

        stats = self.monitor.get_stats()

        # Attach performance statistics under a reserved key.
        stats["__performance__"] = {
            "step": self.step,
            "monitoring_overhead": self.performance_stats["monitoring_overhead"],
            "memory_usage_mb": {
                k: v/1024/1024 for k, v in self.performance_stats["memory_usage"].items()
            },
            "total_time": self.performance_stats["total_time"],
            "monitoring_time": self.performance_stats["monitoring_time"]
        }

        # Build a unique file name from step, rank, and wall-clock time.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = os.path.join(
            self.log_dir,
            f"grad_stats_step_{self.step}_rank_{self.rank}_{timestamp}.json"
        )

        # Persist as JSON.
        with open(filename, 'w') as f:
            json.dump(stats, f, indent=2)

        # Fixed: previously logged a literal placeholder instead of the path.
        self.logger.debug(f"Saved gradient statistics to {filename}")

    def get_current_stats(self) -> Dict:
        """Return the statistics of the current step (empty dict on non-monitored ranks)."""
        if not self.should_monitor:
            return {}
        return self.monitor.get_stats()

    def get_performance_stats(self) -> Dict:
        """Return the performance statistics (empty dict on non-monitored ranks)."""
        if not self.should_monitor:
            return {}
        return self.performance_stats

    def __del__(self):
        """Detach gradient hooks on garbage collection, if a monitor was created."""
        if hasattr(self, 'monitor'):
            self.monitor.remove_hooks()