"""
ZeRO优化器支持模块，用于监控ZeRO优化器的状态。
"""
import torch
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from enum import Enum

class ZeROStage(Enum):
    """Enumeration of ZeRO optimizer sharding stages.

    NOTE(review): the original comments labeled stage 1 as parameter
    sharding and stage 3 as adding optimizer-state sharding. DeepSpeed's
    ZeRO uses the opposite order (stage 1 = optimizer states, stage 2 =
    + gradients, stage 3 = + parameters) — confirm which convention this
    codebase follows before relying on these labels.
    """
    ZERO_1 = "zero_1"  # parameter sharding (per original comment — see NOTE above)
    ZERO_2 = "zero_2"  # parameter + gradient sharding (per original comment)
    ZERO_3 = "zero_3"  # parameter + gradient + optimizer-state sharding (per original comment)

@dataclass
class ZeROState:
    """Snapshot of a ZeRO optimizer's sharding layout.

    Populated best-effort by ZeROMonitor from attributes of the wrapped
    optimizer: shard sizes default to 0 and rank mappings to {} when the
    optimizer does not expose the corresponding attribute.
    """
    stage: ZeROStage                        # detected ZeRO stage (defaults to ZERO_1)
    param_shard_size: int                   # parameter shard size; units not specified by this module
    grad_shard_size: int                    # gradient shard size; 0 if the optimizer doesn't report it
    optimizer_state_shard_size: int         # optimizer-state shard size; 0 if not reported
    param_groups: List[Dict[str, Any]]      # the optimizer's param_groups, referenced as-is (not copied)
    param_to_rank: Dict[str, int]           # parameter name -> owning rank
    grad_to_rank: Dict[str, int]            # parameter name -> rank holding its gradient
    optimizer_state_to_rank: Dict[str, int] # parameter name -> rank holding its optimizer state

class ZeROMonitor:
    """Monitor for inspecting the sharding state of a ZeRO optimizer.

    All attribute access on the wrapped optimizer is best-effort: any
    attribute the optimizer does not expose falls back to a neutral
    default (0 for shard sizes, {} for rank mappings, ZERO_1 for the
    stage).
    """

    def __init__(self, optimizer: Any):
        """Initialize the monitor and take an initial state snapshot.

        Args:
            optimizer: A ZeRO optimizer instance. Must expose
                ``param_groups``; ``stage``, shard sizes, and rank
                mappings are read best-effort.
        """
        self.optimizer = optimizer
        self.state = self._extract_state()

    def _extract_state(self) -> ZeROState:
        """Build a ZeROState snapshot from the wrapped optimizer.

        Returns:
            A ZeROState populated from the optimizer's attributes, with
            neutral defaults for anything the optimizer does not expose.
        """
        return ZeROState(
            stage=self._get_zero_stage(),
            param_shard_size=self._get_param_shard_size(),
            grad_shard_size=self._get_grad_shard_size(),
            optimizer_state_shard_size=self._get_optimizer_state_shard_size(),
            # param_groups is required — a missing attribute here should fail loudly.
            param_groups=self.optimizer.param_groups,
            param_to_rank=self._get_param_to_rank(),
            grad_to_rank=self._get_grad_to_rank(),
            optimizer_state_to_rank=self._get_optimizer_state_to_rank(),
        )

    def _get_zero_stage(self) -> ZeROStage:
        """Map the optimizer's integer ``stage`` attribute to a ZeROStage.

        Returns:
            The matching ZeROStage; ZERO_1 when ``stage`` is missing or
            has an unrecognized value (preserves the original fallback).
        """
        stage = getattr(self.optimizer, 'stage', None)
        return {
            1: ZeROStage.ZERO_1,
            2: ZeROStage.ZERO_2,
            3: ZeROStage.ZERO_3,
        }.get(stage, ZeROStage.ZERO_1)

    def _get_param_shard_size(self) -> int:
        """Return the optimizer's parameter shard size, or 0 if absent."""
        return getattr(self.optimizer, 'param_shard_size', 0)

    def _get_grad_shard_size(self) -> int:
        """Return the optimizer's gradient shard size, or 0 if absent."""
        return getattr(self.optimizer, 'grad_shard_size', 0)

    def _get_optimizer_state_shard_size(self) -> int:
        """Return the optimizer-state shard size, or 0 if absent."""
        return getattr(self.optimizer, 'optimizer_state_shard_size', 0)

    def _get_param_to_rank(self) -> Dict[str, int]:
        """Return the parameter-name -> rank mapping, or {} if absent."""
        return getattr(self.optimizer, 'param_to_rank', {})

    def _get_grad_to_rank(self) -> Dict[str, int]:
        """Return the gradient-name -> rank mapping, or {} if absent."""
        return getattr(self.optimizer, 'grad_to_rank', {})

    def _get_optimizer_state_to_rank(self) -> Dict[str, int]:
        """Return the optimizer-state-name -> rank mapping, or {} if absent."""
        return getattr(self.optimizer, 'optimizer_state_to_rank', {})

    def get_param_rank(self, param_name: str) -> Optional[int]:
        """Return the rank holding the shard of *param_name*.

        Args:
            param_name: Name of the parameter.

        Returns:
            The owning rank, or None if the parameter is unknown.
        """
        return self.state.param_to_rank.get(param_name)

    def get_grad_rank(self, param_name: str) -> Optional[int]:
        """Return the rank holding the gradient of *param_name*.

        Args:
            param_name: Name of the parameter.

        Returns:
            The owning rank, or None if the parameter is unknown.
        """
        return self.state.grad_to_rank.get(param_name)

    def get_optimizer_state_rank(self, param_name: str) -> Optional[int]:
        """Return the rank holding the optimizer state of *param_name*.

        Args:
            param_name: Name of the parameter.

        Returns:
            The owning rank, or None if the parameter is unknown.
        """
        return self.state.optimizer_state_to_rank.get(param_name)

    def get_param_group(self, param_name: str) -> Optional[Dict[str, Any]]:
        """Return the param group containing a parameter named *param_name*.

        Bug fix: the original compared against ``param.name`` directly,
        which raises AttributeError for parameters without a ``name``
        attribute (torch.nn.Parameter has none by default). Use getattr
        with a None default so unnamed parameters are skipped instead of
        crashing the lookup.

        Args:
            param_name: Name of the parameter.

        Returns:
            The param-group dict, or None if no parameter matches.
        """
        for group in self.state.param_groups:
            for param in group['params']:
                if getattr(param, 'name', None) == param_name:
                    return group
        return None

    def update_state(self) -> None:
        """Refresh the cached state snapshot from the optimizer."""
        self.state = self._extract_state()