"""
并行策略支持模块，用于监控张量并行和流水线并行。
"""
import torch
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass
from enum import Enum

class ParallelType(Enum):
    """Kinds of parallelism the monitor can track."""
    TENSOR = "tensor"  # tensor parallelism
    PIPELINE = "pipeline"  # pipeline parallelism
    DATA = "data"  # data parallelism

@dataclass
class ParallelState:
    """Snapshot of this process's position in the parallel topology."""
    parallel_type: ParallelType  # which parallelism strategy is active
    world_size: int  # total number of processes
    rank: int  # global rank of this process
    group_size: int  # size of this process's parallel group
    group_rank: int  # rank of this process within its group
    stage_id: Optional[int] = None  # pipeline stage id (pipeline parallelism only)
    num_stages: Optional[int] = None  # total pipeline stages (pipeline parallelism only)
    tp_size: Optional[int] = None  # tensor-parallel size (derived by ParallelMonitor)
    pp_size: Optional[int] = None  # pipeline-parallel size (derived by ParallelMonitor)
    dp_size: Optional[int] = None  # data-parallel size (derived by ParallelMonitor)

class ParallelMonitor:
    """Monitor for parallel-strategy metadata (tensor / pipeline / data).

    Records the process's position in the parallel topology and answers
    queries about how parameters, gradients and optimizer states are
    sharded under the configured parallel type.
    """

    def __init__(self,
                 parallel_type: ParallelType,
                 world_size: int,
                 rank: int,
                 group_size: int,
                 group_rank: int,
                 stage_id: Optional[int] = None,
                 num_stages: Optional[int] = None):
        """
        Initialize the parallel monitor.

        Args:
            parallel_type: Kind of parallelism being monitored.
            world_size: Total number of processes.
            rank: Global rank of this process.
            group_size: Size of this process's parallel group.
            group_rank: Rank of this process within its group.
            stage_id: Pipeline stage id (pipeline parallelism only).
            num_stages: Total number of pipeline stages (pipeline only).
        """
        self.state = ParallelState(
            parallel_type=parallel_type,
            world_size=world_size,
            rank=rank,
            group_size=group_size,
            group_rank=group_rank,
            stage_id=stage_id,
            num_stages=num_stages
        )

        # Derive tp/pp/dp sizes from the configured parallel type.
        self._compute_parallel_sizes()

    def _compute_parallel_sizes(self) -> None:
        """Fill in ``state.tp_size`` / ``pp_size`` / ``dp_size``.

        dp_size is derived as world_size // (tp or pp size); world_size is
        assumed to be divisible by the group size — TODO confirm upstream.
        """
        if self.state.parallel_type == ParallelType.TENSOR:
            self.state.tp_size = self.state.group_size
            self.state.dp_size = self.state.world_size // self.state.tp_size
            self.state.pp_size = 1
        elif self.state.parallel_type == ParallelType.PIPELINE:
            # Fall back to group_size when num_stages was not provided;
            # previously this raised TypeError (None in the division below).
            self.state.pp_size = self.state.num_stages or self.state.group_size
            self.state.dp_size = self.state.world_size // self.state.pp_size
            self.state.tp_size = 1
        else:  # ParallelType.DATA: every rank holds a full replica
            self.state.dp_size = self.state.world_size
            self.state.tp_size = 1
            self.state.pp_size = 1

    def get_param_shard_info(self, param_name: str, param_shape: Tuple[int, ...]) -> Dict[str, Any]:
        """
        Describe how a parameter is sharded on this process.

        Args:
            param_name: Name of the parameter.
            param_shape: Full (unsharded) shape of the parameter.

        Returns:
            Dict with the parallel topology plus, for tensor parallelism,
            the local shard shape, or, for pipeline parallelism, the stage
            information. Data parallelism adds no extra keys.
        """
        info = {
            "name": param_name,
            "shape": param_shape,
            "parallel_type": self.state.parallel_type.value,
            "world_size": self.state.world_size,
            "rank": self.state.rank,
            "group_size": self.state.group_size,
            "group_rank": self.state.group_rank
        }

        if self.state.parallel_type == ParallelType.TENSOR:
            info.update({
                "tp_size": self.state.tp_size,
                # Group rank doubles as the tensor-parallel rank.
                "tp_rank": self.state.group_rank,
                "shard_shape": self._get_tensor_shard_shape(param_shape)
            })
        elif self.state.parallel_type == ParallelType.PIPELINE:
            info.update({
                "pp_size": self.state.pp_size,
                "stage_id": self.state.stage_id,
                "num_stages": self.state.num_stages
            })

        return info

    def _get_tensor_shard_shape(self, shape: Tuple[int, ...]) -> Tuple[int, ...]:
        """
        Shape of the local shard under tensor parallelism.

        Sharding is assumed to happen along the last dimension.

        Args:
            shape: Original (unsharded) shape.

        Returns:
            This rank's shard shape; the input shape unchanged when not
            running tensor parallelism or when the shape is empty.
        """
        # Guard: a scalar/empty shape has no last dimension to split
        # (previously this raised IndexError on an empty tuple).
        if self.state.parallel_type != ParallelType.TENSOR or not shape:
            return tuple(shape)

        # Floor division: a last dim not divisible by tp_size silently
        # truncates — TODO confirm divisibility is enforced upstream.
        shard_shape = list(shape)
        shard_shape[-1] = shard_shape[-1] // self.state.tp_size
        return tuple(shard_shape)

    def get_grad_shard_info(self, param_name: str, grad_shape: Tuple[int, ...]) -> Dict[str, Any]:
        """
        Describe how a gradient is sharded on this process.

        Gradients follow the same sharding layout as their parameters.

        Args:
            param_name: Name of the parameter the gradient belongs to.
            grad_shape: Full (unsharded) shape of the gradient.

        Returns:
            Same dict as :meth:`get_param_shard_info`.
        """
        return self.get_param_shard_info(param_name, grad_shape)

    def get_optimizer_state_shard_info(self, param_name: str, state_shape: Tuple[int, ...]) -> Dict[str, Any]:
        """
        Describe how an optimizer state tensor is sharded on this process.

        Optimizer states follow the same sharding layout as their parameters.

        Args:
            param_name: Name of the parameter the state belongs to.
            state_shape: Full (unsharded) shape of the state tensor.

        Returns:
            Same dict as :meth:`get_param_shard_info`.
        """
        return self.get_param_shard_info(param_name, state_shape)

    def is_param_local(self, param_name: str) -> bool:
        """
        Whether the named parameter has a local presence on this process.

        Args:
            param_name: Name of the parameter (currently unused).

        Returns:
            Always ``True``.
        """
        # NOTE(review): the original had one branch per parallel type but
        # every branch returned True (tensor parallelism keeps a shard per
        # rank, data parallelism keeps a full replica, and pipeline
        # parallelism presumably only queries the local stage's params —
        # confirm against callers). Collapsed to a single return.
        return True

    def get_communication_info(self) -> Dict[str, Any]:
        """
        Summarize the communication topology for this process.

        Returns:
            Dict with the parallel type, world/group sizes and ranks, the
            derived tp/pp/dp sizes, and pipeline stage info (``None`` for
            non-pipeline monitors).
        """
        return {
            "parallel_type": self.state.parallel_type.value,
            "world_size": self.state.world_size,
            "rank": self.state.rank,
            "group_size": self.state.group_size,
            "group_rank": self.state.group_rank,
            "tp_size": self.state.tp_size,
            "pp_size": self.state.pp_size,
            "dp_size": self.state.dp_size,
            "stage_id": self.state.stage_id,
            "num_stages": self.state.num_stages
        }