import torch
import torch.distributed as dist
from typing import Optional, Dict, Set, List, Tuple
from colossalai.context.parallel_mode import ParallelMode
from colossalai.tensor import ProcessGroup, ColoParameter
from colossalai.zero import GeminiDDP
from colossalai.pipeline.stage_manager import PipelineStageManager
import re

class ParallelInfo:
    """Utility class that collects and caches parallelism-strategy information
    for a (possibly wrapped) model.

    On construction it detects the data-/tensor-parallel process groups and
    the pipeline stage of this rank, then caches per-parameter sharding
    metadata. Helper methods answer questions such as "should this rank
    monitor this parameter's gradient?" and "does this gradient need a
    tensor-parallel all-reduce?".
    """

    def __init__(self, model: torch.nn.Module):
        self.model = model
        # GeminiDDP wrapping implies ZeRO-style parameter sharding.
        self.is_gemini = isinstance(model, GeminiDDP)

        # Process-group handles, filled in by _detect_parallel_groups().
        self.dp_group = None  # data-parallel group (None when DP is off)
        self.tp_group = None  # tensor-parallel group (None when TP is off)
        self.pp_stage = None  # pipeline stage index of this rank (None when PP is off)

        # Per-parameter metadata caches, filled in by _cache_param_info().
        self.param_info_cache: Dict[str, Dict] = {}
        self.param_shapes: Dict[str, torch.Size] = {}  # local (possibly sharded) shapes

        self._detect_parallel_groups()
        self._cache_param_info()

    def _detect_parallel_groups(self):
        """Detect the process groups / stage info for each parallelism strategy."""
        # Data-parallel group; lookup raises when DP is not configured.
        try:
            self.dp_group = ProcessGroup.get_group_by_name(ParallelMode.DATA)
        except Exception:  # narrowed from bare `except:` — don't swallow KeyboardInterrupt
            self.dp_group = None

        # Tensor-parallel group; lookup raises when TP is not configured.
        try:
            self.tp_group = ProcessGroup.get_group_by_name(ParallelMode.TENSOR)
        except Exception:
            self.tp_group = None

        # Pipeline-parallel stage, available only when the model carries a stage manager.
        if hasattr(self.model, 'stage_manager') and isinstance(self.model.stage_manager, PipelineStageManager):
            self.pp_stage = self.model.stage_manager.stage
            self.num_stages = self.model.stage_manager.num_stages
        else:
            self.pp_stage = None
            self.num_stages = 1

    def _cache_param_info(self):
        """Cache each parameter's sharding status, parallel modes and shapes."""
        for name, param in self.model.named_parameters():
            is_sharded = False
            parallel_mode: List[str] = []
            orig_shape = None

            # ColoParameter carries tensor-parallel sharding metadata.
            if isinstance(param, ColoParameter):
                is_sharded = True
                # NOTE(review): assumes ColoParameter exposes `is_tensor_parallel`
                # and (optionally) `orig_shape`; getattr lets a missing attribute
                # degrade gracefully instead of raising AttributeError — confirm
                # against the ColossalAI version in use.
                if getattr(param, 'is_tensor_parallel', False):
                    parallel_mode.append("tp")
                    orig_shape = getattr(param, 'orig_shape', None)

            # Under GeminiDDP every parameter is ZeRO-sharded.
            if self.is_gemini:
                is_sharded = True
                parallel_mode.append("zero")

            self.param_info_cache[name] = {
                "is_sharded": is_sharded,
                "parallel_mode": parallel_mode,
                # Explicit None check: the previous `orig_shape or param.shape`
                # silently discarded an empty torch.Size() (scalar parameter),
                # which is falsy.
                "orig_shape": orig_shape if orig_shape is not None else param.shape
            }
            self.param_shapes[name] = param.shape

    def get_parallel_config(self) -> Dict:
        """Return a summary dict of the detected parallel configuration."""
        return {
            "is_gemini": self.is_gemini,
            "has_dp": self.dp_group is not None,
            "has_tp": self.tp_group is not None,
            "has_pp": self.pp_stage is not None,
            "dp_size": dist.get_world_size(self.dp_group) if self.dp_group is not None else 1,
            "tp_size": dist.get_world_size(self.tp_group) if self.tp_group is not None else 1,
            # -1 is the sentinel for "no pipeline parallelism".
            "pp_stage": self.pp_stage if self.pp_stage is not None else -1,
            "num_pp_stages": self.num_stages
        }

    def _get_layer_stage(self, param_name: str) -> Optional[int]:
        """Infer the pipeline stage a parameter belongs to from its name.

        Returns None when pipeline parallelism is off or the name carries
        no recognizable stage marker.
        """
        # BUG FIX: was `if not self.pp_stage`, which wrongly treated stage 0
        # (a falsy int) as "pipeline parallelism disabled".
        if self.pp_stage is None:
            return None

        # Expected name formats: "stage_1.layer.weight" or "1.layer.weight".
        stage_patterns = [
            r'stage_(\d+)',  # stage_X marker anywhere in the name
            r'^(\d+)\.',     # leading numeric prefix
        ]

        for pattern in stage_patterns:
            match = re.search(pattern, param_name)
            if match:
                return int(match.group(1))

        return None

    def should_monitor_grad(self, param_name: str) -> bool:
        """Decide whether this rank should monitor the named parameter's gradient."""
        # GeminiDDP handles gradient aggregation itself — monitor everything.
        if self.is_gemini:
            return True

        # Under pipeline parallelism, only monitor parameters of the local stage.
        if self.pp_stage is not None:
            param_stage = self._get_layer_stage(param_name)
            if param_stage is not None and param_stage != self.pp_stage:
                return False

        return True

    def get_param_parallel_info(self, param_name: str) -> Dict:
        """Return the cached parallel metadata for a parameter.

        Unknown names yield a neutral "not sharded" record rather than raising.
        """
        if param_name not in self.param_info_cache:
            return {
                "is_sharded": False,
                "shard_size": None,
                "orig_size": None,
                "parallel_mode": []
            }

        # Shallow copy so callers cannot mutate the cache entry.
        info = self.param_info_cache[param_name].copy()
        info["shard_size"] = self.param_shapes[param_name]

        # Attach the pipeline stage when it can be inferred from the name.
        if self.pp_stage is not None:
            stage = self._get_layer_stage(param_name)
            if stage is not None:
                info["pipeline_stage"] = stage

        return info

    def reduce_grad_if_needed(self, grad: torch.Tensor, param_name: str) -> torch.Tensor:
        """All-reduce a gradient across the tensor-parallel group when required.

        The reduction is in-place; the (possibly reduced) tensor is returned
        for convenience. CPU gradients are returned untouched.
        """
        if not grad.is_cuda:
            return grad

        # Tensor-parallel parameters need their gradients summed across the TP group.
        if self.tp_group is not None and param_name in self.param_info_cache:
            info = self.param_info_cache[param_name]
            if "tp" in info["parallel_mode"]:
                dist.all_reduce(grad, group=self.tp_group)

        return grad

    def get_grad_device(self, param_name: str) -> torch.device:
        """Return the device on which the named parameter's gradient lives.

        Raises:
            KeyError: if the model has no parameter with that name.
        """
        if self.is_gemini:
            # GeminiDDP manages device placement itself; gradients live on CUDA.
            return torch.device('cuda')

        # Scan named_parameters lazily instead of materializing the whole
        # name->param dict for a single lookup.
        for name, param in self.model.named_parameters():
            if name == param_name:
                return param.device
        raise KeyError(param_name)

    def get_param_stage_info(self, param_name: str) -> Optional[Dict]:
        """Return pipeline-stage info for a parameter, or None when the
        stage cannot be inferred from its name (annotation fixed: the method
        can legitimately return None)."""
        stage = self._get_layer_stage(param_name)
        return {
            "current_stage": self.pp_stage,
            "param_stage": stage,
            "num_stages": self.num_stages
        } if stage is not None else None