import logging
from collections import defaultdict
from enum import Enum, auto
from typing import List, Dict, Set, Optional

import numpy as np

class GlobalLayerType(Enum):
    """Kinds of layers that can appear in the global computation graph.

    The string values match the module class-name strings reported by the
    underlying model implementation (e.g. "Qwen2SdpaAttention"), so layers
    can be classified directly from a module's type name.
    """
    EMBEDDING = "Embedding"           # token-embedding lookup
    ATTENTION = "Qwen2SdpaAttention"  # attention block
    MLP = "Mlp"                       # feed-forward block
    NORM = "Norm"                     # generic normalization
    LAYER_NORM = "LayerNorm"          # LayerNorm variant
    RMS_NORM = "RMSNorm"              # RMSNorm variant

class GlobalLayer:
    """A layer (node) in the global computation graph.

    Stores the estimated compute / communication / memory costs of the layer
    under a given data-parallel (DP) and tensor-parallel (TP) configuration,
    the graph structure (predecessor and successor layers), and the parallel
    strategy assigned to this layer.
    """
    def __init__(self, layer_type: "GlobalLayerType", input_shape: tuple,
                 name: Optional[str] = None):
        """
        Initialize a global layer.
        :param layer_type: type of the layer
        :param input_shape: shape of the input tensor; (batch, seq, hidden)
                            for most layer types, (batch, seq) for embeddings
        :param name: optional layer name; defaults to the lower-cased
                     layer-type value
        """
        self.layer_type = layer_type
        self.input_shape = input_shape
        self.name = name or layer_type.value.lower()

        # Cost estimates, filled in by compute_costs().
        self.compute_cost = 0.0        # arithmetic cost
        self.communication_cost = 0.0  # collective-communication cost
        self.memory_cost = 0.0         # memory footprint

        # Graph-structure bookkeeping.
        self.index = -1         # position within the owning computation graph
        self.dependencies = []  # predecessor layers this layer consumes
        self.next_layers = []   # successor layers that consume this layer

        # Parallel-strategy assignment.
        self.dp_degree = 1  # data-parallel degree
        self.tp_degree = 1  # tensor-parallel degree
        self.stage_id = -1  # pipeline stage this layer belongs to

    def __repr__(self) -> str:
        return (f"{type(self).__name__}(name={self.name!r}, "
                f"layer_type={self.layer_type!r}, input_shape={self.input_shape})")

    def add_dependency(self, layer: 'GlobalLayer') -> None:
        """Record *layer* as a predecessor of this layer and this layer as
        its successor. Duplicate edges are ignored."""
        if layer not in self.dependencies:
            self.dependencies.append(layer)
            layer.next_layers.append(self)

    def compute_costs(self, dp_degree: int, tp_degree: int) -> None:
        """
        Compute this layer's costs under the given parallel strategy and
        store them on compute_cost / communication_cost / memory_cost.
        :param dp_degree: data-parallel degree
        :param tp_degree: tensor-parallel degree
        """
        # Dispatch on layer type; each type has its own cost model.
        if self.layer_type == GlobalLayerType.MLP:
            self.compute_cost = self._compute_mlp_cost(tp_degree)
            self.communication_cost = self._compute_mlp_comm_cost(dp_degree, tp_degree)
            self.memory_cost = self._compute_mlp_memory_cost(dp_degree, tp_degree)
        elif self.layer_type == GlobalLayerType.ATTENTION:
            self.compute_cost = self._compute_attention_cost(tp_degree)
            self.communication_cost = self._compute_attention_comm_cost(dp_degree, tp_degree)
            self.memory_cost = self._compute_attention_memory_cost(dp_degree, tp_degree)
        elif self.layer_type == GlobalLayerType.EMBEDDING:
            self.compute_cost = self._compute_embedding_cost(dp_degree)
            self.communication_cost = self._compute_embedding_comm_cost(dp_degree, tp_degree)
            self.memory_cost = self._compute_embedding_memory_cost(dp_degree, tp_degree)
        else:  # Norm layers (NORM / LAYER_NORM / RMS_NORM)
            self.compute_cost = self._compute_norm_cost()
            self.communication_cost = self._compute_norm_comm_cost(dp_degree)
            self.memory_cost = self._compute_norm_memory_cost(dp_degree)

    def _compute_mlp_cost(self, tp_degree: int) -> float:
        """Compute cost of an MLP layer, with Megatron-LM optimizations applied."""
        # Simplified compute-cost model; the 4x factor is the MLP expansion ratio.
        batch_size, seq_len, hidden_dim = self.input_shape

        # Base work, sharded across tensor-parallel ranks.
        base_cost = (batch_size * seq_len * hidden_dim * 4) / tp_degree

        # Megatron-LM optimizations (estimated discounts):
        # 1. fused MLP ops: ~15% less compute
        # 2. mixed-precision training: ~10% less
        # 3. kernel fusion: ~5% less
        megatron_optimization = 0.85 * 0.9 * 0.95  # ~27% overall reduction

        return base_cost * megatron_optimization

    def _compute_mlp_comm_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Communication cost of an MLP layer, with Megatron-LM optimizations."""
        batch_size, seq_len, hidden_dim = self.input_shape

        comm_cost = 0

        # Data-parallel all-reduce (gradient synchronization).
        if dp_degree > 1:
            # NOTE(review): labeled "gradient size = parameter size" in the
            # original, but the formula is activation-shaped
            # (batch * seq * hidden * 4) — confirm intent before changing.
            grad_size = batch_size * seq_len * hidden_dim * 4
            # Ring all-reduce traffic scales with (p-1)/p; 1.8 is a tuned factor.
            comm_cost += grad_size * (dp_degree - 1) / dp_degree * 1.8

        # Tensor-parallel communication.
        if tp_degree > 1:
            # Forward pass: all-gather.
            fwd_size = batch_size * seq_len * hidden_dim * 4
            comm_cost += fwd_size * (tp_degree - 1) / tp_degree * 1.2
            # Backward pass: reduce-scatter.
            bwd_size = batch_size * seq_len * hidden_dim * 4
            comm_cost += bwd_size * (tp_degree - 1) / tp_degree * 1.2

        # Megatron-LM communication optimizations (estimated discounts):
        # 1. overlap with compute: ~15%; 2. better primitives: ~10%;
        # 3. lower latency: ~5%  -> ~27% overall reduction.
        megatron_comm_optimization = 0.85 * 0.9 * 0.95

        comm_cost *= megatron_comm_optimization

        return comm_cost

    def _compute_mlp_memory_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Memory cost of an MLP layer."""
        batch_size, seq_len, hidden_dim = self.input_shape
        # With gradient checkpointing, activation memory drops to ~15%.
        return (batch_size * seq_len * hidden_dim * 4) / (dp_degree * tp_degree) * 0.15

    def _compute_attention_cost(self, tp_degree: int) -> float:
        """Compute cost of an attention layer, with Megatron-LM optimizations."""
        batch_size, seq_len, hidden_dim = self.input_shape

        # Base work: quadratic in sequence length, sharded across TP ranks.
        base_cost = (batch_size * seq_len * seq_len * hidden_dim) / tp_degree

        # Megatron-LM optimizations (estimated discounts):
        # 1. Flash Attention: ~20%; 2. mixed precision: ~10%;
        # 3. kernel fusion: ~5%  -> ~32% overall reduction.
        megatron_optimization = 0.8 * 0.9 * 0.95

        return base_cost * megatron_optimization

    def _compute_attention_comm_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Communication cost of an attention layer, with Megatron-LM optimizations."""
        batch_size, seq_len, hidden_dim = self.input_shape

        comm_cost = 0

        # Data-parallel all-reduce (gradient synchronization).
        if dp_degree > 1:
            # 3x factor accounts for the Q, K and V projections.
            grad_size = batch_size * seq_len * hidden_dim * 3
            comm_cost += grad_size * (dp_degree - 1) / dp_degree * 0.9

        # Tensor-parallel communication.
        if tp_degree > 1:
            # Forward pass: all-gather for Q, K, V.
            fwd_size = batch_size * seq_len * hidden_dim * 3
            comm_cost += fwd_size * (tp_degree - 1) / tp_degree * 0.8
            # Backward pass: reduce-scatter.
            bwd_size = batch_size * seq_len * hidden_dim * 3
            comm_cost += bwd_size * (tp_degree - 1) / tp_degree * 0.8

        # Megatron-LM communication optimizations: overlap ~15%,
        # better primitives ~10%, lower latency ~5%  -> ~27% overall.
        megatron_comm_optimization = 0.85 * 0.9 * 0.95

        comm_cost *= megatron_comm_optimization

        return comm_cost

    def _compute_attention_memory_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Memory cost of an attention layer."""
        batch_size, seq_len, hidden_dim = self.input_shape
        # Flash Attention reduces activation memory from O(seq_len^2) to O(seq_len);
        # gradient checkpointing further reduces it to ~15%.
        return (batch_size * seq_len * hidden_dim) / (dp_degree * tp_degree) * 0.15

    def _compute_embedding_cost(self, dp_degree: int) -> float:
        """Compute cost of an embedding layer."""
        # NOTE(review): input_shape is unpacked as (batch, seq), so
        # input_shape[-1] below equals seq_len, not an embedding dimension.
        # If the embedding dim was intended, either the shape convention or
        # this formula needs fixing; kept as-is to preserve behavior.
        batch_size, seq_len = self.input_shape
        return (batch_size * seq_len * self.input_shape[-1]) / dp_degree

    def _compute_embedding_comm_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Communication cost of an embedding layer."""
        # NOTE(review): same (batch, seq) shape caveat as _compute_embedding_cost.
        batch_size, seq_len = self.input_shape
        return (batch_size * seq_len * self.input_shape[-1]) * ((dp_degree - 1) / dp_degree + (tp_degree - 1) / tp_degree)

    def _compute_embedding_memory_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Memory cost of an embedding layer."""
        # NOTE(review): same (batch, seq) shape caveat as _compute_embedding_cost.
        batch_size, seq_len = self.input_shape
        return (batch_size * seq_len * self.input_shape[-1]) / (dp_degree * tp_degree)

    def _compute_norm_cost(self) -> float:
        """Compute cost of a normalization layer (element-wise, no sharding)."""
        batch_size, seq_len, hidden_dim = self.input_shape
        return batch_size * seq_len * hidden_dim

    def _compute_norm_comm_cost(self, dp_degree: int) -> float:
        """Communication cost of a normalization layer (DP gradient sync only)."""
        batch_size, seq_len, hidden_dim = self.input_shape
        return (batch_size * seq_len * hidden_dim) * (dp_degree - 1) / dp_degree

    def _compute_norm_memory_cost(self, dp_degree: int) -> float:
        """Memory cost of a normalization layer."""
        batch_size, seq_len, hidden_dim = self.input_shape
        return (batch_size * seq_len * hidden_dim) / dp_degree

class GlobalComputation:
    """Global computation graph: an ordered collection of GlobalLayer nodes."""

    def __init__(self):
        self.layers = []      # layers in insertion order
        self.layer_dict = {}  # layer name -> layer object

    def add_layer(self, layer: "GlobalLayer") -> int:
        """
        Append a layer to the graph.
        :param layer: the layer to add
        :return: the index assigned to the layer
        """
        idx = len(self.layers)
        layer.index = idx
        self.layers.append(layer)
        self.layer_dict[layer.name] = layer
        return idx

    def add_edge(self, src_layer: "GlobalLayer", dst_layer: "GlobalLayer") -> None:
        """
        Add a dependency edge between two layers.
        :param src_layer: the producing (source) layer
        :param dst_layer: the consuming (destination) layer
        """
        dst_layer.add_dependency(src_layer)

    def compute_costs(self, dp_degree: int, tp_degree: int) -> None:
        """
        Recompute the costs of every layer under the given parallel strategy.
        :param dp_degree: data-parallel degree
        :param tp_degree: tensor-parallel degree
        """
        for node in self.layers:
            node.compute_costs(dp_degree, tp_degree)

    def get_total_cost(self) -> float:
        """Total cost (compute + communication + memory) over all layers."""
        return self.get_stage_cost(self.layers)

    def get_stage_cost(self, stage_layers: "List[GlobalLayer]") -> float:
        """
        Total cost of one pipeline stage.
        :param stage_layers: the layers that make up the stage
        :return: summed compute + communication + memory cost
        """
        return sum(node.compute_cost + node.communication_cost + node.memory_cost
                   for node in stage_layers)

    def get_layer_liveness(self) -> "Dict[int, Set[GlobalLayer]]":
        """
        Backward liveness analysis over the layer sequence.

        Walking from the last layer to the first, the live set at step t
        contains the layer executed at t plus every predecessor whose output
        it still needs; the layer itself is released after its step.
        :return: mapping from time step to the set of live layers
        """
        liveness = {}
        live = set()

        for step in reversed(range(len(self.layers))):
            current = self.layers[step]

            # The current layer's output is live, together with all of the
            # predecessor outputs it consumes.
            live.add(current)
            live.update(current.dependencies)

            # Snapshot the live set for this time step.
            liveness[step] = set(live)

            # Once executed, the layer's own slot can be released.
            live.discard(current)

        return liveness

    def __str__(self):
        """One line per layer listing its index and dependency names."""
        return "\n".join(
            f"{node.name} (idx={node.index}): depends on "
            f"{[d.name for d in node.dependencies]}"
            for node in self.layers
        )

# Communication-cost modeling that distinguishes intra-node from inter-node transfers
def compute_comm_cost_with_bandwidth(data_size, dp_degree, tp_degree, mesh_shape, intra_bw, inter_bw, latency):
    """
    Estimate communication cost, distinguishing intra-node from inter-node links.

    A parallel group whose degree exceeds the number of GPUs in one node
    (``mesh_shape[1]``) must span multiple nodes, so it pays the slower
    inter-node bandwidth plus a latency term; otherwise it uses the
    intra-node bandwidth with no extra latency.

    :param data_size: amount of data to transfer (same units as the bandwidths)
    :param dp_degree: data-parallel degree
    :param tp_degree: tensor-parallel degree
    :param mesh_shape: device mesh; assumes a 2-D (num_nodes, gpus_per_node)
        layout — TODO confirm against callers
    :param intra_bw: intra-node bandwidth
    :param inter_bw: inter-node bandwidth
    :param latency: extra latency applied only to inter-node transfers
    :return: estimated cost ``data_size / bandwidth + latency``
    """
    gpus_per_node = mesh_shape[1]
    if dp_degree > gpus_per_node or tp_degree > gpus_per_node:
        # Group spans nodes: slower link, plus latency.
        bw = inter_bw
        lat = latency
    else:
        # Group fits inside a single node.
        bw = intra_bw
        lat = 0

    # Compute once (the original evaluated this expression twice: in the
    # debug message and again at return).
    cost = data_size / bw + lat

    logger = logging.getLogger(__name__)
    # Lazy %-style args so the message is only formatted when DEBUG is enabled.
    logger.debug(
        "通信成本计算: data_size=%s, dp=%s, tp=%s, bw=%s, lat=%s, cost=%s",
        data_size, dp_degree, tp_degree, bw, lat, cost,
    )

    return cost

def estimate_model_memory_requirements(model_size_billions, seq_len, batch_size, hidden_dim, 
                                     use_fp16=True, use_gradient_checkpointing=True):
    """
    Estimate training-time GPU memory requirements of a transformer model.

    :param model_size_billions: model size in billions of parameters
    :param seq_len: sequence length
    :param batch_size: batch size
    :param hidden_dim: hidden dimension
    :param use_fp16: store parameters in FP16 (2 bytes) instead of FP32 (4)
    :param use_gradient_checkpointing: keep only ~10% of activations
    :return: dict with per-component sizes in GB plus 'total_memory'
    """
    GB = 1024 ** 3
    # Absolute parameter count (not billions).
    param_count = model_size_billions * 1e9

    # Parameters may be half precision; gradients and optimizer states are FP32.
    param_dtype_size = 2 if use_fp16 else 4

    # 1. Parameters.
    param_memory = param_count * param_dtype_size / GB

    # 2. Gradients (FP32).
    grad_memory = param_count * 4 / GB

    # 3. Optimizer state (Adam keeps two FP32 moments per parameter).
    optimizer_memory = param_count * 4 * 2 / GB

    # 4. Activations. Layer count is roughly inferred from model size.
    num_layers = int(model_size_billions * 2.5)
    if use_gradient_checkpointing:
        # Checkpointing stores only ~10% of activations.
        activation_memory = num_layers * batch_size * seq_len * hidden_dim * 2 / GB * 0.1
    else:
        activation_memory = num_layers * batch_size * seq_len * hidden_dim * 2 / GB

    # 5. KV cache — only counted for long sequences (generation workloads).
    kv_cache_memory = 0
    if seq_len > 2048:
        kv_cache_memory = num_layers * batch_size * seq_len * hidden_dim * 2 / GB * 0.5

    # 6. Communication buffers, estimated at 20% of parameter memory.
    comm_buffer_memory = param_memory * 0.2

    # 7. Fixed framework overhead.
    overhead_memory = 2.0

    breakdown = {
        'param_memory': param_memory,
        'grad_memory': grad_memory,
        'optimizer_memory': optimizer_memory,
        'activation_memory': activation_memory,
        'kv_cache_memory': kv_cache_memory,
        'comm_buffer_memory': comm_buffer_memory,
        'overhead_memory': overhead_memory,
    }
    # Summed in insertion order, matching the component order above.
    breakdown['total_memory'] = sum(breakdown.values())
    return breakdown