from collections import defaultdict
import numpy as np
from typing import List, Tuple, Dict, Optional
import logging
from .global_computation import GlobalLayerType, compute_comm_cost_with_bandwidth

logger = logging.getLogger(__name__)

class GlobalParallelStrategy:
    """Searcher for global (data-parallel, tensor-parallel) strategies.

    Builds a candidate space of (dp, tp) pairs for a fixed device pool,
    orders it with a compute/communication heuristic, validates memory and
    communication feasibility, and scores candidates with a weighted cost
    model so the caller can pick the cheapest feasible strategy.
    """

    def __init__(self, num_devices: int, mesh_shape, intra_bw, inter_bw, latency):
        """
        Initialize the global parallel strategy searcher.

        :param num_devices: total number of devices
        :param mesh_shape: device topology shape; ``mesh_shape[1]`` is read
            as the per-node device count by check_communication_feasibility
        :param intra_bw: intra-node bandwidth
        :param inter_bw: inter-node bandwidth
        :param latency: inter-node latency
        """
        self.num_devices = num_devices
        self.mesh_shape = mesh_shape
        self.intra_bw = intra_bw
        self.inter_bw = inter_bw
        self.latency = latency
        self.valid_strategies: List[Tuple[int, int]] = []
        # Strategy-cost cache; see compute_strategy_cost for the key layout.
        self.strategy_costs: Dict = {}

    def generate_valid_strategies(self, cards_per_stage=None, batch_size=1, seq_len=2048,
                                hidden_dim=4096, model_layers=None) -> List[Tuple[int, int]]:
        """
        Generate all candidate (dp, tp) combinations; the search space is
        built dynamically from the compute/communication trade-off.

        :param cards_per_stage: cards per pipeline stage; when given, only
            pairs with dp * tp == cards_per_stage are produced
        :param batch_size: batch size
        :param seq_len: sequence length
        :param hidden_dim: hidden dimension
        :param model_layers: model layers (currently unused here; kept for
            interface stability)
        :return: list of valid (dp, tp) pairs, heuristically ordered
        """
        self.valid_strategies = []

        if cards_per_stage is not None:
            # Exact factorizations of the per-stage card count.
            factors = self._get_factors(cards_per_stage)
            for dp in factors:
                for tp in factors:
                    if dp * tp == cards_per_stage:
                        self.valid_strategies.append((dp, tp))

            # Order candidates by the compute/communication heuristic.
            self._sort_strategies_by_compute_comm_ratio(batch_size, seq_len, hidden_dim)
        else:
            # Build the search space from compute density and hardware config.
            self._generate_dynamic_search_space(batch_size, seq_len, hidden_dim, model_layers)

        return self.valid_strategies

    def _sort_strategies_by_compute_comm_ratio(self, batch_size, seq_len, hidden_dim):
        """
        Sort self.valid_strategies in place by a compute/communication
        heuristic keyed on compute density (seq_len * hidden_dim / batch).
        """
        def compute_comm_ratio(strategy):
            dp, tp = strategy
            compute_density = seq_len * hidden_dim / batch_size

            # NOTE(review): the ascending sort means a (tp, -dp) key places
            # *low*-TP strategies first, which appears to contradict the
            # stated intent of preferring high TP for dense workloads --
            # preserved as-is; confirm the intended ordering.
            if compute_density > 1000000:  # high-compute-density threshold
                return (tp, -dp)
            else:
                return (dp, -tp)

        self.valid_strategies.sort(key=compute_comm_ratio)

    def _generate_dynamic_search_space(self, batch_size, seq_len, hidden_dim, model_layers):
        """
        Populate self.valid_strategies from the input shape and hardware
        configuration (no per-stage card constraint).
        """
        # Compute density drives how TP-heavy the candidate lists are.
        compute_density = seq_len * hidden_dim / batch_size

        if compute_density > 2000000:  # very high density (e.g. seq_len=32768)
            # Long sequences are compute-bound: favour high TP.
            tp_candidates = [1, 2, 4, 8, 16]
            dp_candidates = [1, 2, 4, 8, 16, 32]
        elif compute_density > 500000:  # high density (e.g. seq_len=8192)
            # Medium sequences: balanced candidate lists.
            tp_candidates = [1, 2, 4, 8]
            dp_candidates = [1, 2, 4, 8, 16]
        else:  # low density (e.g. seq_len=2048)
            # Short sequences are communication-sensitive: favour high DP.
            tp_candidates = [1, 2, 4]
            dp_candidates = [1, 2, 4, 8, 16, 32, 64]

        # Inter-node bandwidth overrides the density-based lists.
        # (inter_bw is always set in __init__, so the original
        # hasattr(self, 'inter_bw') guards were redundant.)
        if self.inter_bw < 5.0:  # low bandwidth
            # Limit TP to curb cross-node traffic; allow more DP instead.
            tp_candidates = [1, 2]
            dp_candidates = [1, 2, 4, 8, 16, 32, 64]
        elif self.inter_bw > 20.0:  # high bandwidth
            # High bandwidth affords heavier TP use.
            tp_candidates = [1, 2, 4, 8, 16]
            dp_candidates = [1, 2, 4, 8, 16, 32]

        # Enumerate every pair that fits the device pool and passes the
        # basic validity check (no hard cards-per-stage constraint here).
        for dp in dp_candidates:
            for tp in tp_candidates:
                if dp * tp <= self.num_devices:
                    if self._is_basic_valid_strategy(dp, tp):
                        self.valid_strategies.append((dp, tp))

        # Order by the compute/communication heuristic.
        self._sort_strategies_by_compute_comm_ratio(batch_size, seq_len, hidden_dim)

    def _is_basic_valid_strategy(self, dp: int, tp: int) -> bool:
        """
        Minimal validity check for a (dp, tp) pair.

        Rejects pairs that exceed the device pool, the fully-serial (1, 1)
        pair, and pairs that would leave more than half the devices idle.
        """
        # Must fit in the device pool.
        if dp * tp > self.num_devices:
            return False

        # Fully serial execution is never a candidate.
        if dp == 1 and tp == 1:
            return False

        # Require at least half the devices to be used. (The original
        # comment claimed this rejected *over*-parallelization; it in fact
        # rejects under-utilization.)
        if dp * tp < max(1, self.num_devices // 2):
            return False

        return True

    def _get_factors(self, n: int) -> List[int]:
        """Return all factors of ``n`` in ascending order."""
        factors = []
        for i in range(1, int(n**0.5) + 1):
            if n % i == 0:
                factors.append(i)
                if i != n // i:
                    factors.append(n // i)
        return sorted(factors)

    def _is_valid_strategy(self, dp: int, tp: int) -> bool:
        """
        Legacy entry point kept for compatibility; delegates to the basic
        validity check.
        """
        return self._is_basic_valid_strategy(dp, tp)

    def validate_strategy_feasibility(self, strategy: Tuple[int, int], model_layers: List,
                                    batch_size: int, seq_len: int, hidden_dim: int,
                                    gpu_memory_per_device: float, pp: int = 1) -> Tuple[bool, str, float]:
        """
        Validate a strategy against device-count, communication and tiered
        memory constraints.

        Fix: the original returned from the memory branch unconditionally,
        so its communication and device-count checks were dead code; they
        now run first.

        :return: (is_feasible, reason, estimated_memory_gb)
        """
        dp, tp = strategy

        # 1. Device-count constraint.
        if dp * tp > self.num_devices:
            return False, f"并行度超出设备数量: {dp}*{tp}={dp*tp} > {self.num_devices}", 0.0

        # 2. Communication constraint (simplified check).
        if not self.check_communication_feasibility(dp, tp):
            return False, "通信配置不可行", 0.0

        # 3. Tiered memory constraint: conservative estimate first, then an
        # optimistic one that assumes memory optimizations are enabled.
        conservative_mem = self.estimate_simple_gpu_memory(dp, tp, batch_size, seq_len, hidden_dim, pp)
        optimistic_mem = self.estimate_optimistic_gpu_memory(dp, tp, batch_size, seq_len, hidden_dim, pp)

        if conservative_mem <= gpu_memory_per_device * 0.8:
            # Fits conservatively with 20% headroom: fully feasible.
            return True, "策略可行", conservative_mem
        elif optimistic_mem <= gpu_memory_per_device:
            # Only fits optimistically: feasible if memory optimizations apply.
            return True, f"策略可能可行(需要显存优化): 保守{conservative_mem:.2f}GB, 乐观{optimistic_mem:.2f}GB", optimistic_mem
        else:
            # Does not fit even optimistically.
            return False, f"显存不足: 保守{conservative_mem:.2f}GB, 乐观{optimistic_mem:.2f}GB, 可用{gpu_memory_per_device}GB", optimistic_mem

    def estimate_simple_gpu_memory(self, dp: int, tp: int, batch_size: int, seq_len: int, hidden_dim: int, pp: int = 1) -> float:
        """
        Conservative per-device memory estimate (GB) for distributed
        training, including pipeline parallelism.

        :param dp: data-parallel degree
        :param tp: tensor-parallel degree
        :param batch_size: batch size
        :param seq_len: sequence length
        :param hidden_dim: hidden dimension
        :param pp: pipeline-parallel degree
        :return: estimated per-device memory in GB, with a safety margin
        """
        # Fixed framework overhead (GB).
        base_memory = 2.0

        # Model size derived from hidden_dim; assumes a 64-layer model with
        # attention + MLP per layer -- adjust to the actual model config.
        num_layers = 64
        attention_params_per_layer = hidden_dim * hidden_dim * 4  # Q,K,V + output projection
        mlp_params_per_layer = hidden_dim * (hidden_dim * 4) * 2  # two linear layers
        total_params = num_layers * (attention_params_per_layer + mlp_params_per_layer)
        model_params_gb = total_params * 2 / (1024**3)  # FP16: 2 bytes/param

        # Parameters are sharded across DP, TP and PP.
        param_memory = model_params_gb / (dp * tp * pp)

        # Activations, assuming Flash Attention (O(seq_len) rather than
        # O(seq_len^2)) and gradient checkpointing.
        attention_activation = batch_size * seq_len * hidden_dim / (1024**3) / (dp * tp * pp)
        mlp_activation = batch_size * seq_len * hidden_dim * 4 / (1024**3) / (dp * tp * pp)
        # Gradient checkpointing keeps ~15% of activations resident.
        activation_memory = (attention_activation + mlp_activation) * 0.15

        # KV cache only matters beyond 2048 tokens in this model.
        kv_cache_memory = 0
        if seq_len > 2048:
            kv_cache_memory = 2 * num_layers * batch_size * seq_len * hidden_dim / (1024**3) / (dp * tp * pp)

        # Adam optimizer states (2 per parameter), sharded over DP and PP;
        # each pipeline stage only holds the states for its own parameters.
        optimizer_memory = model_params_gb * 2 / (dp * pp)

        # Communication buffers grow with sequence length.
        comm_buffer = max(1.0, seq_len / 1024) / (dp * tp * pp)

        total_memory = (base_memory + param_memory + activation_memory +
                       kv_cache_memory + optimizer_memory + comm_buffer)

        # Longer sequences get a larger safety margin.
        safety_margin = 1.1
        if seq_len > 16384:
            safety_margin = 1.2
        elif seq_len > 8192:
            safety_margin = 1.15

        return total_memory * safety_margin

    def estimate_optimistic_gpu_memory(self, dp: int, tp: int, batch_size: int, seq_len: int, hidden_dim: int, pp: int = 1) -> float:
        """
        Optimistic per-device memory estimate (GB) assuming Megatron-LM
        optimizations: selective activation recomputation, distributed
        optimizer, compact KV cache and efficient communication buffers.
        """
        # Smaller framework overhead under Megatron-LM.
        base_memory = 1.0

        # Same 64-layer model-size assumption as the conservative estimate.
        num_layers = 64
        attention_params_per_layer = hidden_dim * hidden_dim * 4
        mlp_params_per_layer = hidden_dim * (hidden_dim * 4) * 2
        total_params = num_layers * (attention_params_per_layer + mlp_params_per_layer)
        model_params_gb = total_params * 2 / (1024**3)  # FP16

        # Parameters sharded across DP, TP and PP.
        param_memory = model_params_gb / (dp * tp * pp)

        attention_activation = batch_size * seq_len * hidden_dim / (1024**3) / (dp * tp * pp)
        mlp_activation = batch_size * seq_len * hidden_dim * 4 / (1024**3) / (dp * tp * pp)
        # Selective activation recomputation keeps only ~3% of activations.
        activation_memory = (attention_activation + mlp_activation) * 0.03

        kv_cache_memory = 0
        if seq_len > 2048:
            # 0.6 factor models the more compact KV-cache storage layout.
            kv_cache_memory = 2 * num_layers * batch_size * seq_len * hidden_dim / (1024**3) / (dp * tp * pp) * 0.6

        # Distributed optimizer shards states across up to 8 DP ranks.
        optimizer_sharding_factor = min(8, dp)
        optimizer_memory = model_params_gb * 2 / (dp * pp * optimizer_sharding_factor)

        # More efficient communication buffers than the conservative model.
        comm_buffer = max(0.3, seq_len / 4096) / (dp * tp * pp)

        # Extra buffer when gradient accumulation is likely in use.
        grad_accum_buffer = 0
        if batch_size > 32:
            grad_accum_buffer = model_params_gb / (dp * tp * pp) * 0.5

        total_memory = (base_memory + param_memory + activation_memory +
                       kv_cache_memory + optimizer_memory + comm_buffer + grad_accum_buffer)

        # Smaller safety margins than the conservative estimate.
        safety_margin = 1.02
        if seq_len > 16384:
            safety_margin = 1.05
        elif seq_len > 8192:
            safety_margin = 1.03

        return total_memory * safety_margin

    def check_communication_feasibility(self, dp: int, tp: int) -> bool:
        """
        Simplified communication-feasibility check.

        Rejects degrees beyond practical training limits, and pairs where
        both parallel dimensions exceed a node's device count (assumes
        mesh_shape[1] is devices-per-node -- TODO confirm against caller).
        """
        # Practical limits observed in real training setups.
        if dp > 64 or tp > 8:
            return False

        # Reject when both DP and TP groups would span nodes, i.e. both
        # dimensions generate cross-node traffic.
        if dp > self.mesh_shape[1] and tp > self.mesh_shape[1]:
            return False

        return True

    def compute_simple_communication_cost(self, layer, dp: int, tp: int) -> float:
        """
        Rough per-layer communication-cost heuristic.

        DP halves the base cost (gradient all-reduce is comparatively
        cheap); TP multiplies it by 1.5 (per-layer activation collectives).
        """
        base_cost = getattr(layer, 'communication_cost', 0.1)

        if dp > 1:
            base_cost *= 0.5
        if tp > 1:
            base_cost *= 1.5

        return base_cost

    def _get_intelligent_default_strategy(self, cards_per_stage, num_devices, inter_bw,
                                        model_layers, batch_size, seq_len, hidden_dim, gpu_memory_per_device):
        """
        Heuristic fallback strategy when no searched candidate is feasible.

        Only cards_per_stage and inter_bw influence the result; the other
        parameters are accepted for interface stability.
        """
        if cards_per_stage is not None:
            if inter_bw < 1.0:  # low bandwidth: favour DP
                if cards_per_stage >= 4:
                    return (cards_per_stage // 2, 2)
                else:
                    return (cards_per_stage, 1)
            # Medium- and high-bandwidth branches were byte-identical in the
            # original; merged here without behavior change.
            if cards_per_stage >= 8:
                return (cards_per_stage // 4, 4)
            elif cards_per_stage >= 4:
                return (cards_per_stage // 2, 2)
            else:
                return (cards_per_stage, 1)
        else:
            # No per-stage card count given: fixed defaults by bandwidth.
            if inter_bw < 1.0:
                return (4, 2)  # low bandwidth: favour DP
            elif inter_bw < 10.0:
                return (4, 4)  # medium bandwidth: balanced
            else:
                return (2, 4)  # high bandwidth: favour TP

    def compute_layer_cost(self, layer, dp: int, tp: int) -> float:
        """
        Compute cost of one layer under the given strategy.

        MLP/attention work is sharded by TP; embedding work by DP; other
        layers (norms etc.) are replicated and keep their full cost.

        :param layer: model layer
        :param dp: data-parallel degree
        :param tp: tensor-parallel degree
        :return: compute cost
        """
        if layer.layer_type == GlobalLayerType.MLP:
            return layer.compute_cost / tp
        elif layer.layer_type == GlobalLayerType.ATTENTION:
            return layer.compute_cost / tp
        elif layer.layer_type == GlobalLayerType.EMBEDDING:
            return layer.compute_cost / dp
        else:  # norm layers etc.
            return layer.compute_cost

    def compute_comm_cost(self, layer, dp: int, tp: int) -> float:
        """
        Communication time of one layer under the given strategy.

        Refreshes the layer's raw communication volume for (dp, tp), then
        converts it to time using the topology/bandwidth parameters.

        :param layer: model layer
        :param dp: data-parallel degree
        :param tp: tensor-parallel degree
        :return: communication cost (time)
        """
        layer.compute_costs(dp, tp)
        return compute_comm_cost_with_bandwidth(
            layer.communication_cost, dp, tp,
            self.mesh_shape, self.intra_bw, self.inter_bw, self.latency
        )

    def compute_memory_cost(self, layer, dp: int, tp: int) -> float:
        """
        Memory cost of one layer, sharded evenly across all dp*tp devices.

        :param layer: model layer
        :param dp: data-parallel degree
        :param tp: tensor-parallel degree
        :return: per-device memory cost
        """
        return layer.memory_cost / (dp * tp)

    def compute_strategy_cost(self, strategy: Tuple[int, int], model_layers: List,
                            batch_size: int = 1, seq_len: int = 2048, hidden_dim: int = 4096,
                            comm_cost_weight: float = 1.0) -> float:
        """
        Total weighted cost of a strategy, modelling Megatron-LM fusion and
        communication optimizations.

        Fix: results are cached under a key that includes every input that
        affects them; the original keyed on ``strategy`` alone, so a second
        call with different seq_len/batch_size silently returned the stale
        first result.
        """
        cache_key = (strategy, batch_size, seq_len, hidden_dim, comm_cost_weight)
        if cache_key in self.strategy_costs:
            return self.strategy_costs[cache_key]

        dp, tp = strategy

        # Compute cost: refresh each layer's costs for this strategy first.
        compute_cost = 0
        for layer in model_layers:
            layer.compute_costs(dp, tp)
            compute_cost += layer.compute_cost

        # Kernel/operator fusion saves roughly 15% compute.
        compute_cost *= 0.85

        # Communication cost from the refreshed per-layer values.
        comm_cost = 0
        for layer in model_layers:
            comm_cost += layer.communication_cost

        # Optimized communication primitives save roughly 10%.
        comm_cost *= 0.9

        # Memory cost from the per-layer values.
        memory_cost = 0
        for layer in model_layers:
            memory_cost += layer.memory_cost

        # Longer sequences weight compute and memory more heavily.
        compute_weight = 1.0
        comm_weight = comm_cost_weight
        memory_weight = 0.5
        if seq_len > 8192:
            compute_weight = 1.5
            memory_weight = 1.0
        elif seq_len > 4096:
            compute_weight = 1.2
            memory_weight = 0.8

        total_cost = (compute_cost * compute_weight +
                     comm_cost * comm_weight +
                     memory_cost * memory_weight)

        self.strategy_costs[cache_key] = total_cost
        return total_cost


class PipelineStageAssignment:
    """Partitions model layers into pipeline stages via dynamic programming.

    Given a fixed (dp, tp) strategy, finds a contiguous partition of the
    layer list into ``num_stages`` stages that minimizes stage cost plus a
    load-imbalance penalty and communication overhead.
    """

    def __init__(self, model_layers: List, num_stages: int, optimal_strategy: Tuple[int, int]):
        """
        Initialize the pipeline stage assigner.

        :param model_layers: all model layers, in execution order
        :param num_stages: number of pipeline stages
        :param optimal_strategy: the chosen (dp, tp) strategy
        """
        self.model_layers = model_layers
        self.num_stages = num_stages
        self.optimal_strategy = optimal_strategy
        self.dp, self.tp = optimal_strategy

        # Per-layer compute cost under the chosen strategy.
        self.layer_compute_costs = [self.compute_layer_cost(layer) for layer in model_layers]

        # Total compute and the ideal per-stage cost for perfect balance.
        self.total_compute_cost = sum(self.layer_compute_costs)
        self.target_stage_cost = self.total_compute_cost / num_stages

    def solve(self) -> List[List]:
        """
        Solve the stage assignment with dynamic programming, accounting for
        load balance and communication overhead.

        :return: list of ``num_stages`` lists of layers (contiguous partition)
        """
        n = len(self.model_layers)
        dp = np.full((n + 1, self.num_stages + 1), float('inf'))
        dp[0][0] = 0

        # prev[i][j] = start index of the stage ending at layer i in the
        # best j-stage prefix; -1 means unreachable.
        prev = np.full((n + 1, self.num_stages + 1), -1)

        # costs[i][j]: compute+memory cost of a stage made of layers[i:j].
        costs = np.zeros((n + 1, n + 1))
        for i in range(n):
            for j in range(i + 1, n + 1):
                costs[i][j] = self.compute_stage_cost(self.model_layers[i:j])

        for i in range(n + 1):
            for j in range(self.num_stages + 1):
                if i == 0 and j == 0:
                    continue

                # Try ending stage j at layer i, starting it at layer k.
                for k in range(i):
                    cost = costs[k][i]

                    # Load-balance penalty, relative to the ideal stage cost.
                    stage_cost = sum(self.layer_compute_costs[k:i])
                    if self.target_stage_cost > 0:
                        load_balance_penalty = abs(stage_cost - self.target_stage_cost) / self.target_stage_cost
                    else:
                        # Guard: the original divided by zero when the model
                        # had zero total compute cost.
                        load_balance_penalty = 0.0

                    # Communication overhead of this candidate stage.
                    comm_cost = self.compute_stage_comm_cost(self.model_layers[k:i])

                    total_cost = cost + load_balance_penalty * self.target_stage_cost + comm_cost

                    if dp[k][j-1] + total_cost < dp[i][j]:
                        dp[i][j] = dp[k][j-1] + total_cost
                        prev[i][j] = k

        return self.reconstruct_solution(prev, n)

    def compute_stage_cost(self, layers: List) -> float:
        """Total compute + memory cost of one stage."""
        stage_cost = 0
        for layer in layers:
            compute_cost = self.compute_layer_cost(layer)
            memory_cost = self.compute_memory_cost(layer)
            stage_cost += compute_cost + memory_cost
        return stage_cost

    def compute_stage_comm_cost(self, layers: List) -> float:
        """Intra-stage communication cost plus pipeline-bubble cost."""
        if not layers:
            return 0

        # Communication between consecutive layers inside the stage.
        comm_cost = 0
        for i in range(len(layers) - 1):
            comm_cost += self.compute_comm_cost(layers[i])

        # Pipeline-bubble contribution of this stage.
        bubble_cost = self.compute_bubble_cost(layers)

        return comm_cost + bubble_cost

    def compute_bubble_cost(self, layers: List) -> float:
        """Estimate pipeline-bubble cost as 20% of the stage's compute time."""
        if not layers:
            return 0

        # Fix: sum the compute cost of the *given* layers. The original
        # indexed self.layer_compute_costs[0:len(layers)], i.e. always the
        # first layers of the whole model, which is wrong for any stage
        # other than the first.
        stage_time = sum(self.compute_layer_cost(layer) for layer in layers)

        # Assume bubble overhead is 20% of stage compute time.
        return stage_time * 0.2

    def compute_layer_cost(self, layer) -> float:
        """Compute cost of one layer: MLP/attention sharded by TP, embedding
        by DP; other layers (norms etc.) replicated."""
        if layer.layer_type == GlobalLayerType.MLP:
            return layer.compute_cost / self.tp
        elif layer.layer_type == GlobalLayerType.ATTENTION:
            return layer.compute_cost / self.tp
        elif layer.layer_type == GlobalLayerType.EMBEDDING:
            return layer.compute_cost / self.dp
        else:
            return layer.compute_cost

    def compute_comm_cost(self, layer) -> float:
        """Communication cost of one layer: all-reduce-style fractions for
        the DP and TP groups, summed."""
        comm_cost = 0
        if self.dp > 1:
            comm_cost += layer.communication_cost * (self.dp - 1) / self.dp
        if self.tp > 1:
            comm_cost += layer.communication_cost * (self.tp - 1) / self.tp
        return comm_cost

    def compute_memory_cost(self, layer) -> float:
        """Memory cost of one layer, sharded across all dp*tp devices."""
        return layer.memory_cost / (self.dp * self.tp)

    def reconstruct_solution(self, prev: np.ndarray, n: int) -> List[List]:
        """Walk the predecessor table backwards to recover the stage lists.

        NOTE(review): if the DP found no feasible partition, prev entries
        are -1 and the recovered slices are meaningless -- callers should
        ensure num_stages <= len(model_layers).
        """
        stages = []
        current_n = n
        current_stage = self.num_stages

        while current_stage > 0:
            prev_n = prev[current_n][current_stage]
            stages.append(self.model_layers[prev_n:current_n])
            current_n = prev_n
            current_stage -= 1

        return list(reversed(stages))

def solve_global_parallel_strategy(
    model_layers: list,
    num_devices: int,
    num_stages: int,
    mesh_shape,
    intra_bw,
    inter_bw,
    latency,
    batch_size=1,
    seq_len=2048,
    hidden_dim=4096,
    gpu_memory_per_device=80,
    cards_per_stage=None
):
    """
    Solve for the global parallel strategy.

    Enumerates candidate (dp, tp) strategies, validates feasibility, scores
    each (strategy cost + pipeline-partition cost), and returns the best;
    falls back to a heuristic default when nothing is feasible.

    :param model_layers: all model layers
    :param num_devices: total number of devices
    :param num_stages: number of pipeline stages
    :param mesh_shape: device topology shape
    :param intra_bw: intra-node bandwidth
    :param inter_bw: inter-node bandwidth
    :param latency: inter-node latency
    :param batch_size: batch size
    :param seq_len: sequence length
    :param hidden_dim: hidden dimension
    :param gpu_memory_per_device: per-GPU memory limit (GB)
    :param cards_per_stage: number of cards per pipeline stage
    :return: (best (dp, tp) strategy, best pipeline stage assignment)
    """
    strategy_generator = GlobalParallelStrategy(num_devices, mesh_shape, intra_bw, inter_bw, latency)
    # (The original re-assigned strategy_generator.inter_bw here; the
    # constructor already sets it, so that assignment was redundant.)
    valid_strategies = strategy_generator.generate_valid_strategies(
        cards_per_stage=cards_per_stage,
        batch_size=batch_size,
        seq_len=seq_len,
        hidden_dim=hidden_dim,
        model_layers=model_layers
    )

    logger.info(f"搜索空间: 总GPU={num_devices}, PP={num_stages}, cards_per_stage={cards_per_stage}")
    logger.info(f"输入参数: batch_size={batch_size}, seq_len={seq_len}, hidden_dim={hidden_dim}")
    logger.info(f"计算密度: {seq_len * hidden_dim / batch_size:.2e}")
    logger.info(f"有效策略数量: {len(valid_strategies)}")
    if valid_strategies:
        logger.info(f"策略候选: {valid_strategies}")
        logger.info(f"策略排序: {[f'DP={dp},TP={tp}' for dp, tp in valid_strategies]}")

    # Communication-cost weight fed into the strategy cost model. (The
    # original also declared COMPUTE_COST_WEIGHT / MEMORY_COST_WEIGHT but
    # never used them; removed as dead locals.)
    COMM_COST_WEIGHT = 1.0

    best_strategy = None
    best_cost = float('inf')
    best_pipeline_assignment = None

    for strategy in valid_strategies:
        dp, tp = strategy

        # Feasibility: device count, communication, tiered memory.
        is_feasible, reason, estimated_memory = strategy_generator.validate_strategy_feasibility(
            strategy, model_layers, batch_size, seq_len, hidden_dim, gpu_memory_per_device, num_stages)

        if not is_feasible:
            logger.info(f"跳过不可行策略: DP={dp}, TP={tp}, 原因: {reason}")
            continue

        # Log which feasibility tier the strategy landed in.
        if "需要显存优化" in reason:
            logger.info(f"策略需要显存优化: DP={dp}, TP={tp}, 估算显存: {estimated_memory:.2f}GB")
        else:
            logger.info(f"策略完全可行: DP={dp}, TP={tp}, 估算显存: {estimated_memory:.2f}GB")

        # Weighted compute/communication/memory cost of the strategy itself.
        total_cost = strategy_generator.compute_strategy_cost(
            strategy, model_layers,
            batch_size=batch_size, seq_len=seq_len, hidden_dim=hidden_dim,
            comm_cost_weight=COMM_COST_WEIGHT
        )

        # Pipeline partition via dynamic programming.
        pipeline_assigner = PipelineStageAssignment(model_layers, num_stages, strategy)
        pipeline_assignment = pipeline_assigner.solve()

        # Add the pipeline cost (includes bubble and inter-stage comm).
        pipeline_cost = compute_pipeline_cost(pipeline_assignment, strategy,
                                           inter_node_bandwidth=inter_bw,
                                           batch_size=batch_size, seq_len=seq_len, hidden_dim=hidden_dim)
        total_cost += pipeline_cost

        # Report whether the per-stage card constraint is met (informational).
        constraint_satisfied = dp * tp == cards_per_stage if cards_per_stage else True
        constraint_info = f"约束满足: {'✓' if constraint_satisfied else '✗'}"

        logger.info(f"策略: DP={dp}, TP={tp}, {constraint_info}, 总cost={total_cost:.2e}")

        if total_cost < best_cost:
            best_cost = total_cost
            best_strategy = strategy
            best_pipeline_assignment = pipeline_assignment

    if best_strategy is None:
        # No feasible candidate: fall back to the heuristic default.
        best_strategy = strategy_generator._get_intelligent_default_strategy(
            cards_per_stage, num_devices, inter_bw, model_layers,
            batch_size, seq_len, hidden_dim, gpu_memory_per_device)

        logger.warning(f"使用智能默认策略: DP={best_strategy[0]}, TP={best_strategy[1]}")
        pipeline_assigner = PipelineStageAssignment(model_layers, num_stages, best_strategy)
        best_pipeline_assignment = pipeline_assigner.solve()

    return best_strategy, best_pipeline_assignment



def compute_pipeline_cost(pipeline_assignment: List[List], strategy: Tuple[int, int],
                         inter_node_bandwidth: float = 12.5, batch_size: int = 64,
                         seq_len: int = 2048, hidden_dim: int = 3584) -> float:
    """
    Total cost of a pipeline partition, modelling Megatron-LM-style
    pipeline optimizations (small bubbles, efficient stage communication).

    :param pipeline_assignment: list of stages, each a list of layers with
        a ``compute_cost`` attribute
    :param strategy: the (dp, tp) strategy the stages run under
    :param inter_node_bandwidth: bandwidth between stages (GB/s)
    :param batch_size: batch size (currently unused by the formula; kept
        for interface compatibility)
    :param seq_len: sequence length
    :param hidden_dim: hidden dimension
    :return: total pipeline cost; 0.0 for an empty assignment
    """
    # Guard: max() below raised ValueError on an empty partition.
    if not pipeline_assignment:
        return 0.0

    dp, tp = strategy
    num_stages = len(pipeline_assignment)

    # Per-stage compute cost, spread across the dp*tp devices of a stage.
    stage_costs = []
    for stage in pipeline_assignment:
        stage_cost = sum(layer.compute_cost for layer in stage) / (dp * tp)
        stage_costs.append(stage_cost)

    # Pipeline bubble: efficient scheduling keeps it to 5% of the slowest
    # stage per pipeline boundary.
    max_stage_cost = max(stage_costs)
    bubble_cost = (num_stages - 1) * max_stage_cost * 0.05

    # Inter-stage activation transfer (FP16: 2 bytes/element), with a 0.8
    # factor for optimized stage-to-stage communication.
    activation_size = hidden_dim * seq_len * 2 / (1024**3)  # GB
    stage_comm_cost = (num_stages - 1) * activation_size / inter_node_bandwidth * 0.8

    # Micro-batching shrinks bubbles on pipelines deeper than two stages.
    micro_batch_optimization = 0.9 if num_stages > 2 else 1.0

    return (sum(stage_costs) + bubble_cost + stage_comm_cost) * micro_batch_optimization


 