import logging
from typing import List, Tuple
import numpy as np
import os
import sys

# Add the project root directory to the Python path so that the
# `get_model` package can be imported when this file is run as a script.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(current_dir))  # go up exactly two directory levels
sys.path.append(project_root)

from get_model.model_computation_generator_by_layer import ModelGeneratorByLayer
from get_model.parse_hardware_config import parse_hardware_config
from get_model.parse_model_config import parse_yaml_modules
from .global_parallel_strategy import solve_global_parallel_strategy
from .global_computation import GlobalComputation, GlobalLayer, GlobalLayerType

logger = logging.getLogger(__name__)

def convert_to_global_computation(model_layers) -> GlobalComputation:
    """
    Convert raw model layers into a global computation graph.

    :param model_layers: list of raw model layer objects; each is expected to
        expose ``name``, ``input_shape``, ``dependencies`` and the private
        ``_compute_*`` cost methods invoked below.
    :return: a ``GlobalComputation`` holding one ``GlobalLayer`` per input
        layer plus the dependency edges between them.
    """
    # Ordered (name-substring -> layer type) rules; the first match wins,
    # anything unmatched falls back to the generic NORM type.
    type_rules = (
        ("mlp", GlobalLayerType.MLP),
        ("attention", GlobalLayerType.ATTENTION),
        ("embedding", GlobalLayerType.EMBEDDING),
        ("layer_norm", GlobalLayerType.LAYER_NORM),
        ("rms_norm", GlobalLayerType.RMS_NORM),
    )

    graph = GlobalComputation()
    original_to_global = {}  # maps each raw layer to its GlobalLayer

    # Pass 1: create one global layer per raw layer and attach its costs.
    for raw in model_layers:
        lowered = raw.name.lower()
        layer_type = GlobalLayerType.NORM
        for needle, candidate in type_rules:
            if needle in lowered:
                layer_type = candidate
                break

        node = GlobalLayer(
            layer_type=layer_type,
            input_shape=raw.input_shape,
            name=raw.name,
        )

        # Costs are evaluated at the "no parallelism" baseline: DP=1, TP=1.
        if layer_type == GlobalLayerType.MLP:
            node.compute_cost = raw._compute_mlp_cost(1)
            node.communication_cost = raw._compute_mlp_comm_cost(1, 1)
            node.memory_cost = raw._compute_mlp_memory_cost(1, 1)
        elif layer_type == GlobalLayerType.ATTENTION:
            node.compute_cost = raw._compute_attention_cost(1)
            node.communication_cost = raw._compute_attention_comm_cost(1, 1)
            node.memory_cost = raw._compute_attention_memory_cost(1, 1)
        elif layer_type == GlobalLayerType.EMBEDDING:
            node.compute_cost = raw._compute_embedding_cost(1)
            node.communication_cost = raw._compute_embedding_comm_cost(1, 1)
            node.memory_cost = raw._compute_embedding_memory_cost(1, 1)
        else:
            # All norm variants (LAYER_NORM / RMS_NORM / generic NORM) share
            # a single costing path.
            if len(raw.input_shape) == 2:
                # A norm right after the embedding arrives with a 2-D shape;
                # expand it to 3-D before costing.
                # NOTE(review): input_shape[-1] of a 2-tuple equals seq_len,
                # so the expanded shape is (batch, seq, seq) — confirm this
                # is really the intended hidden dimension.
                batch_size, seq_len = raw.input_shape
                raw.input_shape = (batch_size, seq_len, raw.input_shape[-1])
            node.compute_cost = raw._compute_norm_cost()
            node.communication_cost = raw._compute_norm_comm_cost(1)
            node.memory_cost = raw._compute_norm_memory_cost(1)

        graph.add_layer(node)
        original_to_global[raw] = node

    # Pass 2: mirror the raw dependency edges onto the global graph.
    for raw in model_layers:
        downstream = original_to_global[raw]
        for dep in raw.dependencies:
            graph.add_edge(original_to_global[dep], downstream)

    return graph

def _enumerate_stage_candidates(num_devices, num_layers, inter_bw):
    """
    Enumerate viable pipeline stage counts, ordered by preference.

    Only stage counts in [2, min(8, num_layers)] that evenly divide
    ``num_devices`` are viable, since every stage must receive the same
    number of cards. The ordering is a bandwidth-driven heuristic: low
    inter-node bandwidth prefers more pipeline stages, high bandwidth
    prefers fewer stages (more cards per stage).

    :param num_devices: total number of GPUs available
    :param num_layers: number of layers in the global computation graph
    :param inter_bw: inter-node bandwidth in GB/s
    :return: list of stage counts, best candidate first (may be empty,
        e.g. when ``num_devices`` is prime and larger than ``max_stages``)
    """
    max_stages = min(8, num_layers)
    candidates = [
        (s, num_devices // s)
        for s in range(2, max_stages + 1)
        if num_devices % s == 0
    ]

    if inter_bw < 1.0:
        # Very small inter-node bandwidth: prefer more PP stages.
        candidates.sort(key=lambda x: x[0], reverse=True)
    elif inter_bw < 10.0:
        # Moderate bandwidth: sort by cards-per-stage descending.
        # NOTE(review): reverse-sorting on (cards, -stages) breaks ties
        # toward FEWER stages, which contradicts the original comment's
        # stated intent of favoring more PP stages — confirm which is
        # intended before changing.
        candidates.sort(key=lambda x: (x[1], -x[0]), reverse=True)
    else:
        # Plenty of bandwidth: prefer fewer PP stages.
        candidates.sort(key=lambda x: x[1], reverse=True)

    return [s for s, _ in candidates]

def _max_stage_cost(pipeline_assignment):
    """
    Approximate the cost of a pipeline assignment by its bottleneck stage.

    Each stage's cost is the sum of compute, communication and memory
    costs of its layers; pipeline throughput is limited by the slowest
    stage, so the maximum per-stage total is the figure of merit.

    :param pipeline_assignment: list of stages, each a list of layer objects
    :return: maximum per-stage total cost (0 for an empty assignment)
    """
    stage_costs = [
        sum(
            getattr(layer, 'compute_cost', 0)
            + getattr(layer, 'communication_cost', 0)
            + getattr(layer, 'memory_cost', 0)
            for layer in stage
        )
        for stage in pipeline_assignment
    ]
    return max(stage_costs, default=0)

def global_solver(model_config_path, hardware_config_path, batch_size=1, seq_len=2048, hidden_dim=4096, gpu_memory_per_device=80):
    """
    Global parallel-strategy solver.

    Searches over pipeline stage counts and, for each, solves for the best
    (DP, TP) split under the constraint DP x TP x PP == total GPUs, keeping
    the configuration with the lowest bottleneck-stage cost. The winning
    configuration is also written to ``result.txt`` under the project root.

    :param model_config_path: path to the model YAML config file
    :param hardware_config_path: path to the hardware YAML config file
    :param batch_size: training batch size
    :param seq_len: sequence length
    :param hidden_dim: model hidden dimension
    :param gpu_memory_per_device: per-GPU memory budget in GB
    :return: (best_strategy, pipeline_assignment) of the best stage count
    :raises RuntimeError: when no viable pipeline configuration exists
    """
    # Parse the model config; only module_length is used below (it drives
    # computation-graph generation).
    training_param, module_list, module_length = parse_yaml_modules(model_config_path)

    # Schema of parse_hardware_config's first return value, for reference:
    #   num_nodes, num_gpus_per_node, total_gpus, mesh_shape,
    #   device_mesh: {shape, topology, intra_node_bandwidth,
    #                 inter_node_bandwidth, latency},
    #   memory: {gpu_memory_per_device, total_gpu_memory,
    #            cpu_memory_per_node, total_cpu_memory}

    # Parse the hardware config.
    hardware_config, device_mesh = parse_hardware_config(hardware_config_path)
    num_devices = hardware_config["total_gpus"]
    mesh_shape = device_mesh
    intra_bw = hardware_config["device_mesh"]["intra_node_bandwidth"]
    inter_bw = hardware_config["device_mesh"]["inter_node_bandwidth"]
    latency = hardware_config["device_mesh"]["latency"]

    # Log the hardware configuration.
    logger.info(f"硬件配置: intra_bw={intra_bw} GB/s, inter_bw={inter_bw} GB/s, latency={latency} us")
    logger.info(f"设备拓扑: mesh_shape={mesh_shape}, num_devices={num_devices}")

    # Sanity-check the parsed bandwidth values (catch unit mistakes early).
    logger.info(f"配置验证: 总GPU={num_devices}, 节点内带宽={intra_bw}, 节点间带宽={inter_bw}")
    if inter_bw > 1000:
        logger.warning(f"节点间带宽异常大: {inter_bw} GB/s，可能配置有误")
    if inter_bw < 0.1:
        logger.warning(f"节点间带宽异常小: {inter_bw} GB/s，可能配置有误")

    # Build the per-layer computation graph and lift it to the global graph.
    model_generator = ModelGeneratorByLayer()
    computation = model_generator.computation_graph(module_length)
    global_computation = convert_to_global_computation(computation.layers)

    # Log the model layer summary.
    logger.info(f"模型总层数: {len(global_computation.layers)}")
    for i, layer in enumerate(global_computation.layers):
        logger.info(f"层 {i+1}: {layer.name} (类型: {layer.layer_type.value})")

    # ========== Pipeline stage-count search ==========
    stage_candidates = _enumerate_stage_candidates(
        num_devices, len(global_computation.layers), inter_bw
    )

    best_overall_strategy = None
    best_overall_cost = float('inf')
    best_overall_pipeline_assignment = None
    best_overall_num_stages = None
    for num_stages in stage_candidates:
        # Candidates are pre-filtered to divide num_devices evenly.
        cards_per_stage = num_devices // num_stages
        logger.info(f"尝试流水线stage数: {num_stages} (每stage分配{cards_per_stage}张卡)")

        # Constraint: DP x TP x PP == total GPUs, with PP == num_stages,
        # hence DP x TP == cards_per_stage.
        logger.info(f"约束条件: DP × TP = {cards_per_stage}, PP = {num_stages}, 总GPU = {num_devices}")

        # Solve the (DP, TP) split for this stage count.
        best_strategy, pipeline_assignment = solve_global_parallel_strategy(
            global_computation.layers,
            num_devices,
            num_stages,
            mesh_shape,
            intra_bw,
            inter_bw,
            latency,
            batch_size=batch_size,
            seq_len=seq_len,
            hidden_dim=hidden_dim,
            gpu_memory_per_device=gpu_memory_per_device,
            cards_per_stage=cards_per_stage
        )

        # Score this stage count by the cost of its bottleneck stage.
        current_cost = _max_stage_cost(pipeline_assignment)
        if current_cost < best_overall_cost:
            best_overall_cost = current_cost
            best_overall_strategy = best_strategy
            best_overall_pipeline_assignment = pipeline_assignment
            best_overall_num_stages = num_stages

    if best_overall_strategy is None:
        # Fail loudly: without this guard the file-writing below would crash
        # with an opaque "TypeError: 'NoneType' object is not subscriptable"
        # (e.g. when num_devices is prime and no stage count divides it).
        raise RuntimeError(
            f"未找到可行的流水线并行策略: num_devices={num_devices}, 候选stage数={stage_candidates}"
        )

    logger.info(f"全局最优stage数: {best_overall_num_stages}, 策略: {best_overall_strategy}, 最大stage cost: {best_overall_cost}")

    # Persist the winning configuration to result.txt in the project root.
    with open(os.path.join(project_root, "result.txt"), "w") as f:
        f.write(f"全局最优stage数: {best_overall_num_stages}\n")
        f.write(f"最优并行策略: DP={best_overall_strategy[0]}, TP={best_overall_strategy[1]}\n")
        f.write("流水线阶段分配:\n")
        for i, stage in enumerate(best_overall_pipeline_assignment):
            stage_names = [layer.name for layer in stage]
            f.write(f" 阶段 {i+1} -- (共{len(stage_names)}层): {stage_names}\n")
            logger.info(f"阶段 {i+1}: {stage_names} (共{len(stage_names)}层)")

    return best_overall_strategy, best_overall_pipeline_assignment

if __name__ == "__main__":
    try:
        # Configure root logging for standalone execution.
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )

        # Resolve the project root: exactly two levels above this file.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        project_root = os.path.dirname(os.path.dirname(current_dir))

        # Build the config file paths.
        model_config_path = os.path.join(project_root, "config", "model_config_qwen3_32b.yaml")
        hardware_config_path = os.path.join(project_root, "config", "hardware_config.yaml")

        logger.info(f"项目根目录: {project_root}")
        logger.info(f"模型配置文件路径: {model_config_path}")
        logger.info(f"硬件配置文件路径: {hardware_config_path}")

        # Verify both config files exist before doing any work.
        for cfg_path, missing_msg in (
            (model_config_path, "模型配置文件不存在"),
            (hardware_config_path, "硬件配置文件不存在"),
        ):
            if not os.path.exists(cfg_path):
                raise FileNotFoundError(f"{missing_msg}: {cfg_path}")

        # Pull batch_size / seq_len / hidden_dim from the model config,
        # falling back to defaults when a key is absent.
        training_param, module_list, module_length = parse_yaml_modules(model_config_path)
        batch_size = training_param.get('batch_size', 1)
        seq_len = training_param.get('seq_len', 2048)
        hidden_dim = training_param.get('hidden_dim', 4096)
        logger.info(f"batch_size: {batch_size}, seq_len: {seq_len}, hidden_dim: {hidden_dim}")

        # Read the per-GPU memory budget from the hardware config,
        # defaulting to 16 GB when missing or explicitly null.
        import yaml
        with open(hardware_config_path, 'r') as f:
            hardware_yaml = yaml.safe_load(f)
        memory_section = hardware_yaml.get('memory', {})
        gpu_memory_per_device = memory_section.get('gpu_memory_per_device', 16)
        if gpu_memory_per_device is None:
            gpu_memory_per_device = 16
        logger.info(f"gpu_memory_per_device: {gpu_memory_per_device} GB")

        # Run the solver.
        logger.info("开始执行全局并行策略求解...")
        best_strategy, pipeline_assignment = global_solver(
            model_config_path,
            hardware_config_path,
            batch_size=batch_size,
            seq_len=seq_len,
            hidden_dim=hidden_dim,
            gpu_memory_per_device=gpu_memory_per_device
        )

    except Exception as e:
        logger.error(f"执行过程中发生错误: {str(e)}", exc_info=True)
        sys.exit(1)