
import torch
import torch.nn as nn
import numpy as np
from geesibling.adapters.pytorch.pipeline.megatron import mpu

def get_hybrid_parallel_configs(model_config, training_args):
    """Build the hybrid-parallel (pipeline-parallel-only) configuration dict.

    Args:
        model_config: model configuration; must expose ``num_hidden_layers``.
        training_args: training arguments; must expose ``local_rank`` and
            ``pipeline_model_parallel_size``.

    Returns:
        dict with keys:
            'pp_deg': pipeline-parallel degree (number of stages),
            'checkpoint_flags_enc': per-layer activation-checkpoint flags (all 0),
            'pp_ranks_enc': per-stage lists of global layer indices,
            'pp_divide': number of layers assigned to each stage,
            'avg_layer_num': floored layers-per-stage average,
            'pre_process'/'post_process': whether this rank is the first/last stage,
            'pp_rank'/'pp_size': this rank's pipeline stage index and stage count.
    """
    local_rank = training_args.local_rank
    total_layer_num = model_config.num_hidden_layers

    pp_deg = training_args.pipeline_model_parallel_size
    assert pp_deg > 0, "pipeline_model_parallel_size must be positive"

    # Activation checkpointing is disabled for every layer.
    checkpoint_flags_enc = [0] * total_layer_num

    # Pipeline split: all stages but the last get the floored average layer
    # count; the last stage absorbs the remainder.
    avg_layer_num = total_layer_num // pp_deg
    last_layer_num = total_layer_num - avg_layer_num * (pp_deg - 1)
    pp_divide = [avg_layer_num] * (pp_deg - 1) + [last_layer_num]

    # Global layer indices owned by each pipeline stage.
    pp_ranks_enc = []
    start = 0
    for num in pp_divide:
        pp_ranks_enc.append(list(range(start, start + num)))
        start += num

    # Pipeline-stage info queried from the Megatron-LM mpu helpers.
    pre_process = mpu.is_pipeline_first_stage()
    post_process = mpu.is_pipeline_last_stage()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()

    hybrid_parallel_configs = {
        'pp_deg': pp_deg,
        'checkpoint_flags_enc': checkpoint_flags_enc,
        'pp_ranks_enc': pp_ranks_enc,
        'pp_divide': pp_divide,
        'avg_layer_num': avg_layer_num,
        # pipeline-stage info for this rank
        'pre_process': pre_process,
        'post_process': post_process,
        'pp_rank': pp_rank,
        'pp_size': pp_size,
    }

    # Only the local master prints, to avoid duplicated output per node.
    if local_rank == 0:
        print("Hybrid parallel config:")
        for k, v in hybrid_parallel_configs.items():
            print(f"  {k}: {v}")

    return hybrid_parallel_configs



# def construct_hybrid_parallel_model(model, model_config, training_args, hybrid_parallel_configs):
#     hp_model = construct_hybrid_parallel_model_api(
#         model,
#         model_config,
#         training_args,
#         hybrid_parallel_configs,
#         construct_sequential_model,
#         tied_wte_attr_names=['embed_tokens', 'lm_head'],
#         sp_layernorm=['layer.attention.LayerNorm', 'layer.mlp.LayerNorm']
#     )
#     return hp_model

def construct_hybrid_parallel_model(model, model_config, training_args, hybrid_parallel_configs):
    """Wrap *model* into a pipeline-parallel hybrid model.

    The previous body was the bare name ``da`` — a placeholder that raised
    ``NameError`` on every call. Restored from the commented-out version kept
    directly above this function.

    Args:
        model: the original (unsplit) model.
        model_config: model configuration forwarded to the stage builder.
        training_args: training arguments forwarded to the stage builder.
        hybrid_parallel_configs: dict produced by ``get_hybrid_parallel_configs``.

    Returns:
        The pipeline-parallel model built by ``construct_hybrid_parallel_model_api``.
    """
    return construct_hybrid_parallel_model_api(
        model,
        model_config,
        training_args,
        hybrid_parallel_configs,
        construct_sequential_model,
        tied_wte_attr_names=['embed_tokens', 'lm_head'],
        sp_layernorm=['layer.attention.LayerNorm', 'layer.mlp.LayerNorm'],
    )


def construct_hybrid_parallel_model_api(
    model,
    model_config,
    training_args,
    hybrid_parallel_configs,
    construct_sequential_model,
    tied_wte_attr_names=None,
    sp_layernorm=False,
):
    """Build a pipeline-parallel (PP-only) model.

    Constructs the per-stage sub-model via *construct_sequential_model*,
    flattens any ``nn.ModuleList`` children, and wraps everything in a
    ``SequentialWithInputIds`` so it can be driven with ``input_ids`` /
    ``position_ids`` keyword arguments.

    Args:
        model: the original model, forwarded to *construct_sequential_model*.
        model_config: model configuration, forwarded to the builder.
        training_args: accepted for interface compatibility; not used here.
        hybrid_parallel_configs: dict from ``get_hybrid_parallel_configs``;
            only 'pp_deg' is read in this function.
        construct_sequential_model: callable ``(model, config, hp_configs) ->
            nn.Module`` producing the stage sub-model.
        tied_wte_attr_names: accepted for interface compatibility; not used.
        sp_layernorm: accepted for interface compatibility; not used.

    Returns:
        SequentialWithInputIds containing the flattened stage layers.
    """
    # Only pp_deg is consumed; it is validated even though the split itself
    # happens inside construct_sequential_model.
    pp_deg = hybrid_parallel_configs['pp_deg']  # total number of stages
    assert pp_deg > 0, "必须指定流水线并行 degree"

    # Build the stage sub-model (embedding / decoder layers / norm as needed).
    seq_model = construct_sequential_model(model, model_config, hybrid_parallel_configs)

    # Flatten top-level children: expand every nn.ModuleList in place so the
    # result is a flat sequence of modules.
    flat_layers = []
    for layer in seq_model.children():
        if isinstance(layer, nn.ModuleList):
            flat_layers.extend(layer)
        else:
            flat_layers.append(layer)

    return SequentialWithInputIds(*flat_layers)


from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm

# class PipelineStageModel(nn.Module):
#     def __init__(self, config, layers_per_stage, hybrid_parallel_configs, num_stages, pre_process=False, post_process=False):
#         super().__init__()
#         self.config = config
#         self.hybrid_parallel_configs = hybrid_parallel_configs
#         self.num_stages = num_stages
#         self.layers_per_stage = layers_per_stage
#         self.pre_process = pre_process
#         self.post_process = post_process
#         self.padding_idx = config.pad_token_id if hasattr(config, "pad_token_id") else 0

#         # 如果是第一个 stage，加 embedding
#         if self.pre_process:
#             self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)

#         # 创建该 stage 的 decoder layers
#         self.layers = nn.ModuleList([
#             LlamaDecoderLayer(config, layer_idx=layer_idx)
#             for layer_idx in range(layers_per_stage)
#         ])

#         # 如果是最后一个 stage，加 norm
#         if self.post_process:
#             self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

#     def forward(self, hidden_states, attention_mask=None, position_ids=None, past_key_values=None):
#         # 只有第一个 stage 才做 embedding
#         if self.pre_process:
#             hidden_states = self.embed_tokens(hidden_states)

#         # 依次经过该 stage 的 decoder layers
#         for layer in self.layers:
#             hidden_states = layer(
#                 hidden_states,
#                 attention_mask=attention_mask,
#                 position_ids=position_ids,
#                 past_key_value=None,  # 这里简化，不考虑 KV cache
#                 output_attentions=False,
#                 use_cache=False
#             )[0]

#         # 最后一个 stage 才加 norm
#         if self.post_process:
#             hidden_states = self.norm(hidden_states)

#         return hidden_states
import torch
import torch.nn as nn
from typing import Optional

# Assumes transformers' LlamaDecoderLayer, LlamaRMSNorm, and LlamaRotaryEmbedding are available in the environment
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm, LlamaRotaryEmbedding

class PipelineStageModel(nn.Module):
    """One pipeline-parallel stage of a Llama-style decoder stack.

    The first stage (``pre_process=True``) owns the token embedding; the last
    stage (``post_process=True``) owns the final RMSNorm. Every stage owns
    ``layers_per_stage`` decoder layers. KV caching is intentionally disabled.
    """

    def __init__(
        self,
        config,
        layers_per_stage,
        hybrid_parallel_configs,
        num_stages,
        pre_process=False,
        post_process=False,
    ):
        super().__init__()
        self.config = config
        self.hybrid_parallel_configs = hybrid_parallel_configs
        self.num_stages = num_stages
        self.layers_per_stage = layers_per_stage
        self.pre_process = pre_process
        self.post_process = post_process

        # May be None when the config carries no pad token id.
        self.padding_idx = getattr(config, "pad_token_id", None)

        # Token embedding exists only on the first pipeline stage.
        if self.pre_process:
            self.embed_tokens = nn.Embedding(
                num_embeddings=config.vocab_size,
                embedding_dim=config.hidden_size,
                padding_idx=self.padding_idx,
            )

        # This stage's slice of decoder layers.
        self.layers = self._build_decoder_layers(config, layers_per_stage)

        # Final norm exists only on the last pipeline stage.
        if self.post_process:
            self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def _build_decoder_layers(self, config, layers_per_stage):
        # Instantiate the decoder layers, then patch in a rotary embedding
        # wherever the layer's attention module is missing one.
        stage_layers = [LlamaDecoderLayer(config, idx) for idx in range(layers_per_stage)]
        for blk in stage_layers:
            self._ensure_rotary_embedding(blk, config)
        return nn.ModuleList(stage_layers)

    def _ensure_rotary_embedding(self, layer, config):
        # Some transformers versions leave self_attn.rotary_emb unset; attach
        # one sized to the per-head dimension when absent.
        attn = getattr(layer, "self_attn", None)
        if attn is None:
            return
        if getattr(attn, "rotary_emb", None) is None:
            per_head_dim = config.hidden_size // config.num_attention_heads
            attn.rotary_emb = LlamaRotaryEmbedding(per_head_dim)

    def _generate_position_ids(self, hidden_states, position_ids=None):
        """Return position ids aligned to the batch of *hidden_states*.

        Auto-generates ``[0 .. seq_len)`` broadcast over the batch when none
        are supplied; otherwise expands a mismatched leading dim to the batch.
        """
        batch, seq_len = hidden_states.shape[:2]
        if position_ids is None:
            base = torch.arange(seq_len, dtype=torch.long, device=hidden_states.device)
            return base.unsqueeze(0).expand(batch, -1)  # [batch, seq_len]
        if position_ids.shape[0] != batch:
            position_ids = position_ids.expand(batch, -1)
        return position_ids

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_ids=None,
        past_key_values=None
    ):
        # First stage receives token ids and embeds them; later stages
        # receive hidden states directly.
        if self.pre_process:
            hidden_states = self.embed_tokens(hidden_states)

        # Always align position ids with the batch dimension.
        position_ids = self._generate_position_ids(hidden_states, position_ids)

        # Run this stage's decoder layers; each returns a tuple whose first
        # element is the new hidden states.
        for decoder_layer in self.layers:
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=None,  # simplified: KV cache not supported
                output_attentions=False,
                use_cache=False
            )
            hidden_states = layer_outputs[0]

        # Last stage applies the final norm.
        if self.post_process:
            hidden_states = self.norm(hidden_states)

        return hidden_states



def construct_sequential_model(model, config, hybrid_parallel_configs):
    """Build the sub-model for one pipeline stage.

    The stage gets its decoder layers plus, depending on its position in the
    pipeline, the token embedding (first stage) and/or the final norm (last
    stage), as indicated by 'pre_process'/'post_process' in the config dict.

    NOTE(review): the *model* argument is accepted but never read — the stage
    is built fresh from *config*; confirm whether weights should be copied
    from it.
    """
    stage_model = PipelineStageModel(
        config=config,
        hybrid_parallel_configs=hybrid_parallel_configs,
        layers_per_stage=hybrid_parallel_configs['avg_layer_num'],
        num_stages=hybrid_parallel_configs['pp_size'],
        pre_process=hybrid_parallel_configs['pre_process'],
        post_process=hybrid_parallel_configs['post_process'],
    )
    return stage_model


class SequentialWithInputIds(nn.Sequential):
    """``nn.Sequential`` variant driven by ``input_ids`` / ``position_ids``.

    ``nn.Embedding`` children are called with the running tensor alone;
    every other child also receives ``position_ids`` and any extra kwargs.
    """

    def forward(self, *args, input_ids=None, position_ids=None, **kwargs):
        # Prefer the explicit input_ids kwarg; otherwise fall back to the
        # first positional argument (None when nothing was passed).
        if input_ids is not None:
            hidden = input_ids
        elif args:
            hidden = args[0]
        else:
            hidden = None

        for stage in self:
            if isinstance(stage, nn.Embedding):
                # Stage-0 embedding consumes the token ids directly.
                hidden = stage(hidden)
            else:
                # Forward position_ids explicitly to non-embedding stages.
                hidden = stage(hidden, position_ids=position_ids, **kwargs)
        return hidden