from functools import wraps
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core import parallel_state
from geesibling.adapters.pytorch.megatronModelToGeesiblingGraph import get_global_plan_param

def get_num_layers_to_build_by_plan(config: TransformerConfig) -> int:
    """Return the number of transformer layers this pipeline rank should build.

    If a global execution plan has been set (via
    ``get_global_plan_param``), the per-partition layer count is taken
    directly from ``plan.pp_layers_per_partition`` indexed by this
    pipeline rank. Otherwise the layers are split evenly across the
    pipeline ranks and, when interleaved scheduling is enabled, across
    the virtual pipeline stages as well.

    Args:
        config: Transformer configuration supplying ``num_layers`` and
            ``pipeline_model_parallel_size``.

    Returns:
        Number of transformer layers to build on the calling rank.

    Raises:
        ValueError: If ``num_layers`` does not divide evenly across the
            pipeline ranks, or the per-rank count does not divide evenly
            across the virtual pipeline stages. (The original code used
            floor division here, which silently dropped layers on
            non-divisible configurations.)
    """
    plan = get_global_plan_param()

    if plan is not None:
        print(f'执行计划已经获取了, GLOBAL_PLAN is {plan}')
        rank = parallel_state.get_pipeline_model_parallel_rank()
        return plan.pp_layers_per_partition[rank]

    print(f'没有执行计划， GLOBAL_PLAN is {plan}')
    pipeline_ranks = config.pipeline_model_parallel_size

    # Guard against silent layer loss from floor division: every pipeline
    # rank must receive the same number of layers.
    if config.num_layers % pipeline_ranks != 0:
        raise ValueError(
            f'num_layers ({config.num_layers}) must be divisible by '
            f'pipeline_model_parallel_size ({pipeline_ranks})'
        )
    num_layers_per_pipeline_rank = config.num_layers // pipeline_ranks

    # Hoisted: the virtual-pipeline world size was previously queried twice.
    vp_size = parallel_state.get_virtual_pipeline_model_parallel_world_size()

    if vp_size is not None:
        # Interleaved pipeline parallelism:
        # Number of layers in each model chunk is the number of layers in the stage,
        # divided by the number of model chunks in a stage.
        # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
        # layers to stages like (each list is a model chunk):
        # Stage 0: [0]  [2]  [4]  [6]
        # Stage 1: [1]  [3]  [5]  [7]
        # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
        # layers to stages like (each list is a model chunk):
        # Stage 0: [0, 1]  [4, 5]
        # Stage 1: [2, 3]  [6, 7]
        if num_layers_per_pipeline_rank % vp_size != 0:
            raise ValueError(
                f'num_layers per pipeline rank ({num_layers_per_pipeline_rank}) '
                f'must be divisible by the virtual pipeline size ({vp_size})'
            )
        num_layers_to_build = num_layers_per_pipeline_rank // vp_size
    else:
        # Non-interleaved pipeline parallelism:
        # Each stage gets a contiguous set of layers.
        num_layers_to_build = num_layers_per_pipeline_rank

    return num_layers_to_build

# def Transformer_init_wrapper(fn):
#     @wraps(fn)
#     def wrapper(self, *args, **kwargs):
#         fn(self, *args, **kwargs)
#         self.submodules = _get_block_submodules(kwargs['config'], kwargs['spec'], plan=GLOBAL_PLAN)
    
#     return wrapper

# def GptModel_init_wrapper(fn):
#     @wraps(fn)
#     def wrapper(self, *args, **kwargs):
#         fn(self, *args, **kwargs)
#         # Transformer.
#         self.decoder = TransformerBlock(
#             config=self.config,
#             spec=kwargs['transformer_layer_spec'],
#             pre_process=self.pre_process,
#             post_process=self.post_process,
#             plan=GLOBAL_PLAN
#         )
#     return wrapper
