import contextlib
import sys
from datetime import datetime
from typing import List, Dict

from initialize import init_empty_weights

from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

# !
from geesibling.adapters.pytorch.megatron_patch.megatron_adaptor import patch
from transformers.models.llama.modeling_llama import LlamaForCausalLM
# import transformers.models.llama.modeling_llama as modeling_llama
from datasets import load_dataset
import math
import time

# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch


from torch.utils.data import DataLoader, TensorDataset

from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import LlamaConfig
from transformers import LlamaTokenizer
from transformers import LlamaModel

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args, get_args_
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving, forward_backward_pipelining_without_interleaving2, adjust_batch, \
    forward_backward_pipelining_without_interleaving2_remove_sgp, forward_backward_pipelining_with_interleaving,forward_backward_pipelining_with_interleaving
from geesibling.adapters.pytorch.get_data import get_data_loader,get_data_loader_with_ddp,get_train_dataset,collate_fn
import os
from geesibling.adapters.pytorch.pipeline.pipeline.training2 import \
    forward_backward_pipelining_with_interleaving_overlap_comm, \
    forward_backward_pipelining_without_interleaving2_remove_sgp_fix_dp, \
    forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP, \
    forward_backward_pipelining_with_interleaving_fix_dp, forward_backward_no_pipelining

from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from geesibling.adapters.pytorch.pipeline.megatron.distributed import DistributedDataParallel as LocalDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy
from geesibling.adapters.pytorch.pipeline.optimizer import FP32Optimizer

from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron_autoparallel

from geesibling.adapters.pytorch.pipeline.models.patch import patch_config
# Set environment variables
os.environ['LOG_DIR'] = './logs'

from parallelize import parallelize

def forward_step_func(input_tensor, model):
    """Single pipeline forward step: feed *input_tensor* through *model* and return its output."""
    output = model(input_tensor)
    return output

def Optimizer(params):
    """Build the training optimizer: Adam wrapped in FP32Optimizer.

    When data parallelism is active (DP world size > 1) the DDP wrapper
    accumulates into main-grad buffers, so the FP32 wrapper must read from
    them; otherwise plain parameter grads are used.
    """
    adam = torch.optim.Adam(params, lr=2.4e-5)
    use_main_grad = mpu.get_data_parallel_world_size() > 1
    return FP32Optimizer(optimizer=adam, params_have_main_grad=use_main_grad)

#loss = loss_func(iter(dataloader), model, args, gpus_list, (args.micro_batch_size, args.tensor_length, args.hidden_size))

def loss_func(batch, model, vpp_overlap=False):
    """Run forward/backward for a single batch and return the loss.

    Dispatches to the appropriate schedule based on the parallel
    configuration in ``get_args()``:

    * virtual pipeline (VPP) with/without communication overlap,
    * plain pipeline parallelism,
    * no pipelining at all.

    Args:
        batch: iterator (or list of iterators) yielding micro-batches.
        model: the model, or a list of model chunks when VPP is enabled.
        vpp_overlap: use the comm-overlap interleaving schedule under VPP.

    Returns:
        The loss produced by the selected forward/backward schedule.
    """
    args = get_args()
    gpus_list = args.gpus_list
    recv_tensor_shape = (args.micro_batch_size, args.tensor_length, args.hidden_size)

    def _zero_grad_buffers(chunks):
        # Under data parallelism the DDP wrapper owns main-grad buffers that
        # must be zeroed manually before each backward pass.  The flag is set
        # once (the original code redundantly re-assigned it per chunk).
        if mpu.get_data_parallel_world_size() > 1:
            args.use_distributed_optimizer = False
            for chunk in chunks:
                chunk.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))

    if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
        if args.virtual_pipeline_model_parallel_size > 1:
            if vpp_overlap:
                # Overlap schedule manages its own grad buffers; no zeroing here
                # (matches the original control flow).
                schedule = forward_backward_pipelining_with_interleaving_overlap_comm
            else:
                _zero_grad_buffers(model)  # VPP: `model` is a list of chunks
                schedule = forward_backward_pipelining_with_interleaving_fix_dp
        else:
            _zero_grad_buffers([model])
            schedule = forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP
    else:
        _zero_grad_buffers([model])
        schedule = forward_backward_no_pipelining

    loss = schedule(
        forward_step_func, batch, model,
        False, args.micro_batch, gpus_list, recv_tensor_shape
    )
    return loss

#num_steps = get_num_steps(dataloader)
        #num_steps = math.ceil(len(dataloader) / config.args.micro_batch)
        
def get_num_steps(dataloader):
    """Number of optimizer steps per epoch: loader batches grouped into
    micro-batch bundles, rounded up.

    NOTE: reads the module-level ``config`` object created at import time.
    """
    num_batches = len(dataloader)
    return math.ceil(num_batches / config.args.micro_batch)

# def handle_progress_and_adjust(dataloader, config, epoch_id):
#     """处理进度条显示和最后一轮 batch 调整"""
#     import math

#     progress_bar = tqdm(range(math.ceil(len(dataloader) / config.args.micro_batch)),
#                         desc=f'Epoch {epoch_id + 1}')

#     # 最后一轮调整 batch
#     for bar_id in progress_bar:
#         if bar_id == len(progress_bar) - 1:
#             adjust_batch(
#                 dataloader,
#                 config.args,
#                 len(dataloader) * config.args.micro_batch_size,
#                 config.args.micro_batch_size
#             )

def print_datetime(string):
    """Print a timestamped marker on rank 0.

    Note that this call will sync across all ranks (it issues a barrier
    before formatting the timestamp).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    message = '[' + string + '] datetime: {} '.format(stamp)
    print_rank_0(message)

# global _GLOBAL_ARGS
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import set_args,get_args

# lyh add

# Prepare the dataset: MRPC splits stored as JSON-lines files.
# NOTE(review): absolute, machine-specific paths — consider making these
# configurable (env var or CLI arg) before running on another host.
data_files = {
        'train': '/data/xu/gees-pytorch/examples/datasets/mrpc/train.jsonl',
        'test': '/data/xu/gees-pytorch/examples/datasets/mrpc/test.jsonl',
        'validation': '/data/xu/gees-pytorch/examples/datasets/mrpc/validation.jsonl'
    }
class Config:
    """Holds parsed training args and bootstraps the distributed environment.

    On construction this may *replace the current process* (re-exec via
    ``torch.distributed.launch``) when multi-process launch is requested but
    the environment was not set up by a launcher; otherwise it initializes
    the ``torch.distributed`` process group.
    """

    def __init__(self):
        # set_args()
        self.args = get_args_()
        # If the user ran `python xxx.py` directly but multiple processes are
        # required -> automatically relaunch under torch.distributed.launch.
        if ((self.args.nproc_per_node > 1) and
                ((self.args.world_size < 1) or ("WORLD_SIZE" not in os.environ))):
            print(f"[Config] Relaunching with torch.distributed.launch, nproc_per_node={self.args.nproc_per_node}")

            DISTRIBUTED_ARGS = [
                "--nproc_per_node", str(self.args.nproc_per_node),
                "--nnodes", str(self.args.nnodes),
                "--node_rank", str(self.args.node_rank),
                "--master_addr", str(self.args.master_addr),
                "--master_port", str(self.args.master_port),
            ]

            cmd = [
                      sys.executable, "-m", "torch.distributed.launch"
                  ] + DISTRIBUTED_ARGS + [sys.argv[0]] + sys.argv[1:]

            print(f"[Config] Executing: {' '.join(cmd)}")
            os.execvp(sys.executable, cmd)  # replaces the current process; nothing below runs

        # The 3D parallel degrees can be initialized to None at this point.
        print(self.args)
       
        print(type(self.args))
        # Build the TCP rendezvous endpoint from MASTER_ADDR/MASTER_PORT.
        init_method = 'tcp://'
        master_ip = os.getenv('MASTER_ADDR', 'localhost')
        master_port = os.getenv('MASTER_PORT', '6000')
        init_method += master_ip + ':' + master_port
        """
            这步与 GPU 分配无关，而是设置整个分布式训练环境。
            world_size: 总进程数。
            rank: 当前进程的全局排名。
            这确保所有进程可以相互通信，无论它们使用哪些 GPU。
        """

        # Initialize the distributed module (process-group rendezvous).
        torch.distributed.init_process_group(
            backend=self.args.distributed_backend,
            world_size=self.args.world_size, rank=self.args.rank,
            init_method=init_method)

from parallelize import Profile

# Module-level side effect: constructing Config may re-exec the process or
# initialize torch.distributed (see Config.__init__).
config = Config()
config.cluster = Profile.get_cluster()
config.parallel_mode ='MODEL PARALLEL'
config.parallel_method='AWARE'
@parallelize(config)
def pretrain():
    """Training entry point, wrapped by @parallelize(config).

    Builds the model and optimizer, streams the JSON dataset, and runs the
    epoch loop; loss_func performs both forward AND backward internally, so
    only step()/zero_grad() are called here.
    """

    # NOTE(review): transformers' LlamaForCausalLM normally requires a config
    # argument; as written this would raise unless @parallelize patches model
    # construction — confirm against the parallelize implementation.
    model = LlamaForCausalLM()
    optimizer = Optimizer(model.parameters())

    dataset = load_dataset('json', data_files=data_files)
    dataloader = DataLoader(dataset)


    # `pretrain.epoch` is presumably attached by the @parallelize decorator —
    # TODO confirm; it is not set anywhere in this file.
    for _ in range(pretrain.epoch):
        for batch in dataloader:
            loss = loss_func(batch, model)  # forward + backward happen inside
            optimizer.step()
            optimizer.zero_grad()
   
                    
# Script entry point: the @parallelize-wrapped training loop.
if __name__ == "__main__":
    pretrain()

# def setup_model_and_optimizer(config, args, tokenizer=None):
#     # if tp > 1, must give a tokenizer
#     if mpu.get_tensor_model_parallel_world_size() > 1 and tokenizer is None:
#         raise ValueError("Tokenizer must be provided when tp > 1")

#     # vpp_size = args.virtual_pipeline_model_parallel_size
#     # if vpp_size > 1:
#     #     device_mesh = mpu.get_device_mesh()
#     #     model_chunk_list = []
#     #     for i in range(vpp_size):
#     #         mpu.set_virtual_pipeline_model_parallel_rank(i)
#     #         mpu.set_virtual_pipeline_model_parallel_world_size(vpp_size)
#     #         # Set pre_process and post_process only after virtual rank is set.
#     #         pre_process = mpu.is_pipeline_first_stage()
#     #         post_process = mpu.is_pipeline_last_stage()
#     #         pp_rank = mpu.get_pipeline_model_parallel_rank()
#     #         pp_size = mpu.get_pipeline_model_parallel_world_size()
#     #         vpp_rank = i  # vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()
#     #         #hybrid_parallel_configs = get_hybrid_parallel_configs(model_config=config, training_args=args)
#     #         config = patch_config(config, pp_rank = pp_rank , pre_process = pre_process, post_process = post_process, pp_size = pp_size, vpp_size = vpp_size, vpp_rank = vpp_rank)
#     #         # with init_empty_weights():
#     #         #     model = LlamaModel(config)
#     #         model = LlamaForCausalLMForVpp(config=config)
#     #         print(1111111111111111111111111)
#     #         # model = LlamaForCausalLMForVpp(config=config, pp_rank=pp_rank, pre_process=pre_process, post_process=post_process, pp_size=pp_size, vpp_size=vpp_size, vpp_rank=vpp_rank)

#     #         # this_model.model_type = model_type
#     #         if mpu.get_tensor_model_parallel_world_size()>1:
#     #             parallelize_plan = TPPolicy(model,tokenizer,device_mesh['tp'],pre_process,config.hidden_size)
#     #             for plan in parallelize_plan:
#     #                 print(plan)
#     #                 print(parallelize_plan[plan].param_layouts)
#     #             model = parallelize_module(
#     #                 model,
#     #                 device_mesh["tp"],
#     #                 parallelize_plan,
#     #             )
#     #             _pre_dp_module_transform(model)
#     #             model=model.to(args.local_rank)
#     #         if args.fp16:
#     #             model = model.half()
#     #         model_chunk_list.append(model)
#     #     model = [every_model.to(args.local_rank) for every_model in model_chunk_list]
#     #     # model = [every_model.to(torch.cuda.current_device()) for every_model in model_chunk_list]
#     #     if mpu.get_data_parallel_world_size() > 1:
#     #         print("-------------------------------------------------------vpp with dp -------------------------------------------------------------------")
#     #         model = [LocalDDP(
#     #             model_chunk,
#     #             data_parallel_group=mpu.get_data_parallel_group(),
#     #             # accumulate_allreduce_grads_in_fp32=args.accumulate_allreduce_grads_in_fp32,
#     #             disable_bucketing=(model_chunk_idx > 0))
#     #             # Turn off bucketing for model_chunk 2 onwards, since communication for these
#     #             # model chunks is overlapped with compute anyway.
#     #             for (model_chunk_idx, model_chunk) in enumerate(model)]
#     #         #  TODO
#     #         # # Broadcast params from data parallel src rank to other data parallel ranks.
#     #         # if args.data_parallel_random_init:
#     #         #     for model_module in model:
#     #         #         model_module.broadcast_params()

#     #     # optimizer
#     #     all_model_params = _get_all_params_1(model)
#     #     base_optimizer = torch.optim.Adam(all_model_params, lr=2.4e-5)
#     #     if mpu.get_data_parallel_world_size() > 1:
#     #         optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=True)
#     #     else:
#     #         # optimizer = base_optimizer
#     #         optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=False)
#     # # elif args.pipeline_model_parallel_size > 1:
#     # else:
#     # from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
#     #from geesibling.adapters.pytorch.pipeline.models.LlamaModel_hybrid_parallel import get_hybrid_parallel_configs, \
#         construct_hybrid_parallel_model
#     # pp_rank = mpu.get_pipeline_model_parallel_rank()
#     # pre = mpu.is_pipeline_first_stage()
#     # post = mpu.is_pipeline_last_stage()
#     # pp_size = mpu.get_pipeline_model_parallel_world_size()

#     #     hybrid_parallel_configs = get_hybrid_parallel_configs(model_config=config, training_args=args)
#     #     with init_empty_weights():
#     #             model = LlamaModel(config)

#     #     model = construct_hybrid_parallel_model(
#     #         model=model,
#     #         model_config=config,
#     #         training_args=args,
#     #         hybrid_parallel_configs=hybrid_parallel_configs
#     # )
#     config = patch_config(config)
#     model = LlamaForCausalLM(config=config)
#     print(type(model))
#     print(type(model))
#     print(type(model))
#     print(type(model))
#     print(type(model))
#     print(type(model))
#     print(type(model))
#     # model = LlamaForCausalLM(config=config, pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
#     print(model)
#     device_mesh = mpu.get_device_mesh()
#     if mpu.get_tensor_model_parallel_world_size() > 1:
#         parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], config.pre_process, config.hidden_size)
#         # print(f"has already get through TPPolicy : {args.local_rank}")
#         # print(f"parallelize_plan is {parallelize_plan}")
#         # for one in parallelize_plan:
#         #     print(parallelize_plan[one].param_layouts)
#         # # 假设 args.local_rank 和 parallelize_plan 已经定义
#         # log_file = f"parallelize_plan_llama.log"  # 定义日志文件名
#         #
#         # # 打开文件写入日志
#         # with open(log_file, "a") as log:
#         #     for one in parallelize_plan:
#         #         param_layouts = parallelize_plan[one].param_layouts
#         #         log.write(f"rank: {args.local_rank}   {one} is {param_layouts}\n")
#         #     log.write("\n")
#         #     log.write("\n")

#         model = parallelize_module(
#             model,
#             device_mesh["tp"],
#             parallelize_plan,
#         )  #
#         _pre_dp_module_transform(model)
#         print(model)
#         # gees 10.24 add-------------------
#         model = model.to(args.local_rank)
#     else:
#         # 10.25 add to fix dpp-tp bug
#         if type(model) is not list:
#             model = model.to(args.local_rank)
#     if mpu.get_data_parallel_world_size() > 1:
#         # if torch.cuda.current_device()==args.local_rank:
#         #     print("true")
#         # else:
#         #     print("false")
#         model = LocalDDP(model, data_parallel_group=mpu.get_data_parallel_group())

#     base_optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-5)
#     if mpu.get_data_parallel_world_size() > 1:
#         optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=True)
#     else:
#         optimizer = FP32Optimizer(base_optimizer, params_have_main_grad=False)
#     return model, optimizer


# not use gees
def JudgeSkipFinalVpp(train_loader, args, len_dataset, micro_batch_size):
    """Compute how many micro-batches the final pipeline round should run.

    A full round runs ``args.micro_batch`` micro-batches.  When the loader
    length is not an exact multiple of that, the last round only gets the
    leftover count — and if the dataset size does not divide evenly into
    micro-batches, the incomplete trailing micro-batch is dropped as well.
    """
    leftover = len(train_loader) % args.micro_batch
    if leftover == 0:
        # Loader divides evenly: the final round is a full one.
        return args.micro_batch
    # Drop the partial trailing micro-batch when the dataset does not fill it.
    has_partial_tail = (len_dataset % micro_batch_size != 0)
    return leftover - 1 if has_partial_tail else leftover

    # gees 单参数列表-1


# def _get_all_params_1(model_chunks: List) -> List:
#     """Create a single parameter list for Adam optimizer.

#     Args:
#         model_chunks (List[nn.Module]): List of model chunks (submodules).

#     Returns:
#         List[nn.Parameter]: List of parameters compatible with Adam optimizer.
#     """
#     all_params = []

#     for chunk in model_chunks:
#         all_params.extend(chunk.parameters())

#     return all_params

#     # gees 单参数列表-2


# def _get_all_params_2(model_chunks: List) -> List:
#     """Create a single parameter list for Adam optimizer.

#     Args:
#         model_chunks (List[nn.Module]): List of model chunks (submodules).

#     Returns:
#         List[nn.Parameter]: List of parameters compatible with Adam optimizer.
#     """
#     param_list = []

#     for model_chunk in model_chunks:
#         for param in model_chunk.parameters():
#             if param.requires_grad:  # gees 多了一步判断
#                 param_list.append(param)
#     return param_list

#     # gees 多参数列表 ---> 可用于后期扩展,可以定制每一个参数的学习率等.而不使用全局的学习率.


# def _get_param_groups__(model_chunks: List) -> List[Dict]:
#     """Create a simple parameter group for Adam optimizer.

#     Args:
#         model_chunks (List[nn.Module]): List of model chunks (submodules).

#     Returns:
#         List[Dict]: List of parameter groups compatible with Adam optimizer.
#     """
#     param_groups = []

#     for model_chunk in model_chunks:
#         for param in model_chunk.parameters():
#             if param.requires_grad:
#                 param_groups.append({'params': param})

#     return param_groups


# def run_forward_backward(train_data_iterator, model, forward_step_func, args, gpus_list, recv_tensor_shape, vpp_overlap=False):
#     """
#     统一处理 pipeline / virtual pipeline / non-pipeline 的 forward/backward

#     train_data_iterator : DataLoader 或 list of DataLoader iterators
#     model                : 模型或模型分块列表
#     forward_step_func    : 每个微批的 forward step 函数
#     args                 : 配置参数
#     gpus_list            : GPU 列表
#     recv_tensor_shape    : 接收 tensor 的 shape（流水线通信用）
#     vpp_overlap          : 是否开启虚拟 pipeline overlap
#     """

#     # 获取 batch
#     batch = next(train_data_iterator) if not isinstance(train_data_iterator, list) else [next(it) for it in train_data_iterator]

#     # 判断使用哪种 forward/backward
#     if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
#         if args.virtual_pipeline_model_parallel_size > 1:
#             if not vpp_overlap:
#                 if mpu.get_data_parallel_world_size() > 1:
#                     # 清零梯度 buffer
#                     for model_chunk in model:
#                         args.use_distributed_optimizer = False
#                         model_chunk.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
#                 loss = forward_backward_pipelining_with_interleaving_fix_dp(
#                     forward_step_func, train_data_iterator, model,
#                     False, args.micro_batch, gpus_list, recv_tensor_shape
#                 )
#             else:
#                 loss = forward_backward_pipelining_with_interleaving_overlap_comm(
#                     forward_step_func, train_data_iterator, model,
#                     False, args.micro_batch, gpus_list, recv_tensor_shape
#                 )
#         else:
#             if mpu.get_data_parallel_world_size() > 1:
#                 args.use_distributed_optimizer = False
#                 model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
#             loss = forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP(
#                 forward_step_func, train_data_iterator, model,
#                 False, args.micro_batch, gpus_list, recv_tensor_shape
#             )
#     else:
#         if mpu.get_data_parallel_world_size() > 1:
#             args.use_distributed_optimizer = False
#             model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
#         loss = forward_backward_no_pipelining(
#             forward_step_func, train_data_iterator, model,
#             False, args.micro_batch, gpus_list, recv_tensor_shape
#         )

#     return loss


#   loss = loss_func(iter(dataloader), model, config.args, gpus_list,
#                              (config.args.micro_batch_size, config.args.tensor_length, cfg.hidden_size))
# def loss_func(batch, model, args, gpus_list, recv_tensor_shape, vpp_overlap=False):
#     """
#     处理单个 batch 的 forward/backward
#     """
#     # args = config.args
#     recv_tensor_shape =(args.micro_batch_size, args.tensor_length, args.hidden_size)
#     if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
#         if args.virtual_pipeline_model_parallel_size > 1:
#             if not vpp_overlap:
#                 if mpu.get_data_parallel_world_size() > 1:
#                     for model_chunk in model:
#                         args.use_distributed_optimizer = False
#                         model_chunk.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
#                 loss = forward_backward_pipelining_with_interleaving_fix_dp(
#                     forward_step_func, batch, model,
#                     False, args.micro_batch, gpus_list, recv_tensor_shape
#                 )
#             else:
#                 loss = forward_backward_pipelining_with_interleaving_overlap_comm(
#                     forward_step_func, batch, model,
#                     False, args.micro_batch, gpus_list, recv_tensor_shape
#                 )
#         else:
#             if mpu.get_data_parallel_world_size() > 1:
#                 args.use_distributed_optimizer = False
#                 model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
#             loss = forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP(
#                 forward_step_func, batch, model,
#                 False, args.micro_batch, gpus_list, recv_tensor_shape
#             )
#     else:
#         if mpu.get_data_parallel_world_size() > 1:
#             args.use_distributed_optimizer = False
#             model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
#         loss = forward_backward_no_pipelining(
#             forward_step_func, batch, model,
#             False, args.micro_batch, gpus_list, recv_tensor_shape
#         )
#     return loss


# def DataLoader1(dataset):
    
#     micro_batch_size = config.args.micro_batch_size
#     tensor_length = config.args.tensor_length
    
#     print('get tokenizer')
#     tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
#     if tokenizer.pad_token is None:
#         tokenizer.pad_token = tokenizer.eos_token
        
#     train_dataset = get_train_dataset(dataset, tokenizer, tensor_length)
#     if mpu.get_data_parallel_world_size() > 1:
#         train_sampler = DistributedSampler(train_dataset, num_replicas=mpu.get_data_parallel_world_size(),
#                                            rank=mpu.get_data_parallel_rank())
#         dataloader = DataLoader(train_dataset, batch_size=micro_batch_size, shuffle=False, sampler=train_sampler,
#                                 collate_fn=lambda batch: collate_fn(batch, micro_batch_size, tensor_length))
#     else:
#         dataloader = DataLoader(train_dataset, batch_size=micro_batch_size, shuffle=True,
#                                 collate_fn=lambda batch: collate_fn(batch, micro_batch_size, tensor_length))
#     return dataloader


# def get_model(config):
#     args = get_args()
#     tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
#     if tokenizer.pad_token is None:
#         tokenizer.pad_token = tokenizer.eos_token

#     # if tp > 1, must give a tokenizer
#     if mpu.get_tensor_model_parallel_world_size() > 1 and tokenizer is None:
#         raise ValueError("Tokenizer must be provided when tp > 1")

#     from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM

#     config = patch_config(config)
#     model = LlamaForCausalLM(config=config)
#     print(model)
#     device_mesh = mpu.get_device_mesh()
#     if mpu.get_tensor_model_parallel_world_size() > 1:
#         parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], config.pre_process, config.hidden_size)

#         model = parallelize_module(
#             model,
#             device_mesh["tp"],
#             parallelize_plan,
#         )  #
#         _pre_dp_module_transform(model)
#         print(model)
#         # gees 10.24 add-------------------
#         model = model.to(args.local_rank)
#     else:
#         # 10.25 add to fix dpp-tp bug
#         if type(model) is not list:
#             model = model.to(args.local_rank)
#     if mpu.get_data_parallel_world_size() > 1:
#         model = LocalDDP(model, data_parallel_group=mpu.get_data_parallel_group())
#     return model



