import contextlib
from datetime import datetime
from typing import List, Dict

from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM, LlamaForCausalLMForVpp
from datasets import load_dataset
import math
import time

# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import LlamaConfig
from transformers import LlamaTokenizer

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving, forward_backward_pipelining_without_interleaving2, adjust_batch, \
    forward_backward_pipelining_without_interleaving2_remove_sgp, forward_backward_pipelining_with_interleaving,forward_backward_pipelining_with_interleaving
from geesibling.adapters.pytorch.get_data import get_data_loader,get_data_loader_with_ddp
import os
from geesibling.adapters.pytorch.pipeline.pipeline.training2 import \
    forward_backward_pipelining_with_interleaving_overlap_comm, \
    forward_backward_pipelining_without_interleaving2_remove_sgp_fix_dp, \
    forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP, \
    forward_backward_pipelining_with_interleaving_fix_dp, forward_backward_no_pipelining

from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from geesibling.adapters.pytorch.pipeline.megatron.distributed import DistributedDataParallel as LocalDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy
from geesibling.adapters.pytorch.pipeline.optimizer import FP32Optimizer
from geesibling.adapters.pytorch.auto_parallel.auto_parallel_profiling import Profiling
# Set environment variables: log directory consumed by the project's logging setup.
os.environ['LOG_DIR'] = './logs'


def print_datetime(string):
    """Print *string* tagged with the current wall-clock time on rank 0.

    Note that this call will sync across all ranks (it issues a barrier
    before formatting the timestamp).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    message = '[' + string + '] datetime: {} '.format(stamp)
    print_rank_0(message)


def train(model, optimizer, train_loader, args, epochs, len_train_loader, len_dataset_global, gpus_list,
          recv_tensor_shape, forward_step_func):
    """Run the full training loop over ``epochs`` epochs.

    Args:
        model: The model, or a list of model chunks when virtual pipeline
            parallelism (vpp) is enabled.
        optimizer: Optimizer to step after each forward/backward pass.
        train_loader: A DataLoader, or a list of DataLoaders (one per vpp chunk).
        args: Parsed argument namespace (micro_batch, parallel sizes, prof_file, ...).
        epochs: Number of epochs to iterate.
        len_train_loader: Number of batches in one epoch of the loader.
        len_dataset_global: Total sample count, used to adjust the final batch.
        gpus_list: Device list forwarded to the pipeline schedule functions.
        recv_tensor_shape: Shape of activation tensors received between
            pipeline stages.
        forward_step_func: Callable(input_tensor, model) used by the schedules.

    Returns:
        list: Average loss per epoch (empty on ranks that produce no loss).
    """
    def train_step(model, optimizer, data_iterator, forward_step_func, args, gpus_list, recv_tensor_shape, vpp_overlap,
                   progress_bar, loss_list):
        """Run a single training step (forward/backward pass + optimizer update)."""
        # NOTE(review): vpp_overlap is forced to False here, so the
        # overlap-comm interleaved schedule below is never selected — confirm
        # whether this override is intentional.
        vpp_overlap = False
        # Gradient zeroing logic (original comment preserved)
        if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
            if args.virtual_pipeline_model_parallel_size > 1:
                if not vpp_overlap and mpu.get_data_parallel_world_size() > 1:
                    # Set grad to zero. (original comment)
                    for model_chunk in model:
                        model_chunk.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
            else:
                if mpu.get_data_parallel_world_size() > 1:
                    model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
        else:
            if mpu.get_data_parallel_world_size() > 1:
                model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))

        # Select the forward/backward schedule matching the parallel configuration.
        if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
            if args.virtual_pipeline_model_parallel_size > 1:
                loss_func = (forward_backward_pipelining_with_interleaving_overlap_comm if vpp_overlap
                             else forward_backward_pipelining_with_interleaving_fix_dp)
            else:
                loss_func = forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP
        else:
            loss_func = forward_backward_no_pipelining

        # Run forward/backward over this step's micro-batches (arguments kept as-is).
        loss = loss_func(
            forward_step_func,
            data_iterator,
            model,
            False,
            args.micro_batch,
            gpus_list,
            recv_tensor_shape
        )

        # Record the loss; only ranks that compute it receive a non-None value.
        if loss is not None:
            loss_list.append(loss)
            progress_bar.set_postfix({'loss': loss})  # update the progress-bar suffix

        # Optimizer update (migrated here from the original train loop).
        optimizer.step()
        optimizer.zero_grad()  # note: keeps the original position of the zero_grad call

        return loss


    total_loss_list = []
    vpp_overlap = False  # assumed predefined; could also be taken from args

    for epoch in range(epochs):
        loss_list = []
        print('------------------')
        # (Re)build the data iterator(s) at the start of each epoch.
        # Reset the iterators.
        if args.virtual_pipeline_model_parallel_size > 1:
            train_data_iterator = [iter(loader) for loader in train_loader]  # vpp needs one iterator per chunk
        else:
            train_data_iterator = iter(train_loader)

        # nonlocal train_step
        current_train_step = train_step  # default: the nested train_step defined above

        # Total optimizer steps = batches per epoch / micro-batches per step.
        total_steps = math.ceil(len_train_loader / args.micro_batch)
        if args.prof_file:
            total_steps = 5
            print("已成功调整step数量")
            profiling = Profiling(args)
            # NOTE(review): this rebinds train_step on every epoch; from the
            # second epoch onward current_train_step is already the hooked
            # version, so the profiling hook would be applied twice — confirm.
            train_step = profiling.hook_train_step(current_train_step)
            print("已注册hook")
        progress_bar = tqdm(range(total_steps), desc=f'Epoch {epoch + 1}')

        # TODO: may need to detect here whether we are inside autoParallel,
        # e.g. via args; once MCMC finishes, set args.inSearchAutoConfig=False.

        for step_idx in progress_bar:
            # Handle the last (possibly incomplete) group of micro-batches.
            is_last_step = (step_idx == total_steps - 1)
            if is_last_step:
                adjust_batch(train_loader, args, len_dataset_global, args.micro_batch)  # fix up the final partial batch
                # vpp constraint (original comment preserved):
                #  vpp attention:number of microbatches must be divisible by pipeline-model-parallel-size when using interleaved schedule
                if args.virtual_pipeline_model_parallel_size > 1:
                    pp_world_size = mpu.get_pipeline_model_parallel_world_size()
                    if (args.micro_batch % pp_world_size != 0) or (args.micro_batch == pp_world_size):
                        print(f"当前args.micro_batch为：{args.micro_batch}")
                        print(
                            "In the last pipeline, the pipeline is skipped and not executed because the number of microbatches cannot be divisible by the pipeline model parallel size.")
                        print("跳过不满足整除条件的最后批次")  # skip the final batch that fails the divisibility check
                        continue

            # Execute one training step (includes the optimizer update).
            loss = train_step(
                model=model,
                optimizer=optimizer,
                data_iterator=train_data_iterator,
                forward_step_func=forward_step_func,
                args=args,
                gpus_list=gpus_list,
                recv_tensor_shape=recv_tensor_shape,
                vpp_overlap=vpp_overlap,
                progress_bar=progress_bar,
                loss_list=loss_list
            )

        # Record this epoch's average loss (original logic preserved).
        if loss_list:
            total_loss_list.append(sum(loss_list) / len(loss_list))

    return total_loss_list


# def train_step(model, optimizer, data_iterator, forward_step_func, args, gpus_list, recv_tensor_shape, vpp_overlap,
#                progress_bar, loss_list):
#     """执行单个训练步骤（包含前向反向+优化器更新）"""
#     vpp_overlap = False
#     # 梯度清零逻辑（保持原注释）
#     if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
#         if args.virtual_pipeline_model_parallel_size > 1:
#             if not vpp_overlap and mpu.get_data_parallel_world_size() > 1:
#                 # Set grad to zero. (原注释)
#                 for model_chunk in model:
#                     model_chunk.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))  # 原注释
#         else:
#             if mpu.get_data_parallel_world_size() > 1:
#                 model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))  # 原注释
#     else:
#         if mpu.get_data_parallel_world_size() > 1:
#             model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))  # 原注释
#
#     # 选择前向反向函数（保持原逻辑）
#     if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
#         if args.virtual_pipeline_model_parallel_size > 1:
#             loss_func = (forward_backward_pipelining_with_interleaving_overlap_comm if vpp_overlap
#                          else forward_backward_pipelining_with_interleaving_fix_dp)
#         else:
#             loss_func = forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP
#     else:
#         loss_func = forward_backward_no_pipelining
#
#     # 执行前向反向传播（参数保持原样）
#     loss = loss_func(
#         forward_step_func,
#         data_iterator,
#         model,
#         False,
#         args.micro_batch,
#         gpus_list,
#         recv_tensor_shape
#     )
#
#     # 更新损失和优化器（原逻辑迁移至此）
#     if loss is not None:
#         loss_list.append(loss)
#         progress_bar.set_postfix({'loss': loss})  # 原注释：更新进度条后缀
#
#     # 优化器操作（从原train函数迁移至此）
#     optimizer.step()
#     optimizer.zero_grad()  # 注意：这里保持原代码的zero_grad调用位置
#
#     return loss

def setup_model_and_optimizer(config,args,tokenizer = None):
    """Build the (possibly parallelized) model and its optimizer.

    Args:
        config: Model configuration (LlamaConfig) used to construct the model.
        args: Parsed argument namespace (parallel sizes, local_rank, fp16, ...).
        tokenizer: Required when tensor parallelism (tp) > 1; consumed by TPPolicy.

    Returns:
        tuple: ``(model, optimizer)`` where ``model`` is a single module, a
        LocalDDP wrapper, or a list of (possibly wrapped) model chunks when
        virtual pipeline parallelism is enabled.

    Raises:
        ValueError: If tp > 1 and no tokenizer was provided.
    """
    # if tp > 1, must give a tokenizer
    if mpu.get_tensor_model_parallel_world_size() > 1 and tokenizer is None:
        raise ValueError("Tokenizer must be provided when tp > 1")

    vpp_size = args.virtual_pipeline_model_parallel_size
    if vpp_size > 1:
        # Virtual pipeline parallelism: build one model chunk per virtual rank.
        device_mesh = mpu.get_device_mesh()
        model_chunk_list = []
        for i in range(vpp_size):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # Set pre_process and post_process only after virtual rank is set.
            pre_process = mpu.is_pipeline_first_stage()
            post_process = mpu.is_pipeline_last_stage()
            pp_rank = mpu.get_pipeline_model_parallel_rank()
            pp_size = mpu.get_pipeline_model_parallel_world_size()
            vpp_rank = i  # vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()

            model = LlamaForCausalLMForVpp(config=config, pp_rank=pp_rank, pre_process=pre_process, post_process=post_process, pp_size=pp_size, vpp_size=vpp_size, vpp_rank=vpp_rank)

            # this_model.model_type = model_type
            if mpu.get_tensor_model_parallel_world_size()>1:
                # Apply tensor parallelism to this chunk via the TP plan.
                parallelize_plan = TPPolicy(model,tokenizer,device_mesh['tp'],pre_process,config.hidden_size)
                for plan in parallelize_plan:
                    print(plan)
                    print(parallelize_plan[plan].param_layouts)
                model = parallelize_module(
                    model,
                    device_mesh["tp"],
                    parallelize_plan,
                )
                _pre_dp_module_transform(model)
                model=model.to(args.local_rank)
            if args.fp16:
                model = model.half()
            model_chunk_list.append(model)
        model = [every_model.to(args.local_rank) for every_model in model_chunk_list]
        # model = [every_model.to(torch.cuda.current_device()) for every_model in model_chunk_list]
        if mpu.get_data_parallel_world_size() > 1:
            print("-------------------------------------------------------vpp with dp -------------------------------------------------------------------")
            model = [LocalDDP(
                model_chunk,
                data_parallel_group=mpu.get_data_parallel_group(),
                # accumulate_allreduce_grads_in_fp32=args.accumulate_allreduce_grads_in_fp32,
                disable_bucketing=(model_chunk_idx > 0))
                # Turn off bucketing for model_chunk 2 onwards, since communication for these
                # model chunks is overlapped with compute anyway.
                for (model_chunk_idx, model_chunk) in enumerate(model)]
            #  TODO
            # # Broadcast params from data parallel src rank to other data parallel ranks.
            # if args.data_parallel_random_init:
            #     for model_module in model:
            #         model_module.broadcast_params()

        # optimizer
        all_model_params = _get_all_params_1(model)
        base_optimizer = torch.optim.Adam(all_model_params, lr=2.4e-5)
        if mpu.get_data_parallel_world_size() > 1:
            # With DDP, gradients live in main_grad buffers.
            optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=True)
        else:
            # optimizer = base_optimizer
            optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=False)
    # elif args.pipeline_model_parallel_size > 1:
    else:
        # NOTE(review): redundant import — LlamaForCausalLM is already imported
        # at module level; kept for byte-identical behavior.
        from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
        pp_rank = mpu.get_pipeline_model_parallel_rank()
        pre = mpu.is_pipeline_first_stage()
        post = mpu.is_pipeline_last_stage()
        pp_size = mpu.get_pipeline_model_parallel_world_size()

        # Print all values for debugging.
        print(f"Pipeline Model Parallel Rank: {pp_rank}")
        print(f"Is Pipeline First Stage: {pre}")
        print(f"Is Pipeline Last Stage: {post}")
        print(f"Pipeline Model Parallel World Size: {pp_size}")


        model = LlamaForCausalLM(config=config, pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
        device_mesh = mpu.get_device_mesh()
        if mpu.get_tensor_model_parallel_world_size() > 1:
            parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], pre, config.hidden_size)
            for one in parallelize_plan:
                print(parallelize_plan[one].param_layouts)
            model = parallelize_module(
                model,
                device_mesh["tp"],
                parallelize_plan,
            )  #
            _pre_dp_module_transform(model)
            # gees 10.24 add-------------------
            model = model.to(args.local_rank)
        else:
            # 10.25 add to fix dpp-tp bug
            if type(model) is not list:
                model = model.to(args.local_rank)
        if mpu.get_data_parallel_world_size() > 1:
            # if torch.cuda.current_device()==args.local_rank:
            #     print("true")
            # else:
            #     print("false")
            model = LocalDDP(model, data_parallel_group=mpu.get_data_parallel_group())

        base_optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-5)
        if mpu.get_data_parallel_world_size() > 1:
            optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=True)
        else:
            optimizer = FP32Optimizer(base_optimizer, params_have_main_grad=False)
    return model, optimizer




def forward_step_func(input_tensor, model):
    """Default forward step: apply *model* to *input_tensor* and return its output."""
    output = model(input_tensor)
    return output


# args = get_args()
def pretrain(
        extra_args_provider=None,
        args_defaults={}):
    gpus_list = initialize_megatron()
    args = get_args()

    print('get tokenizer')
    tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    cfg = LlamaConfig(num_hidden_layers=8)
    print(cfg)
    args.hidden_size = cfg.hidden_size  # Be used in  p2p_communication_4

    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)

    print(f"tp:{mpu.get_tensor_model_parallel_world_size()}")
    print(f"dp:{mpu.get_data_parallel_world_size()}")
    print(f"pp:{mpu.get_pipeline_model_parallel_world_size()}")

    model,optimizer = setup_model_and_optimizer(config=cfg,args=args,tokenizer=tokenizer)

    # prepare data
    if args.virtual_pipeline_model_parallel_size > 1:
        len_dataset_global = -1
        len_train_loader = -1
        if args.virtual_pipeline_model_parallel_size is not None:
            train_data_iterator = []
            train_loader = []  # 保存所有初始创建的train_loader
            for i in range(len(model)):
                mpu.set_virtual_pipeline_model_parallel_rank(i)  #
                # train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
                if mpu.get_data_parallel_world_size() > 1:
                    train_loader, len_dataset = get_data_loader_with_ddp(tokenizer, micro_batch_size, tensor_length,
                                                                         mpu.get_data_parallel_world_size(),
                                                                         mpu.get_data_parallel_rank())
                else:
                    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)

                if len_dataset_global == -1:
                    len_dataset_global = len_dataset
                    len_train_loader = len(train_loader)

                train_loader.append(train_loader)  # 保存train_loader，2025 find fix
                train_data_iterator.append(iter(train_loader))
    else:
        # train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
        if mpu.get_data_parallel_world_size() > 1:
            train_loader, len_dataset = get_data_loader_with_ddp(tokenizer, micro_batch_size, tensor_length,
                                                                 mpu.get_data_parallel_world_size(),
                                                                 mpu.get_data_parallel_rank())
        else:
            train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)

        len_train_loader = len(train_loader)
        train_data_iterator = iter(train_loader)
        len_dataset_global = len_dataset

        print(len_train_loader)
        print(len_train_loader)
        print(len_train_loader)
        print(len_train_loader)

    vpp_overlap = False
    epochs = args.epoch
    start_time = time.time()
    total_loss_list = []
    save_micro_batch = args.micro_batch


    # TODO
    args.use_distributed_optimizer = False
    total_loss_list = train(model,optimizer,train_loader,args,1,len_train_loader,len_dataset_global,gpus_list,recv_tensor_shape,forward_step_func)

    print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')





# not use gees
def JudgeSkipFinalVpp(train_loader, args, len_dataset, micro_batch_size):
    temp_micro_batch_num = args.micro_batch
    if len(train_loader) % args.micro_batch != 0:
        # 修改micro-batch数
        temp_micro_batch_num = len(train_loader) % args.micro_batch
        # 最后一个micro-batch不完整，直接去掉最后一个micro batch
        if len_dataset % micro_batch_size != 0:
            temp_micro_batch_num = temp_micro_batch_num - 1
    return temp_micro_batch_num

    # gees: flat single parameter list, variant 1


def _get_all_params_1(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    all_params = []

    for chunk in model_chunks:
        all_params.extend(chunk.parameters())

    return all_params

    # gees: flat single parameter list, variant 2 (filters requires_grad)


def _get_all_params_2(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    param_list = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:  # gees 多了一步判断
                param_list.append(param)
    return param_list

    # gees: multiple param groups --> extensible later, e.g. to customize the
    # learning rate per parameter instead of using one global learning rate.


def _get_param_groups__(model_chunks: List) -> List[Dict]:
    """Create a simple parameter group for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[Dict]: List of parameter groups compatible with Adam optimizer.
    """
    param_groups = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:
                param_groups.append({'params': param})

    return param_groups