import contextlib
from datetime import datetime
from typing import List, Dict

from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

# from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM, LlamaForCausalLMForVpp
from geesibling.adapters.pytorch.pipeline.models.model_mistral import MistralForCausalLM
from datasets import load_dataset
import math
import time

# from tensor_parallel.run_tp import ShardParallel

# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
# from transformers import LlamaConfig
from transformers import MistralConfig
from transformers import LlamaTokenizer

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving, forward_backward_pipelining_without_interleaving2, adjust_batch, \
    forward_backward_pipelining_without_interleaving2_remove_sgp, forward_backward_pipelining_with_interleaving,forward_backward_pipelining_with_interleaving
from geesibling.adapters.pytorch.get_data import get_data_loader,get_data_loader_with_ddp
import os
from geesibling.adapters.pytorch.pipeline.pipeline.training2 import \
    forward_backward_pipelining_with_interleaving_overlap_comm, \
    forward_backward_pipelining_without_interleaving2_remove_sgp_fix_dp, \
    forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP, \
    forward_backward_pipelining_with_interleaving_fix_dp, forward_backward_no_pipelining

from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from geesibling.adapters.pytorch.pipeline.megatron.distributed import DistributedDataParallel as LocalDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy
from geesibling.adapters.pytorch.pipeline.optimizer import FP32Optimizer
# Set environment variables
os.environ['LOG_DIR'] = './logs'


def print_datetime(string):
    """Print *string* plus the current wall-clock time on rank 0.

    Note that this call will sync across all ranks (it issues a collective
    barrier before reading the clock).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0('[' + string + '] datetime: {} '.format(stamp))

# args = get_args()
def pretrain(
        extra_args_provider=None,
        args_defaults={}):
    """End-to-end training driver.

    Initializes megatron state, builds the Mistral model and optimizer, then
    trains with the schedule matching the parallel configuration: interleaved
    (virtual) pipeline, 1F1B pipeline, or no pipelining.

    Args:
        extra_args_provider: unused in this body; kept for megatron-style
            call compatibility.
        args_defaults: unused in this body.  NOTE(review): mutable default
            ({}) is shared across calls — harmless while unused, but worth
            replacing with None.
    """
    gpus_list = initialize_megatron()
    args = get_args()

    print('get tokenizer')
    tokenizer = LlamaTokenizer.from_pretrained('./mistral7b')
    if tokenizer.pad_token is None:
        # The tokenizer ships without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token

    # cfg = MistralConfig(num_hidden_layers=8,_attn_implementation="eager")
    cfg = MistralConfig(num_hidden_layers=8,_attn_implementation="eager",attn_implementation="eager")
    # cfg = MistralConfig(num_hidden_layers=8,_attn_implementation="sdpa",attn_implementation="sdpa")
    print(f"_attn_implementation is {cfg._attn_implementation}")
    print(cfg)
    # cfg = LlamaConfig(num_hidden_layers=16)
    args.hidden_size = cfg.hidden_size  # Be used in p2p_communication_4

    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    # Shape of the activation tensor received from the previous pipeline stage.
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)

    print(f"tp:{mpu.get_tensor_model_parallel_world_size()}")
    print(f"dp:{mpu.get_data_parallel_world_size()}")
    print(f"pp:{mpu.get_pipeline_model_parallel_world_size()}")

    model,optimizer = setup_model_and_optimizer(config=cfg,args=args,tokenizer=tokenizer)

    # prepare data
    if args.virtual_pipeline_model_parallel_size > 1:
        len_dataset_global = -1
        len_train_loader = -1
        if args.virtual_pipeline_model_parallel_size is not None:
            train_data_iterator = []
            train_loaders = []  # keep every initially created train_loader
            for i in range(len(model)):
                mpu.set_virtual_pipeline_model_parallel_rank(i)  # one loader per vpp model chunk
                # train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
                if mpu.get_data_parallel_world_size() > 1:
                    train_loader, len_dataset = get_data_loader_with_ddp(tokenizer, micro_batch_size, tensor_length,
                                                                         mpu.get_data_parallel_world_size(),
                                                                         mpu.get_data_parallel_rank())
                else:
                    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)

                if len_dataset_global == -1:
                    len_dataset_global = len_dataset
                    len_train_loader = len(train_loader)

                train_loaders.append(train_loader)  # keep the loader so its iterator can be rebuilt each epoch (2025 fix)
                train_data_iterator.append(iter(train_loader))
    else:
        # train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
        if mpu.get_data_parallel_world_size() > 1:
            train_loader, len_dataset = get_data_loader_with_ddp(tokenizer, micro_batch_size, tensor_length,
                                                                 mpu.get_data_parallel_world_size(),
                                                                 mpu.get_data_parallel_rank())
        else:
            train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)

        len_train_loader = len(train_loader)
        train_data_iterator = iter(train_loader)
        len_dataset_global = len_dataset

    # Hard-coded off: switch to True to use the comm-overlapping interleaved schedule below.
    vpp_overlap = False
    epochs = args.epoch
    start_time = time.time()
    total_loss_list = []
    # NOTE(review): saved but never restored after adjust_batch mutates
    # args.micro_batch — confirm the original value is not needed later.
    save_micro_batch = args.micro_batch

    if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
        for epoch in range(epochs):
            loss_list = []
            print('------------------')
            if args.virtual_pipeline_model_parallel_size > 1:
                for i in range(len(train_loaders)):
                    train_data_iterator[i] = iter(train_loaders[i])  # reset the iterator each epoch
            else:
                train_data_iterator = iter(train_loader)
            progress_bar = tqdm(range(math.ceil(len_train_loader / args.micro_batch)), desc=f'Epoch {epoch + 1}')
            for bar_id in progress_bar:
                # The last mini-batch may be incomplete; handle it specially.
                # NOTE(review): in the vpp branch `train_loader` is the leaked
                # loop variable (last chunk's loader) — confirm adjust_batch
                # only needs its length.
                if bar_id == len(progress_bar) - 1:
                    adjust_batch(train_loader, args, len_dataset_global,
                                 micro_batch_size)  # gees: adjust_batch mutates args.micro_batch here
                    # gees add for vpp.
                    #  vpp attention:number of microbatches must be divisible by pipeline-model-parallel-size when using interleaved schedule
                    if args.virtual_pipeline_model_parallel_size > 1:
                        # NOTE(review): the second clause also skips when micro_batch
                        # EQUALS the pp size even though that count is divisible — confirm intended.
                        if (args.micro_batch % mpu.get_pipeline_model_parallel_world_size() != 0) or (
                                args.micro_batch == mpu.get_pipeline_model_parallel_world_size()):
                            print(f"当前args.micro_batch为：{args.micro_batch}")
                            print(
                                "In the last pipeline, the pipeline is skipped and not executed because the number of microbatches cannot be divisible by the pipeline model parallel size.")
                            continue
                if args.virtual_pipeline_model_parallel_size > 1:

                    if not vpp_overlap:
                        if mpu.get_data_parallel_world_size() > 1:
                            # Set grad to zero.
                            for model_chunk in model:
                                args.use_distributed_optimizer = False
                                # If using distributed optimizer, don't zero buffer here; zeroing of buffer is
                                # handled automatically by the optimizer after all-gathers finish.
                                # Otherwise, zero the buffer.
                                model_chunk.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
                        else:
                            pass
                        loss = forward_backward_pipelining_with_interleaving_fix_dp(
                            forward_step_func, train_data_iterator, model,
                            False, args.micro_batch, gpus_list, recv_tensor_shape
                        )
                    else:
                        loss = forward_backward_pipelining_with_interleaving_overlap_comm(
                            forward_step_func, train_data_iterator, model,
                            False, args.micro_batch, gpus_list, recv_tensor_shape
                        )
                else:
                    # PP or PP-DP
                    # new add
                    if mpu.get_data_parallel_world_size() > 1:
                        args.use_distributed_optimizer = False
                        model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
                    else:
                        pass

                    loss = forward_backward_pipelining_without_interleaving2_remove_sgp_use_megatronDDP(
                        forward_step_func, train_data_iterator, model,
                        False, args.micro_batch, gpus_list, recv_tensor_shape
                    )
                if loss is not None:
                    loss_list.append(loss)
                    progress_bar.set_postfix({'loss': loss})  # show the current batch loss in the progress-bar postfix
                optimizer.step()
                optimizer.zero_grad()

                # profiler.step()

                # NOTE(review): this appends a running average once per
                # micro-batch iteration (inside the bar loop), not once per
                # epoch — confirm that is intended.
                if len(loss_list) > 0:
                    total_loss_list.append(sum(loss_list) / len(loss_list))
    else:
        # No pipelining (pp == 1 and vpp <= 1): plain forward/backward, possibly with DP.
        for epoch in range(epochs):
            loss_list = []
            print('------------------')
            # NOTE(review): unreachable here — this branch only runs when
            # vpp <= 1, so the else arm below is always taken.
            if args.virtual_pipeline_model_parallel_size > 1:
                for i in range(len(train_loaders)):
                    train_data_iterator[i] = iter(train_loaders[i])  # reset the iterator each epoch
            else:
                train_data_iterator = iter(train_loader)
            progress_bar = tqdm(range(math.ceil(len_train_loader / args.micro_batch)), desc=f'Epoch {epoch + 1}')
            for bar_id in progress_bar:
                if mpu.get_data_parallel_world_size() > 1:
                    args.use_distributed_optimizer = False
                    model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
                else:
                    pass

                loss = forward_backward_no_pipelining(
                    forward_step_func, train_data_iterator, model,
                    False, args.micro_batch, gpus_list, recv_tensor_shape
                )
                if loss is not None:
                    loss_list.append(loss)
                    progress_bar.set_postfix({'loss': loss})  # show the current batch loss in the progress-bar postfix
                optimizer.step()
                optimizer.zero_grad()
                # profiler.step()

        # device = torch.cuda.current_device()
        #
        # if isinstance(model, LocalDDP):
        #     Dp_flag = True
        #     no_sync_func = model.no_sync
        # else:
        #     no_sync_func = contextlib.nullcontext
        # no_sync_context = None
        #
        # for epoch in range(epochs):
        #     progress_bar = tqdm(range(math.ceil(len(train_loader))), desc=f'Epoch {epoch + 1}')
        #     loss_list = []
        #     train_data_iterator = iter(train_loader)
        #     args.use_distributed_optimizer = False
        #     with no_sync_func():
        #         for i in (progress_bar - 1):
        #             # # Set grad to zero.
        #             model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
        #
        #             batch = next(train_data_iterator)
        #             input_tensor = batch['input_ids'].to(device)
        #             labels = batch['label'].to(device)
        #
        #             outputs = model(input_ids=input_tensor, labels=labels)
        #
        #             loss = outputs.loss
        #
        #             loss.backward()
        #             loss_list.append(loss)
        #             progress_bar.set_postfix({'loss': loss})  # show the current batch loss in the progress-bar postfix
        #             success, _, _ = optimizer.step()
        #             optimizer.zero_grad()
        #             # profiler.step()
        #
        #     model.zero_grad_buffer(zero_buffer=(not args.use_distributed_optimizer))
        #     batch = next(train_data_iterator)
        #     input_tensor = batch['input_ids'].to(device)
        #     labels = batch['label'].to(device)
        #
        #     outputs = model(input_ids=input_tensor, labels=labels)
        #
        #     loss = outputs.loss
        #
        #     loss.backward()
        #     loss_list.append(loss)
        #     progress_bar.set_postfix({'loss': loss})  # show the current batch loss in the progress-bar postfix
        #     success, _, _ = optimizer.step()
        #     optimizer.zero_grad()



            # print(f'epoch {epoch + 1},loss:{sum(loss_list) / len(loss_list)}')
            # if len(loss_list) > 0:
            #     total_loss_list.append(sum(loss_list) / len(loss_list))
            #     print(total_loss_list)
    print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')

def setup_model_and_optimizer(config,args,tokenizer = None):
    """Build the (possibly chunked) model and its FP32-wrapped Adam optimizer.

    Args:
        config: HuggingFace model config (hidden_size / num_hidden_layers are read).
        args: megatron-style namespace (virtual_pipeline_model_parallel_size,
            local_rank, fp16 are read).
        tokenizer: required when tensor parallelism is enabled (tp > 1), since
            the TP policy derives the sharding plan from it.

    Returns:
        Tuple (model, optimizer).  `model` is a list of chunks (each optionally
        LocalDDP-wrapped) when interleaved/virtual pipeline parallelism is on,
        otherwise a single module (optionally LocalDDP-wrapped).

    Raises:
        ValueError: if tp > 1 and no tokenizer was given.
    """
    # if tp > 1, must give a tokenizer
    if mpu.get_tensor_model_parallel_world_size() > 1 and tokenizer is None:
        raise ValueError("Tokenizer must be provided when tp > 1")

    vpp_size = args.virtual_pipeline_model_parallel_size
    if vpp_size > 1:
        # Fix: the top-of-file import of LlamaForCausalLMForVpp is commented
        # out, so this branch previously raised NameError. Import it locally.
        # NOTE(review): the non-vpp branch builds MistralForCausalLM — confirm
        # the Llama vpp variant is really the intended model here.
        from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLMForVpp

        device_mesh = mpu.get_device_mesh()
        model_chunk_list = []
        for i in range(vpp_size):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # Set pre_process and post_process only after virtual rank is set.
            pre_process = mpu.is_pipeline_first_stage()
            post_process = mpu.is_pipeline_last_stage()
            pp_rank = mpu.get_pipeline_model_parallel_rank()
            pp_size = mpu.get_pipeline_model_parallel_world_size()
            vpp_rank = i  # vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()

            model = LlamaForCausalLMForVpp(config=config, pp_rank=pp_rank, pre_process=pre_process, post_process=post_process, pp_size=pp_size, vpp_size=vpp_size, vpp_rank=vpp_rank)

            # this_model.model_type = model_type
            if mpu.get_tensor_model_parallel_world_size()>1:
                # Shard this chunk across the tensor-parallel mesh axis.
                parallelize_plan = TPPolicy(model,tokenizer,device_mesh['tp'],pre_process,config.hidden_size)
                for plan in parallelize_plan:
                    print(plan)
                    print(parallelize_plan[plan].param_layouts)
                model = parallelize_module(
                    model,
                    device_mesh["tp"],
                    parallelize_plan,
                )
                _pre_dp_module_transform(model)
                model=model.to(args.local_rank)
            if args.fp16:
                model = model.half()
            model_chunk_list.append(model)
        model = [every_model.to(args.local_rank) for every_model in model_chunk_list]
        # model = [every_model.to(torch.cuda.current_device()) for every_model in model_chunk_list]
        if mpu.get_data_parallel_world_size() > 1:
            print("-------------------------------------------------------vpp with dp -------------------------------------------------------------------")
            model = [LocalDDP(
                model_chunk,
                data_parallel_group=mpu.get_data_parallel_group(),
                # accumulate_allreduce_grads_in_fp32=args.accumulate_allreduce_grads_in_fp32,
                disable_bucketing=(model_chunk_idx > 0))
                # Turn off bucketing for model_chunk 2 onwards, since communication for these
                # model chunks is overlapped with compute anyway.
                for (model_chunk_idx, model_chunk) in enumerate(model)]
            #  TODO
            # # Broadcast params from data parallel src rank to other data parallel ranks.
            # if args.data_parallel_random_init:
            #     for model_module in model:
            #         model_module.broadcast_params()

        # optimizer: one flat param list over all chunks.
        all_model_params = _get_all_params_1(model)
        base_optimizer = torch.optim.Adam(all_model_params, lr=2.4e-5)
        if mpu.get_data_parallel_world_size() > 1:
            # LocalDDP accumulates into .main_grad, so tell the wrapper.
            optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=True)
        else:
            optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=False)
    # elif args.pipeline_model_parallel_size > 1:
    else:
        # from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
        pp_rank = mpu.get_pipeline_model_parallel_rank()
        pre = mpu.is_pipeline_first_stage()
        post = mpu.is_pipeline_last_stage()
        pp_size = mpu.get_pipeline_model_parallel_world_size()
        model = MistralForCausalLM(config=config, pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
        print(model)
        device_mesh = mpu.get_device_mesh()
        if mpu.get_tensor_model_parallel_world_size() > 1:
            parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], pre, config.hidden_size)

            # Make q_proj/k_proj reuse v_proj's layout on every local layer.
            for i in range(config.num_hidden_layers // pp_size):
                parallelize_plan[f'model.layers.{i}.self_attn.q_proj'] = parallelize_plan[
                    f'model.layers.{i}.self_attn.v_proj']
                parallelize_plan[f'model.layers.{i}.self_attn.k_proj'] = parallelize_plan[
                    f'model.layers.{i}.self_attn.v_proj']

            print("修改之后----")
            for one in parallelize_plan:
                print(f"rank:{args.local_rank}   {one} is {parallelize_plan[one].param_layouts}")

            # NOTE(review): debug artifact — appends the plan to a local file
            # on every rank, every run; consider removing or gating it.
            with open("parallelize_plan_mistral_2", "a") as log:
                for one in parallelize_plan:
                    param_plan = parallelize_plan[one]
                    log.write(f"rank: {args.local_rank}   {one} is {param_plan}\n")
                log.write("\n")
                log.write("\n")

            print("device_mesh")
            print(device_mesh["tp"])


            model = parallelize_module(
                model,
                device_mesh["tp"],
                parallelize_plan,
            )
            _pre_dp_module_transform(model)
            # gees 10.24 add: move the sharded module onto this rank's device.
            model = model.to(args.local_rank)
            print(model)
        else:
            # 10.25 add to fix dpp-tp bug
            if type(model) is not list:
                model = model.to(args.local_rank)
        if mpu.get_data_parallel_world_size() > 1:
            model = LocalDDP(model, data_parallel_group=mpu.get_data_parallel_group())

        base_optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-5)
        if mpu.get_data_parallel_world_size() > 1:
            optimizer = FP32Optimizer(optimizer=base_optimizer, params_have_main_grad=True)
        else:
            optimizer = FP32Optimizer(base_optimizer, params_have_main_grad=False)
    return model, optimizer




def forward_step_func(input_tensor, model):
    """Run one forward step: feed *input_tensor* through *model* and return the result."""
    output = model(input_tensor)
    return output



# not use gees
def JudgeSkipFinalVpp(train_loader, args, len_dataset, micro_batch_size):
    """Return the number of micro-batches in the final (possibly partial) mini-batch.

    If the loader length divides evenly into mini-batches, the configured
    micro-batch count is returned unchanged; otherwise the remainder is used,
    minus one when the very last micro-batch itself is incomplete.
    """
    remainder = len(train_loader) % args.micro_batch
    if remainder == 0:
        return args.micro_batch
    # The trailing micro-batch is incomplete: drop it from the count.
    if len_dataset % micro_batch_size != 0:
        return remainder - 1
    return remainder

    # gees: single flat parameter list, variant 1


def _get_all_params_1(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    all_params = []

    for chunk in model_chunks:
        all_params.extend(chunk.parameters())

    return all_params

    # gees: single flat parameter list, variant 2 (skips frozen parameters)


def _get_all_params_2(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    param_list = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:  # gees 多了一步判断
                param_list.append(param)
    return param_list

    # gees: per-parameter groups ---> useful for later extension, e.g. customizing
    # a learning rate per parameter instead of using one global learning rate.


def _get_param_groups__(model_chunks: List) -> List[Dict]:
    """Create a simple parameter group for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[Dict]: List of parameter groups compatible with Adam optimizer.
    """
    param_groups = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:
                param_groups.append({'params': param})

    return param_groups