from datetime import datetime
from typing import List, Dict

from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM, LlamaForCausalLMForVpp
from datasets import load_dataset
import math
import time

# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import LlamaConfig
from transformers import LlamaTokenizer

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving, forward_backward_pipelining_without_interleaving2, adjust_batch, \
    forward_backward_pipelining_without_interleaving2_remove_sgp, forward_backward_pipelining_with_interleaving,forward_backward_pipelining_with_interleaving
from geesibling.adapters.pytorch.get_data import get_data_loader
import os
from geesibling.adapters.pytorch.pipeline.pipeline.training2 import forward_backward_pipelining_with_interleaving_overlap_comm


from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy
# Configure the log output directory consumed by downstream components.
os.environ['LOG_DIR'] = './logs'


def print_datetime(string):
    """Print a timestamped marker via ``print_rank_0``.

    Note that this call synchronizes across all ranks (it issues a
    ``torch.distributed.barrier()`` before reading the clock, so the
    reported time is consistent cluster-wide).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0('[' + string + '] datetime: {} '.format(stamp))

# test
def pretrain(
        extra_args_provider=None,
        args_defaults=None):
    """Initialize distributed state, build model/optimizer/data, and train.

    Three execution modes are selected from the parallelism arguments:
      * virtual (interleaved) pipeline parallelism when
        ``args.virtual_pipeline_model_parallel_size > 1`` — one model chunk
        per virtual stage, each optionally tensor-parallel and DDP-wrapped;
      * plain pipeline parallelism when
        ``args.pipeline_model_parallel_size > 1``;
      * otherwise a single-stage training loop on the local device.

    Args:
        extra_args_provider: unused here; kept for interface compatibility.
        args_defaults: unused here; kept for interface compatibility.
            Defaults to ``None`` instead of a shared mutable dict.
    """
    gpus_list = initialize_megatron()
    print('----------------------')
    print(gpus_list)

    args = get_args()

    print('get tokenizer')
    tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
    if tokenizer.pad_token is None:
        # LlamaTokenizer ships without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token
    print('get cfg')
    cfg = LlamaConfig(num_hidden_layers=16)
    args.hidden_size = cfg.hidden_size
    print(cfg)

    def get_model(config):
        """Build one model chunk per virtual pipeline stage owned by this rank.

        Under virtual pipelining a single process owns several stages, so the
        "model" seen by the training loop is a list with one chunk per
        virtual stage. Each chunk is tensor-parallelized when the TP world
        size is greater than one.
        """
        device_mesh = mpu.get_device_mesh()
        model_chunk_list = []
        for i in range(args.virtual_pipeline_model_parallel_size):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # pre/post flags are only meaningful after the virtual rank is set.
            pre_process = mpu.is_pipeline_first_stage()
            post_process = mpu.is_pipeline_last_stage()
            pp_rank = mpu.get_pipeline_model_parallel_rank()
            pp_size = mpu.get_pipeline_model_parallel_world_size()
            vpp_size = args.virtual_pipeline_model_parallel_size
            vpp_rank = i

            this_model = LlamaForCausalLMForVpp(config=config, pp_rank=pp_rank, pre_process=pre_process, post_process=post_process, pp_size=pp_size, vpp_size=vpp_size, vpp_rank=vpp_rank)
            print(f"tp:{mpu.get_tensor_model_parallel_world_size()}")
            print(f"dp:{mpu.get_data_parallel_world_size()}")
            if mpu.get_tensor_model_parallel_world_size() > 1:
                parallelize_plan = TPPolicy(this_model, tokenizer, device_mesh['tp'], pre_process, config.hidden_size)
                for plan in parallelize_plan:
                    print(plan)
                    print(parallelize_plan[plan].param_layouts)
                tp_model = parallelize_module(
                    this_model,
                    device_mesh["tp"],
                    parallelize_plan,
                )
                _pre_dp_module_transform(tp_model)
                tp_model = tp_model.to(args.local_rank)
                # BUGFIX: fp16 conversion applies only to the freshly built TP
                # model. It previously ran outside this branch, raising
                # UnboundLocalError (or converting a stale chunk) when
                # tp_size <= 1 and args.fp16 was set.
                if args.fp16:
                    tp_model = tp_model.half()
            else:
                tp_model = this_model
            model_chunk_list.append(tp_model)
        return model_chunk_list

    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)

    # Build model, optimizer and data for the selected parallelism mode.
    if args.virtual_pipeline_model_parallel_size > 1:
        # Virtual (interleaved) pipeline: model is a list of chunks.
        model = get_model(cfg)
        model = [every_model.to(args.local_rank) for every_model in model]

        # Wrap every chunk in DDP when data parallelism is active.
        if mpu.get_data_parallel_world_size() > 1:
            print("-------------------------------------------------------vpp with dp -------------------------------------------------------------------")
            i = torch.cuda.current_device()
            model = [torchDDP(model_module, device_ids=[i], output_device=i,
                              process_group=mpu.get_data_parallel_group())
                     for model_module in model]

        # A single flat parameter list drives one optimizer for all chunks.
        all_model_params = _get_all_params_1(model)
        optimizer = torch.optim.Adam(all_model_params, lr=2.4e-5)

        # One data loader / iterator per model chunk.
        len_dataset_global = -1
        len_train_loader = -1
        train_data_iterator = []
        train_loaders = []
        for i in range(len(model)):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
            if len_dataset_global == -1:
                len_dataset_global = len_dataset
                len_train_loader = len(train_loader)
            # BUGFIX: remember each loader so the per-epoch iterator reset
            # below works; the list used to stay empty, so epochs > 1 reused
            # exhausted iterators.
            train_loaders.append(train_loader)
            # BUGFIX: append exactly one iterator per chunk (it used to be
            # appended twice, desynchronizing chunk ids from iterators).
            train_data_iterator.append(iter(train_loader))
    elif args.pipeline_model_parallel_size > 1:
        # Plain pipeline parallelism: a single stage model per process.
        from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
        pp_rank = mpu.get_pipeline_model_parallel_rank()
        pre = mpu.is_pipeline_first_stage()
        post = mpu.is_pipeline_last_stage()
        pp_size = mpu.get_pipeline_model_parallel_world_size()

        model = LlamaForCausalLM(config=cfg, pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
        device_mesh = mpu.get_device_mesh()
        if mpu.get_tensor_model_parallel_world_size() > 1:
            parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], pre, cfg.hidden_size)
            for one in parallelize_plan:
                print(parallelize_plan[one].param_layouts)
            model = parallelize_module(
                model,
                device_mesh["tp"],
                parallelize_plan,
            )
            _pre_dp_module_transform(model)
            model = model.to(args.local_rank)
            if args.fp16:
                model = model.half()
        else:
            if type(model) is not list:
                model = model.to(args.local_rank)

        if mpu.get_data_parallel_world_size() > 1:
            print("start dpp----------------------------------------------------------------------------------------------------------------------------")
            model = torchDDP(model, device_ids=[args.local_rank], process_group=mpu.get_data_parallel_group())

        print("model is done--------------------------------")
        optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-5)

        train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
        len_train_loader = len(train_loader)
        train_data_iterator = iter(train_loader)
        len_dataset_global = len_dataset
    else:
        # Single stage: no pipeline, local training loop below.
        from geesibling.adapters.pytorch.tensor_parallel.models.model_llama import LlamaForCausalLM
        model = LlamaForCausalLM(config=cfg)
        train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
        # NOTE(review): lr differs from the pipeline branches (2.4e-3 vs
        # 2.4e-5) — confirm this is intentional.
        optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-3)

    # Temporary switch for the communication-overlap interleaved schedule.
    is_overlap = True

    epochs = args.epoch
    start_time = time.time()
    total_loss_list = []
    save_micro_batch = args.micro_batch
    if args.pipeline_model_parallel_size > 1 or args.virtual_pipeline_model_parallel_size > 1:
        for epoch in range(epochs):
            # Re-create iterators each epoch so every epoch sees the full data.
            if args.virtual_pipeline_model_parallel_size > 1:
                for i in range(len(train_loaders)):
                    train_data_iterator[i] = iter(train_loaders[i])
            else:
                train_data_iterator = iter(train_loader)

            loss_list = []
            print('------------------')
            # Restore the configured micro-batch count: the final mini-batch
            # of the previous epoch may have shrunk it via adjust_batch().
            args.micro_batch = save_micro_batch
            progress_bar = tqdm(range(math.ceil(len_train_loader / args.micro_batch)), desc=f'Epoch {epoch + 1}')
            for bar_id in progress_bar:
                # The last mini-batch may be incomplete and needs special care.
                if bar_id == len(progress_bar) - 1:
                    adjust_batch(train_loader, args, len_dataset_global, micro_batch_size)  # may shrink args.micro_batch
                    # Interleaved schedules require the micro-batch count to be
                    # divisible by (and larger than) the pipeline parallel size;
                    # otherwise the final mini-batch is skipped.
                    if args.virtual_pipeline_model_parallel_size > 1:
                        if (args.micro_batch % mpu.get_pipeline_model_parallel_world_size() != 0) or (args.micro_batch == mpu.get_pipeline_model_parallel_world_size()):
                            print(f"当前args.micro_batch为：{args.micro_batch}")
                            print("In the last pipeline, the pipeline is skipped and not executed because the number of microbatches cannot be divisible by the pipeline model parallel size.")
                            continue
                if args.virtual_pipeline_model_parallel_size > 1:
                    if not is_overlap:
                        loss = forward_backward_pipelining_with_interleaving(
                            forward_step_func, train_data_iterator, model,
                            False, args.micro_batch, gpus_list, recv_tensor_shape
                        )
                    else:
                        loss = forward_backward_pipelining_with_interleaving_overlap_comm(
                            forward_step_func, train_data_iterator, model,
                            False, args.micro_batch, gpus_list, recv_tensor_shape
                        )
                else:
                    loss = forward_backward_pipelining_without_interleaving2_remove_sgp(
                        forward_step_func, train_data_iterator, model,
                        False, args.micro_batch, gpus_list, recv_tensor_shape
                    )
                # Only the last stage reports a loss; other stages return None.
                if loss is not None:
                    loss_list.append(loss)
                    progress_bar.set_postfix({'loss': loss})
                optimizer.step()
                optimizer.zero_grad()

            if len(loss_list) > 0:
                total_loss_list.append(sum(loss_list) / len(loss_list))
    else:
        # Single-stage training loop (no pipeline schedule).
        time_list = []
        device = torch.cuda.current_device()
        # BUGFIX: the loop previously called an undefined `tp_model` and the
        # model was never moved to the device.
        model = model.to(device)
        for epoch in range(epochs):
            progress_bar = tqdm(range(len(train_loader)), desc=f'Epoch {epoch+1}')
            loss_list = []
            train_data_iterator = iter(train_loader)
            # BUGFIX: time each epoch with its own variable instead of
            # clobbering the overall start_time used by the final report.
            epoch_start = time.time()
            for i in progress_bar:
                batch = next(train_data_iterator)
                input_tensor = batch['input_ids'].to(device)
                labels = batch['label'].to(device)
                outputs = model(input_ids=input_tensor, labels=labels)
                loss = outputs.loss
                loss.backward()
                loss_list.append(loss)
                progress_bar.set_postfix({'loss': loss})
                optimizer.step()
                optimizer.zero_grad()
            compute_time = time.time() - epoch_start
            time_list.append(compute_time)
            # Guard against an empty loader (avoids ZeroDivisionError).
            if len(loss_list) > 0:
                print(f'epoch {epoch+1},loss:{sum(loss_list) / len(loss_list)}')
                total_loss_list.append(sum(loss_list) / len(loss_list))
                print(total_loss_list)
        if time_list:
            with open('time.txt', 'w') as f:
                print("time:    ", sum(time_list), "  ", sum(time_list) / len(time_list), file=f)
        print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')

    print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')

def forward_step_func(input_tensor, model):
    """Run one forward pass of *model* on *input_tensor* (pipeline hook)."""
    output = model(input_tensor)
    return output


# not use gees
def JudgeSkipFinalVpp(train_loader, args, len_dataset, micro_batch_size):
    """Return the micro-batch count to use for the final mini-batch.

    When the loader length divides evenly into mini-batches, the configured
    ``args.micro_batch`` is returned unchanged. Otherwise the remainder is
    used, minus one when the dataset itself does not divide evenly into
    micro-batches (the trailing incomplete micro-batch is dropped).
    """
    remainder = len(train_loader) % args.micro_batch
    if remainder == 0:
        return args.micro_batch
    if len_dataset % micro_batch_size != 0:
        # The very last micro-batch is incomplete — drop it.
        return remainder - 1
    return remainder


# gees 单参数列表-1
def _get_all_params_1(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    all_params = []

    for chunk in model_chunks:
        all_params.extend(chunk.parameters())

    return all_params


# gees 单参数列表-2
def _get_all_params_2(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    param_list = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:  # gees 多了一步判断
                param_list.append(param)
    return param_list


# gees 多参数列表 ---> 可用于后期扩展,可以定制每一个参数的学习率等.而不使用全局的学习率.
def _get_param_groups__(model_chunks: List) -> List[Dict]:
    """Create a simple parameter group for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[Dict]: List of parameter groups compatible with Adam optimizer.
    """
    param_groups = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:
                param_groups.append({'params': param})

    return param_groups
