from datetime import datetime
from typing import List, Dict

from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM, LlamaForCausalLMForVpp
from datasets import load_dataset
import math
import time

# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import LlamaConfig
from transformers import LlamaTokenizer

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving, forward_backward_pipelining_without_interleaving2, adjust_batch, \
    forward_backward_pipelining_without_interleaving2_remove_sgp, forward_backward_pipelining_with_interleaving,forward_backward_pipelining_with_interleaving
from geesibling.adapters.pytorch.get_data import get_data_loader
import os
from geesibling.adapters.pytorch.pipeline.pipeline.training2 import forward_backward_pipelining_with_interleaving_overlap_comm
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy
# Set environment variables: direct the framework's log output to ./logs.
os.environ['LOG_DIR'] = './logs'


def print_datetime(string):
    """Print *string* tagged with the current wall-clock time on rank 0.

    Note that this call will sync across all ranks (collective barrier).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0(f'[{string}] datetime: {stamp} ')


def pretrain(
        extra_args_provider=None,
        args_defaults=None):
    """Main training program for Llama with pipeline/tensor/virtual-pipeline parallelism.

    Runs, in order:
        1) initialize Megatron-style distributed state (``initialize_megatron``).
        2) build the tokenizer, config and model — optionally tensor-parallelized
           and/or split into one chunk per virtual pipeline stage.
        3) build the data loader(s).
        4) train: a plain tensor-parallel loop when the TP world size > 1,
           otherwise the interleaved pipeline schedule.

    Arguments:
        extra_args_provider: a function that takes a parser and adds arguments
            to it. Currently unused here; argument parsing happens inside
            ``initialize_megatron`` / ``get_args``.
        args_defaults: a dictionary from argument-name to argument-value used
            to override already-parsed arguments. Currently unused here.
    """
    # Initialize distributed/parallel state and fetch the parsed arguments.
    gpus_list = initialize_megatron()
    print('----------------------')
    print(gpus_list)

    args = get_args()

    # Tokenizer / model config.
    # NOTE(review): hard-coded local path — adjust per deployment.
    print('get tokenizer')
    tokenizer = LlamaTokenizer.from_pretrained('/yjy/GeeSibling_tp/examples/pytorch/3D/llama2/llama7bconfig/')
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    print('get cfg')
    # Small 2-layer config for debugging; swap in the full config for real runs.
    cfg = LlamaConfig(num_hidden_layers=2)
    print(cfg)

    # Pipeline-parallel topology of this rank. Previously only ``pre`` was
    # computed, which left ``pp_rank``/``post``/``pp_size`` undefined on the
    # plain-pipeline branch below (NameError).
    pp_rank = mpu.get_pipeline_model_parallel_rank()        # stage index of this rank
    pre = mpu.is_pipeline_first_stage()
    post = mpu.is_pipeline_last_stage()
    pp_size = mpu.get_pipeline_model_parallel_world_size()  # number of pipeline stages

    device_mesh = mpu.get_device_mesh()

    def get_model(config):
        """Build one model chunk per virtual pipeline stage owned by this rank."""
        # Under virtual pipelining a single process owns several stages, each
        # with its own model part, so the training loop receives a list of
        # models (one entry per virtual pipeline stage).
        model_chunk_list = []
        for i in range(args.virtual_pipeline_model_parallel_size):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # pre/post flags must be read only AFTER the virtual rank is set.
            pre_process = mpu.is_pipeline_first_stage()
            post_process = mpu.is_pipeline_last_stage()
            chunk_pp_rank = mpu.get_pipeline_model_parallel_rank()
            chunk_pp_size = mpu.get_pipeline_model_parallel_world_size()
            vpp_size = args.virtual_pipeline_model_parallel_size
            vpp_rank = i  # same as mpu.get_virtual_pipeline_model_parallel_rank()

            this_model = LlamaForCausalLMForVpp(config=config, pp_rank=chunk_pp_rank, pre_process=pre_process, post_process=post_process, pp_size=chunk_pp_size, vpp_size=vpp_size, vpp_rank=vpp_rank)
            if mpu.get_tensor_model_parallel_world_size() > 1:
                # Apply tensor parallelism to this chunk.
                parallelize_plan = TPPolicy(this_model, tokenizer, device_mesh['tp'], pre_process, config.hidden_size)
                for plan in parallelize_plan:
                    print(plan)
                    print(parallelize_plan[plan].param_layouts)
                    print(parallelize_plan[plan].input_layouts)
                    print(parallelize_plan[plan].output_layouts)
                tp_model = parallelize_module(
                    this_model,
                    device_mesh["tp"],
                    parallelize_plan,
                )
                _pre_dp_module_transform(tp_model)
                tp_model = tp_model.to(args.local_rank)
            # NOTE(review): the original chunk object is appended;
            # parallelize_module/.to() appear to mutate it in place so this
            # should already be the TP-parallelized module — confirm.
            model_chunk_list.append(this_model)
        return model_chunk_list

    if args.virtual_pipeline_model_parallel_size > 1:
        # Virtual pipeline: one model chunk per virtual stage.
        model = get_model(cfg)
    elif mpu.get_pipeline_model_parallel_world_size() > 1:
        # Plain pipeline parallelism (no virtual stages).
        from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
        model = LlamaForCausalLM(config=cfg, pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
    else:
        # Tensor parallelism only.
        from geesibling.adapters.pytorch.tensor_parallel.models.model_llama import LlamaForCausalLM
        model = LlamaForCausalLM(config=cfg)
        for name, param in model.named_parameters():
            print(name)
        print("inv_freq", model.model.layers[0].self_attn.rotary_emb.inv_freq)
        print("tp---")
        start = time.time()
        device_mesh = mpu.get_device_mesh()
        torch.manual_seed(mpu.get_data_parallel_rank())
        parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], pre, cfg.hidden_size)
        for one in parallelize_plan:
            print(one)
            print(parallelize_plan[one].input_layouts)
            print(parallelize_plan[one].param_layouts)
            print(parallelize_plan[one].output_layouts)
        tp_model = parallelize_module(
            model,
            device_mesh["tp"],
            parallelize_plan,
        )
        _pre_dp_module_transform(tp_model)
        tp_model = tp_model.to(args.local_rank)
        search_time = time.time() - start

    # Propagate the hidden size so the p2p communication layer knows tensor shapes.
    args.hidden_size = cfg.hidden_size

    print(f'momdel init done')

    if isinstance(model, list):
        # Virtual pipelining yields a LIST of chunks; a list has no
        # ``.parameters()``, so flatten the chunk parameters for the optimizer.
        optimizer = torch.optim.Adam(_get_all_params_1(model), lr=2.4e-5)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-5)

    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)
    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
    len_dataset_global = -1
    len_train_loader = -1
    if args.virtual_pipeline_model_parallel_size > 1:
        # One data iterator per model chunk.
        train_data_iterator = []
        for i in range(len(model)):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
            if len_dataset_global == -1:
                len_dataset_global = len_dataset
                len_train_loader = len(train_loader)
            train_data_iterator.append(iter(train_loader))
    epochs = args.epoch
    start_time = time.time()
    total_loss_list = []
    save_micro_batch = args.micro_batch

    print(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 开始执行。")
    if mpu.get_tensor_model_parallel_world_size() > 1:
        # ---- Tensor-parallel (non-pipeline) training loop. ----
        time_list = []
        device = torch.cuda.current_device()
        for epoch in range(epochs):
            progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
            loss_list = []
            train_data_iterator = iter(train_loader)
            start_time = time.time()
            for i in progress_bar:
                # Fetch ONE batch and take both inputs and labels from it.
                # (Previously ``next`` was called twice, so inputs and labels
                # came from two different batches.)
                batch = next(train_data_iterator)
                input_tensor = batch['input_ids'].to(device)
                labels = batch['label'].to(device)
                outputs = tp_model(input_ids=input_tensor, labels=labels)
                loss = outputs.loss
                loss.backward()
                # Detach to a Python float so GPU scalar tensors (and their
                # memory) are not retained across the whole epoch.
                loss_list.append(loss.item())
                progress_bar.set_postfix({'loss': loss.item()})
                optimizer.step()
                optimizer.zero_grad()
            compute_time = time.time() - start_time
            time_list.append(compute_time)
            print(f'epoch {epoch+1},loss:{sum(loss_list) / len(loss_list)}')
            if len(loss_list) > 0:
                total_loss_list.append(sum(loss_list) / len(loss_list))
                print(total_loss_list)
        print("time:    ", sum(time_list), "  ", sum(time_list) / len(time_list))
        print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')
        exit()

    # ---- Pipeline (interleaved) training loop, one epoch at a time. ----
    # NOTE(review): ``len_train_loader`` stays -1 unless the virtual-pipeline
    # branch above ran; in that case ceil(-1/n) == 0 and this loop body never
    # executes — confirm plain-pp training is intentionally not handled here.
    for epoch in range(epochs):
        loss_list = []
        print('------------------')
        # Restore the configured micro-batch count; ``adjust_batch`` may have
        # shrunk args.micro_batch for the final (incomplete) mini-batch of the
        # previous epoch.
        args.micro_batch = save_micro_batch
        progress_bar = tqdm(range(math.ceil(len_train_loader / args.micro_batch)), desc=f'Epoch {epoch + 1}')

        # Individual train steps within this epoch.
        for bar_id in progress_bar:
            # The last mini-batch may be incomplete and needs special handling.
            if bar_id == len(progress_bar) - 1:
                adjust_batch(train_loader, args, len_dataset_global, micro_batch_size)  # may shrink args.micro_batch
                # Interleaved schedule constraint: the number of micro-batches
                # must be divisible by the pipeline-model-parallel size.
                if (args.micro_batch % mpu.get_pipeline_model_parallel_world_size() != 0) or (args.micro_batch == mpu.get_pipeline_model_parallel_world_size()):
                    print("In the last pipeline stage, the pipeline is skipped and not executed because the number of microbatches cannot be divisible by the pipeline model parallel size.")
                    continue
            loss = forward_backward_pipelining_with_interleaving_overlap_comm(
                forward_step_func, train_data_iterator, model,
                False, args.micro_batch, gpus_list, recv_tensor_shape
            )
            if loss is not None:
                loss_list.append(loss)
                progress_bar.set_postfix({'loss': loss})  # show the current step loss

        # All train steps of this epoch are done.
            optimizer.step()
            optimizer.zero_grad()

        if len(loss_list) > 0:
            total_loss_list.append(sum(loss_list) / len(loss_list))
            print(total_loss_list)

    print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')


def forward_step_func(input_tensor, model):
    """Forward step used by the pipeline schedule: run *model* on the input tensor."""
    output = model(input_tensor)
    return output


def JudgeSkipFinalVpp(train_loader, args, len_dataset, micro_batch_size):
    """Return the number of micro-batches in the final (possibly partial) mini-batch.

    If the loader length divides evenly by ``args.micro_batch``, the configured
    count is returned unchanged. Otherwise the remainder is used, minus one
    when the very last micro-batch is itself incomplete (dataset size not a
    multiple of ``micro_batch_size``), in which case that micro-batch is dropped.
    """
    remainder = len(train_loader) % args.micro_batch
    if remainder == 0:
        return args.micro_batch
    # Last micro-batch is incomplete -> drop it outright.
    if len_dataset % micro_batch_size != 0:
        remainder -= 1
    return remainder


# gees 单参数列表-1
def _get_all_params_1(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    all_params = []

    for chunk in model_chunks:
        all_params.extend(chunk.parameters())

    return all_params


# gees 单参数列表-2
def _get_all_params_2(model_chunks: List) -> List:
    """Create a single parameter list for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[nn.Parameter]: List of parameters compatible with Adam optimizer.
    """
    param_list = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:  # gees 多了一步判断
                param_list.append(param)
    return param_list


# gees 多参数列表 ---> 可用于后期扩展,可以定制每一个参数的学习率等.而不使用全局的学习率.
def _get_param_groups__(model_chunks: List) -> List[Dict]:
    """Create a simple parameter group for Adam optimizer.

    Args:
        model_chunks (List[nn.Module]): List of model chunks (submodules).

    Returns:
        List[Dict]: List of parameter groups compatible with Adam optimizer.
    """
    param_groups = []

    for model_chunk in model_chunks:
        for param in model_chunk.parameters():
            if param.requires_grad:
                param_groups.append({'params': param})

    return param_groups
