from datetime import datetime
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler

import math
import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()
from geesibling.adapters.pytorch.pipeline.pipeline import p2p_communication
from datasets import load_dataset
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import GPT2Config,AutoConfig,AutoTokenizer, PreTrainedTokenizer

from transformers.models.gpt2.modeling_gpt2 import GPT2Config
from transformers import GPT2Tokenizer
from accelerate import dispatch_model
from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

from tqdm import tqdm

from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving2,adjust_batch

def print_datetime(string):
    """Log *string* with a wall-clock timestamp via print_rank_0.

    Note that this call will sync across all ranks (a collective barrier
    runs first, so every rank reaches this point before the log line).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0(f'[{string}] datetime: {stamp} ')

def pretrain(
             extra_args_provider=None,
             args_defaults=None):
    """Main training program.

    This function will run the following in the order provided:
        1) initialize Megatron (process groups, parallel topology).
        2) build the tokenizer, config, model, and optimizer, applying
           tensor parallelism and DDP wrapping where the topology asks for it.
        3) build the train data loader.
        4) train the model, either through the pipeline schedule
           (pp_size > 1) or a plain per-batch loop.

    Arguments:
        extra_args_provider: a function that takes a parser and adds arguments
            to it. It is used for programs to add their own arguments.
        args_defaults: a dictionary from argument-name to argument-value used
            to set already-parsed arguments. Defaults to an empty dict.
    """
    # Fresh dict per call — a mutable default argument would be shared
    # across invocations.
    if args_defaults is None:
        args_defaults = {}

    # Initialize and get arguments plus the list of available GPUs.
    gpus_list = initialize_megatron(extra_args_provider=extra_args_provider,
                        args_defaults=args_defaults)
    print('----------------------')
    print(gpus_list)
    args = get_args()

    # GPT-2 ships without a pad token, so fall back to the EOS token.
    # Use the public `eos_token` attribute, not the private `_eos_token`.
    tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained('gpt2', use_fast=True)
    if not getattr(tokenizer, "pad_token", None):
        tokenizer.pad_token = tokenizer.eos_token
    cfg = AutoConfig.from_pretrained(
            'gpt2',
            num_labels=2,
            pad_token=tokenizer.pad_token,
            pad_token_id=tokenizer.pad_token_id,
    )

    # Pipeline-parallel topology for this rank.
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pre = mpu.is_pipeline_first_stage()
    post = mpu.is_pipeline_last_stage()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    if pp_size > 1:
        # Pipeline-sliced variant: each stage only builds its own layers.
        from geesibling.adapters.pytorch.pipeline.models.model_gpt2 import GPT2LMHeadModel, GPT2ForSequenceClassification
        model = GPT2ForSequenceClassification(config=cfg, pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
    else:
        from geesibling.adapters.pytorch.tensor_parallel.models.model_gpt2 import GPT2LMHeadModel, GPT2ForSequenceClassification
        model = GPT2ForSequenceClassification(config=cfg)
    print(model)

    # Tensor parallelism: shard the model over the 'tp' sub-mesh when enabled.
    device_mesh = mpu.get_device_mesh()
    if mpu.get_tensor_model_parallel_world_size() > 1:
        torch.manual_seed(mpu.get_data_parallel_rank())
        parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], pre, cfg.n_embd)
        for one in parallelize_plan:
            print(parallelize_plan[one].param_layouts)
        tp_model = parallelize_module(
            model,
            device_mesh["tp"],
            parallelize_plan,
        )
        # Required before wrapping a DTensor-sharded module in DDP.
        _pre_dp_module_transform(tp_model)
    else:
        tp_model = model
    tp_model = tp_model.to(args.local_rank)

    # Data parallelism: wrap in DDP over the data-parallel process group.
    if mpu.get_data_parallel_world_size() > 1:
        tp_model = torchDDP(tp_model, device_ids=[args.local_rank], process_group=mpu.get_data_parallel_group())

    optimizer = torch.optim.Adam(tp_model.parameters(), lr=1e-2)

    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    # Shape of the activation tensor exchanged between pipeline stages.
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)
    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length, mpu.get_data_parallel_world_size(), mpu.get_data_parallel_rank())
    epochs = args.epoch

    print('start train')
    start = time.time()
    if pp_size > 1:
        # Pipelined training: the schedule runs args.micro_batch micro-batches
        # per optimizer step.
        for epoch in range(epochs):
            loss_list = []
            train_data_iterator = iter(train_loader)
            print('------------------')
            progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
            for bar_id in progress_bar:
                # The last mini-batch may be incomplete; shrink the number of
                # micro-batches in the schedule to match what remains.
                if bar_id == len(progress_bar) - 1:
                    args.micro_batch = adjust_batch(train_loader, args, len_dataset, micro_batch_size)
                loss = forward_backward_pipelining_without_interleaving2(
                    forward_step_func, train_data_iterator, tp_model,
                    False, args.micro_batch, gpus_list, recv_tensor_shape
                )
                # Only the last pipeline stage produces a loss value.
                if loss is not None:
                    loss_list.append(loss)
                    progress_bar.set_postfix({'loss': loss})
                optimizer.step()
                optimizer.zero_grad()
        # NOTE(review): the hard-coded 8 presumably is the micro-batch count
        # per step — confirm against args.micro_batch.
        print(f'bs:{micro_batch_size * 8},time:{time.time() - start}')
    else:
        # Plain (non-pipelined) training loop on the current CUDA device.
        device = torch.cuda.current_device()
        total_loss_list = []  # per-epoch mean losses (was previously undefined)
        for epoch in range(epochs):
            progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
            loss_list = []
            train_data_iterator = iter(train_loader)
            for i in progress_bar:
                # Pull ONE batch and read both fields from it. The original
                # called next() twice per step, which silently skipped every
                # other batch and paired inputs with the wrong labels.
                batch = next(train_data_iterator)
                input_tensor = batch['input_ids'].to(device)
                labels = batch['label'].to(device)
                outputs = tp_model(input_ids=input_tensor, labels=labels)
                loss = outputs.loss
                loss.backward()
                loss_list.append(loss)
                progress_bar.set_postfix({'loss': loss})
                optimizer.step()
                optimizer.zero_grad()
            # Guard against an empty loader to avoid a ZeroDivisionError.
            if loss_list:
                mean_loss = sum(loss_list) / len(loss_list)
                print(f'epoch {epoch+1},loss:{mean_loss}')
                total_loss_list.append(mean_loss)
                print(total_loss_list)
        # `start` (the undefined `start_time` raised NameError here before).
        print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start}')


def forward_step_func(input_tensor, model):
    """Run one forward pass of *model* on *input_tensor*.

    This is the forward-step callback handed to the pipeline schedule.
    """
    output = model(input_tensor)
    return output


def get_data_loader(tokenizer, bs, tensor_length, world_size, local_rank,
                    data_dir='/root/yjy/gees/GeeSibling/examples/datasets/mrpc'):
    """Build the MRPC training DataLoader.

    Arguments:
        tokenizer: tokenizer used to encode the sentence pairs.
        bs: per-rank batch size.
        tensor_length: fixed sequence length (padding/truncation target).
        world_size: data-parallel world size; when > 1 a DistributedSampler
            shards the dataset across ranks.
        local_rank: this process's data-parallel rank (used as the sampler's
            `rank` argument).
        data_dir: directory holding train/test/validation .jsonl files.
            Defaults to the original hard-coded location.

    Returns:
        (train_dataloader, dataset_length) tuple.
    """
    # Load the jsonl splits.
    data_files = {
            'train': f'{data_dir}/train.jsonl',
            'test': f'{data_dir}/test.jsonl',
            'validation': f'{data_dir}/validation.jsonl',
            }
    dataset = load_dataset('json', data_files=data_files)

    def tokenize_function(examples):
        # Encode the sentence pair to a fixed length so pipeline stages
        # receive uniformly-shaped tensors.
        return tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, padding="max_length", max_length=tensor_length)

    encoded_dataset = dataset.map(tokenize_function, batched=True)
    # Expose the columns the training loop reads, as PyTorch tensors.
    encoded_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
    train_dataset = encoded_dataset['train']
    # Use the `world_size` parameter instead of re-querying mpu, so the
    # branch is consistent with the sampler configuration below.
    if world_size > 1:
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=local_rank)
        # shuffle must be False when a sampler is supplied.
        train_dataloader = DataLoader(train_dataset, batch_size=bs, shuffle=False, sampler=train_sampler)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=bs, shuffle=True)
    print('data done')
    return train_dataloader, len(train_dataset)

