from datetime import datetime
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler
from geesibling.adapters.pytorch.pipeline.models.model_gpt2 import GPT2LMHeadModel,GPT2ForSequenceClassification
import math
import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()
from geesibling.adapters.pytorch.pipeline.pipeline import p2p_communication
from datasets import load_dataset
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import GPT2Config,AutoConfig,AutoTokenizer, PreTrainedTokenizer

from transformers.models.gpt2.modeling_gpt2 import GPT2Config
from transformers import GPT2Tokenizer
from accelerate import dispatch_model
from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

from tqdm import tqdm



#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving2,adjust_batch

def print_datetime(string):
    """Print *string* with a timestamp on rank 0; syncs across all ranks."""
    # Barrier first so every rank reaches this point before the timestamp.
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0(f'[{string}] datetime: {stamp} ')

def pretrain(
             extra_args_provider=None,
             args_defaults=None):
    """Main pipeline-parallel training program.

    Runs the following in order:
        1) initialize Megatron-style distributed state and get the GPU list.
        2) build the tokenizer/config and this pipeline stage's model slice.
        3) derive a device map via GeeSiblingPolicy and reload the model with
           pretrained weights dispatched onto it.
        4) train the model with pipelined forward/backward over the data loader.

    Arguments:
        extra_args_provider: a function that takes a parser and adds arguments
            to it. It is used for programs to add their own arguments.
        args_defaults: a dictionary from argument-name to argument-value used
            to override already-parsed arguments. Defaults to an empty dict.
    """
    # Avoid the shared mutable-default pitfall; behaves as before when omitted.
    if args_defaults is None:
        args_defaults = {}

    # Initialize and get arguments; returns the list of usable GPUs.
    gpus_list = initialize_megatron(extra_args_provider=extra_args_provider,
                        args_defaults=args_defaults)
    print('----------------------')
    print(gpus_list)
    args = get_args()

    # GPT-2 ships without a pad token, so fall back to the EOS token.
    # Use the public `eos_token` accessor rather than the private `_eos_token`.
    tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained('gpt2', use_fast=True)
    if not getattr(tokenizer, "pad_token", None):
        tokenizer.pad_token = tokenizer.eos_token
    cfg = AutoConfig.from_pretrained(
            'gpt2',
            num_labels=2,
            pad_token=tokenizer.pad_token,
            pad_token_id=tokenizer.pad_token_id,
    )

    # Pipeline-parallel topology for this rank.
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pre = mpu.is_pipeline_first_stage()
    post = mpu.is_pipeline_last_stage()
    pp_size = mpu.get_pipeline_model_parallel_world_size()

    # Build a model skeleton first, only to derive the device placement policy.
    model = GPT2ForSequenceClassification(config=cfg, pp_rank=pp_rank,
                                          pre_process=pre, post_process=post,
                                          pp_size=pp_size)
    print(model)

    args.modelName = 'GPT2ForSequenceClassification'
    hf_device_map = GeeSiblingPolicy(args.modelName, model, tokenizer, gpus_list,
                                     "sgp", pre, cfg.n_embd)
    print(hf_device_map)

    # Reload with pretrained weights dispatched onto the computed device map.
    model = GPT2ForSequenceClassification.from_pretrained(
        pretrained_model_name_or_path='gpt2', config=cfg, pp_rank=pp_rank,
        pre_process=pre, post_process=post, pp_size=pp_size,
        device_map=hf_device_map)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    # Shape of the activations received from the previous pipeline stage.
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)
    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
    epochs = args.epoch

    print('start train')
    start = time.time()
    for epoch in range(epochs):
        loss_list = []
        train_data_iterator = iter(train_loader)
        print('------------------')

        progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
        for bar_id in progress_bar:
            # The last mini-batch may be incomplete and needs special handling:
            # shrink the micro-batch count to cover only the remainder.
            if bar_id == len(progress_bar) - 1:
                args.micro_batch = adjust_batch(train_loader, args, len_dataset, micro_batch_size)
            loss = forward_backward_pipelining_without_interleaving2(
                forward_step_func, train_data_iterator, model,
                False, args.micro_batch, gpus_list, recv_tensor_shape
            )
            if loss is not None:
                loss_list.append(loss)
                # Show the current batch loss on the progress bar.
                progress_bar.set_postfix({'loss': loss})
            optimizer.step()
            optimizer.zero_grad()
    # NOTE(review): the "* 8" factor looks like a hard-coded world-size /
    # accumulation multiplier — confirm it matches the actual configuration.
    print(f'bs:{micro_batch_size * 8},time:{time.time() - start}')
def forward_step_func(input_tensor, model):
    """Single pipeline forward step: apply *model* to *input_tensor*."""
    output = model(input_tensor)
    return output


def get_data_loader(tokenizer, bs, tensor_length,
                    data_dir='/root/chj/gees/GeeSibling/examples/datasets/mrpc'):
    """Build a shuffled training DataLoader over the MRPC jsonl dataset.

    Arguments:
        tokenizer: tokenizer used to encode the sentence pairs.
        bs: batch size of the returned DataLoader.
        tensor_length: fixed sequence length; inputs are padded/truncated to it.
        data_dir: directory holding train/test/validation .jsonl files.
            Defaults to the previously hard-coded location, so existing
            callers are unaffected.

    Returns:
        A tuple ``(train_dataloader, num_train_examples)``.
    """
    # Load the dataset splits from json-lines files under *data_dir*.
    data_files = {
            'train': f'{data_dir}/train.jsonl',
            'test': f'{data_dir}/test.jsonl',
            'validation': f'{data_dir}/validation.jsonl',
            }

    dataset = load_dataset('json', data_files=data_files)

    def tokenize_function(examples):
        # Encode each sentence pair, padding/truncating to a fixed length so
        # every micro-batch has the same tensor shape for the pipeline.
        return tokenizer(examples['sentence1'], examples['sentence2'],
                         truncation=True, padding="max_length",
                         max_length=tensor_length)

    # Apply the encoding in batches, then expose only the columns the model
    # consumes, as PyTorch tensors.
    encoded_dataset = dataset.map(tokenize_function, batched=True)
    encoded_dataset.set_format(type='torch',
                               columns=['input_ids', 'attention_mask', 'label'])
    train_dataset = encoded_dataset['train']
    train_dataloader = DataLoader(train_dataset, batch_size=bs, shuffle=True)
    print('data done')
    return train_dataloader, len(train_dataset)

