from datetime import datetime
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler

from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()
from geesibling.adapters.pytorch.pipeline.pipeline import p2p_communication
from datasets import load_dataset
from tqdm import tqdm
import math
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2Config
from transformers import GPT2Tokenizer
from accelerate import dispatch_model
from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0


from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy

#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving2,adjust_batch



def print_datetime(string):
    """Print `string` with a wall-clock timestamp on rank 0.

    Note that this call will sync across all ranks (it issues a barrier
    before reading the clock, so every rank reaches this point first).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    message = '[' + string + '] datetime: {} '.format(stamp)
    print_rank_0(message)

def pretrain(
             extra_args_provider=None,
             args_defaults={}):
    """Main training program.

    This function runs the following, in order:
        1) initialize Megatron (distributed groups, parsed args).
        2) build the GPT-2 model and wrap it for tensor / pipeline / data
           parallelism depending on the configured world sizes.
        3) build the training data loader.
        4) train the model, either through the pipeline schedule
           (pp_size > 1) or with a plain forward/backward loop (pp_size == 1).

    Arguments:
        extra_args_provider: a function that takes a parser and adds
            arguments to it. Kept for API compatibility with Megatron-style
            entry points; not consumed in this body.
        args_defaults: a dictionary from argument-name to argument-value
            used to override already-parsed arguments. Also currently unused
            here; kept for API compatibility.
    """

    # Initialize and get arguments, timers, and Tensorboard writer.
    gpus_list = initialize_megatron()
    print('----------------------')
    print(gpus_list)

    args = get_args()

    # ------------------------------------------------------------------
    # Model setup
    # ------------------------------------------------------------------
    tokenizer = GPT2Tokenizer.from_pretrained('./gpt2')
    if not getattr(tokenizer, "pad_token", None):
        # GPT-2 ships without a pad token; reuse EOS. Use the public
        # `eos_token` attribute (the original read the private `_eos_token`).
        tokenizer.pad_token = tokenizer.eos_token

    cfg = GPT2Config()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pre = mpu.is_pipeline_first_stage()
    post = mpu.is_pipeline_last_stage()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    if mpu.get_tensor_model_parallel_world_size() > 1:
        # Seed per data-parallel rank so TP shards initialize deterministically.
        torch.manual_seed(mpu.get_data_parallel_rank())
    if pp_size > 1:
        # Pipeline-parallel variant: each stage instantiates only its layers.
        from geesibling.adapters.pytorch.pipeline.models.model_gpt2 import GPT2LMHeadModel
        model = GPT2LMHeadModel.from_pretrained('./gpt2', pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size)
    else:
        from geesibling.adapters.pytorch.tensor_parallel.models.model_gpt2 import GPT2LMHeadModel
        model = GPT2LMHeadModel.from_pretrained('./gpt2')
    print(cfg)
    print(model)

    # ------------------------------------------------------------------
    # Tensor parallelism
    # ------------------------------------------------------------------
    start = time.time()
    device_mesh = mpu.get_device_mesh()
    if mpu.get_tensor_model_parallel_world_size() > 1:
        parallelize_plan = TPPolicy(model, tokenizer, device_mesh['tp'], pre, cfg.n_embd)
        for one in parallelize_plan:
            print(parallelize_plan[one].param_layouts)
        tp_model = parallelize_module(
            model,
            device_mesh["tp"],
            parallelize_plan,
        )
        # Prepare the TP-sharded module so DDP can wrap it afterwards.
        _pre_dp_module_transform(tp_model)
    else:
        tp_model = model
    if args.fp16:
        tp_model = tp_model.half()
    tp_model = tp_model.to(args.local_rank)
    search_time = time.time() - start

    # ------------------------------------------------------------------
    # Data parallelism, optimizer, data loader
    # ------------------------------------------------------------------
    if mpu.get_data_parallel_world_size() > 1:
        tp_model = torchDDP(tp_model, device_ids=[args.local_rank], process_group=mpu.get_data_parallel_group())
    optimizer = torch.optim.Adam(tp_model.parameters(), lr=2.4e-5, eps=1e-4)
    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    args.hidden_size = cfg.hidden_size
    # Shape of the activation tensor received from the previous pipeline stage.
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)
    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length, mpu.get_data_parallel_world_size(), mpu.get_data_parallel_rank())

    print(f'len loader :{len(train_loader)}')
    print(f'len_dataset:{len_dataset}')
    epochs = args.epoch
    # Wall-clock start of the whole training run. Kept separate from the
    # per-epoch timer below: the original reused one `start_time` variable,
    # so the final "time:" report only covered the last epoch.
    train_start_time = time.time()
    total_loss_list = []
    save_micro_batch = args.micro_batch

    if pp_size > 1:
        # --------------------------------------------------------------
        # Pipeline-parallel training loop
        # --------------------------------------------------------------
        with torch.profiler.profile(
        activities=[
            torch.profiler.ProfilerActivity.CPU,
            torch.profiler.ProfilerActivity.CUDA,
        ],schedule=torch.profiler.schedule(
            wait=3,
            warmup=1,
            active=2,
            repeat=1),profile_memory=True,
        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/tp4/')
        # used when outputting for tensorboard
        ) as p:
            time_list = []
            for epoch in range(epochs):
                # Losses of the micro-batches inside each mini-batch.
                loss_list = []
                train_data_iterator = iter(train_loader)
                print('------------------')
                # Restore micro_batch: adjust_batch may have shrunk it for
                # the trailing (incomplete) mini-batch of the previous epoch.
                args.micro_batch = save_micro_batch
                progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
                epoch_start = time.time()
                for bar_id in progress_bar:
                    # The last mini-batch may be incomplete; shrink it.
                    if bar_id == len(progress_bar) - 1:
                        adjust_batch(train_loader, args, len_dataset, micro_batch_size)
                    loss = forward_backward_pipelining_without_interleaving2(
                        forward_step_func, train_data_iterator, tp_model,
                        False, args.micro_batch, gpus_list, recv_tensor_shape
                    )
                    # Only the last stage produces a loss; other stages get None.
                    if loss is not None:
                        loss_list.append(loss)
                        progress_bar.set_postfix({'loss': loss})
                    optimizer.step()
                    optimizer.zero_grad()
                    p.step()
                compute_time = time.time() - epoch_start
                time_list.append(compute_time)
                if len(loss_list) > 0:
                    total_loss_list.append(sum(loss_list) / len(loss_list))
                    print(total_loss_list)
        with open('time.txt', 'w') as f:
            print("time:    ", sum(time_list), "  ", sum(time_list) / len(time_list), file=f)
        if post:
            # Only the last pipeline stage observed losses; persist them.
            df = pd.DataFrame({
                'epoch': range(1, len(total_loss_list) + 1),
                'loss': total_loss_list
            })
            df.to_csv(f'pp_{pp_rank}_epochs{args.epoch}_bs{args.micro_batch * args.micro_batch_size}_len{args.tensor_length}_loss.csv')

        print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - train_start_time},search_time:{search_time}')

    else:
        # --------------------------------------------------------------
        # Single-stage (no pipeline) training loop
        # --------------------------------------------------------------
        with torch.profiler.profile(
        activities=[
            torch.profiler.ProfilerActivity.CPU,
            torch.profiler.ProfilerActivity.CUDA,
        ],schedule=torch.profiler.schedule(
            wait=3,
            warmup=1,
            active=2,
            repeat=1),profile_memory=True,
        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/tp4/')
        # used when outputting for tensorboard
        ) as p:
            time_list = []
            device = torch.cuda.current_device()
            for epoch in range(epochs):
                progress_bar = tqdm(range(math.ceil(len(train_loader))), desc=f'Epoch {epoch+1}')
                loss_list = []
                train_data_iterator = iter(train_loader)
                epoch_start = time.time()
                for i in progress_bar:
                    batch = next(train_data_iterator)
                    input_tensor = batch['input_ids'].to(device)
                    labels = batch['label'].to(device)
                    outputs = tp_model(input_ids=input_tensor, labels=labels)
                    loss = outputs.loss
                    # Backward pass and optimization.
                    loss.backward()
                    # Record a detached float: appending the live tensor (as
                    # the original did) keeps every step's autograd graph alive.
                    loss_val = loss.item()
                    loss_list.append(loss_val)
                    progress_bar.set_postfix({'loss': loss_val})
                    optimizer.step()
                    optimizer.zero_grad()
                    p.step()
                compute_time = time.time() - epoch_start
                time_list.append(compute_time)
                # Guard the average: the original divided unconditionally and
                # would raise ZeroDivisionError on an empty loader.
                if len(loss_list) > 0:
                    print(f'epoch {epoch+1},loss:{sum(loss_list) / len(loss_list)}')
                    total_loss_list.append(sum(loss_list) / len(loss_list))
                    print(total_loss_list)
            with open('time.txt', 'w') as f:
                print("time:    ", sum(time_list), "  ", sum(time_list) / len(time_list), file=f)
            print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - train_start_time}')



def forward_step_func(input_tensor, model):
    """Run one forward step: feed `input_tensor` through `model` and return the output."""
    output = model(input_tensor)
    return output


def get_data_loader(tokenizer, bs, tensor_length, world_size, local_rank, data_files=None):
    """Build the training DataLoader for the MRPC jsonl dataset.

    Arguments:
        tokenizer: HuggingFace tokenizer used to encode the sentence pairs.
        bs: batch size per data-parallel rank.
        tensor_length: fixed sequence length (pad/truncate target).
        world_size: data-parallel world size; >1 enables DistributedSampler.
        local_rank: data-parallel rank of this process (sampler shard id).
        data_files: optional mapping of split name ('train'/'test'/
            'validation') to jsonl path; defaults to the original
            hard-coded MRPC paths for backward compatibility.

    Returns:
        (train_dataloader, number of training examples)
    """
    # Load the dataset (default to the original hard-coded paths).
    if data_files is None:
        data_files = {
            'train': '/yjy/GeeSibling/examples/datasets/mrpc/train.jsonl',
            'test': '/yjy/GeeSibling/examples/datasets/mrpc/test.jsonl',
            'validation': '/yjy/GeeSibling/examples/datasets/mrpc/validation.jsonl'
        }

    dataset = load_dataset('json', data_files=data_files)

    def tokenize_function(examples):
        # Encode the sentence pair to a fixed length.
        tokenized_output = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, padding="max_length", max_length=tensor_length)
        # Causal-LM training: labels are a copy of input_ids.
        tokenized_output['label'] = tokenized_output['input_ids'].copy()
        return tokenized_output

    # Apply text encoding and expose the columns as PyTorch tensors.
    encoded_dataset = dataset.map(tokenize_function, batched=True)
    encoded_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])

    train_dataset = encoded_dataset['train']

    def collate_fn(batch):
        # Pad the trailing batch with all-zero rows so every batch has
        # exactly `bs` samples (downstream stages expect fixed shapes).
        # NOTE(review): filler labels are 0, not -100, so the filler rows
        # contribute to the loss — confirm this is intended.
        if len(batch) < bs:
            missing = bs - len(batch)
            for _ in range(missing):
                batch.append({
                    'input_ids': torch.zeros(tensor_length, dtype=torch.long),
                    'attention_mask': torch.zeros(tensor_length, dtype=torch.long),
                    'label': torch.zeros(tensor_length, dtype=torch.long)
                })
        return {
            'input_ids': torch.stack([item['input_ids'] for item in batch]),
            'attention_mask': torch.stack([item['attention_mask'] for item in batch]),
            'label': torch.stack([item['label'] for item in batch])
        }

    # Create the DataLoader. Decide on sharding from the `world_size`
    # argument instead of re-querying mpu global state (the caller already
    # passes mpu.get_data_parallel_world_size() here, so the value is the
    # same, but the function no longer depends on the global).
    if world_size > 1:
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=local_rank)
        train_dataloader = DataLoader(train_dataset, batch_size=bs, shuffle=False, sampler=train_sampler, collate_fn=collate_fn)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=bs, shuffle=True, collate_fn=collate_fn)
    print('Data loader created with limited dataset.')
    return train_dataloader, len(train_dataset)
