from datetime import datetime
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy

from geesibling.adapters.pytorch.pipeline.models.model_mistral import MistralForCausalLM
from datasets import load_dataset
import math
import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import MistralConfig
from transformers import LlamaTokenizer

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0


#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving,forward_backward_pipelining_without_interleaving2,adjust_batch
from geesibling.adapters.pytorch.get_data import get_data_loader

def print_datetime(string):
    """Log *string* together with the current wall-clock time on rank 0.

    Note that this call will sync across all ranks (it issues a
    distributed barrier before printing).
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0(f'[{string}] datetime: {stamp} ')

def pretrain(
             extra_args_provider=None,
             args_defaults=None):
    """Main training program.

    Runs, in order:
        1) initialize Megatron and obtain the GPU list.
        2) build the tokenizer, config and pipeline-parallel Mistral model.
        3) build the data loader and Adam optimizer.
        4) train the model with pipelined forward/backward passes and,
           on the last pipeline stage, dump per-epoch losses to CSV.

    Arguments:
        extra_args_provider: a function that takes a parser and adds
            arguments to it. NOTE(review): currently unused in this body;
            kept for interface compatibility.
        args_defaults: a dictionary from argument-name to argument-value
            used to override already-parsed arguments. NOTE(review): also
            currently unused here.
    """
    # Fix: `args_defaults={}` was a shared mutable default argument.
    if args_defaults is None:
        args_defaults = {}

    # Initialize and get arguments, timers, and Tensorboard writer.
    gpus_list = initialize_megatron()
    print('----------------------')
    print(gpus_list)

    print_datetime('after megatron is initialized')

    args = get_args()

    # Model, optimizer, and learning rate.
    tokenizer = LlamaTokenizer.from_pretrained('./mistral7b')
    if not getattr(tokenizer, "pad_token", None):
        # Fix: use the public `eos_token` accessor instead of the private
        # `_eos_token` attribute, which is an implementation detail of
        # transformers and may hold an AddedToken rather than a string.
        tokenizer.pad_token = tokenizer.eos_token

    cfg = MistralConfig.from_pretrained('./mistral7b')
    print(cfg)
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pre = mpu.is_pipeline_first_stage()
    post = mpu.is_pipeline_last_stage()
    pp_size = mpu.get_pipeline_model_parallel_world_size()

    print(f"pp_size:{pp_size}")
    print()

    # Mistral shares the Llama architecture, so reuse the Llama2 policy name.
    args.modelName = 'Llama2'

    # Device-placement search is currently disabled; keep a placeholder so
    # the summary print at the end does not raise (fix: `search_time` was
    # referenced below but never assigned, causing a NameError after the
    # whole training run finished).
    search_time = None
    # hf_device_map = GeeSiblingPolicy(args.modelName, model, tokenizer,
    #                                  gpus_list, "sgp", pre, cfg.hidden_size)

    # Initialize the model directly — no from_pretrained checkpoint load.
    model = MistralForCausalLM(
        config=cfg,
        pp_rank=pp_rank, pre_process=pre, post_process=post, pp_size=pp_size
    )
    print(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=2.4e-5)
    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    # Shape of activations received from the previous pipeline stage.
    recv_tensor_shape = (micro_batch_size, tensor_length, cfg.hidden_size)
    train_loader, len_dataset = get_data_loader(tokenizer, micro_batch_size, tensor_length)
    print(f'len loader :{len(train_loader)}')
    epochs = args.epoch
    start_time = time.time()
    total_loss_list = []
    save_micro_batch = args.micro_batch
    for epoch in range(epochs):
        # Per-micro-batch losses within the current epoch.
        loss_list = []
        train_data_iterator = iter(train_loader)
        print('------------------')
        # Restore micro_batch, which adjust_batch may have shrunk for the
        # final (possibly incomplete) mini-batch of the previous epoch.
        args.micro_batch = save_micro_batch
        progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')

        for bar_id in progress_bar:
            # The last mini-batch may be incomplete; adjust it specially.
            if bar_id == len(progress_bar) - 1:
                adjust_batch(train_loader, args, len_dataset, micro_batch_size)
            loss = forward_backward_pipelining_without_interleaving2(
                forward_step_func, train_data_iterator, model,
                False, args.micro_batch, gpus_list, recv_tensor_shape
            )
            if loss is not None:
                loss_list.append(loss)
                # Show the current mini-batch loss on the progress bar.
                progress_bar.set_postfix({'loss': loss})
            optimizer.step()
            optimizer.zero_grad()
        if len(loss_list) > 0:
            total_loss_list.append(sum(loss_list) / len(loss_list))
            print(total_loss_list)
    if post:
        # Only the last pipeline stage observes losses; dump the curve.
        df = pd.DataFrame({
            'epoch': range(1, len(total_loss_list) + 1),
            'loss': total_loss_list
        })
        df.to_csv(f'pp_{pp_rank}_epochs{args.epoch}_bs{args.micro_batch * args.micro_batch_size}_len{args.tensor_length}_loss.csv')

    print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time},search_time:{search_time}')


def forward_step_func(input_tensor, model):
    """Single pipeline forward step: feed *input_tensor* through *model*."""
    output = model(input_tensor)
    return output



