from datetime import datetime
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy


from datasets import load_dataset
import math
import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

from tqdm import tqdm

import torch

from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from torch.autograd import Variable
import time
from torch.nn import CrossEntropyLoss
import pandas as pd
from geesibling.adapters.pytorch.addHook import addHook
#from geesibling.adapters.pytorch.gees_pipeline.megatron import get_args
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from transformers import LlamaConfig
from transformers import LlamaTokenizer

from geesibling.adapters.pytorch.pipeline.megatron import print_rank_0

from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel import parallelize_module
from geesibling.adapters.pytorch.tensor_parallel.run_tp import TPPolicy


#from megatron.optimizer import get_megatron_optimizer
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron
from geesibling.adapters.pytorch.pipeline.pipeline import forward_backward_pipelining_without_interleaving,forward_backward_pipelining_without_interleaving2,adjust_batch

import os

# Set environment variables (log output directory used by the pipeline runtime)
os.environ['LOG_DIR'] = './logs'

def print_datetime(string):
    """Print a labeled timestamp via ``print_rank_0``.

    Note that this call will sync across all ranks: the collective
    barrier blocks until every rank has reached this point.
    """
    torch.distributed.barrier()
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0(f'[{string}] datetime: {stamp} ')

def pretrain(
             extra_args_provider=None,
             args_defaults={}):  # NOTE(review): mutable default argument; prefer None as the default
    """Main training program.

    This function runs the following, in order:
        1) initialize Megatron-style distributed state and get the GPU list.
        2) parse global arguments (`get_args`).
        3) build the Llama tokenizer and a 1-hidden-layer `LlamaConfig`.
        4) build a pipeline-parallel `LlamaForCausalLM` when the pipeline
           world size is > 1, otherwise the tensor-parallel variant.
        5) derive a tensor-parallel plan (`TPPolicy`), apply it with
           `parallelize_module`, and move the model to the local rank.

    NOTE(review): the optimizer setup and the actual training loop are
    currently disabled — they are kept below as bare triple-quoted string
    literals (evaluated and discarded at runtime, never executed).

    Arguments:
        extra_args_provider: a function that takes a parser and adds arguments
            to it. It is used for programs to add their own arguments.
            NOTE(review): accepted but never used in the visible body.
        args_defaults: a dictionary from argument-name to argument-value used
            to set already-parsed arguments.
            NOTE(review): accepted but never used in the visible body.
    """

    # Initialize distributed/pipeline state; returns the list of usable GPUs.

    gpus_list = initialize_megatron()
    print('----------------------')
    print(gpus_list)
    # torch.distributed.barrier()
    # print('barrier done')



    args = get_args()


    # Model, optimizer, and learning rate.
    #timers('model-and-optimizer-setup').start()

    print('get tokenizer')
    # Tokenizer loaded from a local config directory.
    tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
    if tokenizer.pad_token is None:
        # Llama ships without a pad token; reuse EOS so padding is possible.
        tokenizer.pad_token = tokenizer.eos_token
    print('get cfg')
    # Single transformer layer — presumably a reduced model for testing; confirm.
    cfg = LlamaConfig(num_hidden_layers=1)
    # print(cfg)
    # cfg.max_position_embeddings = 4096
    print(cfg)
    
    # Pipeline-parallel topology for this rank.
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pre = mpu.is_pipeline_first_stage()
    post = mpu.is_pipeline_last_stage()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    if pp_size>1:
        # Pipeline variant: each stage constructs only its own slice of the model.
        from geesibling.adapters.pytorch.pipeline.models.model_llama import LlamaForCausalLM
        model = LlamaForCausalLM(config = cfg,pp_rank = pp_rank,pre_process = pre,post_process = post,pp_size = pp_size)
    else:
        # No pipeline parallelism: build the full (tensor-parallel capable) model.
        from geesibling.adapters.pytorch.tensor_parallel.models.model_llama import LlamaForCausalLM
        model = LlamaForCausalLM(config = cfg)
    print(model)

    # tp
    start = time.time()
    device_mesh = mpu.get_device_mesh()
    # if mpu.get_tensor_model_parallel_world_size()>1:
    # NOTE(review): seeding with the data-parallel rank gives every DP rank a
    # different seed — confirm this is intended (it affects dropout/init).
    torch.manual_seed(mpu.get_data_parallel_rank())
    # Build the per-module tensor-parallel plan (input/param/output layouts).
    parallelize_plan = TPPolicy(model,tokenizer,device_mesh['tp'],pre,cfg.hidden_size)
    for one in parallelize_plan:
        print(one)
        print(parallelize_plan[one].input_layouts)
        print(parallelize_plan[one].param_layouts)
        print(parallelize_plan[one].output_layouts)
    # Apply the plan over the 'tp' sub-mesh.
    tp_model = parallelize_module(
        model,
        device_mesh["tp"],
        parallelize_plan, 
    )   #
    # Prepare the TP model so DDP can wrap it later.
    _pre_dp_module_transform(tp_model)
    # else:
    #     tp_model = model
    tp_model=tp_model.to(args.local_rank)
    search_time = time.time() - start

    # Disabled alternative: GeeSibling device-map search + loading pretrained
    # weights. Kept as a string literal for reference; not executed.
    '''
    args.modelName = 'Llama2'
    start = time.time()
    hf_device_map = GeeSiblingPolicy(args.modelName,model,tokenizer,gpus_list,"sgp",pre,cfg.hidden_size)
    search_time = time.time() - start
    print(f'search time:{search_time}')
    print(hf_device_map)
    
    model = LlamaForCausalLM.from_pretrained(pretrained_model_name_or_path = '/mnt/fs/model/Llama-2-7b-hf',pp_rank = pp_rank,pre_process = pre,post_process = post,pp_size = pp_size,device_map = hf_device_map)
    print(f'momdel init done')
    '''

    # Disabled training loop (DDP wrap, Adam, epoch loop with pipelined
    # forward/backward, CSV loss export). Kept as a string literal; not executed.
    # NOTE(review): if re-enabled, the non-pipeline branch calls
    # next(train_data_iterator) twice per step, so inputs and labels would come
    # from DIFFERENT batches — fetch the batch once and index it instead.
    '''
    if mpu.get_data_parallel_world_size() >1:
        tp_model = torchDDP(tp_model,device_ids=[args.local_rank],process_group=mpu.get_data_parallel_group(),broadcast_buffers=False)
    # torch.distributed.barrier()
    # print(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 开始执行。")
    import torch.optim as optim
    optimizer = torch.optim.Adam(tp_model.parameters(), lr=2.4e-5)
    tensor_length = args.tensor_length
    micro_batch_size = args.micro_batch_size
    recv_tensor_shape = (micro_batch_size,tensor_length,cfg.hidden_size)
    # log_dir = './auto_map_2_2_pre_hook'
    train_loader,len_dataset= get_data_loader(tokenizer,micro_batch_size,tensor_length,mpu.get_data_parallel_world_size(),mpu.get_data_parallel_rank())
    epochs = args.epoch
    start_time = time.time()
    print(f'search time:{search_time}')
    
    total_loss_list = []
    save_micro_batch = args.micro_batch
    # torch.distributed.barrier()
    print(f"阶段 {mpu.get_pipeline_model_parallel_rank()} 开始执行。")
    if pp_size>1:
        for epoch in range(epochs):
            # 记录mini-batch中每个micro-batch的loss
            loss_list = []
            train_data_iterator = iter(train_loader)
            print('------------------')
            # print(len(train_loader))
            # print(math.ceil(len(train_loader) / args.micro_batch))
            # 最后一个mini-batch不完整
            args.micro_batch = save_micro_batch
            progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
            for bar_id in progress_bar:
                # 最后一个mini-batch可能不完整，进行特殊处理
                if bar_id == len(progress_bar) -1 :
                    adjust_batch(train_loader,args,len_dataset,micro_batch_size)
                loss = forward_backward_pipelining_without_interleaving2(
                    forward_step_func, train_data_iterator, tp_model,
                    False, args.micro_batch, gpus_list, recv_tensor_shape
                )
                if loss is not None:
                    loss_list.append(loss)
                    progress_bar.set_postfix({'loss': loss})  # 更新进度条的后缀显示当前批次损失
                optimizer.step()
                optimizer.zero_grad()
                # break
            
            if len(loss_list) > 0:
                total_loss_list.append(sum(loss_list) / len(loss_list))
                print(total_loss_list)
        if post:
            df = pd.DataFrame({
                'epoch':range(1,len(total_loss_list) + 1),
                'loss':total_loss_list
            })
            df.to_csv(f'pp_{pp_rank}_epochs{args.epoch}_bs{args.micro_batch * args.micro_batch_size}_len{args.tensor_length}_loss.csv')
        print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time},search_time:{search_time}')
    else:
        device = torch.cuda.current_device()
        for epoch in range(epochs):
            # 训练阶段
            progress_bar = tqdm(range(math.ceil(len(train_loader) / args.micro_batch)), desc=f'Epoch {epoch+1}')
            loss_list = []
            train_data_iterator = iter(train_loader)
            for i in progress_bar:
                input_tensor = next(train_data_iterator)['input_ids'].to(device)
                labels = next(train_data_iterator)['label'].to(device)
                outputs = tp_model(input_ids = input_tensor,labels = labels)
                # print(outputs.keys())
                loss = outputs.loss
                loss.backward()
                loss_list.append(loss)
                progress_bar.set_postfix({'loss': loss})  # 更新进度条的后缀显示当前批次损失
                optimizer.step()
                optimizer.zero_grad()
            print(f'epoch {epoch+1},loss:{sum(loss_list) / len(loss_list)}')
            if len(loss_list) > 0:
                total_loss_list.append(sum(loss_list) / len(loss_list))
                print(total_loss_list)
        print(f'train on micro_batch_size:{micro_batch_size},len:{tensor_length},time:{time.time() - start_time}')
'''

def forward_step_func(input_tensor, model):
    """One pipeline forward step: run the model on the given input tensor."""
    output = model(input_tensor)
    return output


def get_data_loader(tokenizer, bs, tensor_length, world_size, local_rank):
    """Build the MRPC training DataLoader for causal-LM fine-tuning.

    Tokenizes sentence pairs to fixed length, mirrors input_ids into labels,
    and returns (dataloader, dataset_length). When data parallelism is active
    (mpu world size > 1) a DistributedSampler shards the data across ranks.
    """
    # JSON-lines split files for the MRPC dataset.
    data_files = {
        'train': '/yjy/GeeSibling/examples/datasets/mrpc/train.jsonl',
        'test': '/yjy/GeeSibling/examples/datasets/mrpc/test.jsonl',
        'validation': '/yjy/GeeSibling/examples/datasets/mrpc/validation.jsonl'
    }
    raw_datasets = load_dataset('json', data_files=data_files)

    def encode(examples):
        """Tokenize a sentence pair and copy input_ids into 'label' (causal LM)."""
        encoded = tokenizer(
            examples['sentence1'],
            examples['sentence2'],
            truncation=True,
            padding="max_length",
            max_length=tensor_length,
        )
        encoded['label'] = encoded['input_ids'].copy()
        return encoded

    tokenized = raw_datasets.map(encode, batched=True)
    tokenized.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])

    # To limit the dataset for quick tests, select a slice here, e.g.:
    # train_dataset = tokenized['train'].select(range(100))
    train_dataset = tokenized['train']

    def collate_fn(batch):
        """Top up a short final batch with all-zero entries so len(batch) == bs.

        NOTE(review): labels in the filler rows are zeros (a real token id),
        so they contribute to the loss — confirm whether -100 (ignore index)
        would be more appropriate for the model used downstream.
        """
        while len(batch) < bs:
            batch.append({
                'input_ids': torch.zeros(tensor_length, dtype=torch.long),
                'attention_mask': torch.zeros(tensor_length, dtype=torch.long),
                'label': torch.zeros(tensor_length, dtype=torch.long)
            })
        return {
            key: torch.stack([row[key] for row in batch])
            for key in ('input_ids', 'attention_mask', 'label')
        }

    # Shard across data-parallel ranks when DP is active; otherwise shuffle locally.
    if mpu.get_data_parallel_world_size() > 1:
        sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=local_rank)
        loader = DataLoader(train_dataset, batch_size=bs, shuffle=False,
                            sampler=sampler, collate_fn=collate_fn)
    else:
        loader = DataLoader(train_dataset, batch_size=bs, shuffle=True, collate_fn=collate_fn)
    print('Data loader created with limited dataset.')
    return loader, len(train_dataset)