import traceback
import os
from datetime import datetime
import argparse
import torchvision
import torchvision.transforms as transforms
import torch
from transformers import LlamaForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling
from transformers import DataCollatorWithPadding, TrainingArguments,Trainer
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from geesibling.core.types import Graph, Node
from geesibling.core.lib._graph import DataType,DeviceType,Device,search_policy
from geesibling.tools import log
from geesibling.adapters.pytorch.getTorchGraph import getTorchGraph
from geesibling.adapters.pytorch.graphToGeesiGraph import graphToGeeGraph
from geesibling.adapters.pytorch.opConvert import opNameConvert
from geesibling.adapters.pytorch.geesiDeviceConfig import device_num
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy
from geesibling.adapters.pytorch.addHook import addHook
from datasets import load_metric,load_dataset
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss
from transformers import get_scheduler
from tqdm.auto import tqdm
__doc__ = """
examples - use geesibling-torch and DDP to train model Llama2-13B
Author: nikainuo
datetime: 2024.07.6
version: 1 2024.05.17 first commit
"""

def main():
    """Parse command-line arguments, configure the distributed rendezvous, and launch training.

    Computes ``args.world_size`` from processes-per-node times node count,
    sets the torch.distributed master address/port, then calls :func:`train`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpus', type=int,
                        help='the number of gpus', default=1)
    parser.add_argument('-m', '--modelName', type=str,
                        help="the name of model")
    parser.add_argument('-n', '--nodes', default=1,
                        type=int, metavar='N',
                        help='number of nodes participating in the job')
    parser.add_argument('-p', '--process', default=1, type=int,
                        help='number of process per node')
    parser.add_argument('-nr', '--nr', default=0, type=int,
                        help='ranking within the nodes')
    parser.add_argument('-s', '--seq', default=128, type=int,
                        help='seq length')
    parser.add_argument('-bs', '--batch_size', default=1, type=int,
                        help='batch_size')
    parser.add_argument('--epochs', default=1, type=int,
                        metavar='N',
                        help='number of total epochs to run')
    args = parser.parse_args()

    # Total number of processes in the distributed job.
    args.world_size = args.process * args.nodes
    # Rendezvous endpoint for torch.distributed (init_method='env://').
    # Use setdefault so an externally provided MASTER_ADDR/MASTER_PORT
    # (e.g. from a cluster scheduler or torchrun) is not clobbered;
    # the hard-coded values remain the fallback for manual launches.
    os.environ.setdefault('MASTER_ADDR', '172.16.1.49')
    os.environ.setdefault('MASTER_PORT', '1234')
    train(args)

def train(args):
    """Train Llama-2-13B on GLUE/MRPC with DDP and a geesibling device placement policy.

    Initializes the NCCL process group, builds the model twice (once to derive
    the geesibling placement policy, once with that policy as ``device_map``),
    shards the tokenized dataset with a DistributedSampler, and runs a plain
    Adam training loop, printing the average loss at the end of each epoch.

    Args:
        args: parsed argparse namespace; must carry ``nr``, ``world_size``,
            ``modelName``, ``gpus``, ``seq``, ``batch_size`` and ``epochs``.
    """
    # NOTE(review): with more than one process per node (args.process > 1),
    # every process on the same node receives the same global rank here.
    # The usual formula is rank = args.nr * args.process + local_rank —
    # confirm against the actual launch mechanism.
    rank = args.nr
    # Initialize the process group.
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
        world_size=args.world_size,  # total number of processes
        rank=rank  # global rank of this process
    )
    print(torch.cuda.device_count())
    print(torch.cuda.current_device())
    # Load the tokenizer of the pretrained model.
    tokenizer = AutoTokenizer.from_pretrained('/mnt/fs/model/Llama-2-13b-hf')
    if tokenizer.pad_token is None:
        # Llama tokenizers ship without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token
    # Load the Llama-2-13B pretrained weights (first pass: only used to
    # derive the geesibling placement policy below).
    model = LlamaForCausalLM.from_pretrained("/mnt/fs/model/Llama-2-13b-hf")

    # Use geesibling policy to decide operator/device placement.
    geesibling_policy = GeeSiblingPolicy(args.modelName, model, tokenizer, args.gpus, "sgp")

    # Reload the model with the geesibling policy as the device map.
    model = LlamaForCausalLM.from_pretrained('/mnt/fs/model/Llama-2-13b-hf', device_map=geesibling_policy, num_labels=2)
    print(model.hf_device_map)

    # Wrap the model for distributed data parallelism.
    model = DDP(model)
    print("gpt start train")

    # Load the dataset from huggingface and preprocess the raw data.
    raw_datasets = load_dataset("glue", "mrpc")
    tensor_length = args.seq
    def tokenize_function(examples):
        # Encode each sentence pair, padded/truncated to a fixed length.
        return tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, padding="max_length", max_length=tensor_length)

    tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
    tokenized_datasets = tokenized_datasets.rename_column("label", 'labels')
    # mlm=False -> causal-LM collation (labels are shifted input ids).
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
    tokenized_datasets = tokenized_datasets.remove_columns(
        ["sentence1", "sentence2", "idx"]
    )
    tokenized_datasets.set_format("torch")
    print(tokenized_datasets["train"].column_names)
    # Shard the training data across ranks.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        tokenized_datasets["train"],
        num_replicas=args.world_size,
        rank=rank
    )
    # shuffle must stay False: the DistributedSampler owns the shuffling.
    train_loader = torch.utils.data.DataLoader(dataset=tokenized_datasets["train"],
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=0,
                                               collate_fn=data_collator,
                                               sampler=train_sampler)

    print('------------------data done------------------')

    # Optimizer.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
    # Total number of optimization steps across all epochs.
    num_training_steps = args.epochs * len(train_loader)
    step_num = len(train_loader)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    start = datetime.now()
    progress_bar = tqdm(range(num_training_steps))
    for epoch in range(args.epochs):
        # Re-seed the sampler so each epoch sees a different shard ordering.
        train_sampler.set_epoch(epoch)
        total_loss = 0
        for i, batch in enumerate(train_loader):
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            # .item() detaches the scalar; accumulating the raw tensor would
            # retain every step's autograd graph and grow memory unboundedly.
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            progress_bar.update(1)
            if i == len(train_loader) - 1:
                # Report the epoch's average loss at its final step.
                # NOTE(review): this prints on every rank, not only rank 0.
                print('Epoch [{}/{}], average loss {:.4f}'.format(epoch + 1, args.epochs, total_loss / step_num))
    progress_bar.close()
    print("Training time: " + str(datetime.now() - start))
    dist.destroy_process_group()

# Script entry point: parse CLI arguments and launch distributed training.
if __name__ == '__main__':
    main()