import torch
from torch import nn
from transformers import GPT2Tokenizer, GPT2Model,GPT2LMHeadModel,TrainingArguments,Trainer
from transformers import DataCollatorForLanguageModeling,DataCollatorWithPadding
from transformers.utils.fx import symbolic_trace
from geesibling.core.types import Graph, Node
from geesibling.core.lib._graph import DataType,DeviceType,Device,search_policy
from geesibling.tools import log
from geesibling.adapters.pytorch.getTorchGraph import getTorchGraph
from geesibling.adapters.pytorch.graphToGeesiGraph import graphToGeeGraph
from geesibling.adapters.pytorch.opConvert import opNameConvert
from geesibling.adapters.pytorch.geesiDeviceConfig import device_num
from geesibling.adapters.pytorch.geesiblingPolicy import GeeSiblingPolicy
from datasets import load_metric,load_dataset
import argparse
from transformers import BertTokenizer,BertLMHeadModel,BertModel,BertForSequenceClassification
import numpy as np

__doc__ = """
Example: use geesibling-torch to train a BERT model on GLUE/MRPC.
Author: jirongting
Datetime: 2024.05.17
Version: 2, 2024.05.17, first commit
"""


def tokenize_function(example):
    """Tokenize an MRPC sentence-pair example with the module-level tokenizer.

    Relies on the global ``tokenizer`` created in the ``__main__`` section;
    truncation is enabled so long pairs fit the model's max length.
    """
    first = example["sentence1"]
    second = example["sentence2"]
    return tokenizer(first, second, truncation=True)

def compute_metrics(eval_preds):
    """Compute GLUE/MRPC metrics (accuracy and F1) for a Trainer evaluation.

    Args:
        eval_preds: an ``(logits, labels)`` pair as supplied by
            ``transformers.Trainer`` — logits of shape (batch, num_labels)
            and the integer reference labels.

    Returns:
        The dict produced by the GLUE metric's ``compute`` call.
    """
    # Load the metric once and cache it on the function object; the original
    # code re-ran load_metric() on every evaluation pass.
    # NOTE(review): datasets.load_metric is deprecated (removed in datasets 3.x);
    # migrating to the `evaluate` package would add a new dependency, so it is
    # only flagged here.
    if not hasattr(compute_metrics, "_metric"):
        compute_metrics._metric = load_metric("glue", "mrpc")
    logits, labels = eval_preds
    # Predicted class = argmax over the label dimension.
    predictions = np.argmax(logits, axis=-1)
    return compute_metrics._metric.compute(predictions=predictions, references=labels)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus', type=int, help='the number of gpus', default=1)
    parser.add_argument('--modelName', type=str, help="the name of model")

    args = parser.parse_args()

    # step1: define the model bert (from huggingface, loaded from a local path)
    tokenizer = BertTokenizer.from_pretrained('/mnt/VMSTORE/jch/bert-base-uncased')
    if tokenizer.pad_token is None:
        # BUGFIX: the original fallback assigned tokenizer.eos_token, but BERT
        # tokenizers have no eos_token, so the branch would have set pad_token
        # to None. Use eos_token only when it exists; otherwise register the
        # standard "[PAD]" special token.
        if tokenizer.eos_token is not None:
            tokenizer.pad_token = tokenizer.eos_token
        else:
            tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    model = BertForSequenceClassification.from_pretrained("/mnt/VMSTORE/jch/bert-base-uncased")

    # step2: use the geesibling policy to assign ops to devices.
    # The first model instance above is only used to derive the placement.
    geesibling_policy = GeeSiblingPolicy(args.modelName, model, tokenizer, args.gpus, "sgp")

    # step3: reload the model with the geesibling device_map applied
    model = BertForSequenceClassification.from_pretrained(
        '/mnt/VMSTORE/jch/bert-base-uncased',
        device_map=geesibling_policy,
        num_labels=2,
    )
    # NOTE(review): moving the whole model to "cuda" after supplying a
    # device_map may override the per-op placement — confirm this is intended.
    model = model.to("cuda")

    print("bert start train")

    # step4: load the GLUE/MRPC dataset and tokenize the raw sentence pairs
    raw_datasets = load_dataset("glue", "mrpc")
    tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
    # Trainer expects the target column to be named "labels".
    tokenized_datasets = tokenized_datasets.rename_column("label", 'labels')
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    # step5: train the model
    training_args = TrainingArguments(
        output_dir='./results',          # output directory
        num_train_epochs=5,              # number of training epochs
        per_device_train_batch_size=16,  # train batch size per device
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        data_collator=data_collator,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )
    trainer.train()