from transformers import AutoTokenizer, AutoModelForMaskedLM, BertModel, AutoModelForSequenceClassification
from transformers import get_scheduler
import torch
from torch.optim import AdamW
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import torch_directml as torch_dml

from dataset_extractor import *

# Tokenizer plus a sequence-classification head on top of bert-base-chinese.
# num_labels=202 presumably matches the CAIL exercise_contest label set -- TODO confirm.
tokenizer = AutoTokenizer.from_pretrained('data/bert-base-chinese')
model_cls = AutoModelForSequenceClassification.from_pretrained('data/bert-base-chinese', num_labels=202)

# extract() comes from dataset_extractor; judging by the usage below it returns a
# mapping with "content" (list of str) and "label" (list of int) -- verify there.
train_pair = extract("data/dataset/cail/exercise_contest/data_train.json")

batch_size = 20
train_batch = tokenizer(train_pair["content"], max_length=512, truncation=True, padding="max_length",
                        return_tensors="pt")
# Named train_dataset (not "train") so it cannot be shadowed by a function of
# the same name; all uses of this dataset are local to this setup section.
train_dataset = TensorDataset(train_batch["input_ids"], train_batch["attention_mask"],
                              torch.tensor(train_pair["label"]))
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)

optimizer = AdamW(model_cls.parameters(), lr=2e-5)
num_epochs = 8

# Linear decay schedule over the full training run, no warmup.
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_scheduler(
    name="linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
)

# DirectML device (GPU acceleration on Windows without CUDA).
device = torch_dml.device()
def train(model):
    """Fine-tune *model* in place on the module-level train_dataloader.

    Uses the module-level optimizer, lr_scheduler, device and num_epochs.
    The model is moved to the DirectML device; progress is printed every
    10 steps and the average loss is printed at the end of each epoch.
    """
    model.to(device)
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0.0
        for step, batch in enumerate(train_dataloader):
            if step % 10 == 0 and step != 0:
                # total_loss accumulates per-batch mean losses (HF heads return
                # a mean-reduced loss), so the running average divides by the
                # number of batches seen.  The previous denominator
                # step * batch_size understated the loss by a factor of
                # batch_size and disagreed with the epoch average below.
                print("step: ", step, "  loss:", total_loss / step)
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)
            model.zero_grad()
            outputs = model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask,
                            labels=b_labels)
            loss = outputs.loss  # cross-entropy loss computed by the HF classification head
            total_loss += loss.item()
            loss.backward()
            # Clip gradient norm to 1.0 to prevent exploding gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            lr_scheduler.step()
        avg_train_loss = total_loss / len(train_dataloader)
        print("avg_loss:", avg_train_loss)


# Guard the entry point so importing this module does not start a training run.
if __name__ == "__main__":
    train(model_cls)