import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
import numpy as np
from datasets import load_dataset

if __name__ == "__main__":
    # Fine-tune a local bert-base-chinese checkpoint on a binary
    # sentence-pair classification task ("deie") and save the result.
    model_checkpoint = "model/bert-base-chinese"
    final_save_path = "model/bert-base-chinese/deie"
    batch_size = 16
    task = "deie"

    # Loads train/test splits from local data files under data/init_data/deie.
    dataset = load_dataset(f'data/init_data/{task}')
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
    # No manual .to("cuda"): Trainer moves the model to the available device,
    # so the script also runs on CPU-only machines instead of crashing.
    model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=2)

    args = TrainingArguments(
        final_save_path,
        evaluation_strategy = "epoch",
        save_strategy = "epoch",
        learning_rate=2e-5,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        num_train_epochs=1,
        weight_decay=0.01,
        push_to_hub=False,
    )

    def preprocess_function(examples):
        # Tokenize sentence pairs; pad/truncate to BERT's 512-token limit.
        return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=512, padding="max_length")

    def compute_metrics(eval_pred):
        # Accuracy over the eval split: argmax of logits vs. gold labels.
        # Without this, evaluation_strategy="epoch" would only report loss.
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return {"accuracy": float(np.mean(predictions == labels))}

    encoded_dataset = dataset.map(preprocess_function, batched=True)
    trainer = Trainer(
        model,
        args,
        train_dataset=encoded_dataset["train"],
        eval_dataset=encoded_dataset["test"],
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )
    trainer.train()
    trainer.save_model(final_save_path)  # Saves the tokenizer too