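"""Fine-tune camembert-base for 4-class French text classification using the
Hugging Face Trainer API."""
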
import os

# Restrict training to GPUs 0 and 1; this must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

from transformers import (
    CamembertForSequenceClassification,
    CamembertTokenizer,
    Trainer,
    TrainingArguments,
)

from loadDataSet import loadData, labels_to_numeric
from helpers import get_device, set_seed
from bert_utils import (
    FrenchDataset,
    compute_metrics,
)

# Fix the random seed so runs are reproducible.
set_seed(1)

if __name__ == "__main__":
    device = get_device()

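    # Load the pre-sliced train and validation sets with the project-local loadData helper.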
    base_path = "../code/"
    train_path = base_path + "train_slices.txt"
    val_path = base_path + "val_slices.txt"

    trainSamples, trainLabels = loadData("train", train_path)
    valSamples, valLabels = loadData("validation", val_path)

    print("Initial train size: %d" % len(trainSamples))
    print("Val size: %d" % len(valSamples))

print("Loading CamemBERT tokenizer...") |
|
|
tokenizer = CamembertTokenizer.from_pretrained("camembert-base") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
max_len = 128 |
|
|
|
|
|
|
|
|
|
|
|
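    # Convert string labels to the integer ids expected by the classification head.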
    trainLabels = labels_to_numeric(trainLabels)
    valLabels = labels_to_numeric(valLabels)

    # Tokenize the full datasets, truncating to max_len and padding to the longest sample.
    train_encodings = tokenizer(trainSamples, truncation=True, padding=True, max_length=max_len)
    valid_encodings = tokenizer(valSamples, truncation=True, padding=True, max_length=max_len)

    train_dataset = FrenchDataset(train_encodings, trainLabels)
    valid_dataset = FrenchDataset(valid_encodings, valLabels)

    # camembert-base with a freshly initialized 4-way classification head.
    model = CamembertForSequenceClassification.from_pretrained(
        "camembert-base", num_labels=4
    ).to(device)

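    # Evaluate, log, and checkpoint every 250 steps; keep at most five checkpoints
    # and reload the best one (lowest eval loss by default) when training ends.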
    training_args = TrainingArguments(
        output_dir="./bert_models_saved/out_fold",
        num_train_epochs=30,
        per_device_train_batch_size=32,
        per_device_eval_batch_size=32,
        warmup_steps=500,
        weight_decay=0.01,
        logging_dir="./logs",
        load_best_model_at_end=True,
        logging_steps=250,
        eval_steps=250,
        # Save at every evaluation so the best-scoring checkpoint exists on disk.
        save_steps=250,
        save_total_limit=5,
        save_strategy="steps",
        evaluation_strategy="steps",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=valid_dataset,
        compute_metrics=compute_metrics,
    )

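    # Fine-tune; load_best_model_at_end=True restores the best checkpoint afterwards.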
    trainer.train()

    trainer.save_model("./bert_models_saved/out_fold")

    # Evaluate the restored best model on the validation set.
    trainer.evaluate()

    # Persist the best model and its tokenizer together for later inference.
    model.save_pretrained("./bert_models_saved/best_model/")
    tokenizer.save_pretrained("./bert_models_saved/best_model/")