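# Fine-tune XLM-V (facebook/xlm-v-base) as a text-based language identifier:
# train a sequence-classification head on the raw transcriptions of FLEURS to
# predict each sentence's language id.
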
import evaluate
import numpy as np
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    Trainer,
    TrainingArguments,
)
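
# FLEURS covers 102 languages; the "all" config concatenates every language
# subset, and each example carries a human-readable language name plus a
# numeric lang_id we can use as the classification target.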
dataset_id = "google/fleurs"
model_id = "facebook/xlm-v-base"
metric_name = "accuracy"
# Keep only the raw transcription and the language id (which we'll use as label)
columns_to_remove = [
    "audio",
    "id",
    "num_samples",
    "path",
    "transcription",
    "gender",
    "language",
    "lang_group_id",
]
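
# Load the pooled train/validation splits. ignore_verifications skips checksum
# verification of the download (newer `datasets` releases replace this flag
# with verification_mode).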
train, val = load_dataset(
    dataset_id, "all", split=["train", "validation"], ignore_verifications=True
)
# Build the label2id and id2label dictionaries
unique_langs = set()
label2id = {}
id2label = {}
for lang, lang_id in zip(val["language"], val["lang_id"]):
    if lang not in unique_langs:
        unique_langs.add(lang)
        id2label[lang_id] = lang
        label2id[lang] = lang_id
# Sort both mappings into ascending lang_id order for readability.
id2label = dict(sorted(id2label.items(), key=lambda item: item[0]))
label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
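
# Drop audio and metadata, keep the raw transcription as the input text, and
# expose the numeric lang_id as the label column the Trainer expects.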
train = train.remove_columns(columns_to_remove)
val = val.remove_columns(columns_to_remove)
train = train.rename_column("raw_transcription", "text")
val = val.rename_column("raw_transcription", "text")
train = train.rename_column("lang_id", "label")
val = val.rename_column("lang_id", "label")
train = train.shuffle(seed=42)
val = val.shuffle(seed=42)
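
# Tokenize without padding; the Trainer falls back to DataCollatorWithPadding
# when a tokenizer is supplied, so each batch is padded dynamically.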
tokenizer = AutoTokenizer.from_pretrained(model_id)
def preprocess(data):
    return tokenizer(data["text"], truncation=True)
processed_train = train.map(preprocess, batched=True)
processed_val = val.map(preprocess, batched=True)
print(processed_train)
print(processed_val)
# Fine-tune the model
model = AutoModelForSequenceClassification.from_pretrained(
    model_id,
    num_labels=len(id2label),
    label2id=label2id,
    id2label=id2label,
    # The checkpoint ships a masked-LM head, not a classification head, so a
    # fresh head sized to the number of languages is initialized on top.
    ignore_mismatched_sizes=True,
)
args = TrainingArguments(
    "xlm-v-base-language-id",
    learning_rate=3e-5,
    warmup_ratio=0.1,
    # Effective train batch size: 16 per device x 4 accumulation steps = 64.
    per_device_train_batch_size=16,
    gradient_accumulation_steps=4,
    per_device_eval_batch_size=16,
    num_train_epochs=5,
    load_best_model_at_end=True,
    metric_for_best_model=metric_name,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_steps=10,
    fp16=True,
    push_to_hub=True,
)
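
# Accuracy over the predicted language ids, computed at the end of each epoch.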
metric = evaluate.load(metric_name)
def compute_metrics(eval_pred):
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=predictions, references=eval_pred.label_ids)
trainer = Trainer(
    model,
    args,
    train_dataset=processed_train,
    eval_dataset=processed_val,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
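
# Suggested follow-up (not in the original script): evaluate the best
# checkpoint restored by load_best_model_at_end and push the final model to
# the Hub using the standard Trainer methods.
# print(trainer.evaluate())
# trainer.push_to_hub()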