xls-r-300m-sv / eval.py
#!/usr/bin/env python3
from datasets import load_dataset, load_metric, Audio
from transformers import AutoModelForCTC, AutoProcessor, Wav2Vec2Processor
import torch
import re
lang = "sv-SE"
model_id = "./xls-r-300m-sv"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Common Voice 7.0 Swedish test split and the WER metric.
dataset = load_dataset("mozilla-foundation/common_voice_7_0", lang, split="test", use_auth_token=True)
wer = load_metric("wer")

# Resample the audio to 16 kHz, the rate the model expects.
dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

model = AutoModelForCTC.from_pretrained(model_id).to(device)
processor = AutoProcessor.from_pretrained(model_id)

chars_to_ignore_regex = r'[,?.!\-\;\:\"β€œ%β€˜β€οΏ½β€”β€™β€¦β€“]'  # change to the ignored characters of your fine-tuned model
def map_to_pred(batch):
    # Featurize the raw waveform and run the model.
    input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest", sampling_rate=16_000).input_values
    with torch.no_grad():
        logits = model(input_values.to(device)).logits

    # A plain Wav2Vec2Processor decodes argmax token ids; a processor with a
    # language model (Wav2Vec2ProcessorWithLM) decodes the logits directly.
    if processor.__class__.__name__ == "Wav2Vec2Processor":
        predicted_ids = torch.argmax(logits, dim=-1)
        transcription = processor.batch_decode(predicted_ids)[0]
    else:
        transcription = processor.batch_decode(logits.cpu().numpy()).text[0]

    batch["transcription"] = transcription
    # Normalize the reference: lowercase and strip ignored punctuation.
    batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"].lower())
    return batch
# Transcribe the whole test split and score it against the normalized references.
result = dataset.map(map_to_pred, remove_columns=["audio"])

wer_result = wer.compute(references=result["text"], predictions=result["transcription"])
print("WER", wer_result)