#!/usr/bin/env python3
# Evaluate a fine-tuned XLS-R (300M) CTC model on the Swedish Common Voice 7 test set.
import torch
from datasets import Audio, load_dataset, load_metric
from transformers import AutoModelForCTC, Wav2Vec2Processor

lang = "sv-SE"
model_id = "./xls-r-300m-sv"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Common Voice 7 is gated on the Hub: accept the terms and run `huggingface-cli login` first.
dataset = load_dataset("mozilla-foundation/common_voice_7_0", lang, split="test", use_auth_token=True)
# Note: load_metric is deprecated; on recent `datasets` versions use `evaluate.load("wer")` instead.
wer = load_metric("wer")

# Evaluate on a 100-example subset, resampled to the 16 kHz rate the model expects.
dataset = dataset.select(range(100))
dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

model = AutoModelForCTC.from_pretrained(model_id).to(device)
processor = Wav2Vec2Processor.from_pretrained(model_id)


def map_to_pred(batch):
    input_values = processor(
        batch["audio"]["array"],
        return_tensors="pt",
        padding="longest",
        sampling_rate=16_000,
    ).input_values
    with torch.no_grad():
        logits = model(input_values.to(device)).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["transcription"] = processor.batch_decode(predicted_ids)[0]
    return batch


result = dataset.map(map_to_pred, remove_columns=["audio"])

wer_result = wer.compute(references=result["sentence"], predictions=result["transcription"])
print("WER", wer_result)
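
# --- Optional normalization (an assumption, not part of the original script) ---
# Common Voice reference sentences keep casing and punctuation, which inflates
# WER for a model trained on normalized text. A minimal sketch; the character
# class below is illustrative and would need extending for the target language.
import re

chars_to_ignore = re.compile(r'[,?.!\-;:"%]')


def normalize(text):
    return chars_to_ignore.sub("", text).lower().strip()


normalized_wer = wer.compute(
    references=[normalize(t) for t in result["sentence"]],
    predictions=[normalize(t) for t in result["transcription"]],
)
print("WER (normalized)", normalized_wer)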