
Whisper Small fine-tuned for Greek transcription

How to use

You can use the model for Greek ASR:

from transformers import WhisperProcessor, WhisperForConditionalGeneration
from datasets import Audio, load_dataset

# load model and processor
processor = WhisperProcessor.from_pretrained("voxreality/whisper-small-el-finetune")
model = WhisperForConditionalGeneration.from_pretrained("voxreality/whisper-small-el-finetune")
forced_decoder_ids = processor.get_decoder_prompt_ids(language="greek", task="transcribe")

# load streaming dataset and read first audio sample
ds = load_dataset("mozilla-foundation/common_voice_11_0", "el", split="test", streaming=True)
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
input_speech = next(iter(ds))["audio"]
input_features = processor(input_speech["array"], sampling_rate=input_speech["sampling_rate"], return_tensors="pt").input_features

# generate token ids
predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)

# decode token ids to text
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
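
To sanity-check the model, you can score its output against the reference transcripts of the same Common Voice split using word error rate (WER). The sketch below reuses the processor, model, forced_decoder_ids and ds objects from the snippet above; the evaluate/jiwer dependency, the 8-sample subset, and the print formatting are illustrative choices, not part of the original card.

import torch
import evaluate  # requires: pip install evaluate jiwer

wer_metric = evaluate.load("wer")

predictions, references = [], []
for sample in ds.take(8):  # small streamed subset, just for a quick check
    features = processor(sample["audio"]["array"],
                         sampling_rate=sample["audio"]["sampling_rate"],
                         return_tensors="pt").input_features
    with torch.no_grad():
        ids = model.generate(features, forced_decoder_ids=forced_decoder_ids)
    predictions.append(processor.batch_decode(ids, skip_special_tokens=True)[0])
    references.append(sample["sentence"])  # Common Voice stores the transcript in "sentence"

print(f"WER over {len(references)} samples: {wer_metric.compute(predictions=predictions, references=references):.3f}")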

You can also use a Hugging Face pipeline:

from transformers import pipeline
from datasets import Audio, load_dataset

# load streaming dataset and read first audio sample
ds = load_dataset("mozilla-foundation/common_voice_11_0", "el", split="test", streaming=True)
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
input_speech = next(iter(ds))["audio"]

# build an ASR pipeline around the fine-tuned model
pipe = pipeline("automatic-speech-recognition", model="voxreality/whisper-small-el-finetune",
                device="cpu", batch_size=32)

# run transcription; the pipeline returns a dict with a "text" field
transcription = pipe(input_speech["array"], generate_kwargs={"language": "<|el|>", "task": "transcribe"})["text"]
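
For recordings longer than the 30-second window Whisper processes at once, the pipeline can split the audio into chunks and stitch the transcripts back together. A minimal sketch follows; the file path is a placeholder and the chunk_length_s value is a common choice, not a setting from the original card.

# transcribe a longer local file by chunking it into 30-second windows
long_pipe = pipeline("automatic-speech-recognition",
                     model="voxreality/whisper-small-el-finetune",
                     chunk_length_s=30, device="cpu")
result = long_pipe("path/to/long_greek_recording.wav",  # placeholder path
                   generate_kwargs={"language": "<|el|>", "task": "transcribe"})
print(result["text"])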
