# Hugging Face Space: Kabardian ASR demo (Gradio app).
# (Removed non-code page residue — "Spaces: Sleeping" — left over from a web scrape.)
import torch
import librosa
import gradio as gr
from transformers import AutoProcessor, AutoModelForCTC
from pyctcdecode import build_ctcdecoder

# Load the fine-tuned CTC acoustic model and its processor from the Hub.
processor = AutoProcessor.from_pretrained("MatricariaV/kbd-asr-colab2")
model = AutoModelForCTC.from_pretrained("MatricariaV/kbd-asr-colab2")
model.eval()  # inference mode: disables dropout / training-only behavior

# Attach a 3-gram KenLM language model for beam-search CTC decoding.
# NOTE(review): the label order here must match the model's tokenizer
# vocabulary indices exactly — verify against the checkpoint's tokenizer.
vocab_list = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я', 'ё', 'ӏ', '[UNK]', '[PAD]', '<s>', '</s>']
# "3gram.bin" is expected next to this script in the Space's working directory.
decoder = build_ctcdecoder(vocab_list, kenlm_model_path="3gram.bin")
def transcribe(audio_file):
    """Transcribe an audio file to Kabardian text.

    Args:
        audio_file: Path to an audio file readable by librosa
            (Gradio passes a temp-file path when type="filepath").

    Returns:
        The decoded transcription string (CTC beam search + KenLM).
    """
    # Resample to 16 kHz, the rate the CTC model expects.
    waveform, sr = librosa.load(audio_file, sr=16000)
    inputs = processor(waveform, sampling_rate=sr, return_tensors="pt")
    with torch.no_grad():  # inference only — no autograd graph
        logits = model(**inputs).logits
    # pyctcdecode wants per-frame class probabilities as a 2-D numpy array,
    # so drop the batch dimension and move to CPU before decoding.
    probs = torch.nn.functional.softmax(logits, dim=-1).squeeze(0).cpu().numpy()
    transcription = decoder.decode(probs)
    return transcription
# Build the Gradio UI: audio file in, plain-text transcription out.
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),  # no `source` kwarg (removed in Gradio 4.x)
    outputs="text",
    title="ASR для кабардинского",
    description="Модель MMS + 3-граммная языковая модель (kenlm)"
)
if __name__ == "__main__": | |
iface.launch() | |