import torch
import librosa
import gradio as gr
from transformers import AutoProcessor, AutoModelForCTC
from pyctcdecode import build_ctcdecoder

# Load the model and processor
processor = AutoProcessor.from_pretrained("MatricariaV/kbd-asr-colab2")
model = AutoModelForCTC.from_pretrained("MatricariaV/kbd-asr-colab2")
model.eval()

# Hook up the 3-gram LM (KenLM)
vocab_list = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я', 'ё', 'ӏ', '[UNK]', '[PAD]', '<s>', '</s>']
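# The label order must match the model's CTC output vocabulary;
# "3gram.bin" is a binary KenLM model loaded from a relative path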
decoder = build_ctcdecoder(vocab_list, kenlm_model_path="3gram.bin")

def transcribe(audio_file):
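    # Load the audio file and resample it to the 16 kHz rate the model expects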
    waveform, sr = librosa.load(audio_file, sr=16000)
    inputs = processor(waveform, sampling_rate=sr, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
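    # Convert logits to per-frame probabilities and decode with beam search + the n-gram LM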
    probs = torch.nn.functional.softmax(logits, dim=-1).squeeze(0).cpu().numpy()
    transcription = decoder.decode(probs)
    return transcription

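# Gradio UI: audio file in, transcription text out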
iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),  # no `source` kwarg
    outputs="text",
    title="ASR for Kabardian",
    description="MMS model + 3-gram language model (KenLM)"
)

if __name__ == "__main__":
    iface.launch()