# app.py — multilingual Coqui-STT Gradio demo (uploaded by mbarnig, commit 4a86b3f)
import gradio as gr
import numpy as np
import wave
from huggingface_hub import hf_hub_download
from stt import Model
# Session state holder for the streaming tab (old Gradio API).
# NOTE(review): appears unused — the interfaces below use the "state" string shortcut instead.
state = gr.Variable()
# HuggingFace Hub repository holding all STT models and scorers.
REPO_ID = "mbarnig/lb-de-fr-en-pt-coqui-stt-models"
# Title and description shown on both interface tabs.
my_title = "🇩🇪 🇫🇷 🇬🇧 🇵🇹 Mir verstinn och Lëtzebuergesch ! 🇱🇺"
my_description = "Multilingual Speech-to-Text (STT) system understanding Lëtzebuergesch, Deutsch, Français, English and Português. My luxembourgish stt-model is based on [Coqui-STT version 1.3.0](https://github.com/coqui-ai/STT), the other models are downloaded from the [Coqui Model Zoo](https://coqui.ai/models). Thanks to 🐸 [Coqui.ai](https://https://coqui.ai/)."
# Languages offered in the UI radio button; must match the keys handled in customization().
STT_LANGUAGES = [
    "Deutsch",
    "English",
    "Français",
    "Lëtzebuergesch",
    "Português"
]
# Example rows for the upload tab: [audio path, language, use-scorer flag, speaker, ground-truth text].
EXAMPLES = [
    ["examples/german.wav", "Deutsch", True, "Thorsten", "wir setzen uns deshalb für eine zweistaaten lösung ein und hoffen auch dass hier fortschritte im friedensprozess gemacht werden"],
    ["examples/english.wav", "English", True, "Linda", "every window and roof which could command a view of the horrible performance was occupied"],
    ["examples/french.wav", "Français", True, "Bernard", "chacun avait sa part dans ces travaux suivant les prescriptions d'un règlement affiché dans la grande salle"],
    ["examples/luxembourgish.wav", "Lëtzebuergesch", True, "Pit", "ma och den aarbechtsmaart muss weider wuessen fir datt de system funktionéiert déi faméis rentemauer steet schonn do ze wénken"],
    ["examples/portuguese.wav", "Português", True, "Ed", "academicismo ou academismo designam originalmente o método de ensino artístico profissionalizante concebido formalizado e ministrado pelas academias de arte europeias"]
]
def reformat_freq(sr, y):
    """Convert a recorded audio chunk to the 16 kHz int16 format the STT models expect.

    Args:
        sr: sample rate of the incoming chunk; only 16000 and 48000 are accepted.
        y: 1-D numpy array of int16 samples (mono — TODO confirm for stereo mics).

    Returns:
        Tuple ``(16000, samples)`` where 48 kHz input has been peak-normalized
        and decimated 3:1 by averaging consecutive sample triples.

    Raises:
        ValueError: if ``sr`` is neither 16000 nor 48000.
    """
    if sr not in (48000, 16000):
        # Coqui/DeepSpeech models only support 16k; we can down-convert 48k.
        raise ValueError("Unsupported rate", sr)
    if sr == 48000:
        # Drop trailing samples so the length is a multiple of 3 — the previous
        # reshape((-1, 3)) raised for chunks whose length was not divisible by 3.
        usable = len(y) - (len(y) % 3)
        if usable:
            # Normalize to full int16 range, then average each triple (3:1 decimation).
            peak = max(np.max(y[:usable]), 1)
            y = (
                ((y[:usable] / peak) * 32767)
                .reshape((-1, 3))
                .mean(axis=1)
                .astype("int16")
            )
        else:
            # Too few samples to decimate (np.max on an empty slice would raise).
            y = np.zeros(0, dtype="int16")
        sr = 16000
    return sr, y
def customization(language, scorer):
    """Download (or reuse the HF cache of) the acoustic model and scorer for *language*.

    Args:
        language: one of the STT_LANGUAGES labels.
        scorer: kept for interface compatibility — the scorer toggle itself is
            applied by the callers (stt_upload/stt_record), not here.

    Returns:
        Tuple ``(model, scorer_path)`` — a loaded Coqui ``Model`` and the local
        path of the matching KenLM scorer file.

    Raises:
        ValueError: if *language* is not a supported label (the previous code
        printed a message and then crashed with UnboundLocalError on return).
    """
    # Repo-relative (model, scorer) file names per UI language label.
    files = {
        "Lëtzebuergesch": ("luxembourgish/model.tflite", "luxembourgish/kenlm-luxembourgish.scorer"),
        "Deutsch": ("german/model.tflite", "german/de-aashishag-1-prune-kenlm.scorer"),
        "Français": ("french/model.tflite", "french/kenlm.scorer"),
        "English": ("english/model.tflite", "english/huge-vocabulary.scorer"),
        "Português": ("portuguese/model.tflite", "portuguese/pt-itml-0-prune-kenlm.scorer"),
    }
    if language not in files:
        raise ValueError("Please select a language !")
    model_file, scorer_file = files[language]
    model_path = hf_hub_download(repo_id=REPO_ID, filename=model_file)
    scorer_path = hf_hub_download(repo_id=REPO_ID, filename=scorer_file)
    return Model(model_path), scorer_path
def stt_upload(audio_file_path, language, scorer, speaker, groundtruth):
    """Transcribe an uploaded 16 kHz WAV file in the selected language.

    Args:
        audio_file_path: local path of the uploaded WAV file (16 kHz expected).
        language: one of the STT_LANGUAGES labels.
        scorer: when truthy, decode with the external KenLM language model.
        speaker, groundtruth: hidden example metadata; unused in the computation.

    Returns:
        The transcription string, or ``None`` when no file was provided.
    """
    if not audio_file_path:
        print("Please upload an audio file with sample-rate 16000 Hz for transcription !")
        return
    acoustic_model, scorer_path = customization(language, scorer)
    # Context manager closes the wave file deterministically (the original
    # leaked the open handle).
    with wave.open(audio_file_path, 'r') as audio:
        audio_buffer = np.frombuffer(audio.readframes(audio.getnframes()), np.int16)
    if scorer:
        acoustic_model.enableExternalScorer(scorer_path)
    else:
        acoustic_model.disableExternalScorer()
    # Single decode call shared by both branches (was duplicated).
    return acoustic_model.stt(audio_buffer)
def stt_record(language, scorer, audio_record_buffer, state=""):
    """Streaming STT callback: transcribe the latest microphone chunk and append it.

    Args:
        language: one of the STT_LANGUAGES labels.
        scorer: when truthy, decode with the external KenLM language model.
        audio_record_buffer: ``(sample_rate, numpy_samples)`` tuple from gr.Audio.
        state: transcription accumulated across previous streamed chunks.

    Returns:
        ``(newstate, newstate)`` — the same string twice, feeding both the
        visible Textbox and the "state" carry-over input — or ``None`` when
        no audio was provided.
    """
    if not audio_record_buffer:
        print("Please record your own speech in the selected language for transcription !")
        return
    acoustic_model, scorer_path = customization(language, scorer)
    # Down-convert 48 kHz microphone input to the 16 kHz the models require.
    _, samples = reformat_freq(*audio_record_buffer)
    if scorer:
        acoustic_model.enableExternalScorer(scorer_path)
    else:
        acoustic_model.disableExternalScorer()
    # Single decode call shared by both branches (was duplicated).
    newstate = state + acoustic_model.stt(samples) + " "
    return newstate, newstate
# HTML user guides rendered below each tab. Fixes in record_article: the
# missing space in "additionalwords" and the unclosed <p> tag.
upload_article = "<h3>User guide</h3><p>1. Click one row from the examples and view the results. Compare the transcription with the ground-truth text. 2. Clear the interface and upload your own audio-file in the selected language. The sampling-rate of the audio file must be 16000 Hz. 3. Submit an audio-file with or without a language model and compare the results. 4. Switch to the realtime-streaming STT tab and record your own speech. 5. Have fun !</p>"
record_article = "<h3>User guide</h3><p>1. Record your own speech in the selected language and view the automatic streamed transcription which is updated continuously with additional words. 2. Stop the recording and compare the final transcription with your input. 3. Switch to the file-upload STT tab to test the examples in the different languages. 4. Have fun!</p>"
# Inputs for the upload tab — positional order must match stt_upload's parameters.
upload_inputs = [
    gr.Audio(type="filepath", label="Upload Audio"),
    gr.Radio(label="Language", choices = STT_LANGUAGES, value = "Lëtzebuergesch"),
    gr.Checkbox(label="use language model", value = True),
    # Hidden fields carry the example metadata (speaker / ground-truth text).
    gr.Textbox(label = "Speaker", visible=False),
    gr.Textbox(label = "Groundtruth", visible=False)
]
# Inputs for the streaming tab — positional order must match stt_record's
# parameters; the "state" shortcut carries the accumulated transcription.
record_inputs = [
    gr.Radio(label="Language", choices = STT_LANGUAGES, value = "Lëtzebuergesch"),
    gr.Checkbox(label="use language model", value = True),
    gr.Audio(source="microphone", type="numpy", label="Record Audio", streaming=True), "state"
]
upload_outputs = gr.Textbox(lines=5, label="Transcription")
# Streaming tab returns the transcription twice: for display and for "state" carry-over.
record_outputs = [gr.Textbox(lines=5, label="Transcription"), "state"]
# Tab 1: transcribe an uploaded/example audio file.
upload_iface = gr.Interface(
    fn=stt_upload,
    inputs=upload_inputs,
    outputs=upload_outputs,
    title=my_title,
    description = my_description,
    article = upload_article,
    examples = EXAMPLES,
    allow_flagging = False
)
# Tab 2: live microphone transcription; live=True re-runs fn on every streamed chunk.
record_iface = gr.Interface(
    fn=stt_record,
    inputs=record_inputs,
    outputs=record_outputs,
    title=my_title,
    description = my_description,
    article = record_article,
    allow_flagging = False,
    live=True
)
# NOTE(review): tab labels say "Text-to-Speech" but this app performs Speech-to-Text.
iface = gr.TabbedInterface([upload_iface, record_iface], ["Text-to-Speech with audio-file upload", "Realtime Text-to-Speech"])
iface.launch()