app.py
import os

# Install Whisper from source (Hugging Face Spaces workaround; ideally this
# would be pinned in requirements.txt instead).
os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import whisper
from datetime import datetime

import assets
def sendToWhisper(audio_record, audio_upload, task, models_selected, language_toggle, language_selected, without_timestamps):
    results = []

    # Prefer the microphone recording; fall back to the uploaded file.
    audio = None
    if audio_record is not None:
        audio = audio_record
    elif audio_upload is not None:
        audio = audio_upload
    else:
        # One placeholder row, matching the five output columns.
        return [["Invalid input"] * 5]

    # Load the audio and pad/trim it to the 30-second window Whisper expects.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    for model_name in models_selected:
        start = datetime.now()
        model = whisper.load_model(model_name)
        mel = whisper.log_mel_spectrogram(audio).to(model.device)

        options = whisper.DecodingOptions(fp16=False, without_timestamps=without_timestamps, task=task)
        if language_toggle == "Manual":
            options = whisper.DecodingOptions(fp16=False, without_timestamps=without_timestamps, task=task, language=language_selected)

        # Only the multilingual checkpoints can detect the language; the
        # English-only ".en" models are pinned to English.
        language = ""
        prob = 0
        if model_name in assets.lang_detect:
            _, probs = model.detect_language(mel)
            language = max(probs, key=probs.get)
            prob = probs[language]
        else:
            language = "en"
            options = whisper.DecodingOptions(fp16=False, without_timestamps=without_timestamps, task=task, language="en")

        output_text = whisper.decode(model, mel, options)
        results.append([model_name, output_text.text, language, str(prob), str((datetime.now() - start).total_seconds())])
    return results
avail_models = whisper.available_models()

with gr.Blocks(css=assets.css) as demo:
    gr.Markdown("This is a demo of OpenAI's speech-to-text (ASR) model, Whisper. Learn more about the models on [GitHub](https://github.com/openai/whisper/search?q=DecodingOptions&type=). FYI: the larger models take a lot longer to transcribe :)")
    gr.Markdown("Here are sample audio files to try out: [Sample Audio](https://drive.google.com/drive/folders/1qYek06ZVeKr9f5Jf35eqi-9CnjNIp98u?usp=sharing)")
    gr.Markdown("Built by: [@davidtsong](https://twitter.com/davidtsong)")

    with gr.Column():
        gr.Markdown("## Input")
        with gr.Row():
            audio_record = gr.Audio(source="microphone", label="Audio to transcribe", type="filepath", elem_id="audio_inputs")
            audio_upload = gr.Audio(source="upload", type="filepath", interactive=True, elem_id="audio_inputs")
        models_selected = gr.CheckboxGroup(avail_models, label="Models to use")
        with gr.Accordion("Settings", open=False):
            task = gr.Dropdown(["transcribe", "translate"], label="Task", value="transcribe")
            language_toggle = gr.Dropdown(["Automatic", "Manual"], label="Language Selection", value="Automatic")
            language_selected = gr.Dropdown(list(assets.LANGUAGES.keys()), label="Language")
            without_timestamps = gr.Checkbox(label="Without timestamps", value=True)
        submit = gr.Button(label="Run")

    gr.Markdown("## Output")
    output = gr.Dataframe(headers=["Model", "Text", "Language", "Language Confidence", "Time(s)"], label="Results", wrap=True)

    submit.click(fn=sendToWhisper, inputs=[audio_record, audio_upload, task, models_selected, language_toggle, language_selected, without_timestamps], outputs=output)

demo.launch()
assets.py
LANGUAGES = {
    "en": "english",
    "zh": "chinese",
    "de": "german",
    "es": "spanish",
    "ru": "russian",
    "ko": "korean",
    "fr": "french",
    "ja": "japanese",
    "pt": "portuguese",
    "tr": "turkish",
    "pl": "polish",
    "ca": "catalan",
    "nl": "dutch",
    "ar": "arabic",
    "sv": "swedish",
    "it": "italian",
    "id": "indonesian",
    "hi": "hindi",
    "fi": "finnish",
    "vi": "vietnamese",
    "iw": "hebrew",
    "uk": "ukrainian",
    "el": "greek",
    "ms": "malay",
    "cs": "czech",
    "ro": "romanian",
    "da": "danish",
    "hu": "hungarian",
    "ta": "tamil",
    "no": "norwegian",
    "th": "thai",
    "ur": "urdu",
    "hr": "croatian",
    "bg": "bulgarian",
    "lt": "lithuanian",
    "la": "latin",
    "mi": "maori",
    "ml": "malayalam",
    "cy": "welsh",
    "sk": "slovak",
    "te": "telugu",
    "fa": "persian",
    "lv": "latvian",
    "bn": "bengali",
    "sr": "serbian",
    "az": "azerbaijani",
    "sl": "slovenian",
    "kn": "kannada",
    "et": "estonian",
    "mk": "macedonian",
    "br": "breton",
    "eu": "basque",
    "is": "icelandic",
    "hy": "armenian",
    "ne": "nepali",
    "mn": "mongolian",
    "bs": "bosnian",
    "kk": "kazakh",
    "sq": "albanian",
    "sw": "swahili",
    "gl": "galician",
    "mr": "marathi",
    "pa": "punjabi",
    "si": "sinhala",
    "km": "khmer",
    "sn": "shona",
    "yo": "yoruba",
    "so": "somali",
    "af": "afrikaans",
    "oc": "occitan",
    "ka": "georgian",
    "be": "belarusian",
    "tg": "tajik",
    "sd": "sindhi",
    "gu": "gujarati",
    "am": "amharic",
    "yi": "yiddish",
    "lo": "lao",
    "uz": "uzbek",
    "fo": "faroese",
    "ht": "haitian creole",
    "ps": "pashto",
    "tk": "turkmen",
    "nn": "nynorsk",
    "mt": "maltese",
    "sa": "sanskrit",
    "lb": "luxembourgish",
    "my": "myanmar",
    "bo": "tibetan",
    "tl": "tagalog",
    "mg": "malagasy",
    "as": "assamese",
    "tt": "tatar",
    "haw": "hawaiian",
    "ln": "lingala",
    "ha": "hausa",
    "ba": "bashkir",
    "jw": "javanese",
    "su": "sundanese",
}
# Multilingual checkpoints that support language detection (the ".en"
# variants are English-only, so the app skips detection for them).
lang_detect = ['tiny', 'base', 'small', 'medium', 'large']
css = """
#audio_inputs{
height:100px;
max-height:100px;
}
"""