update
app.py CHANGED
@@ -1,6 +1,123 @@
+import os
+import gradio as gr
+import whisper
+import requests
+import tempfile
+from neon_tts_plugin_coqui import CoquiTTS
 from datasets import load_dataset
 
 dataset = load_dataset("ysharma/short_jokes")
 
+# Languages common to both multilingual models include English, Chinese, Spanish, and French;
+# it makes sense to test the app primarily on these four.
 
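+# Note: the short_jokes dataset is loaded above but not referenced again below;
+# presumably it is meant for the joke-generation (Bloom) step that is still
+# commented out in driver_fun.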
-
+# Whisper: Speech-to-text
+model = whisper.load_model("base")
+model_med = whisper.load_model("medium")
+# Languages covered in Whisper (exhaustive list):
+#"en": "english", "zh": "chinese", "de": "german", "es": "spanish", "ru": "russian",
+#"ko": "korean", "fr": "french", "ja": "japanese", "pt": "portuguese", "tr": "turkish",
+#"pl": "polish", "ca": "catalan", "nl": "dutch", "ar": "arabic", "sv": "swedish",
+#"it": "italian", "id": "indonesian", "hi": "hindi", "fi": "finnish", "vi": "vietnamese",
+#"iw": "hebrew", "uk": "ukrainian", "el": "greek", "ms": "malay", "cs": "czech",
+#"ro": "romanian", "da": "danish", "hu": "hungarian", "ta": "tamil", "no": "norwegian",
+#"th": "thai", "ur": "urdu", "hr": "croatian", "bg": "bulgarian", "lt": "lithuanian",
+#"la": "latin", "mi": "maori", "ml": "malayalam", "cy": "welsh", "sk": "slovak",
+#"te": "telugu", "fa": "persian", "lv": "latvian", "bn": "bengali", "sr": "serbian",
+#"az": "azerbaijani", "sl": "slovenian", "kn": "kannada", "et": "estonian",
+#"mk": "macedonian", "br": "breton", "eu": "basque", "is": "icelandic", "hy": "armenian",
+#"ne": "nepali", "mn": "mongolian", "bs": "bosnian", "kk": "kazakh", "sq": "albanian",
+#"sw": "swahili", "gl": "galician", "mr": "marathi", "pa": "punjabi", "si": "sinhala",
+#"km": "khmer", "sn": "shona", "yo": "yoruba", "so": "somali", "af": "afrikaans",
+#"oc": "occitan", "ka": "georgian", "be": "belarusian", "tg": "tajik", "sd": "sindhi",
+#"gu": "gujarati", "am": "amharic", "yi": "yiddish", "lo": "lao", "uz": "uzbek",
+#"fo": "faroese", "ht": "haitian creole", "ps": "pashto", "tk": "turkmen", "nn": "nynorsk",
+#"mt": "maltese", "sa": "sanskrit", "lb": "luxembourgish", "my": "myanmar", "bo": "tibetan",
+#"tl": "tagalog", "mg": "malagasy", "as": "assamese", "tt": "tatar", "haw": "hawaiian",
+#"ln": "lingala", "ha": "hausa", "ba": "bashkir", "jw": "javanese", "su": "sundanese",
+
+
+# Text-to-Speech
+LANGUAGES = list(CoquiTTS.langs.keys())
+coquiTTS = CoquiTTS()
+print(f"Languages for Coqui are: {LANGUAGES}")
+# Languages for Coqui are: ['en', 'es', 'fr', 'de', 'pl', 'uk', 'ro', 'hu', 'el', 'bg', 'nl', 'fi', 'sl', 'lv', 'ga']
+# en - English, es - Spanish, fr - French, de - German, pl - Polish,
+# uk - Ukrainian, ro - Romanian, hu - Hungarian, el - Greek, bg - Bulgarian,
+# nl - Dutch, fi - Finnish, sl - Slovenian, lv - Latvian, ga - Irish
+
+
+# Driver function
+def driver_fun(audio):
+    transcribe, translation, lang = whisper_stt(audio)
+    #text1 = model.transcribe(audio)["text"]
+
+    # For now only English text is passed to Bloom for prompting, as the inference model is not high-spec
+    #text_generated = lang_model_response(transcribe, lang)
+    #text_generated_en = lang_model_response(translation, 'en')
+
+    if lang in ['es', 'fr']:
+        speech = tts(transcribe, lang)
+    else:
+        speech = tts(translation, 'en')
+    # Return empty strings as placeholders for the (currently disabled) Bloom outputs,
+    # so the five return values match the five output components wired up in b1.click
+    return transcribe, translation, "", "", speech
+
+
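+# A minimal sketch of what the commented-out lang_model_response could look like,
+# querying Bloom through the Hugging Face Inference API with the `requests` import
+# above. This helper is hypothetical: the prompt handling, the generation
+# parameters, and the HF_TOKEN environment variable are assumptions, not an
+# established part of this app.
+def lang_model_response(prompt, language):
+    # `language` mirrors the commented-out call sites; this sketch does not use it
+    API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
+    headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}
+    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 64}}
+    response = requests.post(API_URL, headers=headers, json=payload)
+    # Text-generation responses arrive as [{"generated_text": "..."}]
+    return response.json()[0]["generated_text"]
+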
+# Whisper - speech-to-text
+def whisper_stt(audio):
+    print("Inside Whisper STT")
+    # load audio and pad/trim it to fit 30 seconds
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)
+
+    # make log-Mel spectrogram and move to the same device as the model
+    mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+    # detect the spoken language with the base model
+    _, probs = model.detect_language(mel)
+    lang = max(probs, key=probs.get)
+    print(f"Detected language: {lang}")
+
+    # decode the audio with the medium model: transcribe in the detected
+    # language, and separately translate to English
+    options_transc = whisper.DecodingOptions(fp16=False, language=lang, task='transcribe')
+    options_transl = whisper.DecodingOptions(fp16=False, language='en', task='translate')
+    result_transc = whisper.decode(model_med, mel, options_transc)
+    result_transl = whisper.decode(model_med, mel, options_transl)
+
+    # print the recognized text
+    print(f"transcript is : {result_transc.text}")
+    print(f"translation is : {result_transl.text}")
+
+    return result_transc.text, result_transl.text, lang
+
+
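+# Illustrative usage (the filename is hypothetical): whisper_stt("recording.wav")
+# returns a (transcript, english_translation, detected_language_code) tuple,
+# e.g. something like ("Hola, ¿qué tal?", "Hello, how are you?", "es").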
+# Coqui - Text-to-Speech
+def tts(text, language):
+    print(f"Inside tts - language is : {language}")
+    # languages supported by Coqui, per the LANGUAGES list printed above
+    coqui_langs = ['en', 'es', 'fr', 'de', 'pl', 'uk', 'ro', 'hu', 'el', 'bg', 'nl', 'fi', 'sl', 'lv', 'ga']
+    # fall back to English for any language Coqui does not cover
+    if language not in coqui_langs:
+        language = 'en'
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+        coquiTTS.get_tts(text, fp, speaker={"language": language})
+    return fp.name
+
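+# Illustrative usage (the sample text is arbitrary): tts("Bonjour tout le monde", "fr")
+# synthesizes speech and returns the path of a temporary .wav file, which the
+# gr.Audio output component can play directly.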
+demo = gr.Blocks()
+with demo:
+    gr.Markdown("<h1><center>Multilingual AI Assistant - Voice to Joke</center></h1>")
+    gr.Markdown(
+        """Model pipeline consisting of - <br>- [**Whisper**](https://github.com/openai/whisper) for Speech-to-text, <br>- [**CoquiTTS**](https://huggingface.co/coqui) for Text-to-Speech, <br>- a front end built with the [**Gradio Blocks API**](https://gradio.app/docs/#blocks).<br>Both CoquiTTS and Whisper are multilingual, and several languages overlap between them; you will get the best results by testing the app in one of those shared languages.<br>If you want to reuse the app, simply click the small cross button in the top right corner of the voice-record panel, then press record again!
+        """)
+    with gr.Row():
+        with gr.Column():
+            in_audio = gr.Audio(source="microphone", type="filepath", label='Record your voice command here in English, Spanish or French for best results -')
+            b1 = gr.Button("AI response pipeline (Whisper - Bloom - Coqui)")
+            out_transcript = gr.Textbox(label='English/Spanish/French transcript of your audio using OpenAI Whisper')
+            out_translation_en = gr.Textbox(label='English translation of your audio using OpenAI Whisper')
+        with gr.Column():
+            out_audio = gr.Audio(label='AI response as audio in your language - Spanish or French, or English for all other languages -')
+            out_generated_text = gr.Textbox(label='AI response to your query in your preferred language using Bloom!')
+            out_generated_text_en = gr.Textbox(label='AI response to your query in English using Bloom!')
+
+    b1.click(driver_fun, inputs=[in_audio], outputs=[out_transcript, out_translation_en, out_generated_text, out_generated_text_en, out_audio])
+
+demo.launch(enable_queue=True, debug=True)