ysharma HF staff committed on
Commit
71843eb
1 Parent(s): cd49d70
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -105,18 +105,18 @@ def tts(text, language):
105
 
106
  demo = gr.Blocks()
107
  with demo:
108
- gr.Markdown("<h1><center>Multilingual AI Assistant - Voice to Joke</center></h1>")
109
  gr.Markdown(
110
- """Model pipeline consisting of - <br>- [**Whisper**](https://github.com/openai/whisper) for Speech-to-text, <br>- [**CoquiTTS**](https://huggingface.co/coqui) for Text-To-Speech. <br>- Front end is built using [**Gradio Block API**](https://gradio.app/docs/#blocks).<br>Both CoquiTTS and Whisper are Multilingual, there are several overlapping languages between them. Hence it would be suggested to test this ML-App using these two languages to get the best results</u>.<br>If you want to reuse the App, simply click on the small cross button in the top right corner of your voice record panel, and then press record again!
111
  """)
112
  with gr.Row():
113
  with gr.Column():
114
  in_audio = gr.Audio(source="microphone", type="filepath", label='Record your voice command here in English -') #type='filepath'
115
  b1 = gr.Button("AI Response")
116
- out_transcript = gr.Textbox(label= 'English/Spanish/French Transcript of your Audio using OpenAI Whisper')
117
  #out_translation_en = gr.Textbox(label= 'English Translation of audio using OpenAI Whisper')
118
  with gr.Column():
119
- out_audio = gr.Audio(label='AI response in Audio form in English language')
120
  #out_generated_text = gr.Textbox(label= 'AI response to your query in your preferred language using Bloom! ')
121
  #out_generated_text_en = gr.Textbox(label= 'AI response to your query in English using Bloom! ')
122
 
 
105
 
106
  demo = gr.Blocks()
107
  with demo:
108
+ gr.Markdown("<h1><center>AI Assistant - Voice to Joke</center></h1>")
109
  gr.Markdown(
110
+ """Model pipeline consisting of - <br>- [**Whisper**](https://github.com/openai/whisper) for Speech-to-text, <br>- [**CoquiTTS**](https://huggingface.co/coqui) for Text-To-Speech. <br>- Front end is built using [**Gradio Block API**](https://gradio.app/docs/#blocks).<br><br>Both CoquiTTS and Whisper are Multilingual, there are several overlapping languages between them. Hence it would be suggested to test this ML-App using these two languages to get the best results.<br>If you want to reuse the App, simply click on the small cross button in the top right corner of your voice record panel, and then press record again!
111
  """)
112
  with gr.Row():
113
  with gr.Column():
114
  in_audio = gr.Audio(source="microphone", type="filepath", label='Record your voice command here in English -') #type='filepath'
115
  b1 = gr.Button("AI Response")
116
+ out_transcript = gr.Textbox(label= 'Transcript of your Audio using OpenAI Whisper')
117
  #out_translation_en = gr.Textbox(label= 'English Translation of audio using OpenAI Whisper')
118
  with gr.Column():
119
+ out_audio = gr.Audio(label='Audio response from CoquiTTS')
120
  #out_generated_text = gr.Textbox(label= 'AI response to your query in your preferred language using Bloom! ')
121
  #out_generated_text_en = gr.Textbox(label= 'AI response to your query in English using Bloom! ')
122