ysharma (HF staff) committed
Commit 84665f7
1 Parent(s): d85011f
Files changed (1)
  1. app.py +23 -14
app.py CHANGED
@@ -97,7 +97,7 @@ def whisper_stt(audio):
 
 
 # LLM - Bloom Response
-def lang_model_response(prompt, language):
+def lang_model_response(prompt, prompt_en, language):
     print(f"Inside lang_model_response - Prompt is :{prompt}")
     p_en = """Question: How are you doing today?
     Answer: I am doing good, thanks.
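For reference, this is what the English few-shot prompt looks like once the user's translated question is appended (the assembly, p_en + prompt_en + "\n" + "Answer: ", appears in the next hunk); the sample question here is made up:

# Illustration only: assembling the prompt the way lang_model_response does.
p_en = """Question: How are you doing today?
Answer: I am doing good, thanks.
Question: """

prompt_en = "What is the capital of France?"  # hypothetical user question
prompt = p_en + prompt_en + "\n" + "Answer: "
print(prompt)
# Question: How are you doing today?
# Answer: I am doing good, thanks.
# Question: What is the capital of France?
# Answer: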
@@ -109,18 +109,25 @@ def lang_model_response(prompt, language):
     Réponse: Je vais bien, merci.
     Question: """
 
-    if len(prompt) == 0:
+    if len(prompt) == 0 or len(prompt_en) == 0:
         prompt = """Question: Can you help me please?
         Answer: Sure, I am here for you.
-        Question: """
+        Question: What do you do when you don't get what you want?"""
 
-    if language == 'en':
-        prompt = p_en + prompt + "\n" + "Answer: "
-    elif language == 'es':
+    #if language == 'en':
+    prompt = p_en + prompt_en + "\n" + "Answer: "
+    solution_en = query(prompt, 'en')
+    if language == 'es':
         prompt = p_es + prompt + "\n" + "Responder: "
+        solution = query(prompt, 'es')
     elif language == 'fr':
         prompt = p_fr + prompt + "\n" + "Réponse: "
+        solution = query(prompt, 'fr')
 
+    return solution, solution_en
+
+# Bloom API Request
+def query(prompt, language):
     json_ = {"inputs": prompt,
              "parameters":
              {
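The diff truncates the body of the new query() helper. Below is a minimal sketch of what it plausibly does, assuming the standard Hugging Face Inference API for bigscience/bloom; the API_URL, token handling, parameter values, and response parsing are assumptions, not taken from the commit:

# Sketch only: the commit shows just the start of query()'s payload.
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ.get("HF_TOKEN", "")  # hypothetical secret name

def query(prompt, language):
    # language is kept for parity with the call sites in the diff;
    # this sketch does not use it.
    json_ = {"inputs": prompt,
             "parameters": {
                 "max_new_tokens": 64,       # assumed value
                 "return_full_text": False,  # assumed value
             }}
    response = requests.post(API_URL,
                             headers={"Authorization": f"Bearer {HF_TOKEN}"},
                             json=json_)
    output = response.json()
    # The text-generation endpoint returns [{"generated_text": ...}];
    # keep only the first generated line as the answer.
    solution = output[0]["generated_text"].split("\n")[0]
    return solution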
@@ -150,11 +157,12 @@ def lang_model_response(prompt, language):
     return solution
 
 # Coqui - Text-to-Speech
-def tts(text, language):
+def tts(text, text_en, language):
     print(f"Inside tts - language is : {language}")
     coqui_langs = ['en' ,'es' ,'fr' ,'de' ,'pl' ,'uk' ,'ro' ,'hu' ,'bg' ,'nl' ,'fi' ,'sl' ,'lv' ,'ga']
-    if language not in coqui_langs:
+    if language =='en' or language not in coqui_langs:
         language = 'en'
+        text = text_en
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
         coquiTTS.get_tts(text, fp, speaker = {"language" : language})
     return fp.name
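The new fallback rule is easy to restate in isolation: English requests, and any language Coqui does not cover, are spoken from the English translation instead. A minimal sketch of just that branch logic (pick_tts_text is a hypothetical name, introduced here for illustration):

coqui_langs = ['en', 'es', 'fr', 'de', 'pl', 'uk', 'ro', 'hu', 'bg', 'nl', 'fi', 'sl', 'lv', 'ga']

def pick_tts_text(text, text_en, language):
    # Unsupported languages (and English itself) fall back to the
    # English translation, matching the updated tts() above.
    if language == 'en' or language not in coqui_langs:
        return text_en, 'en'
    return text, language

assert pick_tts_text("Bonjour", "Hello", "fr") == ("Bonjour", "fr")
assert pick_tts_text("你好", "Hello", "zh") == ("Hello", "en")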
@@ -168,18 +176,19 @@ with demo:
     with gr.Row():
         with gr.Column():
             in_audio = gr.Audio(source="microphone", type="filepath", label='Record your voice here') #type='filepath'
-            b1 = gr.Button("Whisper") #- Bloom - Coqui pipeline)")
+            b1 = gr.Button("Whisper") #- Bloom - Coqui pipeline
             out_transcript = gr.Textbox(label= 'As is Transcript using OpenAI Whisper')
             out_translation_en = gr.Textbox(label= 'English Translation of audio using OpenAI Whisper')
             out_lang = gr.Textbox(visible=False)
         with gr.Column():
-            out_audio = gr.Audio(label='AI response in Audio form in your preferred language')
+            b2 = gr.Button("Bloom") #-- Coqui pipeline
             out_generated_text = gr.Textbox(label= 'AI response to your query in your preferred language using Bloom! ')
             out_generated_text_en = gr.Textbox(label= 'AI response to your query in English using Bloom! ')
-
-    b1.click(whisper_stt, inputs=[in_audio], outputs=[out_transcript, out_translation_en, out_lang])
-    b2.click(
+            b3 = gr.Button("CoquiTTS") #-- pipeline complete
+            out_audio = gr.Audio(label='AI response in Audio form in your preferred language')
 
-    b1.click(driver_fun, inputs=[in_audio], outputs=[out_transcript, out_translation_en, out_generated_text, out_generated_text_en, out_audio])
+    b1.click(whisper_stt, inputs=[in_audio], outputs=[out_transcript, out_translation_en, out_lang])
+    b2.click(lang_model_response, inputs=[out_transcript, out_translation_en, out_lang], outputs=[out_generated_text, out_generated_text_en])
+    b3.click(tts, inputs=[out_generated_text, out_generated_text_en, out_lang], outputs=[out_audio])
 
     demo.launch(enable_queue=True, debug=True)
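The rewired UI runs the pipeline one stage per button, with a hidden Textbox (out_lang) carrying state between stages. A minimal sketch of that chaining pattern with dummy step functions; the step names and labels below are made up, and the real app wires whisper_stt, lang_model_response, and tts the same way:

import gradio as gr

def step1(x):
    return f"step1({x})", "en"          # result + hidden state

def step2(text, lang):
    return f"step2({text}, lang={lang})"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    b1 = gr.Button("Step 1")
    mid = gr.Textbox(label="Intermediate")
    lang = gr.Textbox(visible=False)     # hidden carrier, like out_lang
    b2 = gr.Button("Step 2")
    out = gr.Textbox(label="Final")
    # Each button reads the previous stage's components as inputs,
    # so the pipeline advances one stage per click.
    b1.click(step1, inputs=[inp], outputs=[mid, lang])
    b2.click(step2, inputs=[mid, lang], outputs=[out])

demo.launch()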
 