amir22010 committed on
Commit a123fa1
1 Parent(s): 78badb3

Update app.py

Files changed (1): app.py (+2, -2)
app.py CHANGED

@@ -127,7 +127,7 @@ async def greet(product,description):
     if response.choices[0].message.content != "not moderated":
         a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
         processed_audio = combine_audio_files([text_to_speech(a_list[0])])
-        yield (processed_audio.sample_rate,processed_audio), a_list[0]
+        yield (processed_audio.frame_rate,processed_audio), a_list[0]
     else:
         output = llm.create_chat_completion(
             messages=[
@@ -149,7 +149,7 @@ async def greet(product,description):
             audio_list = audio_list + [text_to_speech(delta.get('content', ''))]
             processed_audio = combine_audio_files(audio_list)
             partial_message = partial_message + delta.get('content', '')
-            yield (processed_audio.sample_rate,processed_audio), partial_message
+            yield (processed_audio.frame_rate,processed_audio), partial_message
 
 audio = gr.Audio()
 demo = gr.Interface(fn=greet, inputs=["text","text"], concurrency_limit=10, outputs=[audio,"text"])
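For context on the two-character change: pydub's AudioSegment stores its sampling rate in the frame_rate attribute; there is no sample_rate attribute, so the old yield would raise an AttributeError as soon as audio was produced. A minimal sketch of the distinction, assuming combine_audio_files and text_to_speech return pydub AudioSegment objects (which is what the frame_rate access implies):

from pydub import AudioSegment

# pydub exposes the sampling rate as `frame_rate`; `sample_rate` does not exist,
# which is why the old `processed_audio.sample_rate` access failed.
segment = AudioSegment.silent(duration=500, frame_rate=44100)
print(segment.frame_rate)               # 44100
print(hasattr(segment, "sample_rate"))  # False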