Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -9,10 +9,8 @@ import wave
|
|
9 |
from balacoon_tts import TTS
|
10 |
from threading import Lock
|
11 |
from huggingface_hub import hf_hub_download, list_repo_files
|
12 |
-
from pydub import AudioSegment
|
13 |
import io
|
14 |
import tempfile
|
15 |
-
import pydub
|
16 |
|
17 |
#tts cpu model
|
18 |
tts_model_str = "en_us_hifi_jets_cpu.addon"
|
@@ -47,12 +45,19 @@ def text_to_speech(text):
|
|
47 |
return output_file
|
48 |
|
49 |
def combine_audio_files(audio_files):
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
|
57 |
#client
|
58 |
client = Groq(
|
@@ -127,7 +132,7 @@ async def greet(product,description):
|
|
127 |
if response.choices[0].message.content != "not moderated":
|
128 |
a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
|
129 |
processed_audio = combine_audio_files([text_to_speech(a_list[0])])
|
130 |
-
yield
|
131 |
else:
|
132 |
output = llm.create_chat_completion(
|
133 |
messages=[
|
@@ -149,7 +154,7 @@ async def greet(product,description):
|
|
149 |
audio_list = audio_list + [text_to_speech(delta.get('content', ''))]
|
150 |
processed_audio = combine_audio_files(audio_list)
|
151 |
partial_message = partial_message + delta.get('content', '')
|
152 |
-
yield
|
153 |
|
154 |
audio = gr.Audio()
|
155 |
demo = gr.Interface(fn=greet, inputs=["text","text"], concurrency_limit=10, outputs=[audio,"text"])
|
|
|
9 |
from balacoon_tts import TTS
|
10 |
from threading import Lock
|
11 |
from huggingface_hub import hf_hub_download, list_repo_files
|
|
|
12 |
import io
|
13 |
import tempfile
|
|
|
14 |
|
15 |
#tts cpu model
|
16 |
tts_model_str = "en_us_hifi_jets_cpu.addon"
|
|
|
45 |
return output_file
|
46 |
|
47 |
def combine_audio_files(audio_files):
    """Concatenate several WAV files into a single ``sounds.wav``.

    Each input file is opened, its frames are collected in order, and the
    temporary input file is deleted afterwards.  All inputs are assumed to
    share the same audio parameters (channels, sample width, frame rate);
    the output reuses the parameters of the first file — mismatched inputs
    would produce a garbled result, which this helper does not detect.

    Args:
        audio_files: ordered list of paths to the temporary WAV files
            produced by ``text_to_speech``.

    Returns:
        str: path of the combined WAV file (``"sounds.wav"`` in the CWD).

    Raises:
        ValueError: if ``audio_files`` is empty (previously this surfaced
            as an opaque ``IndexError`` from ``data[0][0]``).
    """
    if not audio_files:
        raise ValueError("audio_files must contain at least one WAV path")
    outfile = "sounds.wav"
    data = []
    for infile in audio_files:
        # Context manager guarantees the handle is closed even if a read fails.
        with wave.open(infile, "rb") as w:
            data.append((w.getparams(), w.readframes(w.getnframes())))
        os.remove(infile)  # remove the temporary per-chunk file
    with wave.open(outfile, "wb") as output:
        # All chunks are assumed to share the first file's params.
        output.setparams(data[0][0])
        for _, frames in data:
            output.writeframes(frames)
    return outfile
|
61 |
|
62 |
#client
|
63 |
client = Groq(
|
|
|
132 |
if response.choices[0].message.content != "not moderated":
|
133 |
a_list = ["Sorry, I can't proceed for generating marketing email. Your content needs to be moderated first. Thank you!"]
|
134 |
processed_audio = combine_audio_files([text_to_speech(a_list[0])])
|
135 |
+
yield processed_audio, a_list[0]
|
136 |
else:
|
137 |
output = llm.create_chat_completion(
|
138 |
messages=[
|
|
|
154 |
audio_list = audio_list + [text_to_speech(delta.get('content', ''))]
|
155 |
processed_audio = combine_audio_files(audio_list)
|
156 |
partial_message = partial_message + delta.get('content', '')
|
157 |
+
yield processed_audio, partial_message
|
158 |
|
159 |
audio = gr.Audio()
|
160 |
demo = gr.Interface(fn=greet, inputs=["text","text"], concurrency_limit=10, outputs=[audio,"text"])
|