Mahiruoshi committed
Commit e2da336
Parent(s): ddc58d8
Update app.py

app.py CHANGED
@@ -33,6 +33,36 @@ def is_english(string):
     else:
         return False
 
+def to_html(chat_history):
+    chat_html = ""
+    for item in chat_history:
+        if item['role'] == 'user':
+            chat_html += f"""
+            <div style="margin-bottom: 20px;">
+                <div style="text-align: right; margin-right: 20px;">
+                    <span style="background-color: #4CAF50; color: black; padding: 10px; border-radius: 10px; display: inline-block; max-width: 80%; word-wrap: break-word;">
+                        {item['content']}
+                    </span>
+                </div>
+            </div>
+            """
+        else:
+            chat_html += f"""
+            <div style="margin-bottom: 20px;">
+                <div style="text-align: left; margin-left: 20px;">
+                    <span style="background-color: white; color: black; padding: 10px; border-radius: 10px; display: inline-block; max-width: 80%; word-wrap: break-word;">
+                        {item['content']}
+                    </span>
+                </div>
+            </div>
+            """
+    output_html = f"""
+    <div style="height: 400px; overflow-y: scroll; padding: 10px;">
+        {chat_html}
+    </div>
+    """
+    return output_html
+
 def extrac(text):
     text = re.sub("<[^>]*>","",text)
     result_list = re.split(r'\n', text)
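The new to_html helper renders the running ChatGPT history as chat bubbles: user turns right-aligned on green, any other role (assistant or system) left-aligned on white, inside a fixed-height scrollable container. A minimal sketch of how it can be exercised, assuming the OpenAI-style role/content dicts that chatgpt() maintains:

# Sketch only: sample_history mimics the role/content message list
# that chatgpt() accumulates; any non-user role falls into the
# assistant-styled branch.
sample_history = [
    {"role": "user", "content": "こんにちは"},
    {"role": "assistant", "content": "Hello! How can I help you today?"},
]
html = to_html(sample_history)
# The wrapper is a 400px, vertically scrollable div holding both bubbles.
assert "overflow-y: scroll" in html and "こんにちは" in html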
@@ -76,7 +106,7 @@ def chatgpt(text):
         del messages[-2:]
         with open('log.pickle', 'wb') as f:
             pickle.dump(messages, f)
-        return reply
+        return reply,messages
     except:
         messages.append({"role": "user", "content": text},)
         chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
@@ -88,7 +118,7 @@ def chatgpt(text):
         del messages[-2:]
         with open('log.pickle', 'wb') as f:
             pickle.dump(messages, f)
-        return reply
+        return reply,messages
 
 def get_symbols_from_json(path):
     assert os.path.isfile(path)
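Both exit paths of chatgpt() change the same way: they now return the tuple (reply, messages) rather than the bare reply string, so every caller has to unpack two values. A sketch of the new calling convention, assuming openai.api_key is already set, as tts_fn does just before its call:

# chatgpt() now yields the latest reply plus the full logged history.
reply, messages = chatgpt("自己紹介をしてください")
print(reply)            # newest assistant message
print(len(messages))    # role/content dicts, ready for to_html()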
@@ -122,12 +152,12 @@ def get_text(text,hps_ms):
 
 def create_tts_fn(net_g,hps,speaker_id):
     speaker_id = int(speaker_id)
-    def tts_fn(
-
+    def tts_fn(is_gpt,api_key,is_audio,audiopath,repeat_time,text, language, extract, n_scale= 0.667,n_scale_w = 0.8, l_scale = 1 ):
+        repeat_time = int(repeat_time)
         if is_gpt:
             openai.api_key = api_key
-            text = chatgpt(text)
-
+            text,messages = chatgpt(text)
+            htm = to_html(messages)
         if not extract:
             print(text)
             t1 = time.time()
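tts_fn gains an explicit parameter list: the GPT switch and API key, the live2d audio-path options, a repeat count, and the synthesis knobs, in exactly the order the Gradio inputs are passed positionally in btnVC.click further down. One caveat: htm is only assigned on the is_gpt branch, so the non-GPT path would hit a NameError at the returns below; the hypothetical direct call sketched here therefore keeps is_gpt enabled, and all values are placeholders:

# Hypothetical direct call; argument order mirrors the btnVC.click
# inputs list at the bottom of the diff. The key is not a real one.
audio_out, srt_path, chat_html = tts_fn(
    True,            # is_gpt: route the text through chatgpt()
    "sk-...",        # api_key (placeholder)
    False,           # is_audio: keep the default audio path
    "temp.wav",      # audiopath (only used when is_audio is set)
    1,               # repeat_time
    "こんにちは",     # text
    "日本語",         # language (assumed label; not shown in this diff)
    False,           # extract: synthesize the whole reply in one pass
)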
@@ -149,7 +179,7 @@ def create_tts_fn(net_g,hps,speaker_id):
                 os.system(cmd)
             except:
                 pass
-            return
+            return (hps.data.sampling_rate, audio),file_path,htm
         else:
             a = ['【','[','(','(']
             b = ['】',']',')',')']
@@ -192,7 +222,7 @@ def create_tts_fn(net_g,hps,speaker_id):
                 pass
 
             file_path = "subtitles.srt"
-            return history,file_path,(hps.data.sampling_rate, np.concatenate(audio_fin))
+            return (hps.data.sampling_rate, np.concatenate(audio_fin)),file_path,htm
     return tts_fn
 
 if __name__ == '__main__':
@@ -241,7 +271,7 @@ if __name__ == '__main__':
                     f'<img style="width:auto;height:400px;" src="file/image/{name}.png">'
                     '</div>'
                 )
-
+                output_UI = gr.outputs.HTML()
                 with gr.Row():
                     with gr.Column(scale=0.85):
                         input1 = gr.TextArea(label="Text", value=example,lines = 1)
@@ -262,6 +292,6 @@ if __name__ == '__main__':
                         audio_input1 = gr.Checkbox(value=False, label="修改音频路径(live2d)")
                         audio_input2 = gr.TextArea(label="音频路径",lines=1,value = '#参考 D:/app_develop/live2d_whole/2010002/sounds/temp.wav')
 
-        btnVC.click(tts_fn, inputs=[
+        btnVC.click(tts_fn, inputs=[api_input1,api_input2,audio_input1,audio_input2,audio_input3,input1,input2,input3,input4,input5,input6], outputs=[output1,output2,output_UI])
 
     app.launch()
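The click handler is finally wired up: eleven positional inputs feed tts_fn, and its three return values fan out to the audio player, the subtitle file, and the new output_UI HTML panel. A stripped-down sketch of the same wiring pattern; the stub function and component names are illustrative, and gr.HTML is the Blocks-era spelling of the gr.outputs.HTML used above:

import gradio as gr
import numpy as np

def fake_tts(text):
    # Stub with tts_fn's return shape: (rate, waveform), file path, HTML.
    sr = 22050
    wav = np.zeros(sr, dtype=np.float32)  # one second of silence
    with open("subtitles.srt", "w", encoding="utf-8") as f:
        f.write("1\n00:00:00,000 --> 00:00:01,000\n" + text + "\n")
    return (sr, wav), "subtitles.srt", f"<div>{text}</div>"

with gr.Blocks() as app:
    input1 = gr.TextArea(label="Text", lines=1)
    output1 = gr.Audio(label="Output Audio")
    output2 = gr.File(label="Subtitles")
    output_UI = gr.HTML()
    btnVC = gr.Button("Generate")
    btnVC.click(fake_tts, inputs=[input1], outputs=[output1, output2, output_UI])

app.launch()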