showgan committed on
Commit
9b31078
1 Parent(s): 97fbfa0
Files changed (1)
  1. app.py +270 -0
app.py ADDED
@@ -0,0 +1,270 @@
+ #!/usr/bin/env python3
+ # import csv
+ import typing
+ import wave
+ import subprocess
+ import tempfile
+ from pathlib import Path
+
+ import gradio as gr
+ # from typing import Tuple, Dict
+ # import pandas as pd
+ import socket
+ # import yaml
+ # from os.path import exists
+
+
+ # def read_config(file) -> Dict:
+ #     with open(file, 'r') as f:
+ #         return yaml.safe_load(f)
+
+
+ # def write_config(file, config_data):
+ #     with open(file, 'w') as yaml_file:
+ #         yaml.dump(config_data, yaml_file, default_flow_style=False)
+
+
+ # def load_data(filename: str) -> pd.DataFrame:
+ #     text_df = pd.read_csv(filename, header=None, names=['wav', 'text'], sep='|', on_bad_lines='skip')
+ #     print(text_df.head(4))
+ #     return text_df
+
+
+ # def validate_index(index: int) -> int:
+ #     max_index = Globals['max_index']
+ #     if index > max_index:
+ #         index = max_index
+ #     if index < 0:
+ #         index = 0
+ #     return index
+
+
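+ # Turn free-form input text into the pipe-separated metadata records consumed by eval.py,
+ # one record per non-empty input line.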
+ def process_text(text: str) -> typing.List:
+     text2 = text.lower().split('\n')
+     # mucella_001|500|Nartıme zı pşıj-themate gore yaağ. A pşıjım zerécew ştığexer pşı Jećej. A pşıjım yıe yiĺ šıfxem lıyew arixırer afemış'ejew ḣuğe.|ady-lt
+     index = 0
+     text3 = []
+     for line in text2:
+         line = line.strip()
+         if line == '':
+             print('-D- Skipping an empty line')
+             continue
+         print(f'-D- Processing line: {line}')
+         line = f'mucella_{index:04d}|500|{line}|ady-lt'
+         index = index + 1
+         text3.append(line)
+     print(f'-D- process_text() text\n-D- before:\n{text}\n-D- after:\n{text3}')
+     return text3
+
+
+ # def get_item_data(index: int) -> Tuple[int, str, str, str]:
+ #     index = validate_index(index)
+ #     df = Globals['text_df']
+ #     text = df.at[index, 'text']
+ #     if not isinstance(text, str):
+ #         text = ''
+ #     # Limit text to 'max_characters'
+ #     # if len(text) > Globals['max_characters']:
+ #     #     text = text[0:Globals['max_characters']]
+ #     audio_tag = df.at[index, 'wav']
+ #     audio_file = None
+ #     if isinstance(audio_tag, str):
+ #         audio_file = Globals['audio_dir'] + '/' + audio_tag + '.wav'
+ #     save_index(index)
+ #     return index, text, audio_tag, audio_file
+
+
+ # Globals = {
+ #     'input_csv': '',
+ #     'output_csv': '',
+ #     'audio_dir': '',
+ #     'session_config_file': '',
+ #     'text_df': pd.DataFrame(),
+ #     'max_index': 0
+ # }
+ # config_file = '/home/haroon/PycharmProjects/gradio_creating_web_apis/mucella_metadata.cfg'
+
+ # python eval.py --model-dir=/home/haroon/git_repos/few-shot-transformer-tts/Models/Mucella --log-dir=/home/haroon/git_repos/few-shot-transformer-tts/Models/Mucella/synthesize/kamzegur1 --data-dir=/home/haroon/git_repos/few-shot-transformer-tts/Samples/ --eval_meta=/home/haroon/git_repos/few-shot-transformer-tts/Samples/kamzegur1.txt --start_step=2580000 --no_wait=True
+
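+ # Machine-specific paths and settings for the few-shot-transformer-tts synthesis backend.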
+ Globals = {
+     'python_interpreter': '/home/haroon/python_virtual_envs/few_shot_tts/bin/python3',
+     'code_repository_dir': '/home/haroon/git_repos/few-shot-transformer-tts',
+     'model_dir': '/home/haroon/git_repos/few-shot-transformer-tts/Models/Mucella',
+     'start_step': 2580000,
+     'log_dir': '/tmp/few-shot-transformer-tts-server/log',
+     'data_dir': '/tmp/few-shot-transformer-tts-server/text',
+     'default_text': ' Maḣe keume sépĺı! \n \n Harun Şewgen \n',
+     'outfile': ''
+ }
+
+
+ # def incr_index(current_index: int) -> int:
+ #     current_index = current_index + 1
+ #     current_index = validate_index(current_index)
+ #     return current_index
+
+
+ # def decr_index(current_index: int) -> int:
+ #     current_index = current_index - 1
+ #     current_index = validate_index(current_index)
+ #     return current_index
+
+
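+ # Concatenate the per-line wav files written by eval.py (under log_dir/eval_<start_step>/)
+ # into a single output wav with a randomized name.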
+ def concat_wavs(processed_text: typing.List) -> str:
+     temp_wav_name = f'adiga_{next(tempfile._get_candidate_names())}'
+     # outfile = f"{Globals['log_dir']}/output.wav"
+     outfile = f"{Globals['log_dir']}/{temp_wav_name}.wav"
+     print(f'-D- Concatenating to a single wav file: {outfile}')
+     # /tmp/few-shot-transformer-tts-server/log/eval_2580000/mucella_0000.wav
+     data = []
+     for line in processed_text:
+         print(f'-D- concat_wavs() line before split: {line}')
+         wav_file, _, _, _ = line.split('|')
+         wav_file = f"{Globals['log_dir']}/eval_{Globals['start_step']}/{wav_file}.wav"
+         print(f'-D- wav_file: {wav_file}')
+         w = wave.open(wav_file, 'rb')
+         data.append([w.getparams(), w.readframes(w.getnframes())])
+         w.close()
+     output = wave.open(outfile, 'wb')
+     output.setparams(data[0][0])
+     for i in range(len(data)):
+         output.writeframes(data[i][1])
+     output.close()
+     return outfile
+
+
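+ # Synthesis entry point wired to the 'Speak' button: write the processed text as an
+ # eval_meta file, run eval.py in a subprocess, then merge the generated wavs.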
+ def speak(text: str) -> typing.Tuple[str, str]:
+     Globals['outfile'] = ''
+     print('-I- Generating speech ...')
+     print(f'-D- speak() text: {text}')
+     processed_text = process_text(text)
+     # Save text to a temporary file
+     text_file = f"{Globals['data_dir']}/text.txt"
+     with open(text_file, 'w') as f:
+         for line in processed_text:
+             f.write(f'{line}\n')
+         f.close()
+
+     # Prepare speech synthesis command:
+     # python eval.py --model-dir=/home/haroon/git_repos/few-shot-transformer-tts/Models/Mucella --log-dir=/home/haroon/git_repos/few-shot-transformer-tts/Models/Mucella/synthesize/kamzegur1 --data-dir=/home/haroon/git_repos/few-shot-transformer-tts/Samples/ --eval_meta=/home/haroon/git_repos/few-shot-transformer-tts/Samples/kamzegur1.txt --start_step=2580000 --no_wait=True
+     cmd = f"{Globals['python_interpreter']} {Globals['code_repository_dir']}/eval.py --model-dir={Globals['model_dir']} --log-dir={Globals['log_dir']} --data-dir={Globals['data_dir']} --eval_meta={text_file} --start_step={Globals['start_step']} --no_wait=True".split()
+     print(f'-D- Speech synthesis command:\n{cmd}')
+     subprocess.run(cmd)
+     print('-D- Finished synthesizing speech.')
+     outfile = concat_wavs(processed_text)
+     Globals['outfile'] = outfile
+     return outfile, outfile
+
+
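+ # Return the most recently synthesized wav file, if any, so it can be offered for download.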
+ def download() -> typing.Optional[str]:
+     outfile = Globals['outfile']
+     if outfile != '' and Path(outfile).is_file():
+         print(f'-I- Downloading {outfile}')
+         return outfile
+     return None
+
+
+ # def handle_text_editing(index: int, text: str) -> dict:
+ #     index = int(index)
+ #     index = validate_index(index)
+ #     new_text = text
+ #     if "\n" in new_text:
+ #         if index == Globals['max_index']:
+ #             print("-W- Can't split text since there are no audio tags left. Please add more audio tags.")
+ #             return gr.update(value=new_text)
+ #         [new_text1, new_text2] = new_text.split("\n", 1)
+ #         orig_text = Globals['text_df'].iat[index, 1]
+ #         orig_text2_index = orig_text.find(new_text2)
+ #         if orig_text2_index == -1:
+ #             # If the text after the new line has been modified, just carry the whole original text of this item to
+ #             # the next item (no easy way to figure out how to cut the original text).
+ #             new_text2 = orig_text
+ #         else:
+ #             new_text2 = orig_text[orig_text2_index:]
+ #         next_item_orig_text = Globals['text_df'].iat[index + 1, 1]
+ #         if not isinstance(next_item_orig_text, str):
+ #             next_item_orig_text = ''
+ #         new_text2 = new_text2 + ' ' + next_item_orig_text
+ #         save_text(index, new_text1)
+ #         save_text(index + 1, new_text2)
+ #         return gr.update(value=new_text1)
+ #     save_text(index, new_text)
+ #     return gr.update(value=new_text)
+
+
+ # def update_output_file(output_file: str):
+ #     Globals['output_csv'] = output_file
+
+
+ # def save_text(index: int, text: str):
+ #     index = int(index)
+ #     index = validate_index(index)
+ #     Globals['text_df'].iat[index, 1] = text
+ #     Globals['text_df'].to_csv(Globals['output_csv'], index=False, sep='|', header=False, quoting=csv.QUOTE_NONE)
+
+
+ # def save_index(index: int):
+ #     index = validate_index(index)
+ #     session_config_data = {'current_index': int(index)}
+ #     write_config(Globals['session_config_file'], session_config_data)
+
+
+ def main():
+     global Globals
+     # # Read main config file
+     # config_data = {}
+     # if exists(config_file):
+     #     config_data = read_config(config_file)
+     # if config_data is not None:
+     #     Globals.update(config_data)
+     # # Read config file which was written by an earlier session of the app
+     # Globals['session_config_file'] = config_file + '.session'
+     # session_config_data = None
+     # if exists(Globals['session_config_file']):
+     #     session_config_data = read_config(Globals['session_config_file'])
+     # if session_config_data is not None:
+     #     Globals.update(session_config_data)
+
+     # Globals['output_csv'] = Globals['input_csv'] + '.new'
+     # Globals['text_df'] = load_data(Globals['input_csv'])
+     # Globals['max_index'] = Globals['text_df'].shape[0] - 1
+     # default_index = 0
+     # if 'current_index' in Globals:
+     #     default_index = Globals['current_index']
+     # default_index = validate_index(default_index)
+     # _, default_text, default_audio_tag, default_audio_file = get_item_data(default_index)
+     # print(f'-D- default_text: {default_text}, default_wav: {default_audio_file}')
+
+     # Close port(s) in case they're still open from a previous session
+     gr.close_all()
+     with gr.Blocks() as app:
+         # output_file_elem = gr.Text(label='Output File', value=Globals['output_csv'], interactive=True, max_lines=1)
+         # index_elem = gr.Number(label='Index', value=default_index)
+         text_elem = gr.Text(show_label=False, value=Globals['default_text'], interactive=True, max_lines=5)
+         # audio_tag_elem = gr.Text(label='Audio Tag', value=default_audio_tag, interactive=False)
+         # audio_file_elem = gr.Audio(show_label=False, value='')
+         speak_btn = gr.Button('Speak')
+         # with gr.Row():
+         #     speak_btn = gr.Button('Speak')
+         #     download_btn = gr.Button("Download")
+         # audio_file_elem = gr.Audio(show_label=False)
+         # file_elem = gr.File(visible=True)
+         # file_elem.change(fn=download, inputs=[download_btn], outputs=[])
+
+         # index_elem.change(fn=get_item_data, inputs=[index_elem], outputs=[index_elem, text_elem, audio_tag_elem, audio_file_elem])
+         # prev_btn.click(fn=decr_index, inputs=[index_elem], outputs=[index_elem])
+         # next_btn.click(fn=incr_index, inputs=[index_elem], outputs=[index_elem])
+         speak_btn.click(fn=speak, inputs=[text_elem], outputs=[gr.Audio(show_label=False), gr.File()])
+         # download_btn.click(fn=download, inputs=[], outputs=[gr.File()])
+         # text_elem.change(fn=handle_text_editing, inputs=[index_elem, text_elem], outputs=[text_elem])
+         # output_file_elem.change(fn=update_output_file, inputs=[output_file_elem], outputs=[])
+
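+     # Pick a non-loopback IP address for the Gradio server to bind to
+     # (falls back to opening a UDP socket towards 8.8.8.8 to discover the outbound interface).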
+     hostname = (([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
+     print(f'-D- Hostname: {hostname}')
+     # app.launch(server_name=hostname, server_port=6012, share=True)
+     app.launch(server_name=hostname, share=True)
+     exit(0)
+
+
+ if __name__ == '__main__':
+     main()