Pendrokar committed on
Commit
2ad24b2
β€’
1 Parent(s): 22b65d9

skip web server

Browse files
Files changed (2) hide show
  1. app.py +18 -15
  2. resources/app/no_server.py +202 -0
app.py CHANGED
@@ -7,6 +7,7 @@ from subprocess import Popen, PIPE
7
  import threading
8
  from huggingface_hub import hf_hub_download
9
  import gradio as gr
 
10
 
11
  hf_model_name = "Pendrokar/xvapitch_nvidia"
12
  hf_cache_models_path = '/home/user/.cache/huggingface/hub/models--Pendrokar--xvapitch_nvidia/snapshots/61b10e60b22bc21c1e072f72f1108b9c2b21e94c/'
@@ -146,8 +147,9 @@ def load_model(voice_model_name):
146
 
147
  print('Loading voice model...')
148
  try:
149
- response = requests.post('http://0.0.0.0:8008/loadModel', json=data, timeout=60)
150
- response.raise_for_status() # If the response contains an HTTP error status code, raise an exception
 
151
  current_voice_model = voice_model_name
152
 
153
  with open(model_path + '.json', 'r', encoding='utf-8') as f:
@@ -211,9 +213,10 @@ def predict(
211
 
212
  print('Synthesizing...')
213
  try:
214
- response = requests.post('http://0.0.0.0:8008/synthesize', json=data, timeout=60)
215
- response.raise_for_status() # If the response contains an HTTP error status code, raise an exception
216
- json_data = json.loads(response.text)
 
217
  except requests.exceptions.RequestException as err:
218
  print('FAILED to synthesize: {err}')
219
  save_path = ''
@@ -487,18 +490,18 @@ with gr.Blocks(css=".arpabet {display: inline-block; background-color: gray; bor
487
  if __name__ == "__main__":
488
  # Run the web server in a separate thread
489
 
490
- print('Attempting to connect to local xVASynth server...')
491
- try:
492
- response = requests.get('http://0.0.0.0:8008')
493
- response.raise_for_status() # If the response contains an HTTP error status code, raise an exception
494
- except requests.exceptions.RequestException as err:
495
- print('Failed to connect to xVASynth!')
496
- web_server_thread = threading.Thread(target=run_xvaserver)
497
- print('Starting xVAServer thread')
498
- web_server_thread.start()
499
 
500
  print('running Gradio interface')
501
  demo.launch()
502
 
503
  # Wait for the web server thread to finish (shouldn't be reached in normal execution)
504
- web_server_thread.join()
 
7
  import threading
8
  from huggingface_hub import hf_hub_download
9
  import gradio as gr
10
+ import resources.app.no_server as xvaserver
11
 
12
  hf_model_name = "Pendrokar/xvapitch_nvidia"
13
  hf_cache_models_path = '/home/user/.cache/huggingface/hub/models--Pendrokar--xvapitch_nvidia/snapshots/61b10e60b22bc21c1e072f72f1108b9c2b21e94c/'
 
147
 
148
  print('Loading voice model...')
149
  try:
150
+ xvaserver.loadModel(data)
151
+ # response = requests.post('http://0.0.0.0:8008/loadModel', json=data, timeout=60)
152
+ # response.raise_for_status() # If the response contains an HTTP error status code, raise an exception
153
  current_voice_model = voice_model_name
154
 
155
  with open(model_path + '.json', 'r', encoding='utf-8') as f:
 
213
 
214
  print('Synthesizing...')
215
  try:
216
+ xvaserver.synthesize(data)
217
+ # response = requests.post('http://0.0.0.0:8008/synthesize', json=data, timeout=60)
218
+ # response.raise_for_status() # If the response contains an HTTP error status code, raise an exception
219
+ # json_data = json.loads(response.text)
220
  except requests.exceptions.RequestException as err:
221
  print('FAILED to synthesize: {err}')
222
  save_path = ''
 
490
  if __name__ == "__main__":
491
  # Run the web server in a separate thread
492
 
493
+ # print('Attempting to connect to local xVASynth server...')
494
+ # try:
495
+ # response = requests.get('http://0.0.0.0:8008')
496
+ # response.raise_for_status() # If the response contains an HTTP error status code, raise an exception
497
+ # except requests.exceptions.RequestException as err:
498
+ # print('Failed to connect to xVASynth!')
499
+ # web_server_thread = threading.Thread(target=run_xvaserver)
500
+ # print('Starting xVAServer thread')
501
+ # web_server_thread.start()
502
 
503
  print('running Gradio interface')
504
  demo.launch()
505
 
506
  # Wait for the web server thread to finish (shouldn't be reached in normal execution)
507
+ # web_server_thread.join()
resources/app/no_server.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Direct (in-process) xVASynth backend setup.

Module-level bootstrap for running xVASynth without its local HTTP server:
reads the app version from the bundled script.js, sets up rotating-file
logging, and initializes the plugin and models managers used by
`loadModel` / `synthesize` below.
"""
import os
import sys
import traceback
import multiprocessing
import json

# Device placeholder; only ever assigned by the commented-out DML block below,
# so it stays None at runtime — see NOTE(review) in synthesize().
torch_dml_device = None

multiprocessing.freeze_support()

# PROD = 'xVASynth.exe' in os.listdir(".")
PROD = True  # hard-coded for the Spaces deployment (original detection commented out above)
sys.path.append("./resources/app")

# Saves me having to do backend re-compilations for every little UI hotfix
# App version is scraped from line 2 of script.js (expects a '"vX.Y.Z"' token there).
with open(f'{"./resources/app" if PROD else "."}/javascript/script.js', encoding="utf8") as f:
    lines = f.read().split("\n")
    APP_VERSION = lines[1].split('"v')[1].split('"')[0]

# Imports and logger setup
# ========================
try:
    # import python.pyinstaller_imports
    import numpy

    import logging
    from logging.handlers import RotatingFileHandler
    import json
    from socketserver import ThreadingMixIn
    from python.audio_post import run_audio_post, prepare_input_audio, mp_ffmpeg_output, normalize_audio, start_microphone_recording, move_recorded_file
    import ffmpeg
except:
    # Import failures are dumped to a debug file so they survive a crashed console.
    print(traceback.format_exc())
    with open("./DEBUG_err_imports.txt", "w+") as f:
        f.write(traceback.format_exc())

# Pyinstaller hack
# ================
# Neutralize torch.jit scripting (returns functions/objects unchanged) so the
# frozen build does not need TorchScript compilation support.
try:
    def script_method(fn, _rcb=None):
        return fn
    def script(obj, optimize=True, _frames_up=0, _rcb=None):
        return obj
    import torch.jit
    torch.jit.script_method = script_method
    torch.jit.script = script
    import torch
    import tqdm
    import regex
except:
    with open("./DEBUG_err_import_torch.txt", "w+") as f:
        f.write(traceback.format_exc())
# ================
# CPU_ONLY = not torch.cuda.is_available()
CPU_ONLY = True  # hard-coded for this deployment (original CUDA detection commented out)

try:
    # Rotating file log (2 MB x 5 backups) + stderr handler for ERROR and above.
    logger = logging.getLogger('serverLog')
    logger.setLevel(logging.DEBUG)
    server_log_path = f'{os.path.dirname(os.path.realpath(__file__))}/{"../../../" if PROD else ""}/server.log'
    fh = RotatingFileHandler(server_log_path, maxBytes=2*1024*1024, backupCount=5)
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info(f'New session. Version: {APP_VERSION}. Installation: {"CPU" if CPU_ONLY else "CPU+GPU"} | Prod: {PROD} | Log path: {server_log_path}')

    # Keep a handle on the raw info() so the prefix can be toggled off again.
    logger.orig_info = logger.info

    def prefixed_log (msg):
        # Prepends the currently configured prefix to every logger.log() call.
        logger.info(f'{logger.logging_prefix}{msg}')


    def set_logger_prefix (prefix=""):
        # Non-empty prefix routes logger.log through prefixed_log; empty restores raw info().
        if len(prefix):
            logger.logging_prefix = f'[{prefix}]: '
            logger.log = prefixed_log
        else:
            logger.log = logger.orig_info

    logger.set_logger_prefix = set_logger_prefix
    logger.set_logger_prefix("")

except:
    with open("./DEBUG_err_logger.txt", "w+") as f:
        f.write(traceback.format_exc())
    try:
        logger.info(traceback.format_exc())
    except:
        pass

# if CPU_ONLY:
#     torch_dml_device = torch.device("cpu")


try:
    from python.plugins_manager import PluginManager
    plugin_manager = PluginManager(APP_VERSION, PROD, CPU_ONLY, logger)
    active_plugins = plugin_manager.get_active_plugins_count()
    logger.info(f'Plugin manager loaded. {active_plugins} active plugins.')
except:
    # NOTE(review): if PluginManager fails, the run_plugins call below raises NameError.
    logger.info("Plugin manager FAILED.")
    logger.info(traceback.format_exc())

plugin_manager.run_plugins(plist=plugin_manager.plugins["start"]["pre"], event="pre start", data=None)


# ======================== Models manager
# modelsPaths = {}
try:
    from python.models_manager import ModelsManager
    models_manager = ModelsManager(logger, PROD, device="cpu")
except:
    logger.info("Models manager failed to initialize")
    logger.info(traceback.format_exc())
# ========================



print("Models ready")
logger.info("Models ready")


# NOTE(review): `global` at module scope is a no-op; likely a leftover from the
# HTTP handler this module was extracted from.
global modelsPaths
post_data = ""
130
def loadModel(post_data):
    """Load a voice model in-process, mirroring the former /loadModel HTTP endpoint.

    Args:
        post_data: dict matching the old endpoint's JSON payload. Required keys:
            "model" (checkpoint path without the ".pt" suffix), "modelType",
            "pluginsContext" (a JSON *string*, decoded in place here).
            Optional: "instance_index" (default 0), "model_speakers", "base_lang".

    Returns:
        Whatever `models_manager.load_model` returns (e.g. an error code such
        as "ENOENT" — see its use in synthesize()).

    Side effects: mutates post_data["pluginsContext"], runs pre/post
    "load-model" plugins, and for fastpitch1_1 initializes ARPAbet dicts.
    """
    logger.info("Direct: loadModel")
    logger.info(post_data)
    ckpt = post_data["model"]
    modelType = post_data["modelType"]
    instance_index = post_data["instance_index"] if "instance_index" in post_data else 0
    # Normalize e.g. "FastPitch 1.1" -> "fastpitch1_1" for models_manager keys.
    modelType = modelType.lower().replace(".", "_").replace(" ", "")
    post_data["pluginsContext"] = json.loads(post_data["pluginsContext"])
    n_speakers = post_data["model_speakers"] if "model_speakers" in post_data else None
    base_lang = post_data["base_lang"] if "base_lang" in post_data else None


    plugin_manager.run_plugins(plist=plugin_manager.plugins["load-model"]["pre"], event="pre load-model", data=post_data)
    # FIX: capture the manager's response. The original discarded it and then
    # executed `return req_response` with `req_response` never assigned,
    # raising NameError on every successful call.
    req_response = models_manager.load_model(modelType, ckpt+".pt", instance_index=instance_index, n_speakers=n_speakers, base_lang=base_lang)
    plugin_manager.run_plugins(plist=plugin_manager.plugins["load-model"]["post"], event="post load-model", data=post_data)

    if modelType=="fastpitch1_1":
        models_manager.models_bank["fastpitch1_1"][instance_index].init_arpabet_dicts()

    return req_response
150
+
151
def synthesize(post_data):
    """Synthesize a line of speech in-process, mirroring the former /synthesize endpoint.

    Args:
        post_data: dict matching the old endpoint's JSON payload. Required keys:
            "pluginsContext" (JSON string, decoded in place), "vocoder",
            "modelType", "sequence", "pace", "outfile"; many optional keys
            (pitch/energy/emotion/duration/speaker controls) default to None.

    Returns:
        The result of `model.infer(...)`, or the "ENOENT" load_model response
        when a WaveGlow vocoder checkpoint is missing (synthesis is skipped).
    """
    logger.info("Direct: synthesize")
    post_data["pluginsContext"] = json.loads(post_data["pluginsContext"])
    instance_index = post_data["instance_index"] if "instance_index" in post_data else 0


    # Handle the case where the vocoder remains selected on app start-up, with auto-HiFi turned off, but no setVocoder call is made before synth
    continue_synth = True
    if "waveglow" in post_data["vocoder"]:
        waveglowPath = post_data["waveglowPath"]
        req_response = models_manager.load_model(post_data["vocoder"], waveglowPath, instance_index=instance_index)
        if req_response=="ENOENT":
            continue_synth = False

    device = post_data["device"] if "device" in post_data else models_manager.device_label
    # NOTE(review): CPU_ONLY is hard-coded True and torch_dml_device is never
    # assigned (its init is commented out at module top), so any device value
    # other than "cpu" resolves to None here — confirm set_device handles None.
    device = torch.device("cpu") if device=="cpu" else (torch_dml_device if CPU_ONLY else torch.device("cuda:0"))
    models_manager.set_device(device, instance_index=instance_index)

    if continue_synth:
        plugin_manager.set_context(post_data["pluginsContext"])
        plugin_manager.run_plugins(plist=plugin_manager.plugins["synth-line"]["pre"], event="pre synth-line", data=post_data)

        modelType = post_data["modelType"]
        text = post_data["sequence"]
        pace = float(post_data["pace"])
        out_path = post_data["outfile"]
        # Optional controls: absent keys fall back to None (or the model's defaults).
        base_lang = post_data["base_lang"] if "base_lang" in post_data else None
        base_emb = post_data["base_emb"] if "base_emb" in post_data else None
        pitch = post_data["pitch"] if "pitch" in post_data else None
        energy = post_data["energy"] if "energy" in post_data else None
        emAngry = post_data["emAngry"] if "emAngry" in post_data else None
        emHappy = post_data["emHappy"] if "emHappy" in post_data else None
        emSad = post_data["emSad"] if "emSad" in post_data else None
        emSurprise = post_data["emSurprise"] if "emSurprise" in post_data else None
        editorStyles = post_data["editorStyles"] if "editorStyles" in post_data else None
        duration = post_data["duration"] if "duration" in post_data else None
        speaker_i = post_data["speaker_i"] if "speaker_i" in post_data else None
        useSR = post_data["useSR"] if "useSR" in post_data else None
        useCleanup = post_data["useCleanup"] if "useCleanup" in post_data else None
        vocoder = post_data["vocoder"]
        globalAmplitudeModifier = float(post_data["globalAmplitudeModifier"]) if "globalAmplitudeModifier" in post_data else None
        # Order matters: model.infer unpacks editor_data positionally in this layout.
        editor_data = [pitch, duration, energy, emAngry, emHappy, emSad, emSurprise, editorStyles]
        old_sequence = post_data["old_sequence"] if "old_sequence" in post_data else None

        model = models_manager.models(modelType.lower().replace(".", "_").replace(" ", ""), instance_index=instance_index)
        req_response = model.infer(plugin_manager, text, out_path, vocoder=vocoder, \
            speaker_i=speaker_i, editor_data=editor_data, pace=pace, old_sequence=old_sequence, \
            globalAmplitudeModifier=globalAmplitudeModifier, base_lang=base_lang, base_emb=base_emb, useSR=useSR, useCleanup=useCleanup)

        plugin_manager.run_plugins(plist=plugin_manager.plugins["synth-line"]["post"], event="post synth-line", data=post_data)

    # NOTE(review): req_response is only guaranteed to be bound when either the
    # WaveGlow branch ran or continue_synth stayed True — both hold with the
    # current flow, but a future early-skip path could NameError here.
    return req_response