jlopez00 committed on
Commit
bbf5262
1 Parent(s): 92772e9

Upload folder using huggingface_hub

Browse files
core/__init__.py CHANGED
@@ -3,7 +3,6 @@ import subprocess
3
  import sys
4
  from functools import lru_cache
5
 
6
- from rvc.lib.tools.model_download import model_download_pipeline
7
  from rvc.lib.tools.prerequisites_download import prequisites_download_pipeline
8
  from tts_service.utils import cache_path
9
  from tts_service.voices import voice_manager
@@ -29,7 +28,7 @@ def run_tts_script(
29
  voice = voice_manager.voices[voice_name]
30
  format = "wav"
31
 
32
- output_tts_path = cache_path(voice.tts, tts_text, extension=format)
33
  if not os.path.exists(output_tts_path):
34
  command_tts = [
35
  *map(
@@ -47,7 +46,7 @@ def run_tts_script(
47
  ]
48
  subprocess.run(command_tts)
49
 
50
- output_rvc_path = cache_path(voice.tts, voice.name, tts_text, extension=format)
51
  if not os.path.exists(output_rvc_path):
52
  infer_pipeline = import_voice_converter()
53
  infer_pipeline.convert_audio(
@@ -93,12 +92,6 @@ def run_tts_script(
93
  return "Text synthesized successfully.", str(output_rvc_path)
94
 
95
 
96
- # Download
97
- def run_download_script(model_link: str):
98
- model_download_pipeline(model_link)
99
- return "Model downloaded successfully."
100
-
101
-
102
  # Prerequisites
103
  def run_prerequisites_script(
104
  pretraineds_v1_f0: bool,
 
3
  import sys
4
  from functools import lru_cache
5
 
 
6
  from rvc.lib.tools.prerequisites_download import prequisites_download_pipeline
7
  from tts_service.utils import cache_path
8
  from tts_service.voices import voice_manager
 
28
  voice = voice_manager.voices[voice_name]
29
  format = "wav"
30
 
31
+ output_tts_path = cache_path(voice.tts, "", tts_rate, tts_text, extension=format)
32
  if not os.path.exists(output_tts_path):
33
  command_tts = [
34
  *map(
 
46
  ]
47
  subprocess.run(command_tts)
48
 
49
+ output_rvc_path = cache_path(voice.tts, voice.name, tts_rate, tts_text, extension=format)
50
  if not os.path.exists(output_rvc_path):
51
  infer_pipeline = import_voice_converter()
52
  infer_pipeline.convert_audio(
 
92
  return "Text synthesized successfully.", str(output_rvc_path)
93
 
94
 
 
 
 
 
 
 
95
  # Prerequisites
96
  def run_prerequisites_script(
97
  pretraineds_v1_f0: bool,
rvc/lib/tools/model_download.py CHANGED
@@ -1,18 +1,16 @@
1
  import os
2
  import re
3
- import six
4
  import sys
5
- import wget
6
- import shutil
7
- import zipfile
8
  import requests
 
 
9
  from bs4 import BeautifulSoup
10
- from urllib.parse import unquote, urlencode, parse_qs, urlparse
11
 
12
  now_dir = os.getcwd()
13
  sys.path.append(now_dir)
14
 
15
- from rvc.lib.utils import format_title
16
  from rvc.lib.tools import gdown
17
 
18
 
@@ -27,21 +25,6 @@ file_path = find_folder_parent(now_dir, "logs")
27
  zips_path = os.path.join(file_path, "zips")
28
 
29
 
30
- def search_pth_index(folder):
31
- pth_paths = [
32
- os.path.join(folder, file)
33
- for file in os.listdir(folder)
34
- if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
35
- ]
36
- index_paths = [
37
- os.path.join(folder, file)
38
- for file in os.listdir(folder)
39
- if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
40
- ]
41
-
42
- return pth_paths, index_paths
43
-
44
-
45
  def download_from_url(url):
46
  os.makedirs(zips_path, exist_ok=True)
47
  if url != "":
@@ -62,18 +45,11 @@ def download_from_url(url):
62
  fuzzy=True,
63
  )
64
  except Exception as error:
65
- error_message = str(
66
- f"An error occurred downloading the file: {error}"
67
- )
68
- if (
69
- "Too many users have viewed or downloaded this file recently"
70
- in error_message
71
- ):
72
  os.chdir(now_dir)
73
  return "too much use"
74
- elif (
75
- "Cannot retrieve the public link of the file." in error_message
76
- ):
77
  os.chdir(now_dir)
78
  return "private link"
79
  else:
@@ -89,9 +65,7 @@ def download_from_url(url):
89
  download_response = requests.get(download_url)
90
 
91
  if download_response.status_code == 200:
92
- filename = parse_qs(urlparse(unquote(download_url)).query).get(
93
- "filename", [""]
94
- )[0]
95
  if filename:
96
  os.chdir(zips_path)
97
  with open(filename, "wb") as f:
@@ -107,11 +81,7 @@ def download_from_url(url):
107
  print(file_id)
108
  response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
109
  if response.status_code == 200:
110
- file_name = (
111
- response.headers.get("Content-Disposition")
112
- .split("filename=")[-1]
113
- .strip('";')
114
- )
115
  os.makedirs(zips_path, exist_ok=True)
116
  with open(os.path.join(zips_path, file_name), "wb") as newfile:
117
  newfile.write(response.content)
@@ -141,9 +111,7 @@ def download_from_url(url):
141
 
142
  response = requests.get(url, stream=True)
143
  if response.status_code == 200:
144
- content_disposition = six.moves.urllib_parse.unquote(
145
- response.headers["Content-Disposition"]
146
- )
147
  m = re.search(r'filename="([^"]+)"', content_disposition)
148
  file_name = m.groups()[0]
149
  file_name = file_name.replace(os.path.sep, "_")
@@ -157,15 +125,8 @@ def download_from_url(url):
157
  file.write(data)
158
  progress += len(data)
159
  progress_percent = int((progress / total_size_in_bytes) * 100)
160
- num_dots = int(
161
- (progress / total_size_in_bytes) * progress_bar_length
162
- )
163
- progress_bar = (
164
- "["
165
- + "." * num_dots
166
- + " " * (progress_bar_length - num_dots)
167
- + "]"
168
- )
169
  print(
170
  f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ",
171
  end="\r",
@@ -243,121 +204,3 @@ def download_from_url(url):
243
 
244
  os.chdir(now_dir)
245
  return None
246
-
247
-
248
- def extract_and_show_progress(zipfile_path, unzips_path):
249
- try:
250
- with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
251
- for file_info in zip_ref.infolist():
252
- zip_ref.extract(file_info, unzips_path)
253
- os.remove(zipfile_path)
254
- return True
255
- except Exception as error:
256
- print(f"An error occurred extracting the zip file: {error}")
257
- return False
258
-
259
-
260
- def model_download_pipeline(url: str):
261
- try:
262
- verify = download_from_url(url)
263
- if verify == "downloaded":
264
- extract_folder_path = ""
265
- for filename in os.listdir(zips_path):
266
- if filename.endswith(".zip"):
267
- zipfile_path = os.path.join(zips_path, filename)
268
- print("Proceeding with the extraction...")
269
-
270
- model_zip = os.path.basename(zipfile_path)
271
- model_name = format_title(model_zip.split(".zip")[0])
272
- extract_folder_path = os.path.join(
273
- "logs",
274
- os.path.normpath(model_name),
275
- )
276
- success = extract_and_show_progress(
277
- zipfile_path, extract_folder_path
278
- )
279
-
280
- macosx_path = os.path.join(extract_folder_path, "__MACOSX")
281
- if os.path.exists(macosx_path):
282
- shutil.rmtree(macosx_path)
283
-
284
- subfolders = [
285
- f
286
- for f in os.listdir(extract_folder_path)
287
- if os.path.isdir(os.path.join(extract_folder_path, f))
288
- ]
289
- if len(subfolders) == 1:
290
- subfolder_path = os.path.join(
291
- extract_folder_path, subfolders[0]
292
- )
293
- for item in os.listdir(subfolder_path):
294
- s = os.path.join(subfolder_path, item)
295
- d = os.path.join(extract_folder_path, item)
296
- shutil.move(s, d)
297
- os.rmdir(subfolder_path)
298
-
299
- for item in os.listdir(extract_folder_path):
300
- if ".pth" in item:
301
- file_name = item.split(".pth")[0]
302
- if file_name != model_name:
303
- os.rename(
304
- os.path.join(extract_folder_path, item),
305
- os.path.join(
306
- extract_folder_path, model_name + ".pth"
307
- ),
308
- )
309
- else:
310
- if "v2" not in item:
311
- if "_nprobe_1_" in item and "_v1" in item:
312
- file_name = item.split("_nprobe_1_")[1].split(
313
- "_v1"
314
- )[0]
315
- if file_name != model_name:
316
- new_file_name = (
317
- item.split("_nprobe_1_")[0]
318
- + "_nprobe_1_"
319
- + model_name
320
- + "_v1"
321
- )
322
- os.rename(
323
- os.path.join(extract_folder_path, item),
324
- os.path.join(
325
- extract_folder_path,
326
- new_file_name + ".index",
327
- ),
328
- )
329
- else:
330
- if "_nprobe_1_" in item and "_v2" in item:
331
- file_name = item.split("_nprobe_1_")[1].split(
332
- "_v2"
333
- )[0]
334
- if file_name != model_name:
335
- new_file_name = (
336
- item.split("_nprobe_1_")[0]
337
- + "_nprobe_1_"
338
- + model_name
339
- + "_v2"
340
- )
341
- os.rename(
342
- os.path.join(extract_folder_path, item),
343
- os.path.join(
344
- extract_folder_path,
345
- new_file_name + ".index",
346
- ),
347
- )
348
-
349
- if success:
350
- print(f"Model {model_name} downloaded!")
351
- else:
352
- print(f"Error downloading {model_name}")
353
- return "Error"
354
- if extract_folder_path == "":
355
- print("Zip file was not found.")
356
- return "Error"
357
- result = search_pth_index(extract_folder_path)
358
- return result
359
- else:
360
- return "Error"
361
- except Exception as error:
362
- print(f"An unexpected error occurred: {error}")
363
- return "Error"
 
1
  import os
2
  import re
 
3
  import sys
4
+ from urllib.parse import parse_qs, unquote, urlencode, urlparse
5
+
 
6
  import requests
7
+ import six
8
+ import wget
9
  from bs4 import BeautifulSoup
 
10
 
11
  now_dir = os.getcwd()
12
  sys.path.append(now_dir)
13
 
 
14
  from rvc.lib.tools import gdown
15
 
16
 
 
25
  zips_path = os.path.join(file_path, "zips")
26
 
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  def download_from_url(url):
29
  os.makedirs(zips_path, exist_ok=True)
30
  if url != "":
 
45
  fuzzy=True,
46
  )
47
  except Exception as error:
48
+ error_message = str(f"An error occurred downloading the file: {error}")
49
+ if "Too many users have viewed or downloaded this file recently" in error_message:
 
 
 
 
 
50
  os.chdir(now_dir)
51
  return "too much use"
52
+ elif "Cannot retrieve the public link of the file." in error_message:
 
 
53
  os.chdir(now_dir)
54
  return "private link"
55
  else:
 
65
  download_response = requests.get(download_url)
66
 
67
  if download_response.status_code == 200:
68
+ filename = parse_qs(urlparse(unquote(download_url)).query).get("filename", [""])[0]
 
 
69
  if filename:
70
  os.chdir(zips_path)
71
  with open(filename, "wb") as f:
 
81
  print(file_id)
82
  response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
83
  if response.status_code == 200:
84
+ file_name = response.headers.get("Content-Disposition").split("filename=")[-1].strip('";')
 
 
 
 
85
  os.makedirs(zips_path, exist_ok=True)
86
  with open(os.path.join(zips_path, file_name), "wb") as newfile:
87
  newfile.write(response.content)
 
111
 
112
  response = requests.get(url, stream=True)
113
  if response.status_code == 200:
114
+ content_disposition = six.moves.urllib_parse.unquote(response.headers["Content-Disposition"])
 
 
115
  m = re.search(r'filename="([^"]+)"', content_disposition)
116
  file_name = m.groups()[0]
117
  file_name = file_name.replace(os.path.sep, "_")
 
125
  file.write(data)
126
  progress += len(data)
127
  progress_percent = int((progress / total_size_in_bytes) * 100)
128
+ num_dots = int((progress / total_size_in_bytes) * progress_bar_length)
129
+ progress_bar = "[" + "." * num_dots + " " * (progress_bar_length - num_dots) + "]"
 
 
 
 
 
 
 
130
  print(
131
  f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ",
132
  end="\r",
 
204
 
205
  os.chdir(now_dir)
206
  return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
rvc/lib/utils.py CHANGED
@@ -1,15 +1,14 @@
1
- import os, sys
 
 
 
 
2
  import librosa
3
- import soundfile as sf
4
  import numpy as np
5
- import re
6
- import unicodedata
7
  import wget
8
  from torch import nn
9
-
10
- import logging
11
  from transformers import HubertModel
12
- import warnings
13
 
14
  # Remove this to see warnings about transformers models
15
  warnings.filterwarnings("ignore")
@@ -65,16 +64,6 @@ def load_audio_infer(
65
  return np.array(audio).flatten()
66
 
67
 
68
- def format_title(title):
69
- formatted_title = (
70
- unicodedata.normalize("NFKD", title).encode("ascii", "ignore").decode("utf-8")
71
- )
72
- formatted_title = re.sub(r"[\u2500-\u257F]+", "", formatted_title)
73
- formatted_title = re.sub(r"[^\w\s.-]", "", formatted_title)
74
- formatted_title = re.sub(r"\s+", "_", formatted_title)
75
- return formatted_title
76
-
77
-
78
  def load_embedding(embedder_model, custom_embedder=None):
79
  embedder_root = os.path.join(now_dir, "rvc", "models", "embedders")
80
  embedding_list = {
 
1
+ import logging
2
+ import os
3
+ import sys
4
+ import warnings
5
+
6
  import librosa
 
7
  import numpy as np
8
+ import soundfile as sf
 
9
  import wget
10
  from torch import nn
 
 
11
  from transformers import HubertModel
 
12
 
13
  # Remove this to see warnings about transformers models
14
  warnings.filterwarnings("ignore")
 
64
  return np.array(audio).flatten()
65
 
66
 
 
 
 
 
 
 
 
 
 
 
67
  def load_embedding(embedder_model, custom_embedder=None):
68
  embedder_root = os.path.join(now_dir, "rvc", "models", "embedders")
69
  embedding_list = {
tabs/download/download.py CHANGED
@@ -1,22 +1,13 @@
1
  import os
2
- import sys
3
- import json
4
  import shutil
5
- import requests
6
  import tempfile
7
- import gradio as gr
8
- import pandas as pd
9
-
10
- from concurrent.futures import ThreadPoolExecutor
11
- from tqdm import tqdm
12
 
 
13
 
14
  now_dir = os.getcwd()
15
  sys.path.append(now_dir)
16
 
17
- from core import run_download_script
18
- from rvc.lib.utils import format_title
19
-
20
  from assets.i18n.i18n import I18nAuto
21
 
22
  i18n = I18nAuto()
@@ -27,107 +18,6 @@ if os.path.exists(gradio_temp_dir):
27
  shutil.rmtree(gradio_temp_dir)
28
 
29
 
30
- def save_drop_model(dropbox):
31
- if "pth" not in dropbox and "index" not in dropbox:
32
- raise gr.Error(
33
- message="The file you dropped is not a valid model file. Please try again."
34
- )
35
- else:
36
- file_name = format_title(os.path.basename(dropbox))
37
- if ".pth" in dropbox:
38
- model_name = format_title(file_name.split(".pth")[0])
39
- else:
40
- if "v2" not in dropbox:
41
- model_name = format_title(
42
- file_name.split("_nprobe_1_")[1].split("_v1")[0]
43
- )
44
- else:
45
- model_name = format_title(
46
- file_name.split("_nprobe_1_")[1].split("_v2")[0]
47
- )
48
- model_path = os.path.join(now_dir, "logs", model_name)
49
- if not os.path.exists(model_path):
50
- os.makedirs(model_path)
51
- if os.path.exists(os.path.join(model_path, file_name)):
52
- os.remove(os.path.join(model_path, file_name))
53
- shutil.move(dropbox, os.path.join(model_path, file_name))
54
- print(f"{file_name} saved in {model_path}")
55
- gr.Info(f"{file_name} saved in {model_path}")
56
- return None
57
-
58
-
59
- def search_models(name):
60
- url = f"https://cjtfqzjfdimgpvpwhzlv.supabase.co/rest/v1/models?name=ilike.%25{name}%25&order=created_at.desc&limit=15"
61
- headers = {
62
- "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImNqdGZxempmZGltZ3B2cHdoemx2Iiwicm9sZSI6ImFub24iLCJpYXQiOjE3MjY5MjYxMzQsImV4cCI6MjA0MjUwMjEzNH0.OyDXlhvH6D-IsHiWhPAGUtsPGGUvWQynfxUeQwfYToE"
63
- }
64
- response = requests.get(url, headers=headers)
65
- data = response.json()
66
- if len(data) == 0:
67
- gr.Info(i18n("We couldn't find models by that name."))
68
- return None
69
- else:
70
- df = pd.DataFrame(data)[["name", "link", "epochs", "type"]]
71
- df["link"] = df["link"].apply(
72
- lambda x: f'<a href="{x}" target="_blank">{x}</a>'
73
- )
74
- return df
75
-
76
-
77
- json_url = "https://huggingface.co/IAHispano/Applio/raw/main/pretrains.json"
78
-
79
-
80
- def fetch_pretrained_data():
81
- pretraineds_custom_path = os.path.join(
82
- "rvc", "models", "pretraineds", "pretraineds_custom"
83
- )
84
- os.makedirs(pretraineds_custom_path, exist_ok=True)
85
- try:
86
- with open(
87
- os.path.join(pretraineds_custom_path, json_url.split("/")[-1]), "r"
88
- ) as f:
89
- data = json.load(f)
90
- except:
91
- try:
92
- response = requests.get(json_url)
93
- response.raise_for_status()
94
- data = response.json()
95
- with open(
96
- os.path.join(pretraineds_custom_path, json_url.split("/")[-1]),
97
- "w",
98
- encoding="utf-8",
99
- ) as f:
100
- json.dump(
101
- data,
102
- f,
103
- indent=2,
104
- separators=(",", ": "),
105
- ensure_ascii=False,
106
- )
107
- except:
108
- data = {
109
- "Titan": {
110
- "32k": {"D": "null", "G": "null"},
111
- },
112
- }
113
- return data
114
-
115
-
116
- def get_pretrained_list():
117
- data = fetch_pretrained_data()
118
- return list(data.keys())
119
-
120
-
121
- def get_pretrained_sample_rates(model):
122
- data = fetch_pretrained_data()
123
- return list(data[model].keys())
124
-
125
-
126
- def get_file_size(url):
127
- response = requests.head(url)
128
- return int(response.headers.get("content-length", 0))
129
-
130
-
131
  def download_file(url, destination_path, progress_bar):
132
  os.makedirs(os.path.dirname(destination_path), exist_ok=True)
133
  response = requests.get(url, stream=True)
@@ -136,128 +26,3 @@ def download_file(url, destination_path, progress_bar):
136
  for data in response.iter_content(block_size):
137
  file.write(data)
138
  progress_bar.update(len(data))
139
-
140
-
141
- def download_pretrained_model(model, sample_rate):
142
- data = fetch_pretrained_data()
143
- paths = data[model][sample_rate]
144
- pretraineds_custom_path = os.path.join(
145
- "rvc", "models", "pretraineds", "pretraineds_custom"
146
- )
147
- os.makedirs(pretraineds_custom_path, exist_ok=True)
148
-
149
- d_url = f"https://huggingface.co/{paths['D']}"
150
- g_url = f"https://huggingface.co/{paths['G']}"
151
-
152
- total_size = get_file_size(d_url) + get_file_size(g_url)
153
-
154
- gr.Info("Downloading pretrained model...")
155
-
156
- with tqdm(
157
- total=total_size, unit="iB", unit_scale=True, desc="Downloading files"
158
- ) as progress_bar:
159
- with ThreadPoolExecutor(max_workers=2) as executor:
160
- futures = [
161
- executor.submit(
162
- download_file,
163
- d_url,
164
- os.path.join(pretraineds_custom_path, os.path.basename(paths["D"])),
165
- progress_bar,
166
- ),
167
- executor.submit(
168
- download_file,
169
- g_url,
170
- os.path.join(pretraineds_custom_path, os.path.basename(paths["G"])),
171
- progress_bar,
172
- ),
173
- ]
174
- for future in futures:
175
- future.result()
176
-
177
- gr.Info("Pretrained model downloaded successfully!")
178
- print("Pretrained model downloaded successfully!")
179
-
180
-
181
- def update_sample_rate_dropdown(model):
182
- return {
183
- "choices": get_pretrained_sample_rates(model),
184
- "value": get_pretrained_sample_rates(model)[0],
185
- "__type__": "update",
186
- }
187
-
188
-
189
- def download_tab():
190
- with gr.Column():
191
- gr.Markdown(value=i18n("## Download Model"))
192
- model_link = gr.Textbox(
193
- label=i18n("Model Link"),
194
- placeholder=i18n("Introduce the model link"),
195
- interactive=True,
196
- )
197
- model_download_output_info = gr.Textbox(
198
- label=i18n("Output Information"),
199
- info=i18n("The output information will be displayed here."),
200
- value="",
201
- max_lines=8,
202
- interactive=False,
203
- )
204
- model_download_button = gr.Button(i18n("Download Model"))
205
- model_download_button.click(
206
- fn=run_download_script,
207
- inputs=[model_link],
208
- outputs=[model_download_output_info],
209
- )
210
- gr.Markdown(value=i18n("## Drop files"))
211
- dropbox = gr.File(
212
- label=i18n(
213
- "Drag your .pth file and .index file into this space. Drag one and then the other."
214
- ),
215
- type="filepath",
216
- )
217
-
218
- dropbox.upload(
219
- fn=save_drop_model,
220
- inputs=[dropbox],
221
- outputs=[dropbox],
222
- )
223
- gr.Markdown(value=i18n("## Search Model"))
224
- search_name = gr.Textbox(
225
- label=i18n("Model Name"),
226
- placeholder=i18n("Introduce the model name to search."),
227
- interactive=True,
228
- )
229
- search_table = gr.Dataframe(datatype="markdown")
230
- search = gr.Button(i18n("Search"))
231
- search.click(
232
- fn=search_models,
233
- inputs=[search_name],
234
- outputs=[search_table],
235
- )
236
- search_name.submit(search_models, [search_name], search_table)
237
- gr.Markdown(value=i18n("## Download Pretrained Models"))
238
- pretrained_model = gr.Dropdown(
239
- label=i18n("Pretrained"),
240
- info=i18n("Select the pretrained model you want to download."),
241
- choices=get_pretrained_list(),
242
- value="Titan",
243
- interactive=True,
244
- )
245
- pretrained_sample_rate = gr.Dropdown(
246
- label=i18n("Sampling Rate"),
247
- info=i18n("And select the sampling rate."),
248
- choices=get_pretrained_sample_rates(pretrained_model.value),
249
- value="40k",
250
- interactive=True,
251
- allow_custom_value=True,
252
- )
253
- pretrained_model.change(
254
- update_sample_rate_dropdown,
255
- inputs=[pretrained_model],
256
- outputs=[pretrained_sample_rate],
257
- )
258
- download_pretrained = gr.Button(i18n("Download"))
259
- download_pretrained.click(
260
- fn=download_pretrained_model,
261
- inputs=[pretrained_model, pretrained_sample_rate],
262
- outputs=[],
263
- )
 
1
  import os
 
 
2
  import shutil
3
+ import sys
4
  import tempfile
 
 
 
 
 
5
 
6
+ import requests
7
 
8
  now_dir = os.getcwd()
9
  sys.path.append(now_dir)
10
 
 
 
 
11
  from assets.i18n.i18n import I18nAuto
12
 
13
  i18n = I18nAuto()
 
18
  shutil.rmtree(gradio_temp_dir)
19
 
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  def download_file(url, destination_path, progress_bar):
22
  os.makedirs(os.path.dirname(destination_path), exist_ok=True)
23
  response = requests.get(url, stream=True)
 
26
  for data in response.iter_content(block_size):
27
  file.write(data)
28
  progress_bar.update(len(data))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tabs/plugins/plugins_core.py CHANGED
@@ -1,17 +1,16 @@
1
- import os, sys, shutil
2
  import json
3
- import gradio as gr
4
- import zipfile
5
  import subprocess
 
6
 
7
  from assets.i18n.i18n import I18nAuto
 
8
 
9
  i18n = I18nAuto()
10
 
11
  now_dir = os.getcwd()
12
  sys.path.append(now_dir)
13
 
14
- from tabs.settings.sections.restart import restart_applio
15
 
16
  plugins_path = os.path.join(now_dir, "tabs", "plugins", "installed")
17
  if not os.path.exists(plugins_path):
@@ -22,7 +21,7 @@ current_folders = os.listdir(plugins_path)
22
 
23
  def get_existing_folders():
24
  if os.path.exists(json_file_path):
25
- with open(json_file_path, "r") as file:
26
  config = json.load(file)
27
  return config["plugins"]
28
  else:
@@ -30,7 +29,7 @@ def get_existing_folders():
30
 
31
 
32
  def save_existing_folders(existing_folders):
33
- with open(json_file_path, "r") as file:
34
  config = json.load(file)
35
  config["plugins"] = existing_folders
36
  with open(json_file_path, "w") as file:
 
 
1
  import json
2
+ import os
 
3
  import subprocess
4
+ import sys
5
 
6
  from assets.i18n.i18n import I18nAuto
7
+ from tabs.settings.sections.restart import restart_applio
8
 
9
  i18n = I18nAuto()
10
 
11
  now_dir = os.getcwd()
12
  sys.path.append(now_dir)
13
 
 
14
 
15
  plugins_path = os.path.join(now_dir, "tabs", "plugins", "installed")
16
  if not os.path.exists(plugins_path):
 
21
 
22
  def get_existing_folders():
23
  if os.path.exists(json_file_path):
24
+ with open(json_file_path) as file:
25
  config = json.load(file)
26
  return config["plugins"]
27
  else:
 
29
 
30
 
31
  def save_existing_folders(existing_folders):
32
+ with open(json_file_path) as file:
33
  config = json.load(file)
34
  config["plugins"] = existing_folders
35
  with open(json_file_path, "w") as file:
tts_service/app.py CHANGED
@@ -7,7 +7,6 @@ import assets.installation_checker as installation_checker
7
  import assets.themes.loadThemes as loadThemes
8
  from assets.i18n.i18n import I18nAuto
9
  from core import run_prerequisites_script
10
- from tabs.download.download import download_tab
11
  from tabs.plugins import plugins_core
12
  from tabs.tts.tts import tts_tab
13
  from tts_service.utils import env_bool
@@ -44,18 +43,15 @@ installation_checker.check_installation()
44
  my_applio = loadThemes.load_theme() or "ParityError/Interstellar"
45
 
46
  # Define Gradio interface
47
- with gr.Blocks(theme=my_applio, title="Applio", css="footer{display:none !important}") as Applio:
48
  gr.Markdown("# Text-to-Speech Playground")
49
  gr.Markdown(i18n("Select a voice model, enter text, and press 'Convert' to synthesize speech."))
50
  with gr.Tab(i18n("TTS")):
51
  tts_tab()
52
 
53
- with gr.Tab(i18n("Download")):
54
- download_tab()
55
-
56
 
57
  def launch_gradio():
58
- Applio.launch(
59
  favicon_path="assets/ICON.ico",
60
  share="--share" in sys.argv,
61
  inbrowser="--open" in sys.argv,
 
7
  import assets.themes.loadThemes as loadThemes
8
  from assets.i18n.i18n import I18nAuto
9
  from core import run_prerequisites_script
 
10
  from tabs.plugins import plugins_core
11
  from tabs.tts.tts import tts_tab
12
  from tts_service.utils import env_bool
 
43
  my_applio = loadThemes.load_theme() or "ParityError/Interstellar"
44
 
45
  # Define Gradio interface
46
+ with gr.Blocks(theme=my_applio, title="TTS Playground", css="footer{display:none !important}") as app:
47
  gr.Markdown("# Text-to-Speech Playground")
48
  gr.Markdown(i18n("Select a voice model, enter text, and press 'Convert' to synthesize speech."))
49
  with gr.Tab(i18n("TTS")):
50
  tts_tab()
51
 
 
 
 
52
 
53
  def launch_gradio():
54
+ app.queue(status_update_rate=1).launch(
55
  favicon_path="assets/ICON.ico",
56
  share="--share" in sys.argv,
57
  inbrowser="--open" in sys.argv,
tts_service/cli.py CHANGED
@@ -22,9 +22,9 @@ def main() -> None:
22
  @click.option("--share", is_flag=True, help="Share the service")
23
  def serve(share: bool) -> None:
24
  """Start the TTS Service"""
25
- from tts_service.app import Applio
26
 
27
- Applio.launch(share=share)
28
 
29
 
30
  @main.group()
 
22
  @click.option("--share", is_flag=True, help="Share the service")
23
  def serve(share: bool) -> None:
24
  """Start the TTS Service"""
25
+ from tts_service.app import app
26
 
27
+ app.launch(share=share)
28
 
29
 
30
  @main.group()
tts_service/whitelist.py CHANGED
@@ -1,3 +1,4 @@
 
1
  _.secondary_100 # unused attribute (assets/themes/Applio.py:44)
2
  _.secondary_200 # unused attribute (assets/themes/Applio.py:45)
3
  _.secondary_300 # unused attribute (assets/themes/Applio.py:46)
@@ -12,13 +13,10 @@ _.secondary_950 # unused attribute (assets/themes/Applio.py:54)
12
  __getattr__ # unused function (rvc/lib/predictors/FCPE.py:799)
13
  _.graph # unused attribute (rvc/lib/zluda.py:33)
14
  _.enabled # unused attribute (rvc/lib/zluda.py:40)
15
- rvc # unused import (rvc/train/extract/extract.py:19)
16
  _.nprobe # unused attribute (rvc/train/process/extract_index.py:76)
17
  rvc # unused import (rvc/train/train.py:28)
18
- _.deterministic # unused attribute (rvc/train/train.py:80)
19
- _.benchmark # unused attribute (rvc/train/train.py:81)
20
- losses_disc_g # unused variable (rvc/train/train.py:632)
21
- losses_disc_r # unused variable (rvc/train/train.py:632)
22
- losses_gen # unused variable (rvc/train/train.py:651)
23
- components # unused variable (tabs/report/report.py:55)
24
- rvc # unused import (tts_service/app.py:19)
 
1
+ Applio # unused class (assets/themes/Applio.py:12)
2
  _.secondary_100 # unused attribute (assets/themes/Applio.py:44)
3
  _.secondary_200 # unused attribute (assets/themes/Applio.py:45)
4
  _.secondary_300 # unused attribute (assets/themes/Applio.py:46)
 
13
  __getattr__ # unused function (rvc/lib/predictors/FCPE.py:799)
14
  _.graph # unused attribute (rvc/lib/zluda.py:33)
15
  _.enabled # unused attribute (rvc/lib/zluda.py:40)
 
16
  _.nprobe # unused attribute (rvc/train/process/extract_index.py:76)
17
  rvc # unused import (rvc/train/train.py:28)
18
+ _.deterministic # unused attribute (rvc/train/train.py:78)
19
+ _.benchmark # unused attribute (rvc/train/train.py:79)
20
+ losses_disc_g # unused variable (rvc/train/train.py:630)
21
+ losses_disc_r # unused variable (rvc/train/train.py:630)
22
+ losses_gen # unused variable (rvc/train/train.py:649)
 
 
web-root/index.html ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <meta name="description" content="Description of the page" />
7
+ <meta name="author" content="Author Name" />
8
+ <script
9
+ type="module"
10
+ src="https://gradio.s3-us-west-2.amazonaws.com/4.43.0/gradio.js"
11
+ ></script>
12
+ <title>TTS Playground</title>
13
+ </head>
14
+ <body style="background-color: black">
15
+ <gradio-app src="https://jlopez00-tts-service.hf.space"></gradio-app>
16
+ </body>
17
+ </html>