xJuuzouYTx committed
Commit 925d97e
1 Parent(s): 97fc03f

[ADD] youtube video download as wav

.gitignore CHANGED
@@ -4,6 +4,7 @@ __pycache__
 /audios/
 /audio-outputs/
 /LOGS
+/yt_videos
 /RUNTIME
 *.pyd
 hubert_base.pt
app.py CHANGED
@@ -1,166 +1,48 @@
 import gradio as gr
-from inference import Inference
 import os
-import zipfile
-import hashlib
-from utils.model import model_downloader, get_model
-import requests
-import json
-import torch
-from tts.constants import VOICE_METHODS, BARK_VOICES, EDGE_VOICES
-from tts.conversion import tts_infer, ELEVENLABS_VOICES_RAW, ELEVENLABS_VOICES_NAMES, COQUI_LANGUAGES
-
-api_url = "https://rvc-models-api.onrender.com/uploadfile/"
-
-zips_folder = "./zips"
-unzips_folder = "./unzips"
-if not os.path.exists(zips_folder):
-    os.mkdir(zips_folder)
-if not os.path.exists(unzips_folder):
-    os.mkdir(unzips_folder)
-
-def get_info(path):
-    path = os.path.join(unzips_folder, path)
-    try:
-        a = torch.load(path, map_location="cpu")
-        return a
-    except Exception as e:
-        print("*****************eeeeeeeeeeeeeeeeeeeerrrrrrrrrrrrrrrrrr*****")
-        print(e)
-        return {
-
-        }
-def calculate_md5(file_path):
-    hash_md5 = hashlib.md5()
-    with open(file_path, "rb") as f:
-        for chunk in iter(lambda: f.read(4096), b""):
-            hash_md5.update(chunk)
-    return hash_md5.hexdigest()
-
-def compress(modelname, files):
-    file_path = os.path.join(zips_folder, f"{modelname}.zip")
-    # Select the compression mode ZIP_DEFLATED for compression
-    # or zipfile.ZIP_STORED to just store the file
-    compression = zipfile.ZIP_DEFLATED
-
-    # Check whether the ZIP file already exists
-    if not os.path.exists(file_path):
-        # If it does not exist, create the ZIP file
-        with zipfile.ZipFile(file_path, mode="w") as zf:
-            try:
-                for file in files:
-                    if file:
-                        # Add the file to the ZIP archive
-                        zf.write(unzips_folder if ".index" in file else os.path.join(unzips_folder, file), compress_type=compression)
-            except FileNotFoundError as fnf:
-                print("An error occurred", fnf)
-    else:
-        # If the ZIP file already exists, append the files to the existing archive
-        with zipfile.ZipFile(file_path, mode="a") as zf:
-            try:
-                for file in files:
-                    if file:
-                        # Add the file to the ZIP archive
-                        zf.write(unzips_folder if ".index" in file else os.path.join(unzips_folder, file), compress_type=compression)
-            except FileNotFoundError as fnf:
-                print("An error occurred", fnf)
-
-    return file_path
-
-def infer(model, f0_method, audio_file):
-    print("****", audio_file)
-    inference = Inference(
-        model_name=model,
-        f0_method=f0_method,
-        source_audio_path=audio_file,
-        output_file_name=os.path.join("./audio-outputs", os.path.basename(audio_file))
-    )
-    output = inference.run()
-    if 'success' in output and output['success']:
-        return output, output['file']
-    else:
-        return
-
-
-def post_model(name, model_url, version, creator):
-    modelname = model_downloader(model_url, zips_folder, unzips_folder)
-    model_files = get_model(unzips_folder, modelname)
-
-    if not model_files:
-        return "No se encontrado un modelo valido, verifica el contenido del enlace e intentalo más tarde."
-
-    if not model_files.get('pth'):
-        return "No se encontrado un modelo valido, verifica el contenido del enlace e intentalo más tarde."
-
-    md5_hash = calculate_md5(os.path.join(unzips_folder,model_files['pth']))
-    zipfile = compress(modelname, list(model_files.values()))
-
-    a = get_info(model_files.get('pth'))
-    file_to_upload = open(zipfile, "rb")
-    info = a.get("info", "None"),
-    sr = a.get("sr", "None"),
-    f0 = a.get("f0", "None"),
-
-    data = {
-        "name": name,
-        "version": version,
-        "creator": creator,
-        "hash": md5_hash,
-        "info": info,
-        "sr": sr,
-        "f0": f0
-    }
-    print("Subiendo archivo...")
-    # Send the POST request
-    response = requests.post(api_url, files={"file": file_to_upload}, data=data)
-    result = response.json()
-
-    # Check the response
-    if response.status_code == 200:
-        result = response.json()
-        return json.dumps(result, indent=4)
-    else:
-        print("Error al cargar el archivo:", response.status_code)
-        return result
-
-
-def search_model(name):
-    web_service_url = "https://script.google.com/macros/s/AKfycbyRaNxtcuN8CxUrcA_nHW6Sq9G2QJor8Z2-BJUGnQ2F_CB8klF4kQL--U2r2MhLFZ5J/exec"
-    response = requests.post(web_service_url, json={
-        'type': 'search_by_filename',
-        'name': name
-    })
-    result = []
-    response.raise_for_status()  # Raise an exception on error
-    json_response = response.json()
-    cont = 0
-    result.append("""| Nombre del modelo | Url | Epoch | Sample Rate |
-| ---------------- | -------------- |:------:|:-----------:|
-""")
-    yield "<br />".join(result)
-    if json_response.get('ok', None):
-        for model in json_response['ocurrences']:
-            if cont < 20:
-                model_name = str(model.get('name', 'N/A')).strip()
-                model_url = model.get('url', 'N/A')
-                epoch = model.get('epoch', 'N/A')
-                sr = model.get('sr', 'N/A')
-                line = f"""|{model_name}|<a>{model_url}</a>|{epoch}|{sr}|
-"""
-                result.append(line)
-                yield "".join(result)
-            cont += 1
-
-def update_tts_methods_voice(select_value):
-    if select_value == "Edge-tts":
-        return gr.Dropdown.update(choices=EDGE_VOICES, visible=True, value="es-CO-GonzaloNeural-Male"), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=False)
-    elif select_value == "Bark-tts":
-        return gr.Dropdown.update(choices=BARK_VOICES, visible=True), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=False)
-    elif select_value == 'ElevenLabs':
-        return gr.Dropdown.update(choices=ELEVENLABS_VOICES_NAMES, visible=True, value="Bella"), gr.Markdown.update(visible=True), gr.Textbox.update(visible=True), gr.Radio.update(visible=False)
-    elif select_value == 'CoquiTTS':
-        return gr.Dropdown.update(visible=False), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=True)
-
+from constants import VOICE_METHODS, BARK_VOICES, EDGE_VOICES
+import platform
+from models.model import *
+from tts.conversion import COQUI_LANGUAGES
+import pytube
+import os
+import traceback
+from pydub import AudioSegment
+# from audio_enhance.functions import audio_enhance
+
+def convert_yt_to_wav(url):
+    if not url:
+        return "Primero introduce el enlace del video", None
+
+    try:
+        print(f"Convirtiendo video {url}...")
+        # Download the video using pytube
+        video = pytube.YouTube(url)
+        stream = video.streams.filter(only_audio=True).first()
+        video_output_folder = os.path.join(f"yt_videos")  # Destination folder for the download
+        audio_output_folder = 'audios'
+
+        print("Downloading video")
+        video_file_path = stream.download(output_path=video_output_folder)
+        print(video_file_path)
+
+        file_name = os.path.basename(video_file_path)
+
+        audio_file_path = os.path.join(audio_output_folder, file_name.replace('.mp4','.wav'))
+        # convert mp4 to wav
+        print("Converting to wav")
+        sound = AudioSegment.from_file(video_file_path,format="mp4")
+        sound.export(audio_file_path, format="wav")
+
+        if os.path.exists(video_file_path):
+            os.remove(video_file_path)
+
+        return "Success", audio_file_path
+    except ConnectionResetError as cre:
+        return "Se ha perdido la conexión, recarga o reintentalo nuevamente más tarde.", None
+    except Exception as e:
+        return str(e), None
+
 with gr.Blocks() as app:
     gr.HTML("<h1> Simple RVC Inference - by Juuxn 💻 </h1>")
 
@@ -215,6 +97,29 @@ with gr.Blocks() as app:
             """, visible=False)
 
         tts_method.change(fn=update_tts_methods_voice, inputs=[tts_method], outputs=[tts_model, tts_msg, tts_api_key, tts_coqui_languages])
+
+    with gr.TabItem("Youtube"):
+        gr.Markdown("## Convertir video de Youtube a audio")
+        with gr.Row():
+            yt_url = gr.Textbox(
+                label="Url del video:",
+                placeholder="https://www.youtube.com/watch?v=3vEiqil5d3Q"
+            )
+            yt_btn = gr.Button(value="Convertir")
+
+        with gr.Row():
+            yt_output1 = gr.Textbox(label="Salida")
+            yt_output2 = gr.Audio(label="Audio de salida")
+
+        yt_btn.click(fn=convert_yt_to_wav, inputs=[yt_url], outputs=[yt_output1, yt_output2])
+
+    # with gr.TabItem("Mejora de audio"):
+    #     enhance_input_audio = gr.Audio(label="Audio de entrada")
+    #     enhance_output_audio = gr.Audio(label="Audio de salida")
+
+    #     btn_enhance_audio = gr.Button()
+    #     # btn_enhance_audio.click(fn=audio_enhance, inputs=[enhance_input_audio], outputs=[enhance_output_audio])
+
 
     with gr.Tab("Modelos"):
         gr.HTML("<h4>Buscar modelos</h4>")
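
For reference, the YouTube-to-WAV flow that `convert_yt_to_wav` wires into the new "Youtube" tab can be exercised outside of Gradio. The snippet below is an illustrative sketch, not part of the commit: the helper name `download_youtube_as_wav` and the `os.makedirs` calls are additions, while the pytube/pydub calls and the `yt_videos`/`audios` folders mirror the code above. pydub relies on an ffmpeg binary to decode the MP4 container.

    import os
    import pytube
    from pydub import AudioSegment

    def download_youtube_as_wav(url, video_dir="yt_videos", audio_dir="audios"):
        # Make sure both working folders exist (the app assumes they do).
        os.makedirs(video_dir, exist_ok=True)
        os.makedirs(audio_dir, exist_ok=True)

        # Grab the first audio-only stream; pytube saves it as an .mp4 container.
        stream = pytube.YouTube(url).streams.filter(only_audio=True).first()
        video_path = stream.download(output_path=video_dir)

        # Decode the .mp4 audio via pydub/ffmpeg and re-export it as .wav.
        wav_path = os.path.join(audio_dir, os.path.basename(video_path).replace(".mp4", ".wav"))
        AudioSegment.from_file(video_path, format="mp4").export(wav_path, format="wav")

        os.remove(video_path)  # keep only the converted audio, as the app does
        return wav_path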
audio_enhance/functions.py ADDED
@@ -0,0 +1,24 @@
+import torchaudio
+import numpy as np
+import torchaudio.transforms as T
+from df import enhance, init_df
+
+df_sr = 48000
+model, df_state, _ = init_df()
+
+def audio_enchance(input_audio):
+    extension = input_audio.split('.')[-1]
+    if extension not in ['wav', 'mpeg', 'ogg']:
+        return "El formato del audio no es valido, usa wav, mpeg o ogg", None
+    else:
+        noisy_audio, sr = torchaudio.load(input_audio)
+        print("np.shape(noisy_audio)", np.shape(noisy_audio))
+
+        if sr != df_sr:
+            resampler = T.Resample(orig_freq=sr, new_freq=df_sr)
+            noisy_audio = resampler(noisy_audio)
+
+        output_audio = enhance(model, df_state, noisy_audio)
+        return np.shape(noisy_audio), noisy_audio
+
+
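
Note that this helper is only referenced from the commented-out "Mejora de audio" tab in app.py, so nothing calls it yet, and as committed `audio_enchance` (note the spelling, versus the `audio_enhance` name imported in app.py) returns the shape of the noisy tensor and the unprocessed audio rather than the `output_audio` it computes. A minimal sketch of the presumably intended flow, reusing the same `df` calls the file imports, could look like this; `enhance_to_wav` and its output path are illustrative and not part of the commit.

    import torchaudio
    import torchaudio.transforms as T
    from df import enhance, init_df  # same imports as the file above

    model, df_state, _ = init_df()
    DF_SR = 48000  # DeepFilterNet operates at 48 kHz

    def enhance_to_wav(input_path, output_path="enhanced.wav"):
        audio, sr = torchaudio.load(input_path)
        if sr != DF_SR:
            # Resample to the rate the enhancement model expects.
            audio = T.Resample(orig_freq=sr, new_freq=DF_SR)(audio)
        cleaned = enhance(model, df_state, audio)  # denoised tensor, not the noisy input
        torchaudio.save(output_path, cleaned, DF_SR)
        return output_path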
tts/constants.py → constants.py RENAMED
@@ -1,3 +1,6 @@
+zips_folder = "./zips"
+unzips_folder = "./unzips"
+
 VOICE_METHODS = ["Edge-tts", "CoquiTTS", "ElevenLabs",]
 
 BARK_VOICES = [
models/model.py ADDED
@@ -0,0 +1,160 @@
+import zipfile
+import hashlib
+from utils.model import model_downloader, get_model
+import requests
+import json
+import torch
+import os
+from inference import Inference
+import gradio as gr
+from constants import VOICE_METHODS, BARK_VOICES, EDGE_VOICES, zips_folder, unzips_folder
+from tts.conversion import tts_infer, ELEVENLABS_VOICES_RAW, ELEVENLABS_VOICES_NAMES
+
+api_url = "https://rvc-models-api.onrender.com/uploadfile/"
+
+if not os.path.exists(zips_folder):
+    os.mkdir(zips_folder)
+if not os.path.exists(unzips_folder):
+    os.mkdir(unzips_folder)
+
+def get_info(path):
+    path = os.path.join(unzips_folder, path)
+    try:
+        a = torch.load(path, map_location="cpu")
+        return a
+    except Exception as e:
+        print("*****************eeeeeeeeeeeeeeeeeeeerrrrrrrrrrrrrrrrrr*****")
+        print(e)
+        return {
+
+        }
+def calculate_md5(file_path):
+    hash_md5 = hashlib.md5()
+    with open(file_path, "rb") as f:
+        for chunk in iter(lambda: f.read(4096), b""):
+            hash_md5.update(chunk)
+    return hash_md5.hexdigest()
+
+def compress(modelname, files):
+    file_path = os.path.join(zips_folder, f"{modelname}.zip")
+    # Select the compression mode ZIP_DEFLATED for compression
+    # or zipfile.ZIP_STORED to just store the file
+    compression = zipfile.ZIP_DEFLATED
+
+    # Check whether the ZIP file already exists
+    if not os.path.exists(file_path):
+        # If it does not exist, create the ZIP file
+        with zipfile.ZipFile(file_path, mode="w") as zf:
+            try:
+                for file in files:
+                    if file:
+                        # Add the file to the ZIP archive
+                        zf.write(unzips_folder if ".index" in file else os.path.join(unzips_folder, file), compress_type=compression)
+            except FileNotFoundError as fnf:
+                print("An error occurred", fnf)
+    else:
+        # If the ZIP file already exists, append the files to the existing archive
+        with zipfile.ZipFile(file_path, mode="a") as zf:
+            try:
+                for file in files:
+                    if file:
+                        # Add the file to the ZIP archive
+                        zf.write(unzips_folder if ".index" in file else os.path.join(unzips_folder, file), compress_type=compression)
+            except FileNotFoundError as fnf:
+                print("An error occurred", fnf)
+
+    return file_path
+
+def infer(model, f0_method, audio_file):
+    print("****", audio_file)
+    inference = Inference(
+        model_name=model,
+        f0_method=f0_method,
+        source_audio_path=audio_file,
+        output_file_name=os.path.join("./audio-outputs", os.path.basename(audio_file))
+    )
+    output = inference.run()
+    if 'success' in output and output['success']:
+        return output, output['file']
+    else:
+        return
+
+
+def post_model(name, model_url, version, creator):
+    modelname = model_downloader(model_url, zips_folder, unzips_folder)
+    model_files = get_model(unzips_folder, modelname)
+
+    if not model_files:
+        return "No se encontrado un modelo valido, verifica el contenido del enlace e intentalo más tarde."
+
+    if not model_files.get('pth'):
+        return "No se encontrado un modelo valido, verifica el contenido del enlace e intentalo más tarde."
+
+    md5_hash = calculate_md5(os.path.join(unzips_folder,model_files['pth']))
+    zipfile = compress(modelname, list(model_files.values()))
+
+    a = get_info(model_files.get('pth'))
+    file_to_upload = open(zipfile, "rb")
+    info = a.get("info", "None"),
+    sr = a.get("sr", "None"),
+    f0 = a.get("f0", "None"),
+
+    data = {
+        "name": name,
+        "version": version,
+        "creator": creator,
+        "hash": md5_hash,
+        "info": info,
+        "sr": sr,
+        "f0": f0
+    }
+    print("Subiendo archivo...")
+    # Send the POST request
+    response = requests.post(api_url, files={"file": file_to_upload}, data=data)
+    result = response.json()
+
+    # Check the response
+    if response.status_code == 200:
+        result = response.json()
+        return json.dumps(result, indent=4)
+    else:
+        print("Error al cargar el archivo:", response.status_code)
+        return result
+
+
+def search_model(name):
+    web_service_url = "https://script.google.com/macros/s/AKfycbyRaNxtcuN8CxUrcA_nHW6Sq9G2QJor8Z2-BJUGnQ2F_CB8klF4kQL--U2r2MhLFZ5J/exec"
+    response = requests.post(web_service_url, json={
+        'type': 'search_by_filename',
+        'name': name
+    })
+    result = []
+    response.raise_for_status()  # Raise an exception on error
+    json_response = response.json()
+    cont = 0
+    result.append("""| Nombre del modelo | Url | Epoch | Sample Rate |
+| ---------------- | -------------- |:------:|:-----------:|
+""")
+    yield "<br />".join(result)
+    if json_response.get('ok', None):
+        for model in json_response['ocurrences']:
+            if cont < 20:
+                model_name = str(model.get('name', 'N/A')).strip()
+                model_url = model.get('url', 'N/A')
+                epoch = model.get('epoch', 'N/A')
+                sr = model.get('sr', 'N/A')
+                line = f"""|{model_name}|<a>{model_url}</a>|{epoch}|{sr}|
+"""
+                result.append(line)
+                yield "".join(result)
+            cont += 1
+
+def update_tts_methods_voice(select_value):
+    if select_value == "Edge-tts":
+        return gr.Dropdown.update(choices=EDGE_VOICES, visible=True, value="es-CO-GonzaloNeural-Male"), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=False)
+    elif select_value == "Bark-tts":
+        return gr.Dropdown.update(choices=BARK_VOICES, visible=True), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=False)
+    elif select_value == 'ElevenLabs':
+        return gr.Dropdown.update(choices=ELEVENLABS_VOICES_NAMES, visible=True, value="Bella"), gr.Markdown.update(visible=True), gr.Textbox.update(visible=True), gr.Radio.update(visible=False)
+    elif select_value == 'CoquiTTS':
+        return gr.Dropdown.update(visible=False), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=True)
requirements.txt CHANGED
@@ -172,4 +172,9 @@ validators
 #git+https://github.com/suno-ai/bark.git
 #tortoise-tts
 #git+https://github.com/neonbjb/tortoise-tts.git
-neon-tts-plugin-coqui
+neon-tts-plugin-coqui
+deepfilternet
+librosa
+matplotlib
+maturin
+#git+https://github.com/microsoft/DeepSpeed.git@v0.8.0
tts/conversion.py CHANGED
@@ -10,8 +10,16 @@ import asyncio
 from elevenlabs import voices, generate, save
 from elevenlabs.api.error import UnauthenticatedRateLimitError
 # Not working in windows
-from neon_tts_plugin_coqui import CoquiTTS
-import tempfile
+import platform
+
+COQUI_LANGUAGES = []
+if platform.system() != 'Windows':
+    from neon_tts_plugin_coqui import CoquiTTS
+
+    # CoquiTTS
+    COQUI_LANGUAGES = list(CoquiTTS.langs.keys())
+    coquiTTS = CoquiTTS()
+
 
 # Elevenlabs
 ELEVENLABS_VOICES_RAW = voices()
@@ -24,10 +32,6 @@ def get_elevenlabs_voice_names():
 
 ELEVENLABS_VOICES_NAMES = get_elevenlabs_voice_names()
 
-# CoquiTTS
-COQUI_LANGUAGES = list(CoquiTTS.langs.keys())
-coquiTTS = CoquiTTS()
-
 def tts_infer(tts_text, model_url, tts_method, tts_model, tts_api_key, language):
     if not tts_text:
         return 'Primero escribe el texto que quieres convertir.', None
@@ -68,6 +72,9 @@ def tts_infer(tts_text, model_url, tts_method, tts_model, tts_api_key, language)
     # api.TextToSpeech()
 
     if tts_method == "CoquiTTS":
+        if platform.system() == 'Windows':
+            return "Funcionalidad no disponible en windows", None
+
         print(tts_text, language)
         # return output
         coquiTTS.get_tts(tts_text, converted_tts_filename, speaker = {"language" : language})
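
The change above keys the optional CoquiTTS setup on the operating system, since the plugin does not work on Windows, and `tts_infer` then returns early on Windows before touching `coquiTTS`. An equivalent guard, shown here only as an illustrative alternative and not part of the commit, would fall back whenever the import fails for any reason and give callers an explicit flag to check:

    # Illustrative alternative (not in the commit): degrade gracefully on any import failure.
    COQUI_LANGUAGES = []
    coquiTTS = None
    try:
        from neon_tts_plugin_coqui import CoquiTTS
        COQUI_LANGUAGES = list(CoquiTTS.langs.keys())
        coquiTTS = CoquiTTS()
    except ImportError:
        pass  # coquiTTS stays None; callers check coqui_available() first

    def coqui_available() -> bool:
        return coquiTTS is not None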
tts/test.py ADDED
@@ -0,0 +1 @@
+from neon_tts_plugin_coqui import CoquiTTS