Alp committed
Commit · 8548594
1 Parent(s): 9502afd
d vectors fix intent
app.py CHANGED
@@ -48,6 +48,8 @@ def load_model(language):
     model_repo = MODELS[language]["model_repo"]
     model_name = MODELS[language]["model_name"]
     config_name = MODELS[language]["config_name"]
+
+
     try:
         from huggingface_hub import hf_hub_download
         import json
@@ -67,61 +69,38 @@ def load_model(language):
         speakers_file = hf_hub_download(repo_id=model_repo, filename=speakers_filename)
         language_ids_file = hf_hub_download(repo_id=model_repo, filename=language_ids_filename)
 
-        #
-        # Based on the error, the speakers.pth format is incompatible with d_vector loading
-        if "d_vector_file" in config:
-            del config["d_vector_file"]
-        if "model_args" in config and "d_vector_file" in config["model_args"]:
-            del config["model_args"]["d_vector_file"]
-
-        # Update the remaining paths to point to the downloaded files
+        # Update the config paths to point to the downloaded files
         config["speakers_file"] = speakers_file
         config["language_ids_file"] = language_ids_file
         config["model_args"]["speakers_file"] = speakers_file
         config["model_args"]["language_ids_file"] = language_ids_file
-
-        # Set use_d_vector_file to False to avoid d_vector loading issues
-        config["use_d_vector_file"] = False
-        config["model_args"]["use_d_vector_file"] = False
-
-        # Ensure speaker embedding is enabled the correct way
-        config["use_speaker_embedding"] = True
-        config["model_args"]["use_speaker_embedding"] = True
-
-        # Remove speaker encoder paths that might cause issues
-        if "model_args" in config:
-            if "speaker_encoder_config_path" in config["model_args"]:
-                config["model_args"]["speaker_encoder_config_path"] = ""
-            if "speaker_encoder_model_path" in config["model_args"]:
-                config["model_args"]["speaker_encoder_model_path"] = ""
+        config["model_args"]["d_vector_file"] = [language_ids_file]
 
         # Save the updated config to a temporary file
         import tempfile
         temp_config = tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False)
         json.dump(config, temp_config, indent=2)
         temp_config.close()
-
+
         print(f"Loading {language} model with config:")
         print(f"- use_speaker_embedding: {config.get('use_speaker_embedding')}")
         print(f"- use_d_vector_file: {config.get('use_d_vector_file')}")
+        print(f"- d_vector_file: {config.get('d_vector_file')}")
         print(f"- speakers_file: {config.get('speakers_file')}")
-
+
         # Load TTS model with specific model and config paths
         loaded_models[language] = TTS(model_path=model_path,
                                       config_path=temp_config.name,
                                       gpu=torch.cuda.is_available())
 
-        # Clean up temporary config file
-        os.unlink(temp_config.name)
-
     except Exception as e:
         print(f"Error loading {language} model: {e}")
         import traceback
         traceback.print_exc()
         return None
-
+
     return loaded_models[language]
-
+
 def update_speakers(language):
     """Update speaker dropdown based on selected language"""
     if language in MODELS:
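
For reference, a self-contained sketch of the load path this diff converges on, assuming the Coqui TTS.api.TTS constructor already used in app.py. The repository id and the speakers/language-ids filenames below are hypothetical placeholders, since the real MODELS entries and the filename variables are defined outside these hunks.

# Sketch of the post-commit load path (assumptions noted inline).
import json
import tempfile

import torch
from huggingface_hub import hf_hub_download
from TTS.api import TTS

# Hypothetical MODELS entry; the real repo id and filenames live elsewhere in app.py.
entry = {
    "model_repo": "user/multilingual-vits",   # placeholder repo id
    "model_name": "best_model.pth",           # placeholder checkpoint name
    "config_name": "config.json",             # placeholder config name
}

model_path = hf_hub_download(repo_id=entry["model_repo"], filename=entry["model_name"])
config_path = hf_hub_download(repo_id=entry["model_repo"], filename=entry["config_name"])
speakers_file = hf_hub_download(repo_id=entry["model_repo"], filename="speakers.pth")           # assumed filename
language_ids_file = hf_hub_download(repo_id=entry["model_repo"], filename="language_ids.json")  # assumed filename

with open(config_path) as f:
    config = json.load(f)

# Point the config at the locally downloaded auxiliary files, as the commit does,
# and hand the language-ids file to model_args.d_vector_file as well.
config["speakers_file"] = speakers_file
config["language_ids_file"] = language_ids_file
config["model_args"]["speakers_file"] = speakers_file
config["model_args"]["language_ids_file"] = language_ids_file
config["model_args"]["d_vector_file"] = [language_ids_file]

# Write the patched config to a temporary JSON file and load the model from it.
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as temp_config:
    json.dump(config, temp_config, indent=2)

tts = TTS(model_path=model_path,
          config_path=temp_config.name,
          gpu=torch.cuda.is_available())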