Update train_vits-2.py
Browse files — train_vits-2.py (+4 −4)
train_vits-2.py
CHANGED
@@ -56,8 +56,8 @@ def mozilla_with_speaker(root_path, meta_file, **kwargs):
|
|
56 |
|
57 |
|
58 |
dataset_config = BaseDatasetConfig(
|
59 |
-
|
60 |
-
formatter="mozilla",
|
61 |
dataset_name="multi_persian",
|
62 |
meta_file_train="metadata.csv",
|
63 |
language="fa",
|
@@ -143,7 +143,7 @@ config = VitsConfig(
|
|
143 |
["یکی اسبی به عاریت خواست","changiz",None,"fa"],
|
144 |
],
|
145 |
output_path=output_path,
|
146 |
-
datasets=[
|
147 |
|
148 |
# Enable the weighted sampler
|
149 |
use_weighted_sampler=True,
|
@@ -175,7 +175,7 @@ tokenizer, config = TTSTokenizer.init_from_config(config)
|
|
175 |
# Load all the datasets samples and split traning and evaluation sets
|
176 |
train_samples, eval_samples = load_tts_samples(
|
177 |
config.datasets,
|
178 |
-
formatter=mozilla_with_speaker,
|
179 |
eval_split=True,
|
180 |
eval_split_max_size=config.eval_split_max_size,
|
181 |
eval_split_size=config.eval_split_size,
|
|
|
56 |
|
57 |
|
58 |
dataset_config = BaseDatasetConfig(
|
59 |
+
formatter="mozilla_with_speaker",
|
60 |
+
# formatter="mozilla",
|
61 |
dataset_name="multi_persian",
|
62 |
meta_file_train="metadata.csv",
|
63 |
language="fa",
|
|
|
143 |
["یکی اسبی به عاریت خواست","changiz",None,"fa"],
|
144 |
],
|
145 |
output_path=output_path,
|
146 |
+
datasets=[dataset_config],
|
147 |
|
148 |
# Enable the weighted sampler
|
149 |
use_weighted_sampler=True,
|
|
|
175 |
# Load all the datasets samples and split traning and evaluation sets
|
176 |
train_samples, eval_samples = load_tts_samples(
|
177 |
config.datasets,
|
178 |
+
# formatter=mozilla_with_speaker,
|
179 |
eval_split=True,
|
180 |
eval_split_max_size=config.eval_split_max_size,
|
181 |
eval_split_size=config.eval_split_size,
|