jvision committed on
Commit
3b8d67a
·
1 Parent(s): aeadb49

first commit

Browse files
.gitattributes CHANGED
@@ -32,3 +32,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ checkpoint_40000.pth filter=lfs diff=lfs merge=lfs -text
36
+ supplemental/speakers-combined.pth filter=lfs diff=lfs merge=lfs -text
37
+ supplemental/speakers-dataset.pth filter=lfs diff=lfs merge=lfs -text
38
+ supplemental/model_se.pth.tar filter=lfs diff=lfs merge=lfs -text
checkpoint_40000.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:200b870bd53bf698babc890d0b47a4cf3c99246f58bc3825ffa6a9b4cb6353b1
3
+ size 1043207501
config.json ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "logger_uri": null,
3
+ "project_name": "VCTK_Canadian_English",
4
+ "run_description": "",
5
+ "print_step": 50,
6
+ "plot_step": 100,
7
+ "model_param_stats": false,
8
+ "wandb_entity": null,
9
+ "dashboard_logger": "tensorboard",
10
+ "log_model_step": 1000,
11
+ "save_step": 5000,
12
+ "save_n_checkpoints": 2,
13
+ "save_checkpoints": true,
14
+ "save_all_best": false,
15
+ "save_best_after": 10000,
16
+ "target_loss": "loss_1",
17
+ "print_eval": false,
18
+ "test_delay_epochs": 0,
19
+ "run_eval": true,
20
+ "run_eval_steps": null,
21
+ "distributed_backend": "nccl",
22
+ "distributed_url": "tcp://localhost:54321",
23
+ "mixed_precision": true,
24
+ "epochs": 1000,
25
+ "batch_size": 16,
26
+ "eval_batch_size": 16,
27
+ "grad_clip": [
28
+ 1000.0,
29
+ 1000.0
30
+ ],
31
+ "scheduler_after_epoch": true,
32
+ "lr": 0.001,
33
+ "optimizer": "AdamW",
34
+ "optimizer_params": {
35
+ "betas": [
36
+ 0.8,
37
+ 0.99
38
+ ],
39
+ "eps": 1e-09,
40
+ "weight_decay": 0.01
41
+ },
42
+ "lr_scheduler": null,
43
+ "lr_scheduler_params": {},
44
+ "use_grad_scaler": false,
45
+ "cudnn_enable": true,
46
+ "cudnn_deterministic": false,
47
+ "cudnn_benchmark": false,
48
+ "training_seed": 54321,
49
+ "model": "vits",
50
+ "num_loader_workers": 8,
51
+ "num_eval_loader_workers": 0,
52
+ "use_noise_augment": false,
53
+ "audio": {
54
+ "fft_size": 1024,
55
+ "sample_rate": 16000,
56
+ "win_length": 1024,
57
+ "hop_length": 256,
58
+ "num_mels": 80,
59
+ "mel_fmin": 0,
60
+ "mel_fmax": null
61
+ },
62
+ "use_phonemes": false,
63
+ "phonemizer": "espeak",
64
+ "phoneme_language": "en-us",
65
+ "compute_input_seq_cache": true,
66
+ "text_cleaner": "multilingual_cleaners",
67
+ "enable_eos_bos_chars": false,
68
+ "test_sentences_file": "",
69
+ "characters": {
70
+ "characters_class": "TTS.tts.models.vits.VitsCharacters",
71
+ "vocab_dict": null,
72
+ "pad": "_",
73
+ "eos": "&",
74
+ "bos": "*",
75
+ "blank": null,
76
+ "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ",
77
+ "punctuations": "!'(),-.:;? ",
78
+ "phonemes": "iy\u0268\u0289\u026fu\u026a\u028f\u028ae\u00f8\u0258\u0259\u0275\u0264o\u025b\u0153\u025c\u025e\u028c\u0254\u00e6\u0250a\u0276\u0251\u0252\u1d7b\u0298\u0253\u01c0\u0257\u01c3\u0284\u01c2\u0260\u01c1\u029bpbtd\u0288\u0256c\u025fk\u0261q\u0262\u0294\u0274\u014b\u0272\u0273n\u0271m\u0299r\u0280\u2c71\u027e\u027d\u0278\u03b2fv\u03b8\u00f0sz\u0283\u0292\u0282\u0290\u00e7\u029dx\u0263\u03c7\u0281\u0127\u0295h\u0266\u026c\u026e\u028b\u0279\u027bj\u0270l\u026d\u028e\u029f\u02c8\u02cc\u02d0\u02d1\u028dw\u0265\u029c\u02a2\u02a1\u0255\u0291\u027a\u0267\u025a\u02de\u026b'\u0303' ",
79
+ "is_unique": true,
80
+ "is_sorted": true
81
+ },
82
+ "add_blank": true,
83
+ "batch_group_size": 48,
84
+ "loss_masking": null,
85
+ "min_audio_len": 1,
86
+ "max_audio_len": 160000,
87
+ "min_text_len": 1,
88
+ "max_text_len": Infinity,
89
+ "compute_f0": false,
90
+ "compute_energy": false,
91
+ "compute_linear_spec": true,
92
+ "precompute_num_workers": 12,
93
+ "start_by_longest": true,
94
+ "shuffle": false,
95
+ "drop_last": false,
96
+ "datasets": [
97
+ {
98
+ "formatter": "vctk",
99
+ "language": "en"
100
+ }
101
+ ],
102
+ "eval_split_max_size": 256,
103
+ "eval_split_size": 0.01,
104
+ "use_speaker_weighted_sampler": false,
105
+ "speaker_weighted_sampler_alpha": 1.0,
106
+ "use_language_weighted_sampler": false,
107
+ "language_weighted_sampler_alpha": 1.0,
108
+ "use_length_weighted_sampler": false,
109
+ "length_weighted_sampler_alpha": 1.0,
110
+ "model_args": {
111
+ "num_chars": 284,
112
+ "out_channels": 513,
113
+ "spec_segment_size": 32,
114
+ "hidden_channels": 192,
115
+ "hidden_channels_ffn_text_encoder": 768,
116
+ "num_heads_text_encoder": 2,
117
+ "num_layers_text_encoder": 10,
118
+ "kernel_size_text_encoder": 3,
119
+ "dropout_p_text_encoder": 0.1,
120
+ "dropout_p_duration_predictor": 0.5,
121
+ "kernel_size_posterior_encoder": 5,
122
+ "dilation_rate_posterior_encoder": 1,
123
+ "num_layers_posterior_encoder": 16,
124
+ "kernel_size_flow": 5,
125
+ "dilation_rate_flow": 1,
126
+ "num_layers_flow": 4,
127
+ "resblock_type_decoder": "2",
128
+ "resblock_kernel_sizes_decoder": [
129
+ 3,
130
+ 7,
131
+ 11
132
+ ],
133
+ "resblock_dilation_sizes_decoder": [
134
+ [
135
+ 1,
136
+ 3,
137
+ 5
138
+ ],
139
+ [
140
+ 1,
141
+ 3,
142
+ 5
143
+ ],
144
+ [
145
+ 1,
146
+ 3,
147
+ 5
148
+ ]
149
+ ],
150
+ "upsample_rates_decoder": [
151
+ 8,
152
+ 8,
153
+ 2,
154
+ 2
155
+ ],
156
+ "upsample_initial_channel_decoder": 512,
157
+ "upsample_kernel_sizes_decoder": [
158
+ 16,
159
+ 16,
160
+ 4,
161
+ 4
162
+ ],
163
+ "periods_multi_period_discriminator": [
164
+ 2,
165
+ 3,
166
+ 5,
167
+ 7,
168
+ 11
169
+ ],
170
+ "use_sdp": true,
171
+ "noise_scale": 1.0,
172
+ "inference_noise_scale": 0.667,
173
+ "length_scale": 1.0,
174
+ "noise_scale_dp": 1.0,
175
+ "inference_noise_scale_dp": 1.0,
176
+ "max_inference_len": null,
177
+ "init_discriminator": true,
178
+ "use_spectral_norm_disriminator": false,
179
+ "use_speaker_embedding": false,
180
+ "num_speakers": 14,
181
+ "speakers_file": "supplemental/speakers.pth",
182
+ "d_vector_file": [
183
+ "supplemental/speakers-base.json",
184
+ "supplemental/speakers-dataset.pth"
185
+ ],
186
+ "speaker_embedding_channels": 256,
187
+ "use_d_vector_file": true,
188
+ "d_vector_dim": 512,
189
+ "detach_dp_input": true,
190
+ "use_language_embedding": true,
191
+ "embedded_language_dim": 4,
192
+ "num_languages": 3,
193
+ "language_ids_file": "supplemental/language_ids.json",
194
+ "use_speaker_encoder_as_loss": true,
195
+ "speaker_encoder_config_path": "supplemental/config_se.json",
196
+ "speaker_encoder_model_path": "supplemental/model_se.pth.tar",
197
+ "condition_dp_on_speaker": true,
198
+ "freeze_encoder": false,
199
+ "freeze_DP": false,
200
+ "freeze_PE": false,
201
+ "freeze_flow_decoder": false,
202
+ "freeze_waveform_decoder": false,
203
+ "encoder_sample_rate": null,
204
+ "interpolate_z": true,
205
+ "reinit_DP": false,
206
+ "reinit_text_encoder": false
207
+ },
208
+ "lr_gen": 0.0002,
209
+ "lr_disc": 0.0002,
210
+ "lr_scheduler_gen": "ExponentialLR",
211
+ "lr_scheduler_gen_params": {
212
+ "gamma": 0.999875,
213
+ "last_epoch": -1
214
+ },
215
+ "lr_scheduler_disc": "ExponentialLR",
216
+ "lr_scheduler_disc_params": {
217
+ "gamma": 0.999875,
218
+ "last_epoch": -1
219
+ },
220
+ "kl_loss_alpha": 1.0,
221
+ "disc_loss_alpha": 1.0,
222
+ "gen_loss_alpha": 1.0,
223
+ "feat_loss_alpha": 1.0,
224
+ "mel_loss_alpha": 45.0,
225
+ "dur_loss_alpha": 1.0,
226
+ "speaker_encoder_loss_alpha": 9.0,
227
+ "return_wav": true,
228
+ "use_weighted_sampler": false,
229
+ "weighted_sampler_attrs": {},
230
+ "weighted_sampler_multipliers": {},
231
+ "r": 1,
232
+ "num_speakers": 14,
233
+ "use_speaker_embedding": false,
234
+ "speakers_file": "supplemental/speakers.pth",
235
+ "speaker_embedding_channels": 256,
236
+ "language_ids_file": "supplemental/language_ids.json",
237
+ "use_language_embedding": true,
238
+ "use_d_vector_file": true,
239
+ "d_vector_file": [
240
+ "supplemental/speakers-base.json",
241
+ "supplemental/speakers-dataset.pth"
242
+ ],
243
+ "d_vector_dim": 512
244
+ }
prepare_model.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import ast
import json
import os
import subprocess

# Speaker metadata for the Canadian-English VCTK subset this model was
# fine-tuned on. Keys match the model's speaker indices (VCTK_<id>).
SPEAKER_DATA = {
    "VCTK_p303": {"age": 24, "gender": "F", "accents": "Canadian", "region": "Toronto", "comments": ""},
    "VCTK_p312": {"age": 19, "gender": "F", "accents": "Canadian", "region": "Hamilton", "comments": ""},
    "VCTK_p317": {"age": 23, "gender": "F", "accents": "Canadian", "region": "Hamilton", "comments": ""},
    "VCTK_p343": {"age": 27, "gender": "F", "accents": "Canadian", "region": "Alberta", "comments": ""},
    "VCTK_p307": {"age": 23, "gender": "F", "accents": "Canadian", "region": "Ontario", "comments": ""},
    "VCTK_p316": {"age": 20, "gender": "M", "accents": "Canadian", "region": "Alberta", "comments": ""},
    "VCTK_p363": {"age": 22, "gender": "M", "accents": "Canadian", "region": "Toronto", "comments": ""},
    "VCTK_p302": {"age": 20, "gender": "M", "accents": "Canadian", "region": "Montreal", "comments": ""},
}

MODEL_PATH = "checkpoint_40000.pth"
CONFIG_PATH = "config.json"


def main():
    """Dump speaker metadata to speakers-log.json, then synthesize one
    demo sample per speaker the model knows about into samples/."""
    # Persist the metadata so downstream tools (and humans) can inspect it.
    with open("speakers-log.json", "w") as fh:
        json.dump(SPEAKER_DATA, fh, indent=2)

    # Ask the TTS CLI for the model's speaker indices. The grep strips the
    # progress/log lines so only the printed mapping remains. Raw string
    # avoids the invalid "\s" escape warning in a normal Python string.
    command = (
        f"tts --model_path {MODEL_PATH} --config_path {CONFIG_PATH} "
        r"--list_speaker_idxs | grep -vE '^(\s*\||\s*>|\s*$)'"
    )
    output = subprocess.check_output(command, shell=True, text=True)

    # The CLI prints a Python dict repr (single quotes), not JSON.
    # ast.literal_eval parses it safely — unlike eval(), it cannot
    # execute arbitrary code embedded in the subprocess output.
    speaker_indices = ast.literal_eval(output)

    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs("samples", exist_ok=True)

    for speaker_id in speaker_indices:
        info = SPEAKER_DATA.get(speaker_id)
        if info is None:
            # Model speaker with no metadata on record — nothing to say.
            continue

        text = (
            f"Hello, I am from {info['region']}. "
            "I hope that you will select my voice for your project. Thank you."
        )
        out_path = f"samples/{speaker_id}.wav"

        # Build the synthesis command as an argument list: no shell, so no
        # quoting bugs or injection via speaker ids / text.
        tts_command = [
            "tts",
            "--text", text,
            "--model_path", MODEL_PATH,
            "--language_idx", "en",
            "--config_path", CONFIG_PATH,
            "--speaker_idx", speaker_id,
            "--out_path", out_path,
        ]
        subprocess.run(tts_command, check=True)


if __name__ == "__main__":
    main()
samples/VCTK_p302.wav ADDED
Binary file (182 kB). View file
 
samples/VCTK_p303.wav ADDED
Binary file (220 kB). View file
 
samples/VCTK_p307.wav ADDED
Binary file (205 kB). View file
 
samples/VCTK_p312.wav ADDED
Binary file (204 kB). View file
 
samples/VCTK_p316.wav ADDED
Binary file (200 kB). View file
 
samples/VCTK_p317.wav ADDED
Binary file (210 kB). View file
 
samples/VCTK_p343.wav ADDED
Binary file (221 kB). View file
 
samples/VCTK_p363.wav ADDED
Binary file (190 kB). View file
 
speakers-log.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "VCTK_p303": {
3
+ "age": 24,
4
+ "gender": "F",
5
+ "accents": "Canadian",
6
+ "region": "Toronto",
7
+ "comments": ""
8
+ },
9
+ "VCTK_p312": {
10
+ "age": 19,
11
+ "gender": "F",
12
+ "accents": "Canadian",
13
+ "region": "Hamilton",
14
+ "comments": ""
15
+ },
16
+ "VCTK_p317": {
17
+ "age": 23,
18
+ "gender": "F",
19
+ "accents": "Canadian",
20
+ "region": "Hamilton",
21
+ "comments": ""
22
+ },
23
+ "VCTK_p343": {
24
+ "age": 27,
25
+ "gender": "F",
26
+ "accents": "Canadian",
27
+ "region": "Alberta",
28
+ "comments": ""
29
+ },
30
+ "VCTK_p307": {
31
+ "age": 23,
32
+ "gender": "F",
33
+ "accents": "Canadian",
34
+ "region": "Ontario",
35
+ "comments": ""
36
+ },
37
+ "VCTK_p316": {
38
+ "age": 20,
39
+ "gender": "M",
40
+ "accents": "Canadian",
41
+ "region": "Alberta",
42
+ "comments": ""
43
+ },
44
+ "VCTK_p363": {
45
+ "age": 22,
46
+ "gender": "M",
47
+ "accents": "Canadian",
48
+ "region": "Toronto",
49
+ "comments": ""
50
+ },
51
+ "VCTK_p302": {
52
+ "age": 20,
53
+ "gender": "M",
54
+ "accents": "Canadian",
55
+ "region": "Montreal",
56
+ "comments": ""
57
+ }
58
+ }
supplemental/config_se.json ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": "speaker_encoder",
3
+ "run_name": "speaker_encoder",
4
+ "run_description": "resnet speaker encoder trained with commonvoice all languages dev and train, Voxceleb 1 dev and Voxceleb 2 dev",
5
+ "epochs": 100000,
6
+ "batch_size": null,
7
+ "eval_batch_size": null,
8
+ "mixed_precision": false,
9
+ "run_eval": true,
10
+ "test_delay_epochs": 0,
11
+ "print_eval": false,
12
+ "print_step": 50,
13
+ "tb_plot_step": 100,
14
+ "tb_model_param_stats": false,
15
+ "save_step": 1000,
16
+ "checkpoint": true,
17
+ "keep_all_best": false,
18
+ "keep_after": 10000,
19
+ "num_loader_workers": 8,
20
+ "num_val_loader_workers": 0,
21
+ "use_noise_augment": false,
22
+ "output_path": "../checkpoints/speaker_encoder/language_balanced/normalized/angleproto-4-samples-by-speakers/",
23
+ "distributed_backend": "nccl",
24
+ "distributed_url": "tcp://localhost:54321",
25
+ "audio": {
26
+ "fft_size": 512,
27
+ "win_length": 400,
28
+ "hop_length": 160,
29
+ "frame_shift_ms": null,
30
+ "frame_length_ms": null,
31
+ "stft_pad_mode": "reflect",
32
+ "sample_rate": 16000,
33
+ "resample": false,
34
+ "preemphasis": 0.97,
35
+ "ref_level_db": 20,
36
+ "do_sound_norm": false,
37
+ "do_trim_silence": false,
38
+ "trim_db": 60,
39
+ "power": 1.5,
40
+ "griffin_lim_iters": 60,
41
+ "num_mels": 64,
42
+ "mel_fmin": 0.0,
43
+ "mel_fmax": 8000.0,
44
+ "spec_gain": 20,
45
+ "signal_norm": false,
46
+ "min_level_db": -100,
47
+ "symmetric_norm": false,
48
+ "max_norm": 4.0,
49
+ "clip_norm": false,
50
+ "stats_path": null,
51
+ "do_rms_norm": true,
52
+ "db_level": -27.0
53
+ },
54
+ "datasets": [
55
+ {
56
+ "name": "voxceleb2",
57
+ "path": "/workspace/scratch/ecasanova/datasets/VoxCeleb/vox2_dev_aac/",
58
+ "meta_file_train": null,
59
+ "ununsed_speakers": null,
60
+ "meta_file_val": null,
61
+ "meta_file_attn_mask": "",
62
+ "language": "voxceleb"
63
+ }
64
+ ],
65
+ "model_params": {
66
+ "model_name": "resnet",
67
+ "input_dim": 64,
68
+ "use_torch_spec": true,
69
+ "log_input": true,
70
+ "proj_dim": 512
71
+ },
72
+ "audio_augmentation": {
73
+ "p": 0.5,
74
+ "rir": {
75
+ "rir_path": "/workspace/store/ecasanova/ComParE/RIRS_NOISES/simulated_rirs/",
76
+ "conv_mode": "full"
77
+ },
78
+ "additive": {
79
+ "sounds_path": "/workspace/store/ecasanova/ComParE/musan/",
80
+ "speech": {
81
+ "min_snr_in_db": 13,
82
+ "max_snr_in_db": 20,
83
+ "min_num_noises": 1,
84
+ "max_num_noises": 1
85
+ },
86
+ "noise": {
87
+ "min_snr_in_db": 0,
88
+ "max_snr_in_db": 15,
89
+ "min_num_noises": 1,
90
+ "max_num_noises": 1
91
+ },
92
+ "music": {
93
+ "min_snr_in_db": 5,
94
+ "max_snr_in_db": 15,
95
+ "min_num_noises": 1,
96
+ "max_num_noises": 1
97
+ }
98
+ },
99
+ "gaussian": {
100
+ "p": 0.0,
101
+ "min_amplitude": 0.0,
102
+ "max_amplitude": 1e-05
103
+ }
104
+ },
105
+ "storage": {
106
+ "sample_from_storage_p": 0.5,
107
+ "storage_size": 40
108
+ },
109
+ "max_train_step": 1000000,
110
+ "loss": "angleproto",
111
+ "grad_clip": 3.0,
112
+ "lr": 0.0001,
113
+ "lr_decay": false,
114
+ "warmup_steps": 4000,
115
+ "wd": 1e-06,
116
+ "steps_plot_stats": 100,
117
+ "num_speakers_in_batch": 100,
118
+ "num_utters_per_speaker": 4,
119
+ "skip_speakers": true,
120
+ "voice_len": 2.0
121
+ }
supplemental/language_ids.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "en": 0,
3
+ "fr-fr": 1,
4
+ "pt-br": 2
5
+ }
supplemental/model_se.pth.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f96efb20cbeeefd81fd8336d7f0155bf8902f82f9474e58ccb19d9e12345172
3
+ size 44610930
supplemental/speaker_ids.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "female-en-5": 0,
3
+ "female-en-5\n": 1,
4
+ "female-pt-4\n": 2,
5
+ "male-en-2": 3,
6
+ "male-en-2\n": 4,
7
+ "male-pt-3\n": 5,
8
+ "VCTK_p302": 6,
9
+ "VCTK_p303": 7,
10
+ "VCTK_p307": 8,
11
+ "VCTK_p312": 9,
12
+ "VCTK_p316": 10,
13
+ "VCTK_p317": 11,
14
+ "VCTK_p343": 12,
15
+ "VCTK_p363": 13
16
+ }
supplemental/speakers-base.json ADDED
The diff for this file is too large to render. See raw diff
 
supplemental/speakers-combined.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:becb5d22b96aeb6b93afa098f8e07af3ddefdd13279ee43ee6353171d4c9f303
3
+ size 14983919
supplemental/speakers-dataset.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb0ab1326f8373e9e9457f3be10b0181317ab6bf4ee15c9e79f46af737ab11fe
3
+ size 14803055