{
    "base_config": "config/vits.json",
    "dataset": [
        "hifitts",
    ],
    "model": {
        "filter_channels": 768,
        "gin_channels": 256,
        "hidden_channels": 192,
        "inter_channels": 192,
        "kernel_size": 3,
        "n_heads": 2,
        "n_layers": 6,
        "n_layers_q": 3,
        "n_speakers": 10,
        "p_dropout": 0.1,
        "resblock": "1",
        "resblock_dilation_sizes": [
            [
                1,
                3,
                5
            ],
            [
                1,
                3,
                5
            ],
            [
                1,
                3,
                5
            ]
        ],
        "resblock_kernel_sizes": [
            3,
            7,
            11
        ],
        "text_token_num": 512,
        "upsample_initial_channel": 512,
        "upsample_kernel_sizes": [
            16,
            16,
            4,
            4
        ],
        "upsample_rates": [
            8,
            8,
            2,
            2
        ],
        "use_sdp": true,
        "use_spectral_norm": false,
    },
    "model_type": "VITS",
    "preprocess": {
        "add_blank": true,
        "align_mel_duration": false,
        "audio_dir": "audios",
        "bits": 8,
        "contentvec_dir": "contentvec",
        "data_augment": false,
        "dur_dir": "durs",
        "duration_dir": "duration",
        "emo2id": "emo2id.json",
        "energy_dir": "energys",
        "energy_extract_mode": "from_mel",
        "energy_norm": false,
        "energy_remove_outlier": false,
        "extract_acoustic_token": false,
        "extract_amplitude_phase": false,
        "extract_audio": true,
        "extract_contentvec_feature": false,
        "extract_duration": false,
        "extract_energy": false,
        "extract_label": false,
        "extract_linear_spec": true,
        "extract_mcep": false,
        "extract_mel": true,
        "extract_mert_feature": false,
        "extract_phone": true,
        "extract_pitch": false,
        "extract_uv": false,
        "extract_wenet_feature": false,
        "extract_whisper_feature": false,
        "file_lst": "file.lst",
        "fmax": null,
        "fmin": 0,
        "hop_size": 256,
        "imaginary_dir": "imaginarys",
        "lab_dir": "labs",
        "label_dir": "labels",
        "language": "en-us",
        "lexicon_path": "./text/lexicon/librispeech-lexicon.txt",
        "linear_dir": "linears",
        "log_amplitude_dir": "log_amplitudes",
        "mcep_dir": "mcep",
        "mel_dir": "mels",
        "mel_extract_mode": "",
        "mel_min_max_norm": false,
        "min_level_db": -115,
        "n_fft": 1024,
        "n_mel": 80,
        "num_silent_frames": 8,
        "phase_dir": "phases",
        "phone_dir": "phones",
        "phone_energy_dir": "phone_energys",
        "phone_extractor": "espeak",
        "phone_pitch_dir": "phone_pitches",
        "phone_seq_file": "phone_seq_file",
        "pitch_dir": "pitches",
        "pitch_extractor": "parselmouth",
        "pitch_norm": false,
        "pitch_remove_outlier": false,
        "raw_data": "raw_data",
        "real_dir": "reals",
        "ref_level_db": 20,
        "sample_rate": 24000,
        "segment_size": 8192,
        "spk2id": "spk2id.json",
        "symbols_dict": "symbols.dict",
        "text_cleaners": [
            "english_cleaners",
        ],
        "train_file": "train.json",
        "trim_fft_size": 512,
        "trim_hop_size": 128,
        "trim_silence": false,
        "trim_top_db": 30,
        "trimmed_wav_dir": "trimmed_wavs",
        "use_amplitude_phase": false,
        "use_audio": true,
        "use_dur": false,
        "use_emoid": false,
        "use_frame_duration": false,
        "use_frame_energy": false,
        "use_frame_pitch": false,
        "use_lab": false,
        "use_label": false,
        "use_linear": true,
        "use_log_scale_energy": false,
        "use_log_scale_pitch": false,
        "use_mel": true,
        "use_min_max_norm_mel": false,
        "use_one_hot": false,
        "use_phn_seq": false,
        "use_phone": true,
        "use_phone_duration": false,
        "use_phone_energy": false,
        "use_phone_pitch": false,
        "use_spkid": true,
        "use_text": false,
        "use_uv": false,
        "use_wav": false,
        "use_wenet": false,
        "utt2emo": "utt2emo",
        "utt2spk": "utt2spk",
        "uv_dir": "uvs",
        "valid_file": "valid.json",
        "wav_dir": "wavs",
        "wenet_dir": "wenet",
        "win_size": 1024,
    },
    "supported_model_type": [
        "Fastspeech2",
        "VITS",
        "VALLE",
    ],
    "task_type": "tts",
    "train": {
        "AdamW": {
            "betas": [
                0.8,
                0.99
            ],
            "eps": 1e-09,
        },
        "adamw": {
            "lr": 0.0004,
        },
        "batch_size": 16,
        "betas": [
            0.8,
            0.99
        ],
        "c_kl": 1.0,
        "c_mel": 45,
        "dataloader": {
            "num_worker": 32,
            "pin_memory": true,
        },
        "ddp": true,
        "eps": 1e-09,
        "fp16_run": true,
        "gradient_accumulation_step": 1,
        "init_lr_ratio": 1,
        "keep_checkpoint_max": 5,
        "keep_last": [
            3,
            -1
        ],
        "learning_rate": 0.0002,
        "lr_decay": 0.999875,
        "max_epoch": -1,
        "max_steps": 1000000,
        "multi_speaker_training": true,
        "optimizer": "AdamW",
        "random_seed": 10086,
        "reducelronplateau": {
            "factor": 0.8,
            "min_lr": 0.0001,
            "patience": 10,
        },
        "run_eval": [
            false,
            true
        ],
        "sampler": {
            "drop_last": true,
            "holistic_shuffle": true,
        },
        "save_checkpoint_stride": [
            5,
            20
        ],
        "save_checkpoints_steps": 10000,
        "save_summary_steps": 500,
        "scheduler": "ReduceLROnPlateau",
        "total_training_steps": 50000,
        "tracker": [
            "tensorboard",
        ],
        "valid_interval": 10000,
        "warmup_epochs": 0,
    },
    "use_custom_dataset": false,
}