finetune_speaker.json (commit bdd6531)
{
"train": {
"log_interval": 10,
"eval_interval": 100,
"seed": 1234,
"epochs": 10000,
"learning_rate": 0.0002,
"betas": [
0.8,
0.99
],
"eps": 1e-09,
"batch_size": 16,
"fp16_run": true,
"lr_decay": 0.999875,
"segment_size": 8192,
"init_lr_ratio": 1,
"warmup_epochs": 0,
"c_mel": 45,
"c_kl": 1.0
},
"data": {
"training_files": "final_annotation_train.txt",
"validation_files": "final_annotation_val.txt",
"text_cleaners": [
"cjke_cleaners2"
],
"max_wav_value": 32768.0,
"sampling_rate": 22050,
"filter_length": 1024,
"hop_length": 256,
"win_length": 1024,
"n_mel_channels": 80,
"mel_fmin": 0.0,
"mel_fmax": null,
"add_blank": true,
"n_speakers": 9,
"cleaned_text": true
},
"model": {
"inter_channels": 192,
"hidden_channels": 192,
"filter_channels": 768,
"n_heads": 2,
"n_layers": 6,
"kernel_size": 3,
"p_dropout": 0.1,
"resblock": "1",
"resblock_kernel_sizes": [
3,
7,
11
],
"resblock_dilation_sizes": [
[
1,
3,
5
],
[
1,
3,
5
],
[
1,
3,
5
]
],
"upsample_rates": [
8,
8,
2,
2
],
"upsample_initial_channel": 512,
"upsample_kernel_sizes": [
16,
16,
4,
4
],
"n_layers_q": 3,
"use_spectral_norm": false,
"gin_channels": 256
},
"symbols": [
"_",
",",
".",
"!",
"?",
"-",
"~",
"\u2026",
"N",
"Q",
"a",
"b",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"\u0251",
"\u00e6",
"\u0283",
"\u0291",
"\u00e7",
"\u026f",
"\u026a",
"\u0254",
"\u025b",
"\u0279",
"\u00f0",
"\u0259",
"\u026b",
"\u0265",
"\u0278",
"\u028a",
"\u027e",
"\u0292",
"\u03b8",
"\u03b2",
"\u014b",
"\u0266",
"\u207c",
"\u02b0",
"`",
"^",
"#",
"*",
"=",
"\u02c8",
"\u02cc",
"\u2192",
"\u2193",
"\u2191",
" "
],
"speakers": {
"\u534e\u76db\u987f": 0,
"\u5e73\u6d77": 1,
"\u8d64\u57ce": 2,
"\u978d\u5c71": 3,
"\u957f\u6625": 4,
"\u80e1\u5fb7": 5,
"\u9038\u4ed9": 6,
"\u5217\u514b\u661f\u6566": 7,
"\u6d4e\u5357": 8
}
}
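
For reference, below is a minimal Python sketch of how a training or inference script might consume this configuration: it loads the JSON, reads a few hyperparameters, builds the symbol-to-id table from "symbols", intersperses the blank token when "add_blank" is true, and looks up a speaker index from "speakers". The file path, the helper name text_to_sequence, and the interleaving detail are assumptions based on common VITS-style setups, not something taken from this repository.

import json

# Load the fine-tuning configuration shown above (the path is an assumption;
# point it at wherever finetune_speaker.json actually lives).
with open("finetune_speaker.json", encoding="utf-8") as f:
    hps = json.load(f)

print(hps["data"]["sampling_rate"])   # 22050
print(hps["data"]["n_speakers"])      # 9
print(hps["train"]["batch_size"])     # 16

# Map each symbol to its integer id, in the order listed under "symbols".
symbol_to_id = {s: i for i, s in enumerate(hps["symbols"])}

def text_to_sequence(cleaned_text: str) -> list[int]:
    """Convert already-cleaned text into symbol ids.

    Because "add_blank" is true, a blank id (0, the "_" padding symbol)
    is interspersed between tokens here, mirroring what VITS-style
    training scripts typically do before feeding the text encoder.
    """
    ids = [symbol_to_id[ch] for ch in cleaned_text if ch in symbol_to_id]
    if hps["data"]["add_blank"]:
        interleaved = [0] * (2 * len(ids) + 1)
        interleaved[1::2] = ids
        ids = interleaved
    return ids

# Speaker names map to embedding indices; the keys are the original names.
speaker_id = hps["speakers"]["\u534e\u76db\u987f"]  # 0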