litagin committed
Commit: 3a5c923
Parent: b8e41aa

Upload 12 files

jvnv-F1-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": true,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "Data/jvnv-F1-jp/train.list",
+     "validation_files": "Data/jvnv-F1-jp/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-F1-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-F1-jp"
+ }
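
Note: a minimal sketch of inspecting such a config with plain Python, assuming the file has been downloaded to the local path shown above (the path and printed keys are taken from the config contents, nothing else is implied):

import json

# Hypothetical local copy of the config shown above
with open("jvnv-F1-jp/config.json", encoding="utf-8") as f:
    config = json.load(f)

# The "data" section carries the speaker and style mappings
style2id = config["data"]["style2id"]
print(config["model_name"])             # "jvnv-F1-jp"
print(config["data"]["sampling_rate"])  # 44100
print(style2id["Happy"])                # 4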
jvnv-F1-jp/jvnv-F1-jp_e160_s14000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a90fa6c9444d9235c9ec4db99daf7c5c6a21cc26ca141b4c48455d66a3257d01
+ size 251150980
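
Note: the .safetensors entry above is a Git LFS pointer, not the weights themselves; the real file is fetched by LFS (e.g. via `git lfs pull`). A hedged sketch of checking a fetched file against the pointer's oid and size, using only the values shown above:

import hashlib
from pathlib import Path

# Hypothetical path after LFS has replaced the pointer with the real weights
path = Path("jvnv-F1-jp/jvnv-F1-jp_e160_s14000.safetensors")

expected_oid = "a90fa6c9444d9235c9ec4db99daf7c5c6a21cc26ca141b4c48455d66a3257d01"
expected_size = 251150980

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"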
jvnv-F1-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f959bb45ed0922efc31ff24e9147253814f42cb1d2d1e2bb10391a9df368489
+ size 7296
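
Note: style_vectors.npy appears to hold one vector per style (num_styles is 7 in config.json). Assuming float32 vectors of dimension 256, the 7,296-byte size is consistent with a (7, 256) array plus the standard 128-byte .npy header; a minimal loading sketch under that assumption:

import numpy as np

# Hypothetical local copy of the style vector file shown above
vectors = np.load("jvnv-F1-jp/style_vectors.npy")

# Expected to match (num_styles, vector_dim), e.g. (7, 256) float32
print(vectors.shape, vectors.dtype)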
jvnv-F2-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": false,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "/content/drive/MyDrive/Style-Bert-VITS2/Data/jvnv-F2/train.list",
+     "validation_files": "/content/drive/MyDrive/Style-Bert-VITS2/Data/jvnv-F2/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-F2-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-F2-jp"
+ }
jvnv-F2-jp/jvnv-F2_e166_s20000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6289a6f30bb9795744815b9da764a3c8198b18652d9fddef82fff1e14f0e784
+ size 251150980
jvnv-F2-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:900f8cde3a336d12193fec7b7d8e6c5dc77b3a5d719a9be3f8598389cd88e643
+ size 7296
jvnv-M1-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": true,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "Data/jvnv-M1-jp/train.list",
+     "validation_files": "Data/jvnv-M1-jp/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-M1-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-M1-jp"
+ }
jvnv-M1-jp/jvnv-M1-jp_e158_s14000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d86765f1fe08dbba74cd06283e96b6941b3f232329fabbba9c30e6edc27887a
+ size 251150980
jvnv-M1-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a925435e8c1c9efc8fc8e90e690655ab9a7bae00a790892e13e936510d04f05
+ size 7296
jvnv-M2-jp/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 42,
+     "epochs": 300,
+     "learning_rate": 0.0001,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "bf16_run": true,
+     "fp16_run": false,
+     "lr_decay": 0.99996,
+     "segment_size": 16384,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "c_commit": 100,
+     "skip_optimizer": true,
+     "freeze_ZH_bert": false,
+     "freeze_JP_bert": false,
+     "freeze_EN_bert": false,
+     "freeze_emo": false,
+     "freeze_style": false
+   },
+   "data": {
+     "use_jp_extra": true,
+     "training_files": "Data/jvnv-M2-jp/train.list",
+     "validation_files": "Data/jvnv-M2-jp/val.list",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 1,
+     "cleaned_text": true,
+     "spk2id": {
+       "jvnv-M2-jp": 0
+     },
+     "num_styles": 7,
+     "style2id": {
+       "Neutral": 0,
+       "Angry": 1,
+       "Disgust": 2,
+       "Fear": 3,
+       "Happy": 4,
+       "Sad": 5,
+       "Surprise": 6
+     }
+   },
+   "model": {
+     "use_spk_conditioned_encoder": true,
+     "use_noise_scaled_mas": true,
+     "use_mel_posterior_encoder": false,
+     "use_duration_discriminator": false,
+     "use_wavlm_discriminator": true,
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [
+       [1, 3, 5],
+       [1, 3, 5],
+       [1, 3, 5]
+     ],
+     "upsample_rates": [8, 8, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "slm": {
+       "model": "./slm/wavlm-base-plus",
+       "sr": 16000,
+       "hidden": 768,
+       "nlayers": 13,
+       "initial_channel": 64
+     }
+   },
+   "version": "2.0-JP-Extra",
+   "model_name": "jvnv-M2-jp"
+ }
jvnv-M2-jp/jvnv-M2-jp_e159_s17000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8245f39438076d36a3befd8aefb15c38830cef326c1f7c9d9c8e64b647645402
+ size 251150980
jvnv-M2-jp/style_vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c965bb63fa4a759d41a8a4a3649333125d6497ae8a705d81b7d5c5bd2854797c
+ size 7296