Mahiruoshi committed on
Commit
5422b18
1 Parent(s): 9bb2f48

Upload 343 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitignore +185 -0
  2. .gitmodules +0 -0
  3. .pre-commit-config.yaml +25 -0
  4. Data/BangDream/config.json +187 -0
  5. Data/BangDream/configs/config.json +187 -0
  6. Data/BangDream/filelists/bushroid.list +0 -0
  7. Data/BangDream/filelists/bushroid.list.cleaned +0 -0
  8. Data/BangDream/filelists/train.list +0 -0
  9. Data/BangDream/filelists/val.list +8 -0
  10. Data/BangDream/models/G_10000.pth +3 -0
  11. Data/BangDream/models/G_12000.pth +3 -0
  12. app.py +90 -383
  13. attentions_onnx.py +378 -0
  14. bert/bert-large-japanese-v2/.gitattributes +34 -0
  15. bert/bert-large-japanese-v2/README.md +53 -0
  16. bert/bert-large-japanese-v2/config.json +19 -0
  17. bert/bert-large-japanese-v2/tokenizer_config.json +10 -0
  18. bert/bert-large-japanese-v2/vocab.txt +0 -0
  19. bert/bert_models.json +14 -0
  20. bert/deberta-v2-large-japanese/.gitattributes +34 -0
  21. bert/deberta-v2-large-japanese/README.md +111 -0
  22. bert/deberta-v2-large-japanese/config.json +38 -0
  23. bert/deberta-v2-large-japanese/pytorch_model.bin +3 -0
  24. bert/deberta-v2-large-japanese/special_tokens_map.json +9 -0
  25. bert/deberta-v2-large-japanese/tokenizer.json +0 -0
  26. bert/deberta-v2-large-japanese/tokenizer_config.json +15 -0
  27. bert/deberta-v3-large/.gitattributes +27 -0
  28. bert/deberta-v3-large/README.md +93 -0
  29. bert/deberta-v3-large/config.json +22 -0
  30. bert/deberta-v3-large/generator_config.json +22 -0
  31. bert/deberta-v3-large/pytorch_model.bin +3 -0
  32. bert/deberta-v3-large/spm.model +3 -0
  33. bert/deberta-v3-large/tokenizer_config.json +4 -0
  34. bert_gen.py +28 -14
  35. commons.py +7 -1
  36. config.py +237 -0
  37. config.yml +160 -0
  38. configs/config.json +9 -8
  39. configs/config_old.json +187 -0
  40. data_utils.py +26 -31
  41. default_config.yml +160 -0
  42. emo_gen.py +169 -0
  43. emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/.gitattributes +28 -0
  44. emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/LICENSE +437 -0
  45. emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/README.md +127 -0
  46. emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/config.json +122 -0
  47. emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/preprocessor_config.json +9 -0
  48. emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/vocab.json +1 -0
  49. export_onnx.py +56 -0
  50. infer.py +207 -0
.gitignore ADDED
@@ -0,0 +1,185 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
161
+
162
+ .DS_Store
163
+ /models
164
+ /logs
165
+
166
+ filelists/*
167
+ !/filelists/esd.list
168
+ data/*
169
+ /*.yml
170
+ !/default_config.yml
171
+ /Web/
172
+ /emotional/*/*.bin
173
+ /bert/*/*.bin
174
+ /bert/*/*.h5
175
+ /bert/*/*.model
176
+ /bert/*/*.safetensors
177
+ /bert/*/*.msgpack
178
+ asr_transcript.py
179
+ extract_list.py
180
+ dataset
181
+ /Data
182
+ Model
183
+ raw/
184
+ logs/
185
+ Data/*
.gitmodules ADDED
File without changes
.pre-commit-config.yaml ADDED
@@ -0,0 +1,25 @@
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.5.0
4
+ hooks:
5
+ - id: check-yaml
6
+ - id: end-of-file-fixer
7
+ - id: trailing-whitespace
8
+
9
+ - repo: https://github.com/astral-sh/ruff-pre-commit
10
+ rev: v0.1.4
11
+ hooks:
12
+ - id: ruff
13
+ args: [ --fix ]
14
+
15
+ - repo: https://github.com/psf/black
16
+ rev: 23.10.1
17
+ hooks:
18
+ - id: black
19
+
20
+ - repo: https://github.com/codespell-project/codespell
21
+ rev: v2.2.6
22
+ hooks:
23
+ - id: codespell
24
+ files: ^.*\.(py|md|rst|yml)$
25
+ args: [-L=fro]
Data/BangDream/config.json ADDED
@@ -0,0 +1,187 @@
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 1000,
5
+ "seed": 42,
6
+ "epochs": 1000,
7
+ "learning_rate": 0.0002,
8
+ "betas": [
9
+ 0.8,
10
+ 0.99
11
+ ],
12
+ "eps": 1e-09,
13
+ "batch_size": 16,
14
+ "fp16_run": false,
15
+ "lr_decay": 0.99995,
16
+ "segment_size": 16384,
17
+ "init_lr_ratio": 1,
18
+ "warmup_epochs": 0,
19
+ "c_mel": 45,
20
+ "c_kl": 1.0,
21
+ "skip_optimizer": true
22
+ },
23
+ "data": {
24
+ "training_files": "Data/BangDream/filelists/train.list",
25
+ "validation_files": "Data/BangDream/filelists/val.list",
26
+ "max_wav_value": 32768.0,
27
+ "sampling_rate": 44100,
28
+ "filter_length": 2048,
29
+ "hop_length": 512,
30
+ "win_length": 2048,
31
+ "n_mel_channels": 128,
32
+ "mel_fmin": 0.0,
33
+ "mel_fmax": null,
34
+ "add_blank": true,
35
+ "n_speakers": 700,
36
+ "cleaned_text": true,
37
+ "spk2id": {
38
+ "華戀": 0,
39
+ "晶": 1,
40
+ "光": 2,
41
+ "未知留": 3,
42
+ "香子": 4,
43
+ "雙葉": 5,
44
+ "真晝": 6,
45
+ "艾露": 7,
46
+ "珠緒": 8,
47
+ "艾露露": 9,
48
+ "純那": 10,
49
+ "克洛迪娜": 11,
50
+ "真矢": 12,
51
+ "奈奈": 13,
52
+ "壘": 14,
53
+ "文": 15,
54
+ "一愛": 16,
55
+ "菈樂菲": 17,
56
+ "司": 18,
57
+ "美空": 19,
58
+ "靜羽": 20,
59
+ "悠悠子": 21,
60
+ "八千代": 22,
61
+ "栞": 23,
62
+ "美帆": 24,
63
+ "芙蘿菈": 25,
64
+ "克蕾兒": 26,
65
+ "安德露": 27,
66
+ "瑪莉亞貝菈": 28,
67
+ "克拉迪亞": 29,
68
+ "桃樂西": 30,
69
+ "瑪麗安": 31,
70
+ "三月七": 32,
71
+ "香澄": 33,
72
+ "有咲": 34,
73
+ "沙綾": 35,
74
+ "りみ": 36,
75
+ "たえ": 37,
76
+ "沙綾、りみ、たえ": 38,
77
+ "巴": 39,
78
+ "一同": 40,
79
+ "まりな": 41,
80
+ "ゆり": 42,
81
+ "明日香": 43,
82
+ "???": 44,
83
+ "ひまり": 45,
84
+ "モカ": 46,
85
+ "つぐみ": 47,
86
+ "蘭": 48,
87
+ "リサ": 49,
88
+ "千聖": 50,
89
+ "花音": 51,
90
+ "イヴ": 52,
91
+ "日菜": 53,
92
+ "友希那": 54,
93
+ "紗夜": 55,
94
+ "こころ": 56,
95
+ "美咲": 57,
96
+ "薫": 58,
97
+ "はぐみ": 59,
98
+ "ミッシェル": 60,
99
+ "マリー": 61,
100
+ "怪盗ハロハッピー": 62,
101
+ "ニコリーナ": 63,
102
+ "彩": 64,
103
+ "麻弥": 65,
104
+ "燐子": 66,
105
+ "あこ": 67,
106
+ "ゆきな": 68,
107
+ "ましろ": 69,
108
+ "つくし": 70,
109
+ "透子": 71,
110
+ "七深": 72,
111
+ "瑠唯": 73,
112
+ "六花": 74,
113
+ "パレオ": 75,
114
+ "レイヤ": 76,
115
+ "マスキング": 77,
116
+ "チュチュ": 78,
117
+ "ますき": 79,
118
+ "ロック": 80,
119
+ "令王那": 81,
120
+ "CHIYU": 82,
121
+ "レイ": 83,
122
+ "燈": 84,
123
+ "そよ": 85,
124
+ "祥子": 86,
125
+ "立希": 87,
126
+ "睦": 88,
127
+ "愛音": 89,
128
+ "楽奈": 90,
129
+ "海鈴": 91
130
+ }
131
+ },
132
+ "model": {
133
+ "use_spk_conditioned_encoder": true,
134
+ "use_noise_scaled_mas": true,
135
+ "use_mel_posterior_encoder": false,
136
+ "use_duration_discriminator": true,
137
+ "inter_channels": 192,
138
+ "hidden_channels": 192,
139
+ "filter_channels": 768,
140
+ "n_heads": 2,
141
+ "n_layers": 6,
142
+ "kernel_size": 3,
143
+ "p_dropout": 0.1,
144
+ "resblock": "1",
145
+ "resblock_kernel_sizes": [
146
+ 3,
147
+ 7,
148
+ 11
149
+ ],
150
+ "resblock_dilation_sizes": [
151
+ [
152
+ 1,
153
+ 3,
154
+ 5
155
+ ],
156
+ [
157
+ 1,
158
+ 3,
159
+ 5
160
+ ],
161
+ [
162
+ 1,
163
+ 3,
164
+ 5
165
+ ]
166
+ ],
167
+ "upsample_rates": [
168
+ 8,
169
+ 8,
170
+ 2,
171
+ 2,
172
+ 2
173
+ ],
174
+ "upsample_initial_channel": 512,
175
+ "upsample_kernel_sizes": [
176
+ 16,
177
+ 16,
178
+ 8,
179
+ 2,
180
+ 2
181
+ ],
182
+ "n_layers_q": 3,
183
+ "use_spectral_norm": false,
184
+ "gin_channels": 256
185
+ },
186
+ "version": "2.0"
187
+ }
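This config drives both training and inference: the fields read most often downstream are data.sampling_rate, data.spk2id and the model block (an identical copy follows under Data/BangDream/configs/). A minimal sketch of reading it with the standard library only (illustrative, not part of the commit; the repo's own utils.get_hparams_from_file wraps the same JSON, as the app.py diff below shows):

```python
# Minimal sketch (not part of the commit): reading the config above with only the
# standard library. Field names follow the JSON shown here; the path is the one
# added in this commit.
import json

with open("Data/BangDream/config.json", encoding="utf-8") as f:
    cfg = json.load(f)

print(cfg["data"]["sampling_rate"])   # 44100
print(len(cfg["data"]["spk2id"]))     # 92 speakers, ids 0..91
print(cfg["model"]["gin_channels"])   # 256
```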
Data/BangDream/configs/config.json ADDED
@@ -0,0 +1,187 @@
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 1000,
5
+ "seed": 42,
6
+ "epochs": 1000,
7
+ "learning_rate": 0.0002,
8
+ "betas": [
9
+ 0.8,
10
+ 0.99
11
+ ],
12
+ "eps": 1e-09,
13
+ "batch_size": 16,
14
+ "fp16_run": false,
15
+ "lr_decay": 0.99995,
16
+ "segment_size": 16384,
17
+ "init_lr_ratio": 1,
18
+ "warmup_epochs": 0,
19
+ "c_mel": 45,
20
+ "c_kl": 1.0,
21
+ "skip_optimizer": true
22
+ },
23
+ "data": {
24
+ "training_files": "Data/BangDream/filelists/train.list",
25
+ "validation_files": "Data/BangDream/filelists/val.list",
26
+ "max_wav_value": 32768.0,
27
+ "sampling_rate": 44100,
28
+ "filter_length": 2048,
29
+ "hop_length": 512,
30
+ "win_length": 2048,
31
+ "n_mel_channels": 128,
32
+ "mel_fmin": 0.0,
33
+ "mel_fmax": null,
34
+ "add_blank": true,
35
+ "n_speakers": 700,
36
+ "cleaned_text": true,
37
+ "spk2id": {
38
+ "華戀": 0,
39
+ "晶": 1,
40
+ "光": 2,
41
+ "未知留": 3,
42
+ "香子": 4,
43
+ "雙葉": 5,
44
+ "真晝": 6,
45
+ "艾露": 7,
46
+ "珠緒": 8,
47
+ "艾露露": 9,
48
+ "純那": 10,
49
+ "克洛迪娜": 11,
50
+ "真矢": 12,
51
+ "奈奈": 13,
52
+ "壘": 14,
53
+ "文": 15,
54
+ "一愛": 16,
55
+ "菈樂菲": 17,
56
+ "司": 18,
57
+ "美空": 19,
58
+ "靜羽": 20,
59
+ "悠悠子": 21,
60
+ "八千代": 22,
61
+ "栞": 23,
62
+ "美帆": 24,
63
+ "芙蘿菈": 25,
64
+ "克蕾兒": 26,
65
+ "安德露": 27,
66
+ "瑪莉亞貝菈": 28,
67
+ "克拉迪亞": 29,
68
+ "桃樂西": 30,
69
+ "瑪麗安": 31,
70
+ "三月七": 32,
71
+ "香澄": 33,
72
+ "有咲": 34,
73
+ "沙綾": 35,
74
+ "りみ": 36,
75
+ "たえ": 37,
76
+ "沙綾、りみ、たえ": 38,
77
+ "巴": 39,
78
+ "一同": 40,
79
+ "まりな": 41,
80
+ "ゆり": 42,
81
+ "明日香": 43,
82
+ "???": 44,
83
+ "ひまり": 45,
84
+ "モカ": 46,
85
+ "つぐみ": 47,
86
+ "蘭": 48,
87
+ "リサ": 49,
88
+ "千聖": 50,
89
+ "花音": 51,
90
+ "イヴ": 52,
91
+ "日菜": 53,
92
+ "友希那": 54,
93
+ "紗夜": 55,
94
+ "こころ": 56,
95
+ "美咲": 57,
96
+ "薫": 58,
97
+ "はぐみ": 59,
98
+ "ミッシェル": 60,
99
+ "マリー": 61,
100
+ "怪盗ハロハッピー": 62,
101
+ "ニコリーナ": 63,
102
+ "彩": 64,
103
+ "麻弥": 65,
104
+ "燐子": 66,
105
+ "あこ": 67,
106
+ "ゆきな": 68,
107
+ "ましろ": 69,
108
+ "つくし": 70,
109
+ "透子": 71,
110
+ "七深": 72,
111
+ "瑠唯": 73,
112
+ "六花": 74,
113
+ "パレオ": 75,
114
+ "レイヤ": 76,
115
+ "マスキング": 77,
116
+ "チュチュ": 78,
117
+ "ますき": 79,
118
+ "ロック": 80,
119
+ "令王那": 81,
120
+ "CHIYU": 82,
121
+ "レイ": 83,
122
+ "燈": 84,
123
+ "そよ": 85,
124
+ "祥子": 86,
125
+ "立希": 87,
126
+ "睦": 88,
127
+ "愛音": 89,
128
+ "楽奈": 90,
129
+ "海鈴": 91
130
+ }
131
+ },
132
+ "model": {
133
+ "use_spk_conditioned_encoder": true,
134
+ "use_noise_scaled_mas": true,
135
+ "use_mel_posterior_encoder": false,
136
+ "use_duration_discriminator": true,
137
+ "inter_channels": 192,
138
+ "hidden_channels": 192,
139
+ "filter_channels": 768,
140
+ "n_heads": 2,
141
+ "n_layers": 6,
142
+ "kernel_size": 3,
143
+ "p_dropout": 0.1,
144
+ "resblock": "1",
145
+ "resblock_kernel_sizes": [
146
+ 3,
147
+ 7,
148
+ 11
149
+ ],
150
+ "resblock_dilation_sizes": [
151
+ [
152
+ 1,
153
+ 3,
154
+ 5
155
+ ],
156
+ [
157
+ 1,
158
+ 3,
159
+ 5
160
+ ],
161
+ [
162
+ 1,
163
+ 3,
164
+ 5
165
+ ]
166
+ ],
167
+ "upsample_rates": [
168
+ 8,
169
+ 8,
170
+ 2,
171
+ 2,
172
+ 2
173
+ ],
174
+ "upsample_initial_channel": 512,
175
+ "upsample_kernel_sizes": [
176
+ 16,
177
+ 16,
178
+ 8,
179
+ 2,
180
+ 2
181
+ ],
182
+ "n_layers_q": 3,
183
+ "use_spectral_norm": false,
184
+ "gin_channels": 256
185
+ },
186
+ "version": "2.0"
187
+ }
Data/BangDream/filelists/bushroid.list ADDED
The diff for this file is too large to render.
 
Data/BangDream/filelists/bushroid.list.cleaned ADDED
The diff for this file is too large to render.
 
Data/BangDream/filelists/train.list ADDED
The diff for this file is too large to render.
 
Data/BangDream/filelists/val.list ADDED
@@ -0,0 +1,8 @@
1
+ /content/data/m1402_068.wav|華戀|JP|初稿の時はふわふわしてて,何をどう演じていいのかわからなかったんだけど…….|_ sh o k o o n o t o k i w a f u w a f u w a sh i t e t e , n a n i o d o o e n j i t e i i n o k a w a k a r a n a k a q t a n d a k e d o … … . _|0 0 0 1 1 1 0 0 0 0 1 1 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0|1 3 2 2 4 2 8 2 2 2 1 4 1 3 4 2 2 2 2 6 3 2 2 1 2 4 1 1 1 1
2
+ /content/data/m0401_041.wav|華戀|JP|はやく,取り戻さないと,本当に消えちゃうっ……!!|_ h a y a k u , t o r i m o d o s a n a i t o , h o n t o o n i k i e ch a u q … … ! ! _|0 0 0 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 1 0 0 0 0 0 0 0 0 0|1 3 3 1 4 3 3 3 2 1 8 3 2 1 1 1 1 1 1 1
3
+ /content/data/m1403_003.wav|華戀|JP|今日はレッスン室の鍵開け当番だし,早起き早起き…….|_ ky o o w a r e q s u n sh i ts u n o k a g i a k e t o o b a n d a sh i , h a y a o k i h a y a o k i … … . _|0 1 1 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0|1 3 2 6 4 2 4 3 6 2 2 1 7 7 1 1 1 1
4
+ /content/data/m0402_062.wav|華戀|JP|どんな舞台でも誰がいても,キラめいてみせる!スタァライトを守るために!|_ d o n n a b u t a i d e m o d a r e g a i t e m o , k i r a m e i t e m i s e r u ! s u t a a r a i t o o m a m o r u t a m e n i ! _|0 1 1 0 0 0 1 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 0 1 1 0 0 0 0 0 1 1 0 1 1 0 0 0 0 0 0 1 1 0 0 0 0 1 1 0 0 0 0|1 5 5 2 2 4 2 1 2 2 1 4 3 2 3 3 1 4 1 0 5 1 6 4 2 1 1
5
+ /content/data/m1708_030.wav|晶|JP|そうだ……私に憧れていただけの自分など超えてゆけ.フラウ,ルビン……リュウ,メイファン!!|_ s o o d a … … w a t a sh i n i a k o g a r e t e i t a d a k e n o j i b u n n a d o k o e t e y u k e . f u r a u , r u b i n … … ry u u , m e i f a n ! ! _|0 0 0 1 0 0 0 0 0 0 1 1 1 1 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 1 0 0 1 1 0 0 0 0 0 1 1 0 0 1 1 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0|1 3 2 1 1 6 2 7 2 1 2 4 2 5 4 3 2 2 2 1 3 2 1 3 2 1 1 3 1 3 3 1 1 1
6
+ /content/data/m1101_137.wav|晶|JP|戯曲を生むは我らが舞台.それは我らが戯曲の登場人物ということ.|_ g i k i y o k u o u m u w a w a r e r a g a b u t a i . s o r e w a w a r e r a g a g i k i y o k u n o t o o j o o j i n b u ts u t o i u k o t o . _|0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0|1 8 1 3 2 3 3 2 5 1 4 2 3 3 2 8 2 6 7 2 2 4 1 1
7
+ /content/data/m0806_031.wav|晶|JP|いざ,削劇を!シークフェルト音楽学院,エーデルよ,ここに!|_ i z a , s o g i g e k i o ! sh i i k u f e r u t o o n g a k u g a k u i n , e e d e r u y o , k o k o n i ! _|0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0|1 3 1 4 4 1 1 3 2 6 6 6 1 3 3 2 1 4 2 1 1
8
+ /content/data/m1707_057.wav|晶|JP|まだこの程度の所で彷徨しているのか,お前たちは.苛烈な生存競争を勝ち抜いたお前たちが,この程度の人間だったとは…….|_ m a d a k o n o t e e d o n o t o k o r o d e h o o k o o sh i t e i r u n o k a , o m a e t a ch i w a . k a r e ts u n a s e e z o n ky o o s o o o k a ch i n u i t a o m a e t a ch i g a , k o n o t e e d o n o n i n g e n d a q t a t o w a … … . _|0 1 1 0 0 0 0 1 1 0 0 1 1 1 0 0 0 0 1 1 1 1 0 0 0 0 1 1 1 1 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 1 1 0 0 1 1 1 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0|1 4 4 5 2 6 2 2 2 2 2 2 3 2 2 1 4 4 2 1 3 3 2 6 6 1 3 2 2 2 4 4 2 1 4 5 2 6 2 1 2 2 2 1 1 1 1
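Each line of the cleaned filelists packs seven pipe-separated fields: wav path, speaker, language tag, normalized text, phoneme sequence, tone sequence and word2ph counts. A small parsing sketch (illustrative; the field order is read off the sample lines above and the helper name is invented):

```python
# Illustrative parser for the val.list format shown above; not part of the commit.
def parse_filelist_line(line: str) -> dict:
    wav_path, speaker, language, text, phones, tones, word2ph = line.rstrip("\n").split("|")
    return {
        "wav_path": wav_path,
        "speaker": speaker,            # e.g. 華戀, mapped to an integer id via spk2id
        "language": language,          # "JP" in these samples
        "text": text,
        "phones": phones.split(),      # space-separated phoneme tokens, padded with "_"
        "tones": [int(t) for t in tones.split()],
        "word2ph": [int(n) for n in word2ph.split()],
    }
```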
Data/BangDream/models/G_10000.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f8342e564362deed68b49a49ba86910bcdc6a57e201ab44effe42485834d596
3
+ size 705948086
Data/BangDream/models/G_12000.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90afbafe34786dd893d2d3a7088c241c401df6213d00b418e51874b8ffdcb37c
3
+ size 705948086
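The two G_*.pth entries above are Git LFS pointer files: the diff stores only the spec version, the sha256 oid of the real checkpoint and its byte size. A hedged sketch for checking a downloaded checkpoint against such a pointer (the function name is invented for illustration):

```python
# Illustrative only: verify a downloaded checkpoint against a Git LFS pointer's
# oid (sha256 of the file contents) and size (bytes), as listed above.
import hashlib
import os

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    if os.path.getsize(path) != size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == oid

# matches_lfs_pointer("Data/BangDream/models/G_12000.pth",
#                     "90afbafe34786dd893d2d3a7088c241c401df6213d00b418e51874b8ffdcb37c",
#                     705948086)
```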
app.py CHANGED
@@ -1,5 +1,9 @@
1
  # flake8: noqa: E402
 
2
  import logging
 
 
 
3
  logging.getLogger("numba").setLevel(logging.WARNING)
4
  logging.getLogger("markdown_it").setLevel(logging.WARNING)
5
  logging.getLogger("urllib3").setLevel(logging.WARNING)
@@ -10,30 +14,25 @@ logging.basicConfig(
10
  )
11
 
12
  logger = logging.getLogger(__name__)
13
- import datetime
14
- import numpy as np
 
15
  import torch
16
- from ebooklib import epub
17
- import PyPDF2
18
- from PyPDF2 import PdfReader
19
- import zipfile
20
- import shutil
21
- import sys, os
22
- import json
23
- from bs4 import BeautifulSoup
24
- import argparse
25
- import commons
26
  import utils
27
- from models import SynthesizerTrn
28
- from text.symbols import symbols
29
- from text import cleaned_text_to_sequence, get_bert
30
- from text.cleaner import clean_text
31
  import gradio as gr
32
- import webbrowser
33
- import re
34
- from scipy.io.wavfile import write
35
- from datetime import datetime
36
  net_g = None
 
 
 
37
  BandList = {
38
 
39
  "PoppinParty":["香澄","有咲","たえ","りみ","沙綾"],
@@ -56,350 +55,104 @@ if sys.platform == "darwin" and torch.backends.mps.is_available():
56
  else:
57
  device = "cuda"
58
 
59
- def is_japanese(string):
60
- for ch in string:
61
- if ord(ch) > 0x3040 and ord(ch) < 0x30FF:
62
- return True
63
- return False
64
-
65
- def extrac(text):
66
- text = re.sub("<[^>]*>","",text)
67
- result_list = re.split(r'\n', text)
68
- final_list = []
69
- for i in result_list:
70
- i = i.replace('\n','').replace(' ','')
71
- #Current length of single sentence: 20
72
- if len(i)>1:
73
- if len(i) > 20:
74
- try:
75
- cur_list = re.split(r'。|!', i)
76
- for i in cur_list:
77
- if len(i)>1:
78
- final_list.append(i+'。')
79
- except:
80
- pass
81
- else:
82
- final_list.append(i)
83
- '''
84
- final_list.append(i)
85
- '''
86
- final_list = [x for x in final_list if x != '']
87
- return final_list
88
-
89
- def get_text(text, language_str, hps):
90
- norm_text, phone, tone, word2ph = clean_text(text, language_str)
91
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
92
-
93
- if hps.data.add_blank:
94
- phone = commons.intersperse(phone, 0)
95
- tone = commons.intersperse(tone, 0)
96
- language = commons.intersperse(language, 0)
97
- for i in range(len(word2ph)):
98
- word2ph[i] = word2ph[i] * 2
99
- word2ph[0] += 1
100
- bert = get_bert(norm_text, word2ph, language_str, device)
101
- del word2ph
102
- assert bert.shape[-1] == len(phone), phone
103
-
104
- if language_str == "ZH":
105
- bert = bert
106
- ja_bert = torch.zeros(768, len(phone))
107
- elif language_str == "JA":
108
- ja_bert = bert
109
- bert = torch.zeros(1024, len(phone))
110
- else:
111
- bert = torch.zeros(1024, len(phone))
112
- ja_bert = torch.zeros(768, len(phone))
113
-
114
- assert bert.shape[-1] == len(
115
- phone
116
- ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
117
-
118
- phone = torch.LongTensor(phone)
119
- tone = torch.LongTensor(tone)
120
- language = torch.LongTensor(language)
121
- return bert, ja_bert, phone, tone, language
122
-
123
-
124
- def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
125
- global net_g
126
- bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
127
  with torch.no_grad():
128
- x_tst = phones.to(device).unsqueeze(0)
129
- tones = tones.to(device).unsqueeze(0)
130
- lang_ids = lang_ids.to(device).unsqueeze(0)
131
- bert = bert.to(device).unsqueeze(0)
132
- ja_bert = ja_bert.to(device).unsqueeze(0)
133
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
134
- del phones
135
- speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
136
- audio = (
137
- net_g.infer(
138
- x_tst,
139
- x_tst_lengths,
140
- speakers,
141
- tones,
142
- lang_ids,
143
- bert,
144
- ja_bert,
145
- sdp_ratio=sdp_ratio,
146
- noise_scale=noise_scale,
147
- noise_scale_w=noise_scale_w,
148
- length_scale=length_scale,
149
- )[0][0, 0]
150
- .data.cpu()
151
- .float()
152
- .numpy()
153
  )
154
- current_time = datetime.now()
155
- print(str(current_time)+':'+str(sid))
156
- del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
157
- return audio
158
-
159
 
160
  def tts_fn(
161
- text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,LongSentence
 
 
162
  ):
163
  if not LongSentence:
164
  with torch.no_grad():
165
- audio = infer(
166
  text,
167
  sdp_ratio=sdp_ratio,
168
  noise_scale=noise_scale,
169
  noise_scale_w=noise_scale_w,
170
  length_scale=length_scale,
171
- sid=speaker,
172
- language= "JP" if is_japanese(text) else "ZH",
173
  )
174
  torch.cuda.empty_cache()
175
  return (hps.data.sampling_rate, audio)
176
  else:
177
- audiopath = 'voice.wav'
178
- a = ['【','[','(','(']
179
- b = ['】',']',')',')']
180
- for i in a:
181
- text = text.replace(i,'<')
182
- for i in b:
183
- text = text.replace(i,'>')
184
- final_list = extrac(text.replace('“','').replace('”',''))
185
  audio_fin = []
186
  for sentence in final_list:
187
- with torch.no_grad():
188
- audio = infer(
189
- sentence,
190
- sdp_ratio=sdp_ratio,
191
- noise_scale=noise_scale,
192
- noise_scale_w=noise_scale_w,
193
- length_scale=length_scale,
194
- sid=speaker,
195
- language= "JP" if is_japanese(text) else "ZH",
196
- )
197
- audio_fin.append(audio)
 
 
 
 
198
  return (hps.data.sampling_rate, np.concatenate(audio_fin))
199
 
200
- def split_into_sentences(text):
201
- """将文本分割为句子,基于中文的标点符号"""
202
- sentences = re.split(r'(?<=[。!?…\n])', text)
203
- return [sentence.strip() for sentence in sentences if sentence]
204
-
205
-
206
- def seconds_to_ass_time(seconds):
207
- """将秒数转换为ASS时间格式"""
208
- hours = int(seconds / 3600)
209
- minutes = int((seconds % 3600) / 60)
210
- seconds = int(seconds) % 60
211
- milliseconds = int((seconds - int(seconds)) * 1000)
212
- return "{:01d}:{:02d}:{:02d}.{:02d}".format(hours, minutes, seconds, int(milliseconds / 10))
213
-
214
- def generate_audio_and_srt_for_group(group, outputPath, group_index, sampling_rate, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,spealerList,silenceTime):
215
- audio_fin = []
216
- ass_entries = []
217
- start_time = 0
218
-
219
- ass_header = """[Script Info]
220
- ; Script generated by OpenAI Assistant
221
- Title: Audiobook
222
- ScriptType: v4.00+
223
- WrapStyle: 0
224
- PlayResX: 640
225
- PlayResY: 360
226
- ScaledBorderAndShadow: yes
227
- [V4+ Styles]
228
- Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
229
- Style: Default,Arial,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,1,1,2,10,10,10,1
230
- [Events]
231
- Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
232
- """
233
-
234
- for sentence in group:
235
- try:
236
- print(sentence)
237
- FakeSpeaker = sentence.split("|")[0]
238
- print(FakeSpeaker)
239
- SpeakersList = re.split('\n', spealerList)
240
- if FakeSpeaker in list(hps.data.spk2id.keys()):
241
- speaker = FakeSpeaker
242
- for i in SpeakersList:
243
- if FakeSpeaker == i.split("|")[1]:
244
- speaker = i.split("|")[0]
245
- speaker_ids = hps.data.spk2id
246
-
247
- _, audio = tts_fn(sentence.split("|")[-1], speaker=speaker, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, LongSentence=True)
248
- silence_frames = int(silenceTime * 44010)
249
- silence_data = np.zeros((silence_frames,), dtype=audio.dtype)
250
- audio_fin.append(audio)
251
- audio_fin.append(silence_data)
252
-
253
- duration = len(audio) / sampling_rate
254
- end_time = start_time + duration + silenceTime
255
- ass_entries.append("Dialogue: 0,{},{},".format(seconds_to_ass_time(start_time), seconds_to_ass_time(end_time)) + "Default,,0,0,0,,{}".format(sentence.replace("|",":")))
256
- start_time = end_time
257
- except:
258
- pass
259
- wav_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.wav')
260
- ass_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.ass')
261
-
262
- write(wav_filename, sampling_rate, np.concatenate(audio_fin))
263
-
264
- with open(ass_filename, 'w', encoding='utf-8') as f:
265
- f.write(ass_header + '\n'.join(ass_entries))
266
- return (hps.data.sampling_rate, np.concatenate(audio_fin))
267
- def extract_text_from_epub(file_path):
268
- book = epub.read_epub(file_path)
269
- content = []
270
- for item in book.items:
271
- if isinstance(item, epub.EpubHtml):
272
- soup = BeautifulSoup(item.content, 'html.parser')
273
- content.append(soup.get_text())
274
- return '\n'.join(content)
275
-
276
- def extract_text_from_pdf(file_path):
277
- with open(file_path, 'rb') as file:
278
- reader = PdfReader(file)
279
- content = [page.extract_text() for page in reader.pages]
280
- return '\n'.join(content)
281
-
282
- def extract_text_from_game2(data):
283
- current_content = []
284
-
285
- def _extract(data, current_data=None):
286
- nonlocal current_content
287
-
288
- if current_data is None:
289
- current_data = {}
290
-
291
- if isinstance(data, dict):
292
- if 'name' in data and 'body' in data:
293
- current_name = data['name']
294
- current_body = data['body'].replace('\n', '')
295
- current_content.append(f"{current_name}|{current_body}")
296
-
297
- for key, value in data.items():
298
- _extract(value, dict(current_data))
299
-
300
- elif isinstance(data, list):
301
- for item in data:
302
- _extract(item, dict(current_data))
303
-
304
- _extract(data)
305
- return '\n'.join(current_content)
306
-
307
- def extract_text_from_file(inputFile):
308
- file_extension = os.path.splitext(inputFile)[1].lower()
309
-
310
- if file_extension == ".epub":
311
- return extract_text_from_epub(inputFile)
312
- elif file_extension == ".pdf":
313
- return extract_text_from_pdf(inputFile)
314
- elif file_extension == ".txt":
315
- with open(inputFile, 'r', encoding='utf-8') as f:
316
- return f.read()
317
- elif file_extension == ".asset":
318
- with open(inputFile, 'r', encoding='utf-8') as f:
319
- content = json.load(f)
320
- return extract_text_from_game2(content) if extract_text_from_game2(content) != '' else extract_text_from_game2(content)
321
- else:
322
- raise ValueError(f"Unsupported file format: {file_extension}")
323
-
324
- def audiobook(inputFile, groupsize, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,spealerList,silenceTime):
325
- directory_path = "books"
326
- output_path = "books/audiobook_part_1.wav"
327
-
328
- if os.path.exists(directory_path):
329
- shutil.rmtree(directory_path)
330
-
331
- os.makedirs(directory_path)
332
- text = extract_text_from_file(inputFile.name)
333
- sentences = split_into_sentences(text)
334
- GROUP_SIZE = groupsize
335
- for i in range(0, len(sentences), GROUP_SIZE):
336
- group = sentences[i:i+GROUP_SIZE]
337
- if spealerList == "":
338
- spealerList = "无"
339
- result = generate_audio_and_srt_for_group(group,directory_path, i//GROUP_SIZE + 1, 44100, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,spealerList,silenceTime)
340
- if not torch.cuda.is_available():
341
- return result
342
- return result
343
-
344
  def loadmodel(model):
345
  _ = net_g.eval()
346
  _ = utils.load_checkpoint(model, net_g, None, skip_optimizer=True)
347
  return "success"
348
 
349
-
350
  if __name__ == "__main__":
351
- parser = argparse.ArgumentParser()
352
- parser.add_argument(
353
- "-m", "--model", default="./logs/Bangdream/G_10000.pth", help="path of your model"
 
354
  )
355
- parser.add_argument(
356
- "-c",
357
- "--config",
358
- default="configs/config.json",
359
- help="path of your config file",
360
- )
361
- parser.add_argument(
362
- "--share", default=True, help="make link public", action="store_true"
363
- )
364
- parser.add_argument(
365
- "-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log"
366
- )
367
-
368
- args = parser.parse_args()
369
- if args.debug:
370
- logger.info("Enable DEBUG-LEVEL log")
371
- logging.basicConfig(level=logging.DEBUG)
372
- device = (
373
- "cuda:0"
374
- if torch.cuda.is_available()
375
- else (
376
- "mps"
377
- if sys.platform == "darwin" and torch.backends.mps.is_available()
378
- else "cpu"
379
- )
380
- )
381
- hps = utils.get_hparams_from_file(args.config)
382
- net_g = SynthesizerTrn(
383
- len(symbols),
384
- hps.data.filter_length // 2 + 1,
385
- hps.train.segment_size // hps.data.hop_length,
386
- n_speakers=hps.data.n_speakers,
387
- **hps.model,
388
- ).to(device)
389
- loadmodel(args.model)
390
  speaker_ids = hps.data.spk2id
391
  speakers = list(speaker_ids.keys())
392
- languages = ["ZH", "JP"]
393
- examples = [
394
- ["filelist/Scenarioband6-018.asset", 500, "つくし", "ましろ|真白\n七深|七深\n透子|透子\nつくし|筑紫\n瑠唯|瑠唯\nそよ|素世\n祥子|祥子", "扩展功能"],
395
- ]
396
  modelPaths = []
397
- for dirpath, dirnames, filenames in os.walk("./logs/Bangdream/"):
398
  for filename in filenames:
399
  modelPaths.append(os.path.join(dirpath, filename))
400
  with gr.Blocks() as app:
401
  gr.Markdown(
402
- f"少歌邦邦全员TTS,使用本模型请严格遵守法律法规!\n备份: <a href='https://huggingface.co/spaces/Mahiruoshi/MyGO_VIts-bert'>V1.0版本模型</a> 查看使用说明</a>\m 发布二创作品请注明项目和本模型作者<a href='https://space.bilibili.com/19874615/'>B站@Mahiroshi</a>及项目链接\n从 <a href='https://nijigaku.top/2023/10/03/BangDreamTTS/'>我的博客站点</a> 查看使用说明</a>"
403
  )
404
  for band in BandList:
405
  with gr.TabItem(band):
@@ -416,7 +169,7 @@ if __name__ == "__main__":
416
  length_scale = gr.Slider(
417
  minimum=0.1, maximum=2, value=1, step=0.01, label="语速调节"
418
  )
419
- with gr.Accordion(label="切换模型(合成中文建议切换为早期模型)", open=False):
420
  modelstrs = gr.Dropdown(label = "模型", choices = modelPaths, value = modelPaths[0], type = "value")
421
  btnMod = gr.Button("载入模型")
422
  statusa = gr.TextArea()
@@ -440,6 +193,9 @@ if __name__ == "__main__":
440
  minimum=0.1, maximum=2, value=0.8, step=0.01, label="音素长度"
441
  )
442
  LongSentence = gr.Checkbox(value=True, label="Generate LongSentence")
 
 
 
443
  speaker = gr.Dropdown(
444
  choices=speakers, value=name, label="说话人"
445
  )
@@ -452,60 +208,11 @@ if __name__ == "__main__":
452
  noise_scale,
453
  noise_scale_w,
454
  length_scale,
 
455
  LongSentence,
456
  ],
457
  outputs=[audio_output],
458
  )
459
- for i in examples:
460
- with gr.Tab(i[-1]):
461
- with gr.Row():
462
- with gr.Column():
463
- gr.Markdown(
464
- f"从 <a href='https://nijigaku.top/2023/10/03/BangDreamTTS/'>我的博客站点</a> 查看自制galgame使用说明\n</a>"
465
- )
466
- inputFile = gr.inputs.File(label="上传txt(可设置角色对应表)、epub或mobi文件")
467
- groupSize = gr.Slider(
468
- minimum=10, maximum=100,value = i[1], step=1, label="当个音频文件包含的最大字数"
469
- )
470
- silenceTime = gr.Slider(
471
- minimum=0, maximum=1, value=0.5, step=0.1, label="句子的间隔"
472
- )
473
- spealerList = gr.TextArea(
474
- label="角色对应表",
475
- placeholder="左边是你想要在每一句话合成中用到的speaker(见角色清单)右边是你上传文本时分隔符左边设置的说话人:{ChoseSpeakerFromConfigList1}|{SeakerInUploadText1}\n{ChoseSpeakerFromConfigList2}|{SeakerInUploadText2}\n{ChoseSpeakerFromConfigList3}|{SeakerInUploadText3}\n",
476
- value = i[3],
477
- )
478
- speaker = gr.Dropdown(
479
- choices=speakers, value = i[2], label="选择默认说话人"
480
- )
481
- with gr.Column():
482
- sdp_ratio = gr.Slider(
483
- minimum=0, maximum=1, value=0.2, step=0.01, label="SDP/DP混合比"
484
- )
485
- noise_scale = gr.Slider(
486
- minimum=0.1, maximum=2, value=0.6, step=0.01, label="感情调节"
487
- )
488
- noise_scale_w = gr.Slider(
489
- minimum=0.1, maximum=2, value=0.8, step=0.01, label="音素长度"
490
- )
491
- length_scale = gr.Slider(
492
- minimum=0.1, maximum=2, value=1, step=0.01, label="生成长度"
493
- )
494
- LastAudioOutput = gr.Audio(label="当用cuda在本地运行时才能在book文件夹下浏览全部合成内容")
495
- btn2 = gr.Button("点击生成", variant="primary")
496
- btn2.click(
497
- audiobook,
498
- inputs=[
499
- inputFile,
500
- groupSize,
501
- speaker,
502
- sdp_ratio,
503
- noise_scale,
504
- noise_scale_w,
505
- length_scale,
506
- spealerList,
507
- silenceTime
508
- ],
509
- outputs=[LastAudioOutput],
510
- )
511
  app.launch()
 
1
  # flake8: noqa: E402
2
+ import os
3
  import logging
4
+
5
+ import re_matching
6
+
7
  logging.getLogger("numba").setLevel(logging.WARNING)
8
  logging.getLogger("markdown_it").setLevel(logging.WARNING)
9
  logging.getLogger("urllib3").setLevel(logging.WARNING)
 
14
  )
15
 
16
  logger = logging.getLogger(__name__)
17
+
18
+ import warnings
19
+
20
+ warnings.filterwarnings("ignore", category=UserWarning, module="gradio.blocks")
21
+
22
+
23
+ import re
24
  import torch
 
 
25
  import utils
26
+ from infer import infer, latest_version, get_net_g
 
 
 
27
  import gradio as gr
28
+ import numpy as np
29
+ from tools.sentence import extrac, is_japanese, is_chinese
30
+ import sys, os
31
+ import math
32
  net_g = None
33
+
34
+ cara_list = ["ひまり","たえ","彩","日菜","美咲","ましろ","燐子","香子","珠緒","たえ"]
35
+
36
  BandList = {
37
 
38
  "PoppinParty":["香澄","有咲","たえ","りみ","沙綾"],
 
55
  else:
56
  device = "cuda"
57
 
58
+ def generate_audio(
59
+ text,
60
+ sdp_ratio,
61
+ noise_scale,
62
+ noise_scale_w,
63
+ length_scale,
64
+ speaker,
65
+ language,
66
+ ):
67
+ audio_list = []
 
 
68
  with torch.no_grad():
69
+ if language == 'Auto':
70
+ language = "EN"
71
+ if is_japanese(text):
72
+ language = "JP"
73
+ elif is_chinese(text):
74
+ language = "ZH"
75
+ print(text+":"+language)
76
+ audio = infer(
77
+ text,
78
+ sdp_ratio=sdp_ratio,
79
+ noise_scale=noise_scale,
80
+ noise_scale_w=noise_scale_w,
81
+ length_scale=length_scale,
82
+ sid=speaker,
83
+ language=language,
84
+ hps=hps,
85
+ net_g=net_g,
86
+ device=device,
 
 
87
  )
88
+ return audio
 
 
 
 
89
 
90
  def tts_fn(
91
+ text: str,
92
+ speaker,
93
+ sdp_ratio,
94
+ noise_scale,
95
+ noise_scale_w,
96
+ length_scale,
97
+ language,
98
+ LongSentence,
99
  ):
100
  if not LongSentence:
101
  with torch.no_grad():
102
+ audio = generate_audio(
103
  text,
104
  sdp_ratio=sdp_ratio,
105
  noise_scale=noise_scale,
106
  noise_scale_w=noise_scale_w,
107
  length_scale=length_scale,
108
+ speaker=speaker,
109
+ language= language,
110
  )
111
  torch.cuda.empty_cache()
112
  return (hps.data.sampling_rate, audio)
113
  else:
114
+
115
+ final_list = extrac(text)
 
 
116
  audio_fin = []
117
  for sentence in final_list:
118
+ if len(sentence) > 1:
119
+ with torch.no_grad():
120
+ audio = generate_audio(
121
+ sentence,
122
+ sdp_ratio=sdp_ratio,
123
+ noise_scale=noise_scale,
124
+ noise_scale_w=noise_scale_w,
125
+ length_scale=length_scale,
126
+ speaker=speaker,
127
+ language= language,
128
+ )
129
+ silence_frames = int(math.log(len(sentence)+1, 1000) * 44010) if is_chinese(sentence) else int(math.log(len(sentence)+1, 3000) * 44010)
130
+ silence_data = np.zeros((silence_frames,), dtype=audio.dtype)
131
+ audio_fin.append(audio)
132
+ audio_fin.append(silence_data)
133
  return (hps.data.sampling_rate, np.concatenate(audio_fin))
134
 
 
 
 
 
135
  def loadmodel(model):
136
  _ = net_g.eval()
137
  _ = utils.load_checkpoint(model, net_g, None, skip_optimizer=True)
138
  return "success"
139
 
 
140
  if __name__ == "__main__":
141
+ hps = utils.get_hparams_from_file('Data/BangDream/config.json')
142
+ version = hps.version if hasattr(hps, "version") else latest_version
143
+ net_g = get_net_g(
144
+ model_path='Data/BangDream/models/G_10000.pth', version=version, device=device, hps=hps
145
  )
 
 
 
146
  speaker_ids = hps.data.spk2id
147
  speakers = list(speaker_ids.keys())
148
+ languages = [ "Auto", "ZH", "JP"]
 
 
 
149
  modelPaths = []
150
+ for dirpath, dirnames, filenames in os.walk("Data/BangDream/models/"):
151
  for filename in filenames:
152
  modelPaths.append(os.path.join(dirpath, filename))
153
  with gr.Blocks() as app:
154
  gr.Markdown(
155
+ f"少歌邦邦全员TTS,使用本模型请严格遵守法律法规!\n 发布二创作品请注明项目和本模型作者<a href='https://space.bilibili.com/19874615/'>B站@Mahiroshi</a>及项目链接\n从 <a href='https://nijigaku.top/2023/10/03/BangDreamTTS/'>我的博客站点</a> 查看使用说明</a>"
156
  )
157
  for band in BandList:
158
  with gr.TabItem(band):
 
169
  length_scale = gr.Slider(
170
  minimum=0.1, maximum=2, value=1, step=0.01, label="语速调节"
171
  )
172
+ with gr.Accordion(label="切换模型", open=False):
173
  modelstrs = gr.Dropdown(label = "模型", choices = modelPaths, value = modelPaths[0], type = "value")
174
  btnMod = gr.Button("载入模型")
175
  statusa = gr.TextArea()
 
193
  minimum=0.1, maximum=2, value=0.8, step=0.01, label="音素长度"
194
  )
195
  LongSentence = gr.Checkbox(value=True, label="Generate LongSentence")
196
+ language = gr.Dropdown(
197
+ choices=languages, value=languages[0], label="选择语言(默认自动)"
198
+ )
199
  speaker = gr.Dropdown(
200
  choices=speakers, value=name, label="说话人"
201
  )
 
208
  noise_scale,
209
  noise_scale_w,
210
  length_scale,
211
+ language,
212
  LongSentence,
213
  ],
214
  outputs=[audio_output],
215
  )
216
+
217
+ print("推理页面已开启!")
 
 
218
  app.launch()
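In the rewritten tts_fn, long inputs are split by tools.sentence.extrac and a silence gap is appended after each sentence; the gap grows logarithmically with sentence length (log base 1000 for Chinese, base 3000 otherwise, times a 44010-sample constant). A worked evaluation of that formula, just to make the magnitudes concrete:

```python
# Worked example (illustrative): the sentence-length-dependent pause the new tts_fn
# appends after each sentence. 44010 is the constant used in the diff above
# (data.sampling_rate itself is 44100).
import math

def pause_seconds(sentence_len: int, chinese: bool) -> float:
    base = 1000 if chinese else 3000
    frames = int(math.log(sentence_len + 1, base) * 44010)
    return frames / 44010

print(round(pause_seconds(20, chinese=True), 2))   # ~0.44 s after a 20-char Chinese sentence
print(round(pause_seconds(20, chinese=False), 2))  # ~0.38 s after a 20-char Japanese sentence
```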
attentions_onnx.py ADDED
@@ -0,0 +1,378 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ import commons
7
+ import logging
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
+ class LayerNorm(nn.Module):
13
+ def __init__(self, channels, eps=1e-5):
14
+ super().__init__()
15
+ self.channels = channels
16
+ self.eps = eps
17
+
18
+ self.gamma = nn.Parameter(torch.ones(channels))
19
+ self.beta = nn.Parameter(torch.zeros(channels))
20
+
21
+ def forward(self, x):
22
+ x = x.transpose(1, -1)
23
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
24
+ return x.transpose(1, -1)
25
+
26
+
27
+ @torch.jit.script
28
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
29
+ n_channels_int = n_channels[0]
30
+ in_act = input_a + input_b
31
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
32
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
33
+ acts = t_act * s_act
34
+ return acts
35
+
36
+
37
+ class Encoder(nn.Module):
38
+ def __init__(
39
+ self,
40
+ hidden_channels,
41
+ filter_channels,
42
+ n_heads,
43
+ n_layers,
44
+ kernel_size=1,
45
+ p_dropout=0.0,
46
+ window_size=4,
47
+ isflow=True,
48
+ **kwargs
49
+ ):
50
+ super().__init__()
51
+ self.hidden_channels = hidden_channels
52
+ self.filter_channels = filter_channels
53
+ self.n_heads = n_heads
54
+ self.n_layers = n_layers
55
+ self.kernel_size = kernel_size
56
+ self.p_dropout = p_dropout
57
+ self.window_size = window_size
58
+ # if isflow:
59
+ # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
60
+ # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
61
+ # self.cond_layer = weight_norm(cond_layer, name='weight')
62
+ # self.gin_channels = 256
63
+ self.cond_layer_idx = self.n_layers
64
+ if "gin_channels" in kwargs:
65
+ self.gin_channels = kwargs["gin_channels"]
66
+ if self.gin_channels != 0:
67
+ self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
68
+ # vits2 says 3rd block, so idx is 2 by default
69
+ self.cond_layer_idx = (
70
+ kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
71
+ )
72
+ logging.debug(self.gin_channels, self.cond_layer_idx)
73
+ assert (
74
+ self.cond_layer_idx < self.n_layers
75
+ ), "cond_layer_idx should be less than n_layers"
76
+ self.drop = nn.Dropout(p_dropout)
77
+ self.attn_layers = nn.ModuleList()
78
+ self.norm_layers_1 = nn.ModuleList()
79
+ self.ffn_layers = nn.ModuleList()
80
+ self.norm_layers_2 = nn.ModuleList()
81
+ for i in range(self.n_layers):
82
+ self.attn_layers.append(
83
+ MultiHeadAttention(
84
+ hidden_channels,
85
+ hidden_channels,
86
+ n_heads,
87
+ p_dropout=p_dropout,
88
+ window_size=window_size,
89
+ )
90
+ )
91
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
92
+ self.ffn_layers.append(
93
+ FFN(
94
+ hidden_channels,
95
+ hidden_channels,
96
+ filter_channels,
97
+ kernel_size,
98
+ p_dropout=p_dropout,
99
+ )
100
+ )
101
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
102
+
103
+ def forward(self, x, x_mask, g=None):
104
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
105
+ x = x * x_mask
106
+ for i in range(self.n_layers):
107
+ if i == self.cond_layer_idx and g is not None:
108
+ g = self.spk_emb_linear(g.transpose(1, 2))
109
+ g = g.transpose(1, 2)
110
+ x = x + g
111
+ x = x * x_mask
112
+ y = self.attn_layers[i](x, x, attn_mask)
113
+ y = self.drop(y)
114
+ x = self.norm_layers_1[i](x + y)
115
+
116
+ y = self.ffn_layers[i](x, x_mask)
117
+ y = self.drop(y)
118
+ x = self.norm_layers_2[i](x + y)
119
+ x = x * x_mask
120
+ return x
121
+
122
+
123
+ class MultiHeadAttention(nn.Module):
124
+ def __init__(
125
+ self,
126
+ channels,
127
+ out_channels,
128
+ n_heads,
129
+ p_dropout=0.0,
130
+ window_size=None,
131
+ heads_share=True,
132
+ block_length=None,
133
+ proximal_bias=False,
134
+ proximal_init=False,
135
+ ):
136
+ super().__init__()
137
+ assert channels % n_heads == 0
138
+
139
+ self.channels = channels
140
+ self.out_channels = out_channels
141
+ self.n_heads = n_heads
142
+ self.p_dropout = p_dropout
143
+ self.window_size = window_size
144
+ self.heads_share = heads_share
145
+ self.block_length = block_length
146
+ self.proximal_bias = proximal_bias
147
+ self.proximal_init = proximal_init
148
+ self.attn = None
149
+
150
+ self.k_channels = channels // n_heads
151
+ self.conv_q = nn.Conv1d(channels, channels, 1)
152
+ self.conv_k = nn.Conv1d(channels, channels, 1)
153
+ self.conv_v = nn.Conv1d(channels, channels, 1)
154
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
155
+ self.drop = nn.Dropout(p_dropout)
156
+
157
+ if window_size is not None:
158
+ n_heads_rel = 1 if heads_share else n_heads
159
+ rel_stddev = self.k_channels**-0.5
160
+ self.emb_rel_k = nn.Parameter(
161
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
162
+ * rel_stddev
163
+ )
164
+ self.emb_rel_v = nn.Parameter(
165
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
166
+ * rel_stddev
167
+ )
168
+
169
+ nn.init.xavier_uniform_(self.conv_q.weight)
170
+ nn.init.xavier_uniform_(self.conv_k.weight)
171
+ nn.init.xavier_uniform_(self.conv_v.weight)
172
+ if proximal_init:
173
+ with torch.no_grad():
174
+ self.conv_k.weight.copy_(self.conv_q.weight)
175
+ self.conv_k.bias.copy_(self.conv_q.bias)
176
+
177
+ def forward(self, x, c, attn_mask=None):
178
+ q = self.conv_q(x)
179
+ k = self.conv_k(c)
180
+ v = self.conv_v(c)
181
+
182
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
183
+
184
+ x = self.conv_o(x)
185
+ return x
186
+
187
+ def attention(self, query, key, value, mask=None):
188
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
189
+ b, d, t_s, t_t = (*key.size(), query.size(2))
190
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
191
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
192
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
193
+
194
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
195
+ if self.window_size is not None:
196
+ assert (
197
+ t_s == t_t
198
+ ), "Relative attention is only available for self-attention."
199
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
200
+ rel_logits = self._matmul_with_relative_keys(
201
+ query / math.sqrt(self.k_channels), key_relative_embeddings
202
+ )
203
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
204
+ scores = scores + scores_local
205
+ if self.proximal_bias:
206
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
207
+ scores = scores + self._attention_bias_proximal(t_s).to(
208
+ device=scores.device, dtype=scores.dtype
209
+ )
210
+ if mask is not None:
211
+ scores = scores.masked_fill(mask == 0, -1e4)
212
+ if self.block_length is not None:
213
+ assert (
214
+ t_s == t_t
215
+ ), "Local attention is only available for self-attention."
216
+ block_mask = (
217
+ torch.ones_like(scores)
218
+ .triu(-self.block_length)
219
+ .tril(self.block_length)
220
+ )
221
+ scores = scores.masked_fill(block_mask == 0, -1e4)
222
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
223
+ p_attn = self.drop(p_attn)
224
+ output = torch.matmul(p_attn, value)
225
+ if self.window_size is not None:
226
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
227
+ value_relative_embeddings = self._get_relative_embeddings(
228
+ self.emb_rel_v, t_s
229
+ )
230
+ output = output + self._matmul_with_relative_values(
231
+ relative_weights, value_relative_embeddings
232
+ )
233
+ output = (
234
+ output.transpose(2, 3).contiguous().view(b, d, t_t)
235
+ ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
236
+ return output, p_attn
237
+
238
+ def _matmul_with_relative_values(self, x, y):
239
+ """
240
+ x: [b, h, l, m]
241
+ y: [h or 1, m, d]
242
+ ret: [b, h, l, d]
243
+ """
244
+ ret = torch.matmul(x, y.unsqueeze(0))
245
+ return ret
246
+
247
+ def _matmul_with_relative_keys(self, x, y):
248
+ """
249
+ x: [b, h, l, d]
250
+ y: [h or 1, m, d]
251
+ ret: [b, h, l, m]
252
+ """
253
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
254
+ return ret
255
+
256
+ def _get_relative_embeddings(self, relative_embeddings, length):
257
+ max_relative_position = 2 * self.window_size + 1
258
+ # Pad first before slice to avoid using cond ops.
259
+ pad_length = max(length - (self.window_size + 1), 0)
260
+ slice_start_position = max((self.window_size + 1) - length, 0)
261
+ slice_end_position = slice_start_position + 2 * length - 1
262
+ if pad_length > 0:
263
+ padded_relative_embeddings = F.pad(
264
+ relative_embeddings,
265
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
266
+ )
267
+ else:
268
+ padded_relative_embeddings = relative_embeddings
269
+ used_relative_embeddings = padded_relative_embeddings[
270
+ :, slice_start_position:slice_end_position
271
+ ]
272
+ return used_relative_embeddings
273
+
274
+ def _relative_position_to_absolute_position(self, x):
275
+ """
276
+ x: [b, h, l, 2*l-1]
277
+ ret: [b, h, l, l]
278
+ """
279
+ batch, heads, length, _ = x.size()
280
+ # Concat columns of pad to shift from relative to absolute indexing.
281
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
282
+
283
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
284
+ x_flat = x.view([batch, heads, length * 2 * length])
285
+ x_flat = F.pad(
286
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
287
+ )
288
+
289
+ # Reshape and slice out the padded elements.
290
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
291
+ :, :, :length, length - 1 :
292
+ ]
293
+ return x_final
294
+
295
+ def _absolute_position_to_relative_position(self, x):
296
+ """
297
+ x: [b, h, l, l]
298
+ ret: [b, h, l, 2*l-1]
299
+ """
300
+ batch, heads, length, _ = x.size()
301
+ # padd along column
302
+ x = F.pad(
303
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
304
+ )
305
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
306
+ # add 0's in the beginning that will skew the elements after reshape
307
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
308
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
309
+ return x_final
310
+
311
+ def _attention_bias_proximal(self, length):
312
+ """Bias for self-attention to encourage attention to close positions.
313
+ Args:
314
+ length: an integer scalar.
315
+ Returns:
316
+ a Tensor with shape [1, 1, length, length]
317
+ """
318
+ r = torch.arange(length, dtype=torch.float32)
319
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
320
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
321
+
322
+
323
+ class FFN(nn.Module):
324
+ def __init__(
325
+ self,
326
+ in_channels,
327
+ out_channels,
328
+ filter_channels,
329
+ kernel_size,
330
+ p_dropout=0.0,
331
+ activation=None,
332
+ causal=False,
333
+ ):
334
+ super().__init__()
335
+ self.in_channels = in_channels
336
+ self.out_channels = out_channels
337
+ self.filter_channels = filter_channels
338
+ self.kernel_size = kernel_size
339
+ self.p_dropout = p_dropout
340
+ self.activation = activation
341
+ self.causal = causal
342
+
343
+ if causal:
344
+ self.padding = self._causal_padding
345
+ else:
346
+ self.padding = self._same_padding
347
+
348
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
349
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
350
+ self.drop = nn.Dropout(p_dropout)
351
+
352
+ def forward(self, x, x_mask):
353
+ x = self.conv_1(self.padding(x * x_mask))
354
+ if self.activation == "gelu":
355
+ x = x * torch.sigmoid(1.702 * x)
356
+ else:
357
+ x = torch.relu(x)
358
+ x = self.drop(x)
359
+ x = self.conv_2(self.padding(x * x_mask))
360
+ return x * x_mask
361
+
362
+ def _causal_padding(self, x):
363
+ if self.kernel_size == 1:
364
+ return x
365
+ pad_l = self.kernel_size - 1
366
+ pad_r = 0
367
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
368
+ x = F.pad(x, commons.convert_pad_shape(padding))
369
+ return x
370
+
371
+ def _same_padding(self, x):
372
+ if self.kernel_size == 1:
373
+ return x
374
+ pad_l = (self.kernel_size - 1) // 2
375
+ pad_r = self.kernel_size // 2
376
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
377
+ x = F.pad(x, commons.convert_pad_shape(padding))
378
+ return x
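attentions_onnx.py mirrors the model's attention stack with the windowed relative-position attention kept ONNX-exportable. A minimal shape check, assuming the file is importable from the repo root alongside commons.py (illustrative, not part of the commit):

```python
# Minimal shape check (illustrative): run the ONNX-oriented Encoder on dummy input.
# Hyperparameters mirror the "model" block of Data/BangDream/config.json.
import torch
from attentions_onnx import Encoder

enc = Encoder(
    hidden_channels=192,
    filter_channels=768,
    n_heads=2,
    n_layers=6,
    kernel_size=3,
    p_dropout=0.1,
)
x = torch.randn(1, 192, 50)      # [batch, hidden_channels, frames]
x_mask = torch.ones(1, 1, 50)    # all frames valid
with torch.no_grad():
    y = enc(x, x_mask)           # no speaker conditioning (g=None)
print(y.shape)                   # torch.Size([1, 192, 50])
```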
bert/bert-large-japanese-v2/.gitattributes ADDED
@@ -0,0 +1,34 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/bert-large-japanese-v2/README.md ADDED
@@ -0,0 +1,53 @@
1
+ ---
2
+ license: apache-2.0
3
+ datasets:
4
+ - cc100
5
+ - wikipedia
6
+ language:
7
+ - ja
8
+ widget:
9
+ - text: 東北大学で[MASK]の研究をしています。
10
+ ---
11
+
12
+ # BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
13
+
14
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
15
+
16
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization.
17
+ Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective.
18
+
19
+ The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
20
+
21
+ ## Model architecture
22
+
23
+ The model architecture is the same as the original BERT large model; 24 layers, 1024 dimensions of hidden states, and 16 attention heads.
24
+
25
+ ## Training Data
26
+
27
+ The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
28
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
29
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
30
+
31
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
32
+
33
+ ## Tokenization
34
+
35
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
36
+ The vocabulary size is 32768.
37
+
38
+ We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
39
+
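Since the section above describes MeCab (Unidic) word segmentation followed by WordPiece, here is a small, hedged check of that pipeline through the `transformers` tokenizer configured in `tokenizer_config.json`. It assumes `fugashi` and `unidic-lite` are installed and that the files in this directory (notably `vocab.txt`) are present; the sentence is only an illustration.

```python
from transformers import AutoTokenizer

# tokenizer_config.json selects BertJapaneseTokenizer with
# word_tokenizer_type="mecab" (unidic_lite) + subword_tokenizer_type="wordpiece"
tokenizer = AutoTokenizer.from_pretrained("./bert/bert-large-japanese-v2")

print(tokenizer.vocab_size)  # 32768, per config.json
print(tokenizer.tokenize("東北大学で自然言語処理の研究をしています。"))
```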
40
+ ## Training
41
+
42
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
43
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
44
+
45
+ For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
46
+
47
+ ## Licenses
48
+
49
+ The pretrained models are distributed under the Apache License 2.0.
50
+
51
+ ## Acknowledgments
52
+
53
+ This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program.
bert/bert-large-japanese-v2/config.json ADDED
@@ -0,0 +1,19 @@
1
+ {
2
+ "architectures": [
3
+ "BertForPreTraining"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "hidden_act": "gelu",
7
+ "hidden_dropout_prob": 0.1,
8
+ "hidden_size": 1024,
9
+ "initializer_range": 0.02,
10
+ "intermediate_size": 4096,
11
+ "layer_norm_eps": 1e-12,
12
+ "max_position_embeddings": 512,
13
+ "model_type": "bert",
14
+ "num_attention_heads": 16,
15
+ "num_hidden_layers": 24,
16
+ "pad_token_id": 0,
17
+ "type_vocab_size": 2,
18
+ "vocab_size": 32768
19
+ }
bert/bert-large-japanese-v2/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
1
+ {
2
+ "tokenizer_class": "BertJapaneseTokenizer",
3
+ "model_max_length": 512,
4
+ "do_lower_case": false,
5
+ "word_tokenizer_type": "mecab",
6
+ "subword_tokenizer_type": "wordpiece",
7
+ "mecab_kwargs": {
8
+ "mecab_dic": "unidic_lite"
9
+ }
10
+ }
bert/bert-large-japanese-v2/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
bert/bert_models.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "deberta-v2-large-japanese": {
3
+ "repo_id": "ku-nlp/deberta-v2-large-japanese",
4
+ "files": ["pytorch_model.bin"]
5
+ },
6
+ "chinese-roberta-wwm-ext-large": {
7
+ "repo_id": "hfl/chinese-roberta-wwm-ext-large",
8
+ "files": ["pytorch_model.bin"]
9
+ },
10
+ "deberta-v3-large": {
11
+ "repo_id": "microsoft/deberta-v3-large",
12
+ "files": ["spm.model", "pytorch_model.bin"]
13
+ }
14
+ }
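`bert_models.json` maps each local directory under `bert/` to the Hugging Face repo and the large files that must be fetched into it. The sketch below shows how such a mapping could drive downloads with `huggingface_hub`; it is an illustration only, not the repo's actual `check_bert_models` (which lives in the `text` package and is not part of this diff), and it assumes a reasonably recent `huggingface_hub` with `local_dir` support.

```python
import json
from pathlib import Path

from huggingface_hub import hf_hub_download

with open("bert/bert_models.json", encoding="utf-8") as f:
    bert_models = json.load(f)

for local_name, spec in bert_models.items():
    target_dir = Path("bert") / local_name
    for filename in spec["files"]:
        if not (target_dir / filename).exists():
            # pull the missing weight / sentencepiece file into bert/<local_name>/
            hf_hub_download(repo_id=spec["repo_id"], filename=filename, local_dir=target_dir)
```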
bert/deberta-v2-large-japanese/.gitattributes ADDED
@@ -0,0 +1,34 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v2-large-japanese/README.md ADDED
@@ -0,0 +1,111 @@
1
+ ---
2
+ language: ja
3
+ license: cc-by-sa-4.0
4
+ library_name: transformers
5
+ tags:
6
+ - deberta
7
+ - deberta-v2
8
+ - fill-mask
9
+ datasets:
10
+ - wikipedia
11
+ - cc100
12
+ - oscar
13
+ metrics:
14
+ - accuracy
15
+ mask_token: "[MASK]"
16
+ widget:
17
+ - text: "京都 大学 で 自然 言語 処理 を [MASK] する 。"
18
+ ---
19
+
20
+ # Model Card for Japanese DeBERTa V2 large
21
+
22
+ ## Model description
23
+
24
+ This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the
25
+ Japanese portion of OSCAR.
26
+
27
+ ## How to use
28
+
29
+ You can use this model for masked language modeling as follows:
30
+
31
+ ```python
32
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
33
+
34
+ tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-large-japanese')
35
+ model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-large-japanese')
36
+
37
+ sentence = '京都 大学 で 自然 言語 処理 を [MASK] する 。' # input should be segmented into words by Juman++ in advance
38
+ encoding = tokenizer(sentence, return_tensors='pt')
39
+ ...
40
+ ```
41
+
42
+ You can also fine-tune this model on downstream tasks.
43
+
44
+ ## Tokenization
45
+
46
+ The input text should be segmented into words by [Juman++](https://github.com/ku-nlp/jumanpp) in
47
+ advance. [Juman++ 2.0.0-rc3](https://github.com/ku-nlp/jumanpp/releases/tag/v2.0.0-rc3) was used for pre-training. Each
48
+ word is tokenized into subwords by [sentencepiece](https://github.com/google/sentencepiece).
49
+
50
+ ## Training data
51
+
52
+ We used the following corpora for pre-training:
53
+
54
+ - Japanese Wikipedia (as of 20221020, 3.2GB, 27M sentences, 1.3M documents)
55
+ - Japanese portion of CC-100 (85GB, 619M sentences, 66M documents)
56
+ - Japanese portion of OSCAR (54GB, 326M sentences, 25M documents)
57
+
58
+ Note that we filtered out documents annotated with "header", "footer", or "noisy" tags in OSCAR.
59
+ Also note that Japanese Wikipedia was duplicated 10 times to make the total size of the corpus comparable to that of
60
+ CC-100 and OSCAR. As a result, the total size of the training data is 171GB.
61
+
62
+ ## Training procedure
63
+
64
+ We first segmented texts in the corpora into words using [Juman++](https://github.com/ku-nlp/jumanpp).
65
+ Then, we built a sentencepiece model with 32000 tokens including words ([JumanDIC](https://github.com/ku-nlp/JumanDIC))
66
+ and subwords induced by the unigram language model of [sentencepiece](https://github.com/google/sentencepiece).
67
+
68
+ We tokenized the segmented corpora into subwords using the sentencepiece model and trained the Japanese DeBERTa model
69
+ using [transformers](https://github.com/huggingface/transformers) library.
70
+ The training took 36 days using 8 NVIDIA A100-SXM4-40GB GPUs.
71
+
72
+ The following hyperparameters were used during pre-training:
73
+
74
+ - learning_rate: 1e-4
75
+ - per_device_train_batch_size: 18
76
+ - distributed_type: multi-GPU
77
+ - num_devices: 8
78
+ - gradient_accumulation_steps: 16
79
+ - total_train_batch_size: 2,304
80
+ - max_seq_length: 512
81
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
82
+ - lr_scheduler_type: linear schedule with warmup
83
+ - training_steps: 300,000
84
+ - warmup_steps: 10,000
85
+
86
+ The accuracy of the trained model on the masked language modeling task was 0.799.
87
+ The evaluation set consists of 5,000 randomly sampled documents from each of the training corpora.
88
+
89
+ ## Fine-tuning on NLU tasks
90
+
91
+ We fine-tuned the following models and evaluated them on the dev set of JGLUE.
92
+ We tuned learning rate and training epochs for each model and task
93
+ following [the JGLUE paper](https://www.jstage.jst.go.jp/article/jnlp/30/1/30_63/_pdf/-char/ja).
94
+
95
+ | Model | MARC-ja/acc | JSTS/pearson | JSTS/spearman | JNLI/acc | JSQuAD/EM | JSQuAD/F1 | JComQA/acc |
96
+ |-------------------------------|-------------|--------------|---------------|----------|-----------|-----------|------------|
97
+ | Waseda RoBERTa base | 0.965 | 0.913 | 0.876 | 0.905 | 0.853 | 0.916 | 0.853 |
98
+ | Waseda RoBERTa large (seq512) | 0.969 | 0.925 | 0.890 | 0.928 | 0.910 | 0.955 | 0.900 |
99
+ | LUKE Japanese base* | 0.965 | 0.916 | 0.877 | 0.912 | - | - | 0.842 |
100
+ | LUKE Japanese large* | 0.965 | 0.932 | 0.902 | 0.927 | - | - | 0.893 |
101
+ | DeBERTaV2 base | 0.970 | 0.922 | 0.886 | 0.922 | 0.899 | 0.951 | 0.873 |
102
+ | DeBERTaV2 large | 0.968 | 0.925 | 0.892 | 0.924 | 0.912 | 0.959 | 0.890 |
103
+
104
+ *The scores of LUKE are from [the official repository](https://github.com/studio-ousia/luke).
105
+
106
+ ## Acknowledgments
107
+
108
+ This work was supported by Joint Usage/Research Center for Interdisciplinary Large-scale Information Infrastructures (
109
+ JHPCN) through General Collaboration Project no. jh221004, "Developing a Platform for Constructing and Sharing of
110
+ Large-Scale Japanese Language Models".
111
+ For training models, we used the mdx: a platform for the data-driven future.
bert/deberta-v2-large-japanese/config.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "_name_or_path": "configs/deberta_v2_large.json",
3
+ "architectures": [
4
+ "DebertaV2ForMaskedLM"
5
+ ],
6
+ "attention_head_size": 64,
7
+ "attention_probs_dropout_prob": 0.1,
8
+ "conv_act": "gelu",
9
+ "conv_kernel_size": 3,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 1024,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 4096,
15
+ "layer_norm_eps": 1e-07,
16
+ "max_position_embeddings": 512,
17
+ "max_relative_positions": -1,
18
+ "model_type": "deberta-v2",
19
+ "norm_rel_ebd": "layer_norm",
20
+ "num_attention_heads": 16,
21
+ "num_hidden_layers": 24,
22
+ "pad_token_id": 0,
23
+ "pooler_dropout": 0,
24
+ "pooler_hidden_act": "gelu",
25
+ "pooler_hidden_size": 1024,
26
+ "pos_att_type": [
27
+ "p2c",
28
+ "c2p"
29
+ ],
30
+ "position_biased_input": false,
31
+ "position_buckets": 256,
32
+ "relative_attention": true,
33
+ "share_att_key": true,
34
+ "torch_dtype": "float32",
35
+ "transformers_version": "4.23.1",
36
+ "type_vocab_size": 0,
37
+ "vocab_size": 32000
38
+ }
bert/deberta-v2-large-japanese/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6c15feac0dea77ab8835c70e1befa4cf4c2137862c6fb2443b1553f70840047
3
+ size 1490693213
bert/deberta-v2-large-japanese/special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "eos_token": "[SEP]",
5
+ "mask_token": "[MASK]",
6
+ "pad_token": "[PAD]",
7
+ "sep_token": "[SEP]",
8
+ "unk_token": "[UNK]"
9
+ }
bert/deberta-v2-large-japanese/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert/deberta-v2-large-japanese/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": false,
5
+ "eos_token": "[SEP]",
6
+ "keep_accents": true,
7
+ "mask_token": "[MASK]",
8
+ "pad_token": "[PAD]",
9
+ "sep_token": "[SEP]",
10
+ "sp_model_kwargs": {},
11
+ "special_tokens_map_file": null,
12
+ "split_by_punct": false,
13
+ "tokenizer_class": "DebertaV2Tokenizer",
14
+ "unk_token": "[UNK]"
15
+ }
bert/deberta-v3-large/.gitattributes ADDED
@@ -0,0 +1,27 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
bert/deberta-v3-large/README.md ADDED
@@ -0,0 +1,93 @@
1
+ ---
2
+ language: en
3
+ tags:
4
+ - deberta
5
+ - deberta-v3
6
+ - fill-mask
7
+ thumbnail: https://huggingface.co/front/thumbnails/microsoft.png
8
+ license: mit
9
+ ---
10
+
11
+ ## DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing
12
+
13
+ [DeBERTa](https://arxiv.org/abs/2006.03654) improves the BERT and RoBERTa models using disentangled attention and an enhanced mask decoder. With those two improvements, DeBERTa outperforms RoBERTa on a majority of NLU tasks with 80GB of training data.
14
+
15
+ In [DeBERTa V3](https://arxiv.org/abs/2111.09543), we further improved the efficiency of DeBERTa using ELECTRA-Style pre-training with Gradient-Disentangled Embedding Sharing. Compared to DeBERTa, our V3 version significantly improves model performance on downstream tasks. You can find more technical details about the new model in our [paper](https://arxiv.org/abs/2111.09543).
16
+
17
+ Please check the [official repository](https://github.com/microsoft/DeBERTa) for more implementation details and updates.
18
+
19
+ The DeBERTa V3 large model comes with 24 layers and a hidden size of 1024. It has 304M backbone parameters and a vocabulary of 128K tokens, which adds 131M parameters in the embedding layer. This model was trained on the same 160GB of data as DeBERTa V2.
20
+
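In this repository the checkpoint is used as a feature extractor for English text (its 1024-dim hidden states feed the `en_bert` inputs handled in `data_utils.py` below). A minimal, hedged sketch, assuming the files listed in `bert_models.json` (`spm.model`, `pytorch_model.bin`) are present in `bert/deberta-v3-large/` and that `sentencepiece` is installed; how the repo's text frontend pools these hidden states is not shown in this diff.

```python
import torch
from transformers import AutoModel, DebertaV2Tokenizer

path = "./bert/deberta-v3-large"
tokenizer = DebertaV2Tokenizer.from_pretrained(path)
model = AutoModel.from_pretrained(path)

inputs = tokenizer("How are you feeling today?", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 1024)
print(hidden.shape)
```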
21
+
22
+ #### Fine-tuning on NLU tasks
23
+
24
+ We present the dev results on SQuAD 2.0 and MNLI tasks.
25
+
26
+ | Model |Vocabulary(K)|Backbone #Params(M)| SQuAD 2.0(F1/EM) | MNLI-m/mm(ACC)|
27
+ |-------------------|----------|-------------------|-----------|----------|
28
+ | RoBERTa-large |50 |304 | 89.4/86.5 | 90.2 |
29
+ | XLNet-large |32 |- | 90.6/87.9 | 90.8 |
30
+ | DeBERTa-large |50 |- | 90.7/88.0 | 91.3 |
31
+ | **DeBERTa-v3-large**|128|304 | **91.5/89.0**| **91.8/91.9**|
32
+
33
+
34
+ #### Fine-tuning with HF transformers
35
+
36
+ ```bash
37
+ #!/bin/bash
38
+
39
+ cd transformers/examples/pytorch/text-classification/
40
+
41
+ pip install datasets
42
+ export TASK_NAME=mnli
43
+
44
+ output_dir="ds_results"
45
+
46
+ num_gpus=8
47
+
48
+ batch_size=8
49
+
50
+ python -m torch.distributed.launch --nproc_per_node=${num_gpus} \
51
+ run_glue.py \
52
+ --model_name_or_path microsoft/deberta-v3-large \
53
+ --task_name $TASK_NAME \
54
+ --do_train \
55
+ --do_eval \
56
+ --evaluation_strategy steps \
57
+ --max_seq_length 256 \
58
+ --warmup_steps 50 \
59
+ --per_device_train_batch_size ${batch_size} \
60
+ --learning_rate 6e-6 \
61
+ --num_train_epochs 2 \
62
+ --output_dir $output_dir \
63
+ --overwrite_output_dir \
64
+ --logging_steps 1000 \
65
+ --logging_dir $output_dir
66
+
67
+ ```
68
+
69
+ ### Citation
70
+
71
+ If you find DeBERTa useful for your work, please cite the following papers:
72
+
73
+ ``` latex
74
+ @misc{he2021debertav3,
75
+ title={DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing},
76
+ author={Pengcheng He and Jianfeng Gao and Weizhu Chen},
77
+ year={2021},
78
+ eprint={2111.09543},
79
+ archivePrefix={arXiv},
80
+ primaryClass={cs.CL}
81
+ }
82
+ ```
83
+
84
+ ``` latex
85
+ @inproceedings{
86
+ he2021deberta,
87
+ title={DEBERTA: DECODING-ENHANCED BERT WITH DISENTANGLED ATTENTION},
88
+ author={Pengcheng He and Xiaodong Liu and Jianfeng Gao and Weizhu Chen},
89
+ booktitle={International Conference on Learning Representations},
90
+ year={2021},
91
+ url={https://openreview.net/forum?id=XPZIaotutsD}
92
+ }
93
+ ```
bert/deberta-v3-large/config.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "model_type": "deberta-v2",
3
+ "attention_probs_dropout_prob": 0.1,
4
+ "hidden_act": "gelu",
5
+ "hidden_dropout_prob": 0.1,
6
+ "hidden_size": 1024,
7
+ "initializer_range": 0.02,
8
+ "intermediate_size": 4096,
9
+ "max_position_embeddings": 512,
10
+ "relative_attention": true,
11
+ "position_buckets": 256,
12
+ "norm_rel_ebd": "layer_norm",
13
+ "share_att_key": true,
14
+ "pos_att_type": "p2c|c2p",
15
+ "layer_norm_eps": 1e-7,
16
+ "max_relative_positions": -1,
17
+ "position_biased_input": false,
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 24,
20
+ "type_vocab_size": 0,
21
+ "vocab_size": 128100
22
+ }
bert/deberta-v3-large/generator_config.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "model_type": "deberta-v2",
3
+ "attention_probs_dropout_prob": 0.1,
4
+ "hidden_act": "gelu",
5
+ "hidden_dropout_prob": 0.1,
6
+ "hidden_size": 1024,
7
+ "initializer_range": 0.02,
8
+ "intermediate_size": 4096,
9
+ "max_position_embeddings": 512,
10
+ "relative_attention": true,
11
+ "position_buckets": 256,
12
+ "norm_rel_ebd": "layer_norm",
13
+ "share_att_key": true,
14
+ "pos_att_type": "p2c|c2p",
15
+ "layer_norm_eps": 1e-7,
16
+ "max_relative_positions": -1,
17
+ "position_biased_input": false,
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 12,
20
+ "type_vocab_size": 0,
21
+ "vocab_size": 128100
22
+ }
bert/deberta-v3-large/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd5b5d93e2db101aaf281df0ea1216c07ad73620ff59c5b42dccac4bf2eef5b5
3
+ size 873673253
bert/deberta-v3-large/spm.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
+ size 2464616
bert/deberta-v3-large/tokenizer_config.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "do_lower_case": false,
3
+ "vocab_type": "spm"
4
+ }
bert_gen.py CHANGED
@@ -3,17 +3,22 @@ from multiprocessing import Pool
3
  import commons
4
  import utils
5
  from tqdm import tqdm
6
- from text import cleaned_text_to_sequence, get_bert
7
  import argparse
8
  import torch.multiprocessing as mp
 
9
 
10
 
11
  def process_line(line):
12
- rank = mp.current_process()._identity
13
- rank = rank[0] if len(rank) > 0 else 0
14
- if torch.cuda.is_available():
15
- gpu_id = rank % torch.cuda.device_count()
16
- device = torch.device(f"cuda:{gpu_id}")
 
 
 
 
17
  wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
18
  phone = phones.split(" ")
19
  tone = [int(i) for i in tone.split(" ")]
@@ -28,7 +33,7 @@ def process_line(line):
28
  word2ph[i] = word2ph[i] * 2
29
  word2ph[0] += 1
30
 
31
- bert_path = wav_path.replace(".wav", ".bert.pt")
32
 
33
  try:
34
  bert = torch.load(bert_path)
@@ -39,21 +44,30 @@ def process_line(line):
39
  torch.save(bert, bert_path)
40
 
41
 
 
 
42
  if __name__ == "__main__":
43
  parser = argparse.ArgumentParser()
44
- parser.add_argument("-c", "--config", type=str, default="configs/config.json")
45
- parser.add_argument("--num_processes", type=int, default=2)
46
- args = parser.parse_args()
 
 
 
 
47
  config_path = args.config
48
  hps = utils.get_hparams_from_file(config_path)
 
49
  lines = []
50
  with open(hps.data.training_files, encoding="utf-8") as f:
51
  lines.extend(f.readlines())
52
 
53
  with open(hps.data.validation_files, encoding="utf-8") as f:
54
  lines.extend(f.readlines())
 
 
 
 
 
55
 
56
- num_processes = args.num_processes
57
- with Pool(processes=num_processes) as pool:
58
- for _ in tqdm(pool.imap_unordered(process_line, lines), total=len(lines)):
59
- pass
 
3
  import commons
4
  import utils
5
  from tqdm import tqdm
6
+ from text import check_bert_models, cleaned_text_to_sequence, get_bert
7
  import argparse
8
  import torch.multiprocessing as mp
9
+ from config import config
10
 
11
 
12
  def process_line(line):
13
+ device = config.bert_gen_config.device
14
+ if config.bert_gen_config.use_multi_device:
15
+ rank = mp.current_process()._identity
16
+ rank = rank[0] if len(rank) > 0 else 0
17
+ if torch.cuda.is_available():
18
+ gpu_id = rank % torch.cuda.device_count()
19
+ device = torch.device(f"cuda:{gpu_id}")
20
+ else:
21
+ device = torch.device("cpu")
22
  wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
23
  phone = phones.split(" ")
24
  tone = [int(i) for i in tone.split(" ")]
 
33
  word2ph[i] = word2ph[i] * 2
34
  word2ph[0] += 1
35
 
36
+ bert_path = wav_path.replace(".WAV", ".wav").replace(".wav", ".bert.pt")
37
 
38
  try:
39
  bert = torch.load(bert_path)
 
44
  torch.save(bert, bert_path)
45
 
46
 
47
+ preprocess_text_config = config.preprocess_text_config
48
+
49
  if __name__ == "__main__":
50
  parser = argparse.ArgumentParser()
51
+ parser.add_argument(
52
+ "-c", "--config", type=str, default=config.bert_gen_config.config_path
53
+ )
54
+ parser.add_argument(
55
+ "--num_processes", type=int, default=config.bert_gen_config.num_processes
56
+ )
57
+ args, _ = parser.parse_known_args()
58
  config_path = args.config
59
  hps = utils.get_hparams_from_file(config_path)
60
+ check_bert_models()
61
  lines = []
62
  with open(hps.data.training_files, encoding="utf-8") as f:
63
  lines.extend(f.readlines())
64
 
65
  with open(hps.data.validation_files, encoding="utf-8") as f:
66
  lines.extend(f.readlines())
67
+ if len(lines) != 0:
68
+ num_processes = args.num_processes
69
+ with Pool(processes=num_processes) as pool:
70
+ for _ in tqdm(pool.imap_unordered(process_line, lines), total=len(lines)):
71
+ pass
72
 
73
+ print(f"bert生成完毕!, 共有{len(lines)}个bert.pt生成!")
 
 
 
commons.py CHANGED
@@ -50,7 +50,13 @@ def slice_segments(x, ids_str, segment_size=4):
50
  for i in range(x.size(0)):
51
  idx_str = ids_str[i]
52
  idx_end = idx_str + segment_size
53
- ret[i] = x[i, :, idx_str:idx_end]
 
 
 
 
 
 
54
  return ret
55
 
56
 
 
50
  for i in range(x.size(0)):
51
  idx_str = ids_str[i]
52
  idx_end = idx_str + segment_size
53
+ if idx_str < 0:
54
+ i1 = x.size(2) + idx_str
55
+ r1 = x[i, :, i1:]
56
+ r2 = x[i, :, :idx_end]
57
+ ret[i] = torch.cat([r1, r2], dim=1)
58
+ else:
59
+ ret[i] = x[i, :, idx_str:idx_end]
60
  return ret
61
 
62
 
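The new branch in `slice_segments` treats a negative start index as a wrap-around slice, concatenating the tail of the sequence with its head instead of returning a truncated segment. A small illustration of that behaviour on a toy tensor:

```python
import torch

x = torch.arange(10).view(1, 1, 10)  # (batch, channels, time)
segment_size = 4
idx_str = -2                          # start two frames before the end
idx_end = idx_str + segment_size

if idx_str < 0:
    i1 = x.size(2) + idx_str          # = 8
    segment = torch.cat([x[0, :, i1:], x[0, :, :idx_end]], dim=1)
else:
    segment = x[0, :, idx_str:idx_end]

print(segment)  # tensor([[8, 9, 0, 1]])
```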
config.py ADDED
@@ -0,0 +1,237 @@
1
+ """
2
+ @Desc: 全局配置文件读取
3
+ """
4
+ import argparse
5
+ import yaml
6
+ from typing import Dict, List
7
+ import os
8
+ import shutil
9
+ import sys
10
+
11
+
12
+ class Resample_config:
13
+ """重采样配置"""
14
+
15
+ def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):
16
+ self.sampling_rate: int = sampling_rate # 目标采样率
17
+ self.in_dir: str = in_dir # 待处理音频目录路径
18
+ self.out_dir: str = out_dir # 重采样输出路径
19
+
20
+ @classmethod
21
+ def from_dict(cls, dataset_path: str, data: Dict[str, any]):
22
+ """从字典中生成实例"""
23
+
24
+ # 不检查路径是否有效,此逻辑在resample.py中处理
25
+ data["in_dir"] = os.path.join(dataset_path, data["in_dir"])
26
+ data["out_dir"] = os.path.join(dataset_path, data["out_dir"])
27
+
28
+ return cls(**data)
29
+
30
+
31
+ class Preprocess_text_config:
32
+ """数据预处理配置"""
33
+
34
+ def __init__(
35
+ self,
36
+ transcription_path: str,
37
+ cleaned_path: str,
38
+ train_path: str,
39
+ val_path: str,
40
+ config_path: str,
41
+ val_per_spk: int = 5,
42
+ max_val_total: int = 10000,
43
+ clean: bool = True,
44
+ ):
45
+ self.transcription_path: str = transcription_path # 原始文本文件路径,文本格式应为{wav_path}|{speaker_name}|{language}|{text}。
46
+ self.cleaned_path: str = cleaned_path # 数据清洗后文本路径,可以不填。不填则将在原始文本目录生成
47
+ self.train_path: str = train_path # 训练集路径,可以不填。不填则将在原始文本目录生成
48
+ self.val_path: str = val_path # 验证集路径,可以不填。不填则将在原始文本目录生成
49
+ self.config_path: str = config_path # 配置文件路径
50
+ self.val_per_spk: int = val_per_spk # 每个speaker的验证集条数
51
+ self.max_val_total: int = max_val_total # 验证集最大条数,多于的会被截断并放到训练集中
52
+ self.clean: bool = clean # 是否进行数据清洗
53
+
54
+ @classmethod
55
+ def from_dict(cls, dataset_path: str, data: Dict[str, any]):
56
+ """从字典中生成实例"""
57
+
58
+ data["transcription_path"] = os.path.join(
59
+ dataset_path, data["transcription_path"]
60
+ )
61
+ if data["cleaned_path"] == "" or data["cleaned_path"] is None:
62
+ data["cleaned_path"] = None
63
+ else:
64
+ data["cleaned_path"] = os.path.join(dataset_path, data["cleaned_path"])
65
+ data["train_path"] = os.path.join(dataset_path, data["train_path"])
66
+ data["val_path"] = os.path.join(dataset_path, data["val_path"])
67
+ data["config_path"] = os.path.join(dataset_path, data["config_path"])
68
+
69
+ return cls(**data)
70
+
71
+
72
+ class Bert_gen_config:
73
+ """bert_gen 配置"""
74
+
75
+ def __init__(
76
+ self,
77
+ config_path: str,
78
+ num_processes: int = 2,
79
+ device: str = "cuda",
80
+ use_multi_device: bool = False,
81
+ ):
82
+ self.config_path = config_path
83
+ self.num_processes = num_processes
84
+ self.device = device
85
+ self.use_multi_device = use_multi_device
86
+
87
+ @classmethod
88
+ def from_dict(cls, dataset_path: str, data: Dict[str, any]):
89
+ data["config_path"] = os.path.join(dataset_path, data["config_path"])
90
+
91
+ return cls(**data)
92
+
93
+
94
+ class Emo_gen_config:
95
+ """emo_gen 配置"""
96
+
97
+ def __init__(
98
+ self,
99
+ config_path: str,
100
+ num_processes: int = 2,
101
+ device: str = "cuda",
102
+ ):
103
+ self.config_path = config_path
104
+ self.num_processes = num_processes
105
+ self.device = device
106
+
107
+ @classmethod
108
+ def from_dict(cls, dataset_path: str, data: Dict[str, any]):
109
+ data["config_path"] = os.path.join(dataset_path, data["config_path"])
110
+
111
+ return cls(**data)
112
+
113
+
114
+ class Train_ms_config:
115
+ """训练配置"""
116
+
117
+ def __init__(
118
+ self,
119
+ config_path: str,
120
+ env: Dict[str, any],
121
+ base: Dict[str, any],
122
+ model: str,
123
+ ):
124
+ self.env = env # 需要加载的环境变量
125
+ self.base = base # 底模配置
126
+ self.model = model # 训练模型存储目录,该路径为相对于dataset_path的路径,而非项目根目录
127
+ self.config_path = config_path # 配置文件路径
128
+
129
+ @classmethod
130
+ def from_dict(cls, dataset_path: str, data: Dict[str, any]):
131
+ # data["model"] = os.path.join(dataset_path, data["model"])
132
+ data["config_path"] = os.path.join(dataset_path, data["config_path"])
133
+
134
+ return cls(**data)
135
+
136
+
137
+ class Webui_config:
138
+ """webui 配置"""
139
+
140
+ def __init__(
141
+ self,
142
+ device: str,
143
+ model: str,
144
+ config_path: str,
145
+ language_identification_library: str,
146
+ port: int = 7860,
147
+ share: bool = False,
148
+ debug: bool = False,
149
+ ):
150
+ self.device: str = device  # 推理设备
151
+ self.model: str = model  # 模型路径
152
+ self.config_path: str = config_path  # 配置文件路径
153
+ self.port: int = port  # 端口号
154
+ self.share: bool = share  # 是否公开部署,对外网开放
155
+ self.debug: bool = debug  # 是否开启debug模式
156
+ self.language_identification_library: str = (
157
+ language_identification_library # 语种识别库
158
+ )
159
+
160
+ @classmethod
161
+ def from_dict(cls, dataset_path: str, data: Dict[str, any]):
162
+ data["config_path"] = os.path.join(dataset_path, data["config_path"])
163
+ data["model"] = os.path.join(dataset_path, data["model"])
164
+ return cls(**data)
165
+
166
+
167
+ class Server_config:
168
+ def __init__(
169
+ self, models: List[Dict[str, any]], port: int = 5000, device: str = "cuda"
170
+ ):
171
+ self.models: List[Dict[str, any]] = models # 需要加载的所有模型的配置
172
+ self.port: int = port # 端口号
173
+ self.device: str = device # 模型默认使用设备
174
+
175
+ @classmethod
176
+ def from_dict(cls, data: Dict[str, any]):
177
+ return cls(**data)
178
+
179
+
180
+ class Translate_config:
181
+ """翻译api配置"""
182
+
183
+ def __init__(self, app_key: str, secret_key: str):
184
+ self.app_key = app_key
185
+ self.secret_key = secret_key
186
+
187
+ @classmethod
188
+ def from_dict(cls, data: Dict[str, any]):
189
+ return cls(**data)
190
+
191
+
192
+ class Config:
193
+ def __init__(self, config_path: str):
194
+ if not os.path.isfile(config_path) and os.path.isfile("default_config.yml"):
195
+ shutil.copy(src="default_config.yml", dst=config_path)
196
+ print(
197
+ f"已根据默认配置文件default_config.yml生成配置文件{config_path}。请按该配置文件的说明进行配置后重新运行。"
198
+ )
199
+ print("如无特殊需求,请勿修改default_config.yml或备份该文件。")
200
+ sys.exit(0)
201
+ with open(file=config_path, mode="r", encoding="utf-8") as file:
202
+ yaml_config: Dict[str, any] = yaml.safe_load(file.read())
203
+ dataset_path: str = yaml_config["dataset_path"]
204
+ openi_token: str = yaml_config["openi_token"]
205
+ self.dataset_path: str = dataset_path
206
+ self.mirror: str = yaml_config["mirror"]
207
+ self.openi_token: str = openi_token
208
+ self.resample_config: Resample_config = Resample_config.from_dict(
209
+ dataset_path, yaml_config["resample"]
210
+ )
211
+ self.preprocess_text_config: Preprocess_text_config = (
212
+ Preprocess_text_config.from_dict(
213
+ dataset_path, yaml_config["preprocess_text"]
214
+ )
215
+ )
216
+ self.bert_gen_config: Bert_gen_config = Bert_gen_config.from_dict(
217
+ dataset_path, yaml_config["bert_gen"]
218
+ )
219
+ self.train_ms_config: Train_ms_config = Train_ms_config.from_dict(
220
+ dataset_path, yaml_config["train_ms"]
221
+ )
222
+ self.webui_config: Webui_config = Webui_config.from_dict(
223
+ dataset_path, yaml_config["webui"]
224
+ )
225
+ self.server_config: Server_config = Server_config.from_dict(
226
+ yaml_config["server"]
227
+ )
228
+ self.translate_config: Translate_config = Translate_config.from_dict(
229
+ yaml_config["translate"]
230
+ )
231
+
232
+
233
+ parser = argparse.ArgumentParser()
234
+ # 为避免与以前的config.json起冲突,将其更名如下
235
+ parser.add_argument("-y", "--yml_config", type=str, default="config.yml")
236
+ args, _ = parser.parse_known_args()
237
+ config = Config(args.yml_config)
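Because `config.py` builds the module-level `config` object at import time, the other scripts in this commit (e.g. `bert_gen.py` above and `emo_gen.py` below) simply import it. A hedged usage sketch, assuming a filled-in `config.yml` sits in the project root:

```python
from config import config

# typed sub-configs populated from config.yml
print(config.dataset_path)
print(config.bert_gen_config.device, config.bert_gen_config.num_processes)
print(config.webui_config.model, config.webui_config.port)

# each entry under "server.models" stays a plain dict, as declared in the YAML
for model_cfg in config.server_config.models:
    print(model_cfg.get("model"), model_cfg.get("language"))
```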
config.yml ADDED
@@ -0,0 +1,160 @@
1
+ # 全局配置
2
+ # 对于希望在同一时间使用多个配置文件的情况,例如两个GPU同时跑两个训练集:通过环境变量指定配置文件,不指定则默认为./config.yml
3
+
4
+ # 拟提供通用路径配置,统一存放数据,避免数据放得很乱
5
+ # 每个数据集与其对应的模型存放至统一路径下,后续所有的路径配置均为相对于datasetPath的路径
6
+ # 不填或者填空则路径为相对于项目根目录的路径
7
+ dataset_path: ""
8
+
9
+ # 模型镜像源,默认huggingface,使用openi镜像源需指定openi_token
10
+ mirror: ""
11
+ openi_token: "" # openi token
12
+
13
+ # resample 音频重采样配置
14
+ # 注意, “:” 后需要加空格
15
+ resample:
16
+ # 目标重采样率
17
+ sampling_rate: 44100
18
+ # 音频文件输入路径,重采样会将该路径下所有.wav音频文件重采样
19
+ # 请填入相对于datasetPath的相对路径
20
+ in_dir: "" # 相对于根目录的路径为 /datasetPath/in_dir
21
+ # 音频文件重采样后输出路径
22
+ out_dir: ""
23
+
24
+
25
+ # preprocess_text 数据集预处理相关配置
26
+ # 注意, “:” 后需要加空格
27
+ preprocess_text:
28
+ # 原始文本文件路径,文本格式应为{wav_path}|{speaker_name}|{language}|{text}。
29
+ transcription_path: "filelists/bushroid.list"
30
+ # 数据清洗后文本路径,可以不填。不填则将在原始文本目录生成
31
+ cleaned_path: ""
32
+ # 训练集路径
33
+ train_path: "filelists/train.list"
34
+ # 验证集路径
35
+ val_path: "filelists/val.list"
36
+ # 配置文件路径
37
+ config_path: "config.json"
38
+ # 每个speaker的验证集条数
39
+ val_per_spk: 4
40
+ # 验证集最大条数,多于的会被截断并放到训练集中
41
+ max_val_total: 8
42
+ # 是否进行数据清洗
43
+ clean: true
44
+
45
+
46
+ # bert_gen 相关配置
47
+ # 注意, “:” 后需要加空格
48
+ bert_gen:
49
+ # 训练数据集配置文件路径
50
+ config_path: "config.json"
51
+ # 并行数
52
+ num_processes: 2
53
+ # 使用设备:可选项 "cuda" 显卡推理,"cpu" cpu推理
54
+ # 该选项同时决定了get_bert_feature的默认设备
55
+ device: "cuda"
56
+ # 使用多卡推理
57
+ use_multi_device: false
58
+
59
+
60
+ # train 训练配置
61
+ # 注意, “:” 后需要加空格
62
+ train_ms:
63
+ # 需要加载的环境变量,多显卡训练时RANK请手动在环境变量填写
64
+ # 环境变量对应名称环境变量不存在时加载,也就是说手动添加的环境变量优先级更高,会覆盖本配置文件
65
+ env:
66
+ MASTER_ADDR: "localhost"
67
+ MASTER_PORT: 10086
68
+ WORLD_SIZE: 1
69
+ RANK: 0
70
+ # 可以填写任意名的环境变量
71
+ # THE_ENV_VAR_YOU_NEED_TO_USE: "1234567"
72
+ # 底模设置
73
+ base:
74
+ use_base_model: True
75
+ repo_id: "Stardust_minus/Bert-VITS2"
76
+ model_image: "Bert-VITS2中日英底模-fix" # openi网页的模型名
77
+ # 训练模型存储目录:与旧版本的区别,原先数据集是存放在logs/model_name下的,现在改为统一存放在Data/你的数据集/models下
78
+ model: "models"
79
+ # 配置文件路径
80
+ config_path: "configs/config.json"
81
+
82
+
83
+ # webui webui配置
84
+ # 注意, “:” 后需要加空格
85
+ webui:
86
+ # 推理设备
87
+ device: "cuda"
88
+ # 模型路径
89
+ model: "genshin/models/G_8000.pth"
90
+ # 配置文件路径
91
+ config_path: "configs/config.json"
92
+ # 端口号
93
+ port: 7860
94
+ # 是否公开部署,对外网开放
95
+ share: false
96
+ # 是否开启debug模式
97
+ debug: false
98
+ # 语种识别库,可选langid, fastlid
99
+ language_identification_library: "langid"
100
+
101
+
102
+ # server api配置
103
+ # 注意, “:” 后需要加空格
104
+ # 注意,本配置下的所有配置均为相对于根目录的路径
105
+ server:
106
+ # 端口号
107
+ port: 5000
108
+ # 模型默认使用设备:但是当前并没有实现这个配置。
109
+ device: "cuda"
110
+ # 需要加载的所有模型的配置
111
+ # 注意,所有模型都必须正确配置model与config的路径,空路径会导致加载错误。
112
+ models:
113
+ - # 模型的路径
114
+ model: ""
115
+ # 模型config.json的路径
116
+ config: ""
117
+ # 模型使用设备,若填写则会覆盖默认配置
118
+ device: "cuda"
119
+ # 模型默认使用的语言
120
+ language: "ZH"
121
+ # 模型人物默认参数
122
+ # 不必填写所有人物,不填的使用默认值
123
+ # 暂时不用填写,当前尚未实现按人区分配置
124
+ speakers:
125
+ - speaker: "科比"
126
+ sdp_ratio: 0.2
127
+ noise_scale: 0.6
128
+ noise_scale_w: 0.8
129
+ length_scale: 1
130
+ - speaker: "五条悟"
131
+ sdp_ratio: 0.3
132
+ noise_scale: 0.7
133
+ noise_scale_w: 0.8
134
+ length_scale: 0.5
135
+ - speaker: "安倍晋三"
136
+ sdp_ratio: 0.2
137
+ noise_scale: 0.6
138
+ noise_scale_w: 0.8
139
+ length_scale: 1.2
140
+ - # 模型的路径
141
+ model: ""
142
+ # 模型config.json的路径
143
+ config: ""
144
+ # 模型使用设备,若填写则会覆盖默认配置
145
+ device: "cpu"
146
+ # 模型默认使用的语言
147
+ language: "JP"
148
+ # 模型人物默认参数
149
+ # 不必填写所有人物,不填的使用默认值
150
+ speakers: [ ] # 也可以不填
151
+
152
+
153
+ # 百度翻译开放平台 api配置
154
+ # api接入文档 https://api.fanyi.baidu.com/doc/21
155
+ # 请不要在github等网站公开分享你的app id 与 key
156
+ translate:
157
+ # 你的APPID
158
+ "app_key": ""
159
+ # 你的密钥
160
+ "secret_key": ""
configs/config.json CHANGED
@@ -2,9 +2,9 @@
2
  "train": {
3
  "log_interval": 200,
4
  "eval_interval": 1000,
5
- "seed": 52,
6
- "epochs": 10000,
7
- "learning_rate": 0.0003,
8
  "betas": [
9
  0.8,
10
  0.99
@@ -12,7 +12,7 @@
12
  "eps": 1e-09,
13
  "batch_size": 16,
14
  "fp16_run": false,
15
- "lr_decay": 0.999875,
16
  "segment_size": 16384,
17
  "init_lr_ratio": 1,
18
  "warmup_epochs": 0,
@@ -21,8 +21,8 @@
21
  "skip_optimizer": true
22
  },
23
  "data": {
24
- "training_files": "filelists/train.list",
25
- "validation_files": "filelists/val.list",
26
  "max_wav_value": 32768.0,
27
  "sampling_rate": 44100,
28
  "filter_length": 2048,
@@ -32,7 +32,7 @@
32
  "mel_fmin": 0.0,
33
  "mel_fmax": null,
34
  "add_blank": true,
35
- "n_speakers": 256,
36
  "cleaned_text": true,
37
  "spk2id": {
38
  "華戀": 0,
@@ -182,5 +182,6 @@
182
  "n_layers_q": 3,
183
  "use_spectral_norm": false,
184
  "gin_channels": 256
185
- }
 
186
  }
 
2
  "train": {
3
  "log_interval": 200,
4
  "eval_interval": 1000,
5
+ "seed": 42,
6
+ "epochs": 1000,
7
+ "learning_rate": 0.0002,
8
  "betas": [
9
  0.8,
10
  0.99
 
12
  "eps": 1e-09,
13
  "batch_size": 16,
14
  "fp16_run": false,
15
+ "lr_decay": 0.99995,
16
  "segment_size": 16384,
17
  "init_lr_ratio": 1,
18
  "warmup_epochs": 0,
 
21
  "skip_optimizer": true
22
  },
23
  "data": {
24
+ "training_files": "Data/BangDream/filelists/train.list",
25
+ "validation_files": "Data/BangDream/filelists/val.list",
26
  "max_wav_value": 32768.0,
27
  "sampling_rate": 44100,
28
  "filter_length": 2048,
 
32
  "mel_fmin": 0.0,
33
  "mel_fmax": null,
34
  "add_blank": true,
35
+ "n_speakers": 700,
36
  "cleaned_text": true,
37
  "spk2id": {
38
  "華戀": 0,
 
182
  "n_layers_q": 3,
183
  "use_spectral_norm": false,
184
  "gin_channels": 256
185
+ },
186
+ "version": "2.0"
187
  }
configs/config_old.json ADDED
@@ -0,0 +1,187 @@
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "eval_interval": 1000,
5
+ "seed": 52,
6
+ "epochs": 10000,
7
+ "learning_rate": 0.0003,
8
+ "betas": [
9
+ 0.8,
10
+ 0.99
11
+ ],
12
+ "eps": 1e-09,
13
+ "batch_size": 16,
14
+ "fp16_run": false,
15
+ "lr_decay": 0.999875,
16
+ "segment_size": 16384,
17
+ "init_lr_ratio": 1,
18
+ "warmup_epochs": 0,
19
+ "c_mel": 45,
20
+ "c_kl": 1.0,
21
+ "skip_optimizer": true
22
+ },
23
+ "data": {
24
+ "training_files": "filelists/train.list",
25
+ "validation_files": "filelists/val.list",
26
+ "max_wav_value": 32768.0,
27
+ "sampling_rate": 44100,
28
+ "filter_length": 2048,
29
+ "hop_length": 512,
30
+ "win_length": 2048,
31
+ "n_mel_channels": 128,
32
+ "mel_fmin": 0.0,
33
+ "mel_fmax": null,
34
+ "add_blank": true,
35
+ "n_speakers": 256,
36
+ "cleaned_text": true,
37
+ "spk2id": {
38
+ "華戀": 0,
39
+ "晶": 1,
40
+ "光": 2,
41
+ "未知留": 3,
42
+ "香子": 4,
43
+ "雙葉": 5,
44
+ "真晝": 6,
45
+ "艾露": 7,
46
+ "珠緒": 8,
47
+ "艾露露": 9,
48
+ "純那": 10,
49
+ "克洛迪娜": 11,
50
+ "真矢": 12,
51
+ "奈奈": 13,
52
+ "壘": 14,
53
+ "文": 15,
54
+ "一愛": 16,
55
+ "菈樂菲": 17,
56
+ "司": 18,
57
+ "美空": 19,
58
+ "靜羽": 20,
59
+ "悠悠子": 21,
60
+ "八千代": 22,
61
+ "栞": 23,
62
+ "美帆": 24,
63
+ "芙蘿菈": 25,
64
+ "克蕾兒": 26,
65
+ "安德露": 27,
66
+ "瑪莉亞貝菈": 28,
67
+ "克拉迪亞": 29,
68
+ "桃樂西": 30,
69
+ "瑪麗安": 31,
70
+ "三月七": 32,
71
+ "香澄": 33,
72
+ "有咲": 34,
73
+ "沙綾": 35,
74
+ "りみ": 36,
75
+ "たえ": 37,
76
+ "沙綾、りみ、たえ": 38,
77
+ "巴": 39,
78
+ "一同": 40,
79
+ "まりな": 41,
80
+ "ゆり": 42,
81
+ "明日香": 43,
82
+ "???": 44,
83
+ "ひまり": 45,
84
+ "モカ": 46,
85
+ "つぐみ": 47,
86
+ "蘭": 48,
87
+ "リサ": 49,
88
+ "千聖": 50,
89
+ "花音": 51,
90
+ "イヴ": 52,
91
+ "日菜": 53,
92
+ "友希那": 54,
93
+ "紗夜": 55,
94
+ "こころ": 56,
95
+ "美咲": 57,
96
+ "薫": 58,
97
+ "はぐみ": 59,
98
+ "ミッシェル": 60,
99
+ "マリー": 61,
100
+ "怪盗ハロハッピー": 62,
101
+ "ニコリーナ": 63,
102
+ "彩": 64,
103
+ "麻弥": 65,
104
+ "燐子": 66,
105
+ "あこ": 67,
106
+ "ゆきな": 68,
107
+ "ましろ": 69,
108
+ "つくし": 70,
109
+ "透子": 71,
110
+ "七深": 72,
111
+ "瑠唯": 73,
112
+ "六花": 74,
113
+ "パレオ": 75,
114
+ "レイヤ": 76,
115
+ "マスキング": 77,
116
+ "チュチュ": 78,
117
+ "ますき": 79,
118
+ "ロック": 80,
119
+ "令王那": 81,
120
+ "CHIYU": 82,
121
+ "レイ": 83,
122
+ "燈": 84,
123
+ "そよ": 85,
124
+ "祥子": 86,
125
+ "立希": 87,
126
+ "睦": 88,
127
+ "愛音": 89,
128
+ "楽奈": 90,
129
+ "海鈴": 91
130
+ }
131
+ },
132
+ "model": {
133
+ "use_spk_conditioned_encoder": true,
134
+ "use_noise_scaled_mas": true,
135
+ "use_mel_posterior_encoder": false,
136
+ "use_duration_discriminator": true,
137
+ "inter_channels": 192,
138
+ "hidden_channels": 192,
139
+ "filter_channels": 768,
140
+ "n_heads": 2,
141
+ "n_layers": 6,
142
+ "kernel_size": 3,
143
+ "p_dropout": 0.1,
144
+ "resblock": "1",
145
+ "resblock_kernel_sizes": [
146
+ 3,
147
+ 7,
148
+ 11
149
+ ],
150
+ "resblock_dilation_sizes": [
151
+ [
152
+ 1,
153
+ 3,
154
+ 5
155
+ ],
156
+ [
157
+ 1,
158
+ 3,
159
+ 5
160
+ ],
161
+ [
162
+ 1,
163
+ 3,
164
+ 5
165
+ ]
166
+ ],
167
+ "upsample_rates": [
168
+ 8,
169
+ 8,
170
+ 2,
171
+ 2,
172
+ 2
173
+ ],
174
+ "upsample_initial_channel": 512,
175
+ "upsample_kernel_sizes": [
176
+ 16,
177
+ 16,
178
+ 8,
179
+ 2,
180
+ 2
181
+ ],
182
+ "n_layers_q": 3,
183
+ "use_spectral_norm": false,
184
+ "gin_channels": 256
185
+ },
186
+ "version": "2.0"
187
+ }
data_utils.py CHANGED
@@ -3,11 +3,11 @@ import random
3
  import torch
4
  import torch.utils.data
5
  from tqdm import tqdm
6
- from loguru import logger
7
  import commons
8
  from mel_processing import spectrogram_torch, mel_spectrogram_torch
9
  from utils import load_wav_to_torch, load_filepaths_and_text
10
- from text import cleaned_text_to_sequence, get_bert
11
 
12
  """Multi speaker version"""
13
 
@@ -85,13 +85,13 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
85
  # separate filename, speaker_id and text
86
  audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
87
 
88
- bert, ja_bert, phones, tone, language = self.get_text(
89
  text, word2ph, phones, tone, language, audiopath
90
  )
91
 
92
  spec, wav = self.get_audio(audiopath)
93
  sid = torch.LongTensor([int(self.spk_map[sid])])
94
- return (phones, spec, wav, sid, tone, language, bert, ja_bert)
95
 
96
  def get_audio(self, filename):
97
  audio, sampling_rate = load_wav_to_torch(filename)
@@ -145,40 +145,28 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
145
  word2ph[0] += 1
146
  bert_path = wav_path.replace(".wav", ".bert.pt")
147
  try:
148
- bert = torch.load(bert_path)
149
- assert bert.shape[-1] == len(phone)
150
- except:
151
- bert = get_bert(text, word2ph, language_str)
152
- torch.save(bert, bert_path)
153
- assert bert.shape[-1] == len(phone), phone
154
 
155
  if language_str == "ZH":
156
- bert = bert
157
- ja_bert = torch.zeros(768, len(phone))
 
158
  elif language_str == "JP":
159
- ja_bert = bert
160
  bert = torch.zeros(1024, len(phone))
161
- else:
 
 
162
  bert = torch.zeros(1024, len(phone))
163
- ja_bert = torch.zeros(768, len(phone))
164
- assert bert.shape[-1] == len(phone), (
165
- bert.shape,
166
- len(phone),
167
- sum(word2ph),
168
- p1,
169
- p2,
170
- t1,
171
- t2,
172
- pold,
173
- pold2,
174
- word2ph,
175
- text,
176
- w2pho,
177
- )
178
  phone = torch.LongTensor(phone)
179
  tone = torch.LongTensor(tone)
180
  language = torch.LongTensor(language)
181
- return bert, ja_bert, phone, tone, language
182
 
183
  def get_sid(self, sid):
184
  sid = torch.LongTensor([int(sid)])
@@ -221,7 +209,8 @@ class TextAudioSpeakerCollate:
221
  tone_padded = torch.LongTensor(len(batch), max_text_len)
222
  language_padded = torch.LongTensor(len(batch), max_text_len)
223
  bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
224
- ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)
 
225
 
226
  spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
227
  wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
@@ -232,6 +221,8 @@ class TextAudioSpeakerCollate:
232
  wav_padded.zero_()
233
  bert_padded.zero_()
234
  ja_bert_padded.zero_()
 
 
235
  for i in range(len(ids_sorted_decreasing)):
236
  row = batch[ids_sorted_decreasing[i]]
237
 
@@ -261,6 +252,9 @@ class TextAudioSpeakerCollate:
261
  ja_bert = row[7]
262
  ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert
263
 
 
 
 
264
  return (
265
  text_padded,
266
  text_lengths,
@@ -273,6 +267,7 @@ class TextAudioSpeakerCollate:
273
  language_padded,
274
  bert_padded,
275
  ja_bert_padded,
 
276
  )
277
 
278
 
 
3
  import torch
4
  import torch.utils.data
5
  from tqdm import tqdm
6
+ from tools.log import logger
7
  import commons
8
  from mel_processing import spectrogram_torch, mel_spectrogram_torch
9
  from utils import load_wav_to_torch, load_filepaths_and_text
10
+ from text import cleaned_text_to_sequence
11
 
12
  """Multi speaker version"""
13
 
 
85
  # separate filename, speaker_id and text
86
  audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
87
 
88
+ bert, ja_bert, en_bert, phones, tone, language = self.get_text(
89
  text, word2ph, phones, tone, language, audiopath
90
  )
91
 
92
  spec, wav = self.get_audio(audiopath)
93
  sid = torch.LongTensor([int(self.spk_map[sid])])
94
+ return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)
95
 
96
  def get_audio(self, filename):
97
  audio, sampling_rate = load_wav_to_torch(filename)
 
145
  word2ph[0] += 1
146
  bert_path = wav_path.replace(".wav", ".bert.pt")
147
  try:
148
+ bert_ori = torch.load(bert_path)
149
+ assert bert_ori.shape[-1] == len(phone)
150
+ except Exception as e:
151
+ logger.warning("Bert load Failed")
152
+ logger.warning(e)
 
153
 
154
  if language_str == "ZH":
155
+ bert = bert_ori
156
+ ja_bert = torch.zeros(1024, len(phone))
157
+ en_bert = torch.zeros(1024, len(phone))
158
  elif language_str == "JP":
 
159
  bert = torch.zeros(1024, len(phone))
160
+ ja_bert = bert_ori
161
+ en_bert = torch.zeros(1024, len(phone))
162
+ elif language_str == "EN":
163
  bert = torch.zeros(1024, len(phone))
164
+ ja_bert = torch.zeros(1024, len(phone))
165
+ en_bert = bert_ori
 
 
 
 
 
 
 
 
 
 
 
 
 
166
  phone = torch.LongTensor(phone)
167
  tone = torch.LongTensor(tone)
168
  language = torch.LongTensor(language)
169
+ return bert, ja_bert, en_bert, phone, tone, language
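The reworked `get_text` above keeps three parallel 1024-dim BERT feature tensors and zero-fills the two that do not match the utterance language. A hedged sketch of just that routing step; `select_bert_features` is an illustrative helper, not a function from the repo.

```python
import torch


def select_bert_features(bert_ori: torch.Tensor, language_str: str, n_phones: int):
    """Route precomputed features to the slot for the utterance language,
    zero-filling the other two, mirroring TextAudioSpeakerLoader.get_text."""
    bert = torch.zeros(1024, n_phones)
    ja_bert = torch.zeros(1024, n_phones)
    en_bert = torch.zeros(1024, n_phones)
    if language_str == "ZH":
        bert = bert_ori
    elif language_str == "JP":
        ja_bert = bert_ori
    elif language_str == "EN":
        en_bert = bert_ori
    return bert, ja_bert, en_bert


feats = torch.randn(1024, 37)  # e.g. loaded from a <wav>.bert.pt file
bert, ja_bert, en_bert = select_bert_features(feats, "JP", 37)
print(bert.sum().item(), ja_bert.shape, en_bert.sum().item())  # 0.0 torch.Size([1024, 37]) 0.0
```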
170
 
171
  def get_sid(self, sid):
172
  sid = torch.LongTensor([int(sid)])
 
209
  tone_padded = torch.LongTensor(len(batch), max_text_len)
210
  language_padded = torch.LongTensor(len(batch), max_text_len)
211
  bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
212
+ ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
213
+ en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
214
 
215
  spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
216
  wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
 
221
  wav_padded.zero_()
222
  bert_padded.zero_()
223
  ja_bert_padded.zero_()
224
+ en_bert_padded.zero_()
225
+
226
  for i in range(len(ids_sorted_decreasing)):
227
  row = batch[ids_sorted_decreasing[i]]
228
 
 
252
  ja_bert = row[7]
253
  ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert
254
 
255
+ en_bert = row[8]
256
+ en_bert_padded[i, :, : en_bert.size(1)] = en_bert
257
+
258
  return (
259
  text_padded,
260
  text_lengths,
 
267
  language_padded,
268
  bert_padded,
269
  ja_bert_padded,
270
+ en_bert_padded,
271
  )
272
 
273
 
default_config.yml ADDED
@@ -0,0 +1,160 @@
1
+ # 全局配置
2
+ # 对于希望在同一时间使用多个配置文件的情况,例如两个GPU同时跑两个训练集:通过环境变量指定配置文件,不指定则默认为./config.yml
3
+
4
+ # 拟提供通用路径配置,统一存放数据,避免数据放得很乱
5
+ # 每个数据集与其对应的模型存放至统一路径下,后续所有的路径配置均为相对于datasetPath的路径
6
+ # 不填或者填空则路径为相对于项目根目录的路径
7
+ dataset_path: ""
8
+
9
+ # 模型镜像源,默认huggingface,使用openi镜像源需指定openi_token
10
+ mirror: ""
11
+ openi_token: "" # openi token
12
+
13
+ # resample 音频重采样配置
14
+ # 注意, “:” 后需要加空格
15
+ resample:
16
+ # 目标重采样率
17
+ sampling_rate: 44100
18
+ # 音频文件输入路径,重采样会将该路径下所有.wav音频文件重采样
19
+ # 请填入相对于datasetPath的相对路径
20
+ in_dir: "" # 相对于根目录的路径为 /datasetPath/in_dir
21
+ # 音频文件重采样后输出路径
22
+ out_dir: ""
23
+
24
+
25
+ # preprocess_text 数据集预处理相关配置
26
+ # 注意, “:” 后需要加空格
27
+ preprocess_text:
28
+ # 原始文本文件路径,文本格式应为{wav_path}|{speaker_name}|{language}|{text}。
29
+ transcription_path: "filelists/bushroid.list"
30
+ # 数据清洗后文本路径,可以不填。不填则将在原始文本目录生成
31
+ cleaned_path: ""
32
+ # 训练集路径
33
+ train_path: "filelists/train.list"
34
+ # 验证集路径
35
+ val_path: "filelists/val.list"
36
+ # 配置文件路径
37
+ config_path: "config.json"
38
+ # 每个speaker的验证集条数
39
+ val_per_spk: 4
40
+ # 验证集最大条数,多于的会被截断并放到训练集中
41
+ max_val_total: 8
42
+ # 是否进行数据清洗
43
+ clean: true
44
+
45
+
46
+ # bert_gen 相关配置
47
+ # 注意, “:” 后需要加空格
48
+ bert_gen:
49
+ # 训练数据集配置文件路径
50
+ config_path: "config.json"
51
+ # 并行数
52
+ num_processes: 2
53
+ # 使用设备:可选项 "cuda" 显卡推理,"cpu" cpu推理
54
+ # 该选项同时决定了get_bert_feature的默认设备
55
+ device: "cuda"
56
+ # 使用多卡推理
57
+ use_multi_device: false
58
+
59
+
60
+ # train 训练配置
61
+ # 注意, “:” 后需要加空格
62
+ train_ms:
63
+ # 需要加载的环境变量,多显卡训练时RANK请手动在环境变量填写
64
+ # 环境变量对应名称环境变量不存在时加载,也就是说手动添加的环境变量优先级更高,会覆盖本配置文件
65
+ env:
66
+ MASTER_ADDR: "localhost"
67
+ MASTER_PORT: 10086
68
+ WORLD_SIZE: 1
69
+ RANK: 0
70
+ # 可以填写任意名的环境变量
71
+ # THE_ENV_VAR_YOU_NEED_TO_USE: "1234567"
72
+ # 底模设置
73
+ base:
74
+ use_base_model: True
75
+ repo_id: "Stardust_minus/Bert-VITS2"
76
+ model_image: "Bert-VITS2中日英底模-fix" # openi网页的模型名
77
+ # 训练模型存储目录:与旧版本的区别,原先数据集是存放在logs/model_name下的,现在改为统一存放在Data/你的数据集/models下
78
+ model: "models"
79
+ # 配置文件路径
80
+ config_path: "configs/config.json"
81
+
82
+
83
+ # webui webui配置
84
+ # 注意, “:” 后需要加空格
85
+ webui:
86
+ # 推理设备
87
+ device: "cuda"
88
+ # 模型路径
89
+ model: "genshin/models/G_8000.pth"
90
+ # 配置文件路径
91
+ config_path: "configs/config.json"
92
+ # 端口号
93
+ port: 7860
94
+ # 是否公开部署,对外网开放
95
+ share: false
96
+ # 是否开启debug模式
97
+ debug: false
98
+ # 语种识别库,可选langid, fastlid
99
+ language_identification_library: "langid"
100
+
101
+
102
+ # server api配置
103
+ # 注意, “:” 后需要加空格
104
+ # 注意,本配置下的所有配置均为相对于根目录的路径
105
+ server:
106
+ # 端口号
107
+ port: 5000
108
+ # 模型默认使用设备:但是当前并没有实现这个配置。
109
+ device: "cuda"
110
+ # 需要加载的所有模型的配置
111
+ # 注意,所有模型都必须正确配置model与config的路径,空路径会导致加载错误。
112
+ models:
113
+ - # 模型的路径
114
+ model: ""
115
+ # 模型config.json的路径
116
+ config: ""
117
+ # 模型使用设备,若填写则会覆盖默认配置
118
+ device: "cuda"
119
+ # 模型默认使用的语言
120
+ language: "ZH"
121
+ # 模型人物默认参数
122
+ # 不必填写所有人物,不填的使用默认值
123
+ # 暂时不用填写,当前尚未实现按人区分配置
124
+ speakers:
125
+ - speaker: "科比"
126
+ sdp_ratio: 0.2
127
+ noise_scale: 0.6
128
+ noise_scale_w: 0.8
129
+ length_scale: 1
130
+ - speaker: "五条悟"
131
+ sdp_ratio: 0.3
132
+ noise_scale: 0.7
133
+ noise_scale_w: 0.8
134
+ length_scale: 0.5
135
+ - speaker: "安倍晋三"
136
+ sdp_ratio: 0.2
137
+ noise_scale: 0.6
138
+ noise_scale_w: 0.8
139
+ length_scale: 1.2
140
+ - # 模型的路径
141
+ model: ""
142
+ # 模型config.json的路径
143
+ config: ""
144
+ # 模型使用设备,若填写则会覆盖默认配置
145
+ device: "cpu"
146
+ # 模型默认使用的语言
147
+ language: "JP"
148
+ # 模型人物默认参数
149
+ # 不必填写所有人物,不填的使用默认值
150
+ speakers: [ ] # 也可以不填
151
+
152
+
153
+ # 百度翻译开放平台 api配置
154
+ # api接入文档 https://api.fanyi.baidu.com/doc/21
155
+ # 请不要在github等网站公开分享你的app id 与 key
156
+ translate:
157
+ # 你的APPID
158
+ "app_key": ""
159
+ # 你的密钥
160
+ "secret_key": ""
emo_gen.py ADDED
@@ -0,0 +1,169 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from torch.utils.data import Dataset
4
+ from torch.utils.data import DataLoader
5
+ from transformers import Wav2Vec2Processor
6
+ from transformers.models.wav2vec2.modeling_wav2vec2 import (
7
+ Wav2Vec2Model,
8
+ Wav2Vec2PreTrainedModel,
9
+ )
10
+ import librosa
11
+ import numpy as np
12
+ import argparse
13
+ from config import config
14
+ import utils
15
+ import os
16
+ from tqdm import tqdm
17
+
18
+
19
+ class RegressionHead(nn.Module):
20
+ r"""Classification head."""
21
+
22
+ def __init__(self, config):
23
+ super().__init__()
24
+
25
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
26
+ self.dropout = nn.Dropout(config.final_dropout)
27
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
28
+
29
+ def forward(self, features, **kwargs):
30
+ x = features
31
+ x = self.dropout(x)
32
+ x = self.dense(x)
33
+ x = torch.tanh(x)
34
+ x = self.dropout(x)
35
+ x = self.out_proj(x)
36
+
37
+ return x
38
+
39
+
40
+ class EmotionModel(Wav2Vec2PreTrainedModel):
41
+ r"""Speech emotion classifier."""
42
+
43
+ def __init__(self, config):
44
+ super().__init__(config)
45
+
46
+ self.config = config
47
+ self.wav2vec2 = Wav2Vec2Model(config)
48
+ self.classifier = RegressionHead(config)
49
+ self.init_weights()
50
+
51
+ def forward(
52
+ self,
53
+ input_values,
54
+ ):
55
+ outputs = self.wav2vec2(input_values)
56
+ hidden_states = outputs[0]
57
+ hidden_states = torch.mean(hidden_states, dim=1)
58
+ logits = self.classifier(hidden_states)
59
+
60
+ return hidden_states, logits
61
+
62
+
63
+ class AudioDataset(Dataset):
64
+ def __init__(self, list_of_wav_files, sr, processor):
65
+ self.list_of_wav_files = list_of_wav_files
66
+ self.processor = processor
67
+ self.sr = sr
68
+
69
+ def __len__(self):
70
+ return len(self.list_of_wav_files)
71
+
72
+ def __getitem__(self, idx):
73
+ wav_file = self.list_of_wav_files[idx]
74
+ audio_data, _ = librosa.load(wav_file, sr=self.sr)
75
+ processed_data = self.processor(audio_data, sampling_rate=self.sr)[
76
+ "input_values"
77
+ ][0]
78
+ return torch.from_numpy(processed_data)
79
+
80
+
81
+ model_name = "./emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim"
82
+ processor = Wav2Vec2Processor.from_pretrained(model_name)
83
+ model = EmotionModel.from_pretrained(model_name)
84
+
85
+
86
+ def process_func(
87
+ x: np.ndarray,
88
+ sampling_rate: int,
89
+ model: EmotionModel,
90
+ processor: Wav2Vec2Processor,
91
+ device: str,
92
+ embeddings: bool = False,
93
+ ) -> np.ndarray:
94
+ r"""Predict emotions or extract embeddings from raw audio signal."""
95
+ model = model.to(device)
96
+ y = processor(x, sampling_rate=sampling_rate)
97
+ y = y["input_values"][0]
98
+ y = torch.from_numpy(y).unsqueeze(0).to(device)
99
+
100
+ # run through model
101
+ with torch.no_grad():
102
+ y = model(y)[0 if embeddings else 1]
103
+
104
+ # convert to numpy
105
+ y = y.detach().cpu().numpy()
106
+
107
+ return y
108
+
109
+
110
+ def get_emo(path):
111
+ wav, sr = librosa.load(path, sr=16000)
112
+ device = config.bert_gen_config.device
113
+ return process_func(
114
+ np.expand_dims(wav, 0).astype(np.float64),
115
+ sr,
116
+ model,
117
+ processor,
118
+ device,
119
+ embeddings=True,
120
+ ).squeeze(0)
121
+
122
+
123
+ if __name__ == "__main__":
124
+ parser = argparse.ArgumentParser()
125
+ parser.add_argument(
126
+ "-c", "--config", type=str, default=config.bert_gen_config.config_path
127
+ )
128
+ parser.add_argument(
129
+ "--num_processes", type=int, default=config.bert_gen_config.num_processes
130
+ )
131
+ args, _ = parser.parse_known_args()
132
+ config_path = args.config
133
+ hps = utils.get_hparams_from_file(config_path)
134
+
135
+ device = config.bert_gen_config.device
136
+
137
+ model_name = "./emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim"
138
+ processor = (
139
+ Wav2Vec2Processor.from_pretrained(model_name)
140
+ if processor is None
141
+ else processor
142
+ )
143
+ model = (
144
+ EmotionModel.from_pretrained(model_name).to(device)
145
+ if model is None
146
+ else model.to(device)
147
+ )
148
+
149
+ lines = []
150
+ with open(hps.data.training_files, encoding="utf-8") as f:
151
+ lines.extend(f.readlines())
152
+
153
+ with open(hps.data.validation_files, encoding="utf-8") as f:
154
+ lines.extend(f.readlines())
155
+
156
+ wavnames = [line.split("|")[0] for line in lines]
157
+ dataset = AudioDataset(wavnames, 16000, processor)
158
+ data_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=16)
159
+
160
+ with torch.no_grad():
161
+ for i, data in tqdm(enumerate(data_loader), total=len(data_loader)):
162
+ wavname = wavnames[i]
163
+ emo_path = wavname.replace(".wav", ".emo.npy")
164
+ if os.path.exists(emo_path):
165
+ continue
166
+ emb = model(data.to(device))[0].detach().cpu().numpy()
167
+ np.save(emo_path, emb)
168
+
169
+ print("Emotion embedding generation finished!")
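For orientation, here is a minimal usage sketch of the `get_emo` helper defined above — a sketch only, assuming the script is run from the project root (so the relative `./emotional/...` model path resolves) and using a hypothetical wav path:

```python
# Hypothetical usage of emo_gen.get_emo; the wav path is a placeholder.
import numpy as np

from emo_gen import get_emo  # loads the wav2vec2 model at import time

wav_path = "Data/BangDream/audios/example.wav"      # placeholder path
emb = get_emo(wav_path)                             # pooled hidden state, shape (1024,)
np.save(wav_path.replace(".wav", ".emo.npy"), emb)  # same layout as the batch loop above
print(emb.shape)
```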
emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/.gitattributes ADDED
@@ -0,0 +1,28 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.wasm filter=lfs diff=lfs merge=lfs -text
25
+ *.xz filter=lfs diff=lfs merge=lfs -text
26
+ *.zip filter=lfs diff=lfs merge=lfs -text
27
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
28
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/LICENSE ADDED
@@ -0,0 +1,437 @@
1
+ Attribution-NonCommercial-ShareAlike 4.0 International
2
+
3
+ =======================================================================
4
+
5
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
6
+ does not provide legal services or legal advice. Distribution of
7
+ Creative Commons public licenses does not create a lawyer-client or
8
+ other relationship. Creative Commons makes its licenses and related
9
+ information available on an "as-is" basis. Creative Commons gives no
10
+ warranties regarding its licenses, any material licensed under their
11
+ terms and conditions, or any related information. Creative Commons
12
+ disclaims all liability for damages resulting from their use to the
13
+ fullest extent possible.
14
+
15
+ Using Creative Commons Public Licenses
16
+
17
+ Creative Commons public licenses provide a standard set of terms and
18
+ conditions that creators and other rights holders may use to share
19
+ original works of authorship and other material subject to copyright
20
+ and certain other rights specified in the public license below. The
21
+ following considerations are for informational purposes only, are not
22
+ exhaustive, and do not form part of our licenses.
23
+
24
+ Considerations for licensors: Our public licenses are
25
+ intended for use by those authorized to give the public
26
+ permission to use material in ways otherwise restricted by
27
+ copyright and certain other rights. Our licenses are
28
+ irrevocable. Licensors should read and understand the terms
29
+ and conditions of the license they choose before applying it.
30
+ Licensors should also secure all rights necessary before
31
+ applying our licenses so that the public can reuse the
32
+ material as expected. Licensors should clearly mark any
33
+ material not subject to the license. This includes other CC-
34
+ licensed material, or material used under an exception or
35
+ limitation to copyright. More considerations for licensors:
36
+ wiki.creativecommons.org/Considerations_for_licensors
37
+
38
+ Considerations for the public: By using one of our public
39
+ licenses, a licensor grants the public permission to use the
40
+ licensed material under specified terms and conditions. If
41
+ the licensor's permission is not necessary for any reason--for
42
+ example, because of any applicable exception or limitation to
43
+ copyright--then that use is not regulated by the license. Our
44
+ licenses grant only permissions under copyright and certain
45
+ other rights that a licensor has authority to grant. Use of
46
+ the licensed material may still be restricted for other
47
+ reasons, including because others have copyright or other
48
+ rights in the material. A licensor may make special requests,
49
+ such as asking that all changes be marked or described.
50
+ Although not required by our licenses, you are encouraged to
51
+ respect those requests where reasonable. More considerations
52
+ for the public:
53
+ wiki.creativecommons.org/Considerations_for_licensees
54
+
55
+ =======================================================================
56
+
57
+ Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
58
+ Public License
59
+
60
+ By exercising the Licensed Rights (defined below), You accept and agree
61
+ to be bound by the terms and conditions of this Creative Commons
62
+ Attribution-NonCommercial-ShareAlike 4.0 International Public License
63
+ ("Public License"). To the extent this Public License may be
64
+ interpreted as a contract, You are granted the Licensed Rights in
65
+ consideration of Your acceptance of these terms and conditions, and the
66
+ Licensor grants You such rights in consideration of benefits the
67
+ Licensor receives from making the Licensed Material available under
68
+ these terms and conditions.
69
+
70
+
71
+ Section 1 -- Definitions.
72
+
73
+ a. Adapted Material means material subject to Copyright and Similar
74
+ Rights that is derived from or based upon the Licensed Material
75
+ and in which the Licensed Material is translated, altered,
76
+ arranged, transformed, or otherwise modified in a manner requiring
77
+ permission under the Copyright and Similar Rights held by the
78
+ Licensor. For purposes of this Public License, where the Licensed
79
+ Material is a musical work, performance, or sound recording,
80
+ Adapted Material is always produced where the Licensed Material is
81
+ synched in timed relation with a moving image.
82
+
83
+ b. Adapter's License means the license You apply to Your Copyright
84
+ and Similar Rights in Your contributions to Adapted Material in
85
+ accordance with the terms and conditions of this Public License.
86
+
87
+ c. BY-NC-SA Compatible License means a license listed at
88
+ creativecommons.org/compatiblelicenses, approved by Creative
89
+ Commons as essentially the equivalent of this Public License.
90
+
91
+ d. Copyright and Similar Rights means copyright and/or similar rights
92
+ closely related to copyright including, without limitation,
93
+ performance, broadcast, sound recording, and Sui Generis Database
94
+ Rights, without regard to how the rights are labeled or
95
+ categorized. For purposes of this Public License, the rights
96
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
97
+ Rights.
98
+
99
+ e. Effective Technological Measures means those measures that, in the
100
+ absence of proper authority, may not be circumvented under laws
101
+ fulfilling obligations under Article 11 of the WIPO Copyright
102
+ Treaty adopted on December 20, 1996, and/or similar international
103
+ agreements.
104
+
105
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
106
+ any other exception or limitation to Copyright and Similar Rights
107
+ that applies to Your use of the Licensed Material.
108
+
109
+ g. License Elements means the license attributes listed in the name
110
+ of a Creative Commons Public License. The License Elements of this
111
+ Public License are Attribution, NonCommercial, and ShareAlike.
112
+
113
+ h. Licensed Material means the artistic or literary work, database,
114
+ or other material to which the Licensor applied this Public
115
+ License.
116
+
117
+ i. Licensed Rights means the rights granted to You subject to the
118
+ terms and conditions of this Public License, which are limited to
119
+ all Copyright and Similar Rights that apply to Your use of the
120
+ Licensed Material and that the Licensor has authority to license.
121
+
122
+ j. Licensor means the individual(s) or entity(ies) granting rights
123
+ under this Public License.
124
+
125
+ k. NonCommercial means not primarily intended for or directed towards
126
+ commercial advantage or monetary compensation. For purposes of
127
+ this Public License, the exchange of the Licensed Material for
128
+ other material subject to Copyright and Similar Rights by digital
129
+ file-sharing or similar means is NonCommercial provided there is
130
+ no payment of monetary compensation in connection with the
131
+ exchange.
132
+
133
+ l. Share means to provide material to the public by any means or
134
+ process that requires permission under the Licensed Rights, such
135
+ as reproduction, public display, public performance, distribution,
136
+ dissemination, communication, or importation, and to make material
137
+ available to the public including in ways that members of the
138
+ public may access the material from a place and at a time
139
+ individually chosen by them.
140
+
141
+ m. Sui Generis Database Rights means rights other than copyright
142
+ resulting from Directive 96/9/EC of the European Parliament and of
143
+ the Council of 11 March 1996 on the legal protection of databases,
144
+ as amended and/or succeeded, as well as other essentially
145
+ equivalent rights anywhere in the world.
146
+
147
+ n. You means the individual or entity exercising the Licensed Rights
148
+ under this Public License. Your has a corresponding meaning.
149
+
150
+
151
+ Section 2 -- Scope.
152
+
153
+ a. License grant.
154
+
155
+ 1. Subject to the terms and conditions of this Public License,
156
+ the Licensor hereby grants You a worldwide, royalty-free,
157
+ non-sublicensable, non-exclusive, irrevocable license to
158
+ exercise the Licensed Rights in the Licensed Material to:
159
+
160
+ a. reproduce and Share the Licensed Material, in whole or
161
+ in part, for NonCommercial purposes only; and
162
+
163
+ b. produce, reproduce, and Share Adapted Material for
164
+ NonCommercial purposes only.
165
+
166
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
167
+ Exceptions and Limitations apply to Your use, this Public
168
+ License does not apply, and You do not need to comply with
169
+ its terms and conditions.
170
+
171
+ 3. Term. The term of this Public License is specified in Section
172
+ 6(a).
173
+
174
+ 4. Media and formats; technical modifications allowed. The
175
+ Licensor authorizes You to exercise the Licensed Rights in
176
+ all media and formats whether now known or hereafter created,
177
+ and to make technical modifications necessary to do so. The
178
+ Licensor waives and/or agrees not to assert any right or
179
+ authority to forbid You from making technical modifications
180
+ necessary to exercise the Licensed Rights, including
181
+ technical modifications necessary to circumvent Effective
182
+ Technological Measures. For purposes of this Public License,
183
+ simply making modifications authorized by this Section 2(a)
184
+ (4) never produces Adapted Material.
185
+
186
+ 5. Downstream recipients.
187
+
188
+ a. Offer from the Licensor -- Licensed Material. Every
189
+ recipient of the Licensed Material automatically
190
+ receives an offer from the Licensor to exercise the
191
+ Licensed Rights under the terms and conditions of this
192
+ Public License.
193
+
194
+ b. Additional offer from the Licensor -- Adapted Material.
195
+ Every recipient of Adapted Material from You
196
+ automatically receives an offer from the Licensor to
197
+ exercise the Licensed Rights in the Adapted Material
198
+ under the conditions of the Adapter's License You apply.
199
+
200
+ c. No downstream restrictions. You may not offer or impose
201
+ any additional or different terms or conditions on, or
202
+ apply any Effective Technological Measures to, the
203
+ Licensed Material if doing so restricts exercise of the
204
+ Licensed Rights by any recipient of the Licensed
205
+ Material.
206
+
207
+ 6. No endorsement. Nothing in this Public License constitutes or
208
+ may be construed as permission to assert or imply that You
209
+ are, or that Your use of the Licensed Material is, connected
210
+ with, or sponsored, endorsed, or granted official status by,
211
+ the Licensor or others designated to receive attribution as
212
+ provided in Section 3(a)(1)(A)(i).
213
+
214
+ b. Other rights.
215
+
216
+ 1. Moral rights, such as the right of integrity, are not
217
+ licensed under this Public License, nor are publicity,
218
+ privacy, and/or other similar personality rights; however, to
219
+ the extent possible, the Licensor waives and/or agrees not to
220
+ assert any such rights held by the Licensor to the limited
221
+ extent necessary to allow You to exercise the Licensed
222
+ Rights, but not otherwise.
223
+
224
+ 2. Patent and trademark rights are not licensed under this
225
+ Public License.
226
+
227
+ 3. To the extent possible, the Licensor waives any right to
228
+ collect royalties from You for the exercise of the Licensed
229
+ Rights, whether directly or through a collecting society
230
+ under any voluntary or waivable statutory or compulsory
231
+ licensing scheme. In all other cases the Licensor expressly
232
+ reserves any right to collect such royalties, including when
233
+ the Licensed Material is used other than for NonCommercial
234
+ purposes.
235
+
236
+
237
+ Section 3 -- License Conditions.
238
+
239
+ Your exercise of the Licensed Rights is expressly made subject to the
240
+ following conditions.
241
+
242
+ a. Attribution.
243
+
244
+ 1. If You Share the Licensed Material (including in modified
245
+ form), You must:
246
+
247
+ a. retain the following if it is supplied by the Licensor
248
+ with the Licensed Material:
249
+
250
+ i. identification of the creator(s) of the Licensed
251
+ Material and any others designated to receive
252
+ attribution, in any reasonable manner requested by
253
+ the Licensor (including by pseudonym if
254
+ designated);
255
+
256
+ ii. a copyright notice;
257
+
258
+ iii. a notice that refers to this Public License;
259
+
260
+ iv. a notice that refers to the disclaimer of
261
+ warranties;
262
+
263
+ v. a URI or hyperlink to the Licensed Material to the
264
+ extent reasonably practicable;
265
+
266
+ b. indicate if You modified the Licensed Material and
267
+ retain an indication of any previous modifications; and
268
+
269
+ c. indicate the Licensed Material is licensed under this
270
+ Public License, and include the text of, or the URI or
271
+ hyperlink to, this Public License.
272
+
273
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
274
+ reasonable manner based on the medium, means, and context in
275
+ which You Share the Licensed Material. For example, it may be
276
+ reasonable to satisfy the conditions by providing a URI or
277
+ hyperlink to a resource that includes the required
278
+ information.
279
+ 3. If requested by the Licensor, You must remove any of the
280
+ information required by Section 3(a)(1)(A) to the extent
281
+ reasonably practicable.
282
+
283
+ b. ShareAlike.
284
+
285
+ In addition to the conditions in Section 3(a), if You Share
286
+ Adapted Material You produce, the following conditions also apply.
287
+
288
+ 1. The Adapter's License You apply must be a Creative Commons
289
+ license with the same License Elements, this version or
290
+ later, or a BY-NC-SA Compatible License.
291
+
292
+ 2. You must include the text of, or the URI or hyperlink to, the
293
+ Adapter's License You apply. You may satisfy this condition
294
+ in any reasonable manner based on the medium, means, and
295
+ context in which You Share Adapted Material.
296
+
297
+ 3. You may not offer or impose any additional or different terms
298
+ or conditions on, or apply any Effective Technological
299
+ Measures to, Adapted Material that restrict exercise of the
300
+ rights granted under the Adapter's License You apply.
301
+
302
+
303
+ Section 4 -- Sui Generis Database Rights.
304
+
305
+ Where the Licensed Rights include Sui Generis Database Rights that
306
+ apply to Your use of the Licensed Material:
307
+
308
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
309
+ to extract, reuse, reproduce, and Share all or a substantial
310
+ portion of the contents of the database for NonCommercial purposes
311
+ only;
312
+
313
+ b. if You include all or a substantial portion of the database
314
+ contents in a database in which You have Sui Generis Database
315
+ Rights, then the database in which You have Sui Generis Database
316
+ Rights (but not its individual contents) is Adapted Material,
317
+ including for purposes of Section 3(b); and
318
+
319
+ c. You must comply with the conditions in Section 3(a) if You Share
320
+ all or a substantial portion of the contents of the database.
321
+
322
+ For the avoidance of doubt, this Section 4 supplements and does not
323
+ replace Your obligations under this Public License where the Licensed
324
+ Rights include other Copyright and Similar Rights.
325
+
326
+
327
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
328
+
329
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
330
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
331
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
332
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
333
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
334
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
335
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
336
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
337
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
338
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
339
+
340
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
341
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
342
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
343
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
344
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
345
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
346
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
347
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
348
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
349
+
350
+ c. The disclaimer of warranties and limitation of liability provided
351
+ above shall be interpreted in a manner that, to the extent
352
+ possible, most closely approximates an absolute disclaimer and
353
+ waiver of all liability.
354
+
355
+
356
+ Section 6 -- Term and Termination.
357
+
358
+ a. This Public License applies for the term of the Copyright and
359
+ Similar Rights licensed here. However, if You fail to comply with
360
+ this Public License, then Your rights under this Public License
361
+ terminate automatically.
362
+
363
+ b. Where Your right to use the Licensed Material has terminated under
364
+ Section 6(a), it reinstates:
365
+
366
+ 1. automatically as of the date the violation is cured, provided
367
+ it is cured within 30 days of Your discovery of the
368
+ violation; or
369
+
370
+ 2. upon express reinstatement by the Licensor.
371
+
372
+ For the avoidance of doubt, this Section 6(b) does not affect any
373
+ right the Licensor may have to seek remedies for Your violations
374
+ of this Public License.
375
+
376
+ c. For the avoidance of doubt, the Licensor may also offer the
377
+ Licensed Material under separate terms or conditions or stop
378
+ distributing the Licensed Material at any time; however, doing so
379
+ will not terminate this Public License.
380
+
381
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
382
+ License.
383
+
384
+
385
+ Section 7 -- Other Terms and Conditions.
386
+
387
+ a. The Licensor shall not be bound by any additional or different
388
+ terms or conditions communicated by You unless expressly agreed.
389
+
390
+ b. Any arrangements, understandings, or agreements regarding the
391
+ Licensed Material not stated herein are separate from and
392
+ independent of the terms and conditions of this Public License.
393
+
394
+
395
+ Section 8 -- Interpretation.
396
+
397
+ a. For the avoidance of doubt, this Public License does not, and
398
+ shall not be interpreted to, reduce, limit, restrict, or impose
399
+ conditions on any use of the Licensed Material that could lawfully
400
+ be made without permission under this Public License.
401
+
402
+ b. To the extent possible, if any provision of this Public License is
403
+ deemed unenforceable, it shall be automatically reformed to the
404
+ minimum extent necessary to make it enforceable. If the provision
405
+ cannot be reformed, it shall be severed from this Public License
406
+ without affecting the enforceability of the remaining terms and
407
+ conditions.
408
+
409
+ c. No term or condition of this Public License will be waived and no
410
+ failure to comply consented to unless expressly agreed to by the
411
+ Licensor.
412
+
413
+ d. Nothing in this Public License constitutes or may be interpreted
414
+ as a limitation upon, or waiver of, any privileges and immunities
415
+ that apply to the Licensor or You, including from the legal
416
+ processes of any jurisdiction or authority.
417
+
418
+ =======================================================================
419
+
420
+ Creative Commons is not a party to its public
421
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
422
+ its public licenses to material it publishes and in those instances
423
+ will be considered the “Licensor.” The text of the Creative Commons
424
+ public licenses is dedicated to the public domain under the CC0 Public
425
+ Domain Dedication. Except for the limited purpose of indicating that
426
+ material is shared under a Creative Commons public license or as
427
+ otherwise permitted by the Creative Commons policies published at
428
+ creativecommons.org/policies, Creative Commons does not authorize the
429
+ use of the trademark "Creative Commons" or any other trademark or logo
430
+ of Creative Commons without its prior written consent including,
431
+ without limitation, in connection with any unauthorized modifications
432
+ to any of its public licenses or any other arrangements,
433
+ understandings, or agreements concerning use of licensed material. For
434
+ the avoidance of doubt, this paragraph does not form part of the
435
+ public licenses.
436
+
437
+ Creative Commons may be contacted at creativecommons.org.
emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/README.md ADDED
@@ -0,0 +1,127 @@
1
+ ---
2
+ language: en
3
+ datasets:
4
+ - msp-podcast
5
+ inference: true
6
+ tags:
7
+ - speech
8
+ - audio
9
+ - wav2vec2
10
+ - audio-classification
11
+ - emotion-recognition
12
+ license: cc-by-nc-sa-4.0
13
+ pipeline_tag: audio-classification
14
+ ---
15
+
16
+ # Model for Dimensional Speech Emotion Recognition based on Wav2vec 2.0
17
+
18
+ The model expects a raw audio signal as input and outputs predictions for arousal, dominance and valence in a range of approximately 0...1. In addition, it also provides the pooled states of the last transformer layer. The model was created by fine-tuning [
19
+ Wav2Vec2-Large-Robust](https://huggingface.co/facebook/wav2vec2-large-robust) on [MSP-Podcast](https://ecs.utdallas.edu/research/researchlabs/msp-lab/MSP-Podcast.html) (v1.7). The model was pruned from 24 to 12 transformer layers before fine-tuning. An [ONNX](https://onnx.ai/) export of the model is available from [doi:10.5281/zenodo.6221127](https://zenodo.org/record/6221127). Further details are given in the associated [paper](https://arxiv.org/abs/2203.07378) and [tutorial](https://github.com/audeering/w2v2-how-to).
20
+
21
+ # Usage
22
+
23
+ ```python
24
+ import numpy as np
25
+ import torch
26
+ import torch.nn as nn
27
+ from transformers import Wav2Vec2Processor
28
+ from transformers.models.wav2vec2.modeling_wav2vec2 import (
29
+ Wav2Vec2Model,
30
+ Wav2Vec2PreTrainedModel,
31
+ )
32
+
33
+
34
+ class RegressionHead(nn.Module):
35
+ r"""Classification head."""
36
+
37
+ def __init__(self, config):
38
+
39
+ super().__init__()
40
+
41
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
42
+ self.dropout = nn.Dropout(config.final_dropout)
43
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
44
+
45
+ def forward(self, features, **kwargs):
46
+
47
+ x = features
48
+ x = self.dropout(x)
49
+ x = self.dense(x)
50
+ x = torch.tanh(x)
51
+ x = self.dropout(x)
52
+ x = self.out_proj(x)
53
+
54
+ return x
55
+
56
+
57
+ class EmotionModel(Wav2Vec2PreTrainedModel):
58
+ r"""Speech emotion classifier."""
59
+
60
+ def __init__(self, config):
61
+
62
+ super().__init__(config)
63
+
64
+ self.config = config
65
+ self.wav2vec2 = Wav2Vec2Model(config)
66
+ self.classifier = RegressionHead(config)
67
+ self.init_weights()
68
+
69
+ def forward(
70
+ self,
71
+ input_values,
72
+ ):
73
+
74
+ outputs = self.wav2vec2(input_values)
75
+ hidden_states = outputs[0]
76
+ hidden_states = torch.mean(hidden_states, dim=1)
77
+ logits = self.classifier(hidden_states)
78
+
79
+ return hidden_states, logits
80
+
81
+
82
+
83
+ # load model from hub
84
+ device = 'cpu'
85
+ model_name = 'audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim'
86
+ processor = Wav2Vec2Processor.from_pretrained(model_name)
87
+ model = EmotionModel.from_pretrained(model_name)
88
+
89
+ # dummy signal
90
+ sampling_rate = 16000
91
+ signal = np.zeros((1, sampling_rate), dtype=np.float32)
92
+
93
+
94
+ def process_func(
95
+ x: np.ndarray,
96
+ sampling_rate: int,
97
+ embeddings: bool = False,
98
+ ) -> np.ndarray:
99
+ r"""Predict emotions or extract embeddings from raw audio signal."""
100
+
101
+ # run through processor to normalize signal
102
+ # always returns a batch, so we just get the first entry
103
+ # then we put it on the device
104
+ y = processor(x, sampling_rate=sampling_rate)
105
+ y = y['input_values'][0]
106
+ y = y.reshape(1, -1)
107
+ y = torch.from_numpy(y).to(device)
108
+
109
+ # run through model
110
+ with torch.no_grad():
111
+ y = model(y)[0 if embeddings else 1]
112
+
113
+ # convert to numpy
114
+ y = y.detach().cpu().numpy()
115
+
116
+ return y
117
+
118
+
119
+ print(process_func(signal, sampling_rate))
120
+ # Arousal dominance valence
121
+ # [[0.5460754 0.6062266 0.40431657]]
122
+
123
+ print(process_func(signal, sampling_rate, embeddings=True))
124
+ # Pooled hidden states of last transformer layer
125
+ # [[-0.00752167 0.0065819 -0.00746342 ... 0.00663632 0.00848748
126
+ # 0.00599211]]
127
+ ```
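Continuing the snippet above with a real recording instead of the dummy signal: the feature extractor is configured for 16 kHz audio (see `preprocessor_config.json` below), so resample first. A minimal sketch assuming `librosa` is installed and a hypothetical `speech.wav`:

```python
# Resample a real file to 16 kHz before feeding it to process_func
# ("speech.wav" is a placeholder; librosa is an assumed extra dependency).
import librosa

wav, _ = librosa.load("speech.wav", sr=16000)        # mono float32 at 16 kHz
signal = np.expand_dims(wav, 0).astype(np.float32)   # add batch dimension, shape (1, num_samples)

print(process_func(signal, 16000))                   # arousal, dominance, valence
print(process_func(signal, 16000, embeddings=True))  # pooled hidden states
```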
emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/config.json ADDED
@@ -0,0 +1,122 @@
1
+ {
2
+ "_name_or_path": "torch",
3
+ "activation_dropout": 0.1,
4
+ "adapter_kernel_size": 3,
5
+ "adapter_stride": 2,
6
+ "add_adapter": false,
7
+ "apply_spec_augment": true,
8
+ "architectures": [
9
+ "Wav2Vec2ForSpeechClassification"
10
+ ],
11
+ "attention_dropout": 0.1,
12
+ "bos_token_id": 1,
13
+ "classifier_proj_size": 256,
14
+ "codevector_dim": 768,
15
+ "contrastive_logits_temperature": 0.1,
16
+ "conv_bias": true,
17
+ "conv_dim": [
18
+ 512,
19
+ 512,
20
+ 512,
21
+ 512,
22
+ 512,
23
+ 512,
24
+ 512
25
+ ],
26
+ "conv_kernel": [
27
+ 10,
28
+ 3,
29
+ 3,
30
+ 3,
31
+ 3,
32
+ 2,
33
+ 2
34
+ ],
35
+ "conv_stride": [
36
+ 5,
37
+ 2,
38
+ 2,
39
+ 2,
40
+ 2,
41
+ 2,
42
+ 2
43
+ ],
44
+ "ctc_loss_reduction": "sum",
45
+ "ctc_zero_infinity": false,
46
+ "diversity_loss_weight": 0.1,
47
+ "do_stable_layer_norm": true,
48
+ "eos_token_id": 2,
49
+ "feat_extract_activation": "gelu",
50
+ "feat_extract_dropout": 0.0,
51
+ "feat_extract_norm": "layer",
52
+ "feat_proj_dropout": 0.1,
53
+ "feat_quantizer_dropout": 0.0,
54
+ "final_dropout": 0.1,
55
+ "finetuning_task": "wav2vec2_reg",
56
+ "gradient_checkpointing": false,
57
+ "hidden_act": "gelu",
58
+ "hidden_dropout": 0.1,
59
+ "hidden_dropout_prob": 0.1,
60
+ "hidden_size": 1024,
61
+ "id2label": {
62
+ "0": "arousal",
63
+ "1": "dominance",
64
+ "2": "valence"
65
+ },
66
+ "initializer_range": 0.02,
67
+ "intermediate_size": 4096,
68
+ "label2id": {
69
+ "arousal": 0,
70
+ "dominance": 1,
71
+ "valence": 2
72
+ },
73
+ "layer_norm_eps": 1e-05,
74
+ "layerdrop": 0.1,
75
+ "mask_feature_length": 10,
76
+ "mask_feature_min_masks": 0,
77
+ "mask_feature_prob": 0.0,
78
+ "mask_time_length": 10,
79
+ "mask_time_min_masks": 2,
80
+ "mask_time_prob": 0.05,
81
+ "model_type": "wav2vec2",
82
+ "num_adapter_layers": 3,
83
+ "num_attention_heads": 16,
84
+ "num_codevector_groups": 2,
85
+ "num_codevectors_per_group": 320,
86
+ "num_conv_pos_embedding_groups": 16,
87
+ "num_conv_pos_embeddings": 128,
88
+ "num_feat_extract_layers": 7,
89
+ "num_hidden_layers": 12,
90
+ "num_negatives": 100,
91
+ "output_hidden_size": 1024,
92
+ "pad_token_id": 0,
93
+ "pooling_mode": "mean",
94
+ "problem_type": "regression",
95
+ "proj_codevector_dim": 768,
96
+ "tdnn_dilation": [
97
+ 1,
98
+ 2,
99
+ 3,
100
+ 1,
101
+ 1
102
+ ],
103
+ "tdnn_dim": [
104
+ 512,
105
+ 512,
106
+ 512,
107
+ 512,
108
+ 1500
109
+ ],
110
+ "tdnn_kernel": [
111
+ 5,
112
+ 3,
113
+ 3,
114
+ 1,
115
+ 1
116
+ ],
117
+ "torch_dtype": "float32",
118
+ "transformers_version": "4.17.0.dev0",
119
+ "use_weighted_layer_sum": false,
120
+ "vocab_size": null,
121
+ "xvector_output_dim": 512
122
+ }
emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0.0,
7
+ "return_attention_mask": true,
8
+ "sampling_rate": 16000
9
+ }
emotional/wav2vec2-large-robust-12-ft-emotion-msp-dim/vocab.json ADDED
@@ -0,0 +1 @@
1
+ {}
export_onnx.py ADDED
@@ -0,0 +1,56 @@
1
+ from models_onnx import SynthesizerTrn
2
+ import utils
3
+ from text.symbols import symbols
4
+ import os
5
+ import json
6
+
7
+
8
+ def export_onnx(export_path, model_path, config_path):
9
+ hps = utils.get_hparams_from_file(config_path)
10
+ net_g = SynthesizerTrn(
11
+ len(symbols),
12
+ hps.data.filter_length // 2 + 1,
13
+ hps.train.segment_size // hps.data.hop_length,
14
+ n_speakers=hps.data.n_speakers,
15
+ **hps.model,
16
+ )
17
+ _ = net_g.eval()
18
+ _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
19
+ net_g.export_onnx(export_path)
20
+
21
+ spklist = []
22
+ for key in hps.data.spk2id.keys():
23
+ spklist.append(key)
24
+
25
+ MoeVSConf = {
26
+ "Folder": f"{export_path}",
27
+ "Name": f"{export_path}",
28
+ "Type": "BertVits",
29
+ "Symbol": symbols,
30
+ "Cleaner": "",
31
+ "Rate": hps.data.sampling_rate,
32
+ "CharaMix": True,
33
+ "Characters": spklist,
34
+ "LanguageMap": {"ZH": [0, 0], "JP": [1, 6], "EN": [2, 8]},
35
+ "Dict": "BasicDict",
36
+ "BertPath": [
37
+ "chinese-roberta-wwm-ext-large",
38
+ "deberta-v2-large-japanese",
39
+ "bert-base-japanese-v3",
40
+ ],
41
+ }
42
+
43
+ with open(f"onnx/{export_path}.json", "w") as MoeVsConfFile:
44
+ json.dump(MoeVSConf, MoeVsConfFile, indent=4)
45
+
46
+
47
+ if __name__ == "__main__":
48
+ print(symbols)
49
+ export_path = "HimenoSena"
50
+ model_path = "G_53000.pth"
51
+ config_path = "config.json"
52
+ if not os.path.exists("onnx"):
53
+ os.makedirs("onnx")
54
+ if not os.path.exists(f"onnx/{export_path}"):
55
+ os.makedirs(f"onnx/{export_path}")
56
+ export_onnx(export_path, model_path, config_path)
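A usage sketch for the exporter above, mirroring the `__main__` block but pointing at the checkpoint shipped under `Data/BangDream/`; whether that particular checkpoint exports cleanly has not been verified here:

```python
# Hypothetical call into export_onnx with paths taken from this repo's Data folder.
import os

from export_onnx import export_onnx

export_path = "BangDream"
os.makedirs(f"onnx/{export_path}", exist_ok=True)  # exporter writes into onnx/<name>/
export_onnx(
    export_path,
    model_path="Data/BangDream/models/G_12000.pth",
    config_path="Data/BangDream/config.json",
)
```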
infer.py ADDED
@@ -0,0 +1,207 @@
1
+ """
2
+ Version management, backward-compatible inference, and model loading.
3
+ Version notes:
4
+ 1. Version numbers follow the GitHub release versions; a model corresponds to the release it was trained with.
5
+ 2. Declare the version explicitly in the model's config.json by adding a field "version": "<your version>".
6
+ Special versions:
7
+ 1.1.1-fix: models trained with 1.1.1, but inference uses the dev Japanese fix
8
+ 1.1.1-dev: dev branch
9
+ 2.0: current version
10
+ """
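To make point 2 of the docstring concrete, a model's config.json can be stamped with its version like this (a minimal sketch; the config path is a placeholder and "2.0" matches `latest_version` declared below):

```python
# Stamp a model config with the "version" field described above (path is a placeholder).
import json

cfg_path = "Data/BangDream/config.json"
with open(cfg_path, encoding="utf-8") as f:
    cfg = json.load(f)
cfg["version"] = "2.0"
with open(cfg_path, "w", encoding="utf-8") as f:
    json.dump(cfg, f, ensure_ascii=False, indent=2)
```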
11
+ import torch
12
+ import commons
13
+ from text import cleaned_text_to_sequence, get_bert
14
+ from text.cleaner import clean_text
15
+ import utils
16
+
17
+ from models import SynthesizerTrn
18
+ from text.symbols import symbols
19
+ from oldVersion.V111.models import SynthesizerTrn as V111SynthesizerTrn
20
+ from oldVersion.V111.text import symbols as V111symbols
21
+ from oldVersion.V110.models import SynthesizerTrn as V110SynthesizerTrn
22
+ from oldVersion.V110.text import symbols as V110symbols
23
+ from oldVersion.V101.models import SynthesizerTrn as V101SynthesizerTrn
24
+ from oldVersion.V101.text import symbols as V101symbols
25
+
26
+ from oldVersion import V111, V110, V101
27
+
28
+ # Current version info
29
+ latest_version = "2.0"
30
+
31
+ # Version compatibility map
32
+ SynthesizerTrnMap = {
33
+ "1.1.1-fix": V111SynthesizerTrn,
34
+ "1.1.1": V111SynthesizerTrn,
35
+ "1.1": V110SynthesizerTrn,
36
+ "1.1.0": V110SynthesizerTrn,
37
+ "1.0.1": V101SynthesizerTrn,
38
+ "1.0": V101SynthesizerTrn,
39
+ "1.0.0": V101SynthesizerTrn,
40
+ }
41
+
42
+ symbolsMap = {
43
+ "1.1.1-fix": V111symbols,
44
+ "1.1.1": V111symbols,
45
+ "1.1": V110symbols,
46
+ "1.1.0": V110symbols,
47
+ "1.0.1": V101symbols,
48
+ "1.0": V101symbols,
49
+ "1.0.0": V101symbols,
50
+ }
51
+
52
+
53
+ def get_net_g(model_path: str, version: str, device: str, hps):
54
+ if version != latest_version:
55
+ net_g = SynthesizerTrnMap[version](
56
+ len(symbolsMap[version]),
57
+ hps.data.filter_length // 2 + 1,
58
+ hps.train.segment_size // hps.data.hop_length,
59
+ n_speakers=hps.data.n_speakers,
60
+ **hps.model,
61
+ ).to(device)
62
+ else:
63
+ # 当前版本模型 net_g
64
+ net_g = SynthesizerTrn(
65
+ len(symbols),
66
+ hps.data.filter_length // 2 + 1,
67
+ hps.train.segment_size // hps.data.hop_length,
68
+ n_speakers=hps.data.n_speakers,
69
+ **hps.model,
70
+ ).to(device)
71
+ _ = net_g.eval()
72
+ _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
73
+ return net_g
74
+
75
+
76
+ def get_text(text, language_str, hps, device):
77
+ # get_text implementation for the current version
78
+ norm_text, phone, tone, word2ph = clean_text(text, language_str)
79
+ phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
80
+
81
+ if hps.data.add_blank:
82
+ phone = commons.intersperse(phone, 0)
83
+ tone = commons.intersperse(tone, 0)
84
+ language = commons.intersperse(language, 0)
85
+ for i in range(len(word2ph)):
86
+ word2ph[i] = word2ph[i] * 2
87
+ word2ph[0] += 1
88
+ bert_ori = get_bert(norm_text, word2ph, language_str, device)
89
+ del word2ph
90
+ assert bert_ori.shape[-1] == len(phone), phone
91
+
92
+ if language_str == "ZH":
93
+ bert = bert_ori
94
+ ja_bert = torch.zeros(1024, len(phone))
95
+ en_bert = torch.zeros(1024, len(phone))
96
+ elif language_str == "JP":
97
+ bert = torch.zeros(1024, len(phone))
98
+ ja_bert = bert_ori
99
+ en_bert = torch.zeros(1024, len(phone))
100
+ elif language_str == "EN":
101
+ bert = torch.zeros(1024, len(phone))
102
+ ja_bert = torch.zeros(1024, len(phone))
103
+ en_bert = bert_ori
104
+ else:
105
+ raise ValueError("language_str should be ZH, JP or EN")
106
+
107
+ assert bert.shape[-1] == len(
108
+ phone
109
+ ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
110
+
111
+ phone = torch.LongTensor(phone)
112
+ tone = torch.LongTensor(tone)
113
+ language = torch.LongTensor(language)
114
+ return bert, ja_bert, en_bert, phone, tone, language
115
+
116
+
117
+ def infer(
118
+ text,
119
+ sdp_ratio,
120
+ noise_scale,
121
+ noise_scale_w,
122
+ length_scale,
123
+ sid,
124
+ language,
125
+ hps,
126
+ net_g,
127
+ device,
128
+ ):
129
+ # 支持中日双语版本
130
+ inferMap_V2 = {
131
+ "1.1.1-fix": V111.infer_fix,
132
+ "1.1.1": V111.infer,
133
+ "1.1": V110.infer,
134
+ "1.1.0": V110.infer,
135
+ }
136
+ # 仅支持中文版本
137
+ # 在测试中,并未发现两个版本的模型不能互相通用
138
+ inferMap_V1 = {
139
+ "1.0.1": V101.infer,
140
+ "1.0": V101.infer,
141
+ "1.0.0": V101.infer,
142
+ }
143
+ version = hps.version if hasattr(hps, "version") else latest_version
144
+ # Not the current version: pick the matching infer by version number
145
+ if version != latest_version:
146
+ if version in inferMap_V2.keys():
147
+ return inferMap_V2[version](
148
+ text,
149
+ sdp_ratio,
150
+ noise_scale,
151
+ noise_scale_w,
152
+ length_scale,
153
+ sid,
154
+ language,
155
+ hps,
156
+ net_g,
157
+ device,
158
+ )
159
+ if version in inferMap_V1.keys():
160
+ return inferMap_V1[version](
161
+ text,
162
+ sdp_ratio,
163
+ noise_scale,
164
+ noise_scale_w,
165
+ length_scale,
166
+ sid,
167
+ hps,
168
+ net_g,
169
+ device,
170
+ )
171
+ # Inference implementation for the current version
172
+ bert, ja_bert, en_bert, phones, tones, lang_ids = get_text(
173
+ text, language, hps, device
174
+ )
175
+ with torch.no_grad():
176
+ x_tst = phones.to(device).unsqueeze(0)
177
+ tones = tones.to(device).unsqueeze(0)
178
+ lang_ids = lang_ids.to(device).unsqueeze(0)
179
+ bert = bert.to(device).unsqueeze(0)
180
+ ja_bert = ja_bert.to(device).unsqueeze(0)
181
+ en_bert = en_bert.to(device).unsqueeze(0)
182
+ x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
183
+ del phones
184
+ speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
185
+ audio = (
186
+ net_g.infer(
187
+ x_tst,
188
+ x_tst_lengths,
189
+ speakers,
190
+ tones,
191
+ lang_ids,
192
+ bert,
193
+ ja_bert,
194
+ en_bert,
195
+ sdp_ratio=sdp_ratio,
196
+ noise_scale=noise_scale,
197
+ noise_scale_w=noise_scale_w,
198
+ length_scale=length_scale,
199
+ )[0][0, 0]
200
+ .data.cpu()
201
+ .float()
202
+ .numpy()
203
+ )
204
+ del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers, ja_bert, en_bert
205
+ if torch.cuda.is_available():
206
+ torch.cuda.empty_cache()
207
+ return audio
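Putting `get_net_g` and `infer` together, a minimal end-to-end sketch: the checkpoint and config paths come from this repo's `Data/BangDream` folder, while the speaker choice, language, scale values, and the `scipy` dependency used to write the WAV are assumptions, not defaults mandated by this module:

```python
# Hypothetical end-to-end synthesis with infer.py (paths, speaker and scales are assumptions).
import torch
from scipy.io import wavfile

import utils
from infer import get_net_g, infer, latest_version

hps = utils.get_hparams_from_file("Data/BangDream/config.json")
device = "cuda" if torch.cuda.is_available() else "cpu"
version = hps.version if hasattr(hps, "version") else latest_version

net_g = get_net_g("Data/BangDream/models/G_12000.pth", version, device, hps)
audio = infer(
    "こんにちは。",                        # text to synthesize
    sdp_ratio=0.2,                         # assumed values, tune to taste
    noise_scale=0.6,
    noise_scale_w=0.8,
    length_scale=1.0,
    sid=list(hps.data.spk2id.keys())[0],   # first speaker declared in the config
    language="JP",
    hps=hps,
    net_g=net_g,
    device=device,
)
wavfile.write("output.wav", hps.data.sampling_rate, audio)
```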