upload p2
This view is limited to 50 files because it contains too many changes.
- .gitattributes +2 -0
- egs/datasets/audio/libritts/base_text2mel.yaml +14 -0
- egs/datasets/audio/libritts/data/emo_map.json +1 -0
- egs/datasets/audio/libritts/data/mfa_dict.txt +0 -0
- egs/datasets/audio/libritts/data/mfa_model.zip +3 -0
- egs/datasets/audio/libritts/data/phone_set.json +1 -0
- egs/datasets/audio/libritts/data/spk_map.json +1 -0
- egs/datasets/audio/libritts/data/test.data +3 -0
- egs/datasets/audio/libritts/data/test.idx +0 -0
- egs/datasets/audio/libritts/data/test_f0s_mean_std.npy +3 -0
- egs/datasets/audio/libritts/data/test_lengths.npy +3 -0
- egs/datasets/audio/libritts/data/test_ph_lengths.npy +3 -0
- egs/datasets/audio/libritts/data/train.idx +0 -0
- egs/datasets/audio/libritts/data/train_f0s_mean_std.npy +3 -0
- egs/datasets/audio/libritts/data/train_lengths.npy +3 -0
- egs/datasets/audio/libritts/data/train_ph_lengths.npy +3 -0
- egs/datasets/audio/libritts/data/valid.data +3 -0
- egs/datasets/audio/libritts/data/valid.idx +0 -0
- egs/datasets/audio/libritts/data/valid_f0s_mean_std.npy +3 -0
- egs/datasets/audio/libritts/data/valid_lengths.npy +3 -0
- egs/datasets/audio/libritts/data/valid_ph_lengths.npy +3 -0
- egs/datasets/audio/libritts/data/word_set.json +1 -0
- egs/datasets/audio/libritts/fs2.yaml +3 -0
- egs/datasets/audio/libritts/pre_align.py +18 -0
- egs/datasets/audio/libritts/pwg.yaml +8 -0
- egs/datasets/audio/lj/base_mel2wav.yaml +5 -0
- egs/datasets/audio/lj/pre_align.py +13 -0
- egs/datasets/audio/lj/pwg.yaml +3 -0
- egs/datasets/audio/vctk/base_mel2wav.yaml +3 -0
- egs/datasets/audio/vctk/fs2.yaml +12 -0
- egs/datasets/audio/vctk/pre_align.py +22 -0
- egs/datasets/audio/vctk/pwg.yaml +6 -0
- egs/egs_bases/config_base.yaml +46 -0
- egs/egs_bases/tts/base.yaml +112 -0
- egs/egs_bases/tts/fs2.yaml +102 -0
- egs/egs_bases/tts/vocoder/base.yaml +34 -0
- egs/egs_bases/tts/vocoder/pwg.yaml +82 -0
- inference/ProDiff.py +49 -0
- inference/ProDiff_Teacher.py +41 -0
- inference/base_tts_infer.py +173 -0
- inference/gradio/gradio_settings.yaml +13 -0
- inference/gradio/infer.py +69 -0
- modules/FastDiff/config/FastDiff.yaml +7 -0
- modules/FastDiff/config/FastDiff_libritts.yaml +7 -0
- modules/FastDiff/config/FastDiff_sc09.yaml +25 -0
- modules/FastDiff/config/FastDiff_tacotron.yaml +58 -0
- modules/FastDiff/config/FastDiff_vctk.yaml +7 -0
- modules/FastDiff/config/base.yaml +157 -0
- modules/FastDiff/module/FastDiff_model.py +123 -0
- modules/FastDiff/module/WaveNet.py +189 -0
.gitattributes
CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+egs/datasets/audio/libritts/data/test.data filter=lfs diff=lfs merge=lfs -text
+egs/datasets/audio/libritts/data/valid.data filter=lfs diff=lfs merge=lfs -text
egs/datasets/audio/libritts/base_text2mel.yaml
ADDED
@@ -0,0 +1,14 @@
raw_data_dir: 'egs/datasets/audio/libritts/data'
processed_data_dir: 'egs/datasets/audio/libritts/data'
binary_data_dir: 'egs/datasets/audio/libritts/data'
pre_align_cls: egs.datasets.audio.libritts.pre_align.LibrittsPreAlign
binarization_args:
  shuffle: true
use_spk_id: true
test_num: 200
num_spk: 2320
pitch_type: frame
min_frames: 128
num_test_samples: 30
mel_loss: "ssim:0.5|l1:0.5"
vocoder_ckpt: ''
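mel_loss mixes two spectrogram losses with explicit weights. A minimal sketch of parsing such a spec (parse_mel_loss is hypothetical, not part of this commit):

# Hypothetical parser for a loss spec like "ssim:0.5|l1:0.5".
# Each "name:weight" term becomes a dict entry; a bare name defaults to 1.0.
def parse_mel_loss(spec: str) -> dict:
    losses = {}
    for term in spec.split("|"):
        if ":" in term:
            name, weight = term.split(":")
            losses[name] = float(weight)
        else:
            losses[term] = 1.0
    return losses

assert parse_mel_loss("ssim:0.5|l1:0.5") == {"ssim": 0.5, "l1": 0.5}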
egs/datasets/audio/libritts/data/emo_map.json
ADDED
@@ -0,0 +1 @@
{"\"Angry\"": 0, "\"Happy\"": 1, "\"Neutral\"": 2, "\"Sad\"": 3, "\"Surprise\"": 4}
egs/datasets/audio/libritts/data/mfa_dict.txt
ADDED
The diff for this file is too large to render.
egs/datasets/audio/libritts/data/mfa_model.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71dc26b9aba3529892eebc21088db2b8eee41c89d87085c24148cf96b029a62c
size 23850075
egs/datasets/audio/libritts/data/phone_set.json
ADDED
@@ -0,0 +1 @@
["!", ",", ".", ":", ";", "<BOS>", "<EOS>", "?", "AA0", "AA1", "AA2", "AE0", "AE1", "AE2", "AH0", "AH1", "AH2", "AO0", "AO1", "AO2", "AW0", "AW1", "AW2", "AY0", "AY1", "AY2", "B", "CH", "D", "DH", "EH0", "EH1", "EH2", "ER0", "ER1", "ER2", "EY0", "EY1", "EY2", "F", "G", "HH", "IH0", "IH1", "IH2", "IY0", "IY1", "IY2", "JH", "K", "L", "M", "N", "NG", "OW0", "OW1", "OW2", "OY0", "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH0", "UH1", "UH2", "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH", "|"]
egs/datasets/audio/libritts/data/spk_map.json
ADDED
@@ -0,0 +1 @@
{"ds0_0011": 0, "ds0_0012": 1, "ds0_0013": 2, "ds0_0014": 3, "ds0_0015": 4, "ds0_0016": 5, "ds0_0017": 6, "ds0_0018": 7, "ds0_0019": 8, "ds0_0020": 9, "ds1_103": 10, "ds1_1034": 11, "ds1_1040": 12, "ds1_1069": 13, "ds1_1081": 14, "ds1_1088": 15, "ds1_1089": 16, "ds1_1098": 17, "ds1_1116": 18, "ds1_118": 19, "ds1_1183": 20, "ds1_1188": 21, "ds1_121": 22, "ds1_1221": 23, "ds1_1235": 24, "ds1_1246": 25, "ds1_125": 26, "ds1_1263": 27, "ds1_1272": 28, "ds1_1284": 29, "ds1_1320": 30, "ds1_1334": 31, "ds1_1355": 32, "ds1_1363": 33, "ds1_1447": 34, "ds1_1455": 35, "ds1_1462": 36, "ds1_150": 37, "ds1_1502": 38, "ds1_1553": 39, "ds1_1578": 40, "ds1_1580": 41, "ds1_1594": 42, "ds1_1624": 43, "ds1_163": 44, "ds1_1673": 45, "ds1_1737": 46, "ds1_174": 47, "ds1_1743": 48, "ds1_1841": 49, "ds1_1867": 50, "ds1_1898": 51, "ds1_19": 52, "ds1_1919": 53, "ds1_1926": 54, "ds1_196": 55, "ds1_1963": 56, "ds1_1970": 57, "ds1_198": 58, "ds1_1988": 59, "ds1_1992": 60, "ds1_1993": 61, "ds1_1995": 62, "ds1_200": 63, "ds1_2002": 64, "ds1_2007": 65, "ds1_201": 66, "ds1_2035": 67, "ds1_2078": 68, "ds1_2086": 69, "ds1_2092": 70, "ds1_211": 71, "ds1_2136": 72, "ds1_2159": 73, "ds1_2182": 74, "ds1_2196": 75, "ds1_226": 76, "ds1_2277": 77, "ds1_2289": 78, "ds1_229": 79, "ds1_2300": 80, "ds1_233": 81, "ds1_237": 82, "ds1_2384": 83, "ds1_2391": 84, "ds1_2412": 85, "ds1_2416": 86, "ds1_2428": 87, "ds1_2436": 88, "ds1_248": 89, "ds1_250": 90, "ds1_251": 91, "ds1_2514": 92, "ds1_2518": 93, "ds1_254": 94, "ds1_26": 95, "ds1_260": 96, "ds1_2691": 97, "ds1_27": 98, "ds1_2764": 99, "ds1_2803": 100, "ds1_2817": 101, "ds1_2830": 102, "ds1_2836": 103, "ds1_2843": 104, "ds1_289": 105, "ds1_2893": 106, "ds1_2902": 107, "ds1_2910": 108, "ds1_2911": 109, "ds1_2952": 110, "ds1_2961": 111, "ds1_298": 112, "ds1_2989": 113, "ds1_3000": 114, "ds1_302": 115, "ds1_307": 116, "ds1_3081": 117, "ds1_311": 118, "ds1_3112": 119, "ds1_3168": 120, "ds1_3170": 121, "ds1_32": 122, "ds1_3214": 123, "ds1_322": 124, "ds1_3235": 125, "ds1_3240": 126, "ds1_3242": 127, "ds1_3259": 128, "ds1_332": 129, "ds1_3374": 130, "ds1_3436": 131, "ds1_3440": 132, "ds1_3486": 133, "ds1_3526": 134, "ds1_3536": 135, "ds1_3570": 136, "ds1_3575": 137, "ds1_3576": 138, "ds1_3607": 139, "ds1_3664": 140, "ds1_3699": 141, "ds1_3723": 142, "ds1_3729": 143, "ds1_374": 144, "ds1_3752": 145, "ds1_3807": 146, "ds1_3830": 147, "ds1_3853": 148, "ds1_3857": 149, "ds1_3879": 150, "ds1_39": 151, "ds1_3947": 152, "ds1_3982": 153, "ds1_3983": 154, "ds1_40": 155, "ds1_4014": 156, "ds1_4018": 157, "ds1_403": 158, "ds1_405": 159, "ds1_4051": 160, "ds1_4077": 161, "ds1_4088": 162, "ds1_412": 163, "ds1_4137": 164, "ds1_4160": 165, "ds1_4195": 166, "ds1_4214": 167, "ds1_422": 168, "ds1_426": 169, "ds1_4267": 170, "ds1_4297": 171, "ds1_4340": 172, "ds1_4362": 173, "ds1_4397": 174, "ds1_4406": 175, "ds1_4441": 176, "ds1_4446": 177, "ds1_446": 178, "ds1_4481": 179, "ds1_4507": 180, "ds1_458": 181, "ds1_460": 182, "ds1_4640": 183, "ds1_4680": 184, "ds1_4788": 185, "ds1_481": 186, "ds1_4813": 187, "ds1_4830": 188, "ds1_4853": 189, "ds1_4859": 190, "ds1_4898": 191, "ds1_4970": 192, "ds1_4992": 193, "ds1_5022": 194, "ds1_5049": 195, "ds1_5104": 196, "ds1_5105": 197, "ds1_5142": 198, "ds1_5163": 199, "ds1_5192": 200, "ds1_5322": 201, "ds1_5338": 202, "ds1_5339": 203, "ds1_5390": 204, "ds1_5393": 205, "ds1_5456": 206, "ds1_5463": 207, "ds1_5514": 208, "ds1_5536": 209, "ds1_5561": 210, "ds1_5639": 211, "ds1_5652": 212, "ds1_5678": 213, "ds1_5683": 214, "ds1_5688": 215, "ds1_5694": 216, "ds1_5703": 217, 
"ds1_5750": 218, "ds1_5778": 219, "ds1_5789": 220, "ds1_5808": 221, "ds1_5867": 222, "ds1_587": 223, "ds1_5895": 224, "ds1_60": 225, "ds1_6000": 226, "ds1_6019": 227, "ds1_6064": 228, "ds1_6078": 229, "ds1_6081": 230, "ds1_61": 231, "ds1_6147": 232, "ds1_6181": 233, "ds1_6209": 234, "ds1_6241": 235, "ds1_625": 236, "ds1_6272": 237, "ds1_6295": 238, "ds1_6313": 239, "ds1_6319": 240, "ds1_6345": 241, "ds1_6367": 242, "ds1_6385": 243, "ds1_6415": 244, "ds1_6437": 245, "ds1_6454": 246, "ds1_6476": 247, "ds1_652": 248, "ds1_6529": 249, "ds1_6531": 250, "ds1_6563": 251, "ds1_669": 252, "ds1_672": 253, "ds1_6818": 254, "ds1_6829": 255, "ds1_6836": 256, "ds1_6848": 257, "ds1_6880": 258, "ds1_6925": 259, "ds1_6930": 260, "ds1_696": 261, "ds1_7021": 262, "ds1_7059": 263, "ds1_7067": 264, "ds1_7078": 265, "ds1_7113": 266, "ds1_7127": 267, "ds1_7148": 268, "ds1_7176": 269, "ds1_7178": 270, "ds1_7190": 271, "ds1_7226": 272, "ds1_7264": 273, "ds1_7278": 274, "ds1_730": 275, "ds1_7302": 276, "ds1_7312": 277, "ds1_7367": 278, "ds1_7402": 279, "ds1_7447": 280, "ds1_7505": 281, "ds1_7511": 282, "ds1_7517": 283, "ds1_7635": 284, "ds1_7729": 285, "ds1_777": 286, "ds1_7780": 287, "ds1_7794": 288, "ds1_78": 289, "ds1_7800": 290, "ds1_7850": 291, "ds1_7859": 292, "ds1_7976": 293, "ds1_8014": 294, "ds1_8051": 295, "ds1_8063": 296, "ds1_8088": 297, "ds1_8095": 298, "ds1_8098": 299, "ds1_8108": 300, "ds1_8123": 301, "ds1_8224": 302, "ds1_8226": 303, "ds1_8230": 304, "ds1_8238": 305, "ds1_8297": 306, "ds1_83": 307, "ds1_831": 308, "ds1_8312": 309, "ds1_8324": 310, "ds1_839": 311, "ds1_84": 312, "ds1_8419": 313, "ds1_8425": 314, "ds1_8455": 315, "ds1_8463": 316, "ds1_8465": 317, "ds1_8468": 318, "ds1_8555": 319, "ds1_8580": 320, "ds1_8609": 321, "ds1_8629": 322, "ds1_8630": 323, "ds1_87": 324, "ds1_8747": 325, "ds1_8770": 326, "ds1_8797": 327, "ds1_8838": 328, "ds1_8842": 329, "ds1_887": 330, "ds1_89": 331, "ds1_8975": 332, "ds1_908": 333, "ds1_909": 334, "ds1_911": 335}
egs/datasets/audio/libritts/data/test.data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9114f094331300b35f36b0ba312424a7b8b204fa35de8bbc28e6daf45d94788
size 58624185
egs/datasets/audio/libritts/data/test.idx
ADDED
Binary file (1.3 kB)
egs/datasets/audio/libritts/data/test_f0s_mean_std.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44cd1dd19b62ac2c36b6d243ca6d2580f565ef00d0914e3467cb1c2f16640099
size 144
egs/datasets/audio/libritts/data/test_lengths.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9da6433da5edb4b9746559db5a012a48f134c8cc71a27399a0c0d77930880fd6
size 1728
egs/datasets/audio/libritts/data/test_ph_lengths.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7355929264dae0aa6f7914aa1770710aa0103d7961adab0aa7a0eeb120d442e9
size 1728
egs/datasets/audio/libritts/data/train.idx
ADDED
Binary file (412 kB)
egs/datasets/audio/libritts/data/train_f0s_mean_std.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8489ff2f4fd60c6a445b35f0a5a6629923880abebe11ff6ead6c2ebd4bfe28f5
size 144
egs/datasets/audio/libritts/data/train_lengths.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3cd9bb9ca54795e291d203c5f6c9616220843afe6bde420db36f61db379ccf44
size 488816
egs/datasets/audio/libritts/data/train_ph_lengths.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb3f9d351bba47d6ef5ae7ae8b1708b947405b005eeae4c038b9f47efbfa2bee
size 488816
egs/datasets/audio/libritts/data/valid.data
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9114f094331300b35f36b0ba312424a7b8b204fa35de8bbc28e6daf45d94788
size 58624185
egs/datasets/audio/libritts/data/valid.idx
ADDED
Binary file (1.3 kB)
egs/datasets/audio/libritts/data/valid_f0s_mean_std.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44cd1dd19b62ac2c36b6d243ca6d2580f565ef00d0914e3467cb1c2f16640099
size 144
egs/datasets/audio/libritts/data/valid_lengths.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9da6433da5edb4b9746559db5a012a48f134c8cc71a27399a0c0d77930880fd6
size 1728
egs/datasets/audio/libritts/data/valid_ph_lengths.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7355929264dae0aa6f7914aa1770710aa0103d7961adab0aa7a0eeb120d442e9
size 1728
egs/datasets/audio/libritts/data/word_set.json
ADDED
@@ -0,0 +1 @@
[".", "the", ",", "a", "and", "of", "her", "these", "with", "is", "its", "we", "to", "at", "things", "for", "rainbow", "into", "as", "end", "will", "she", "can", "please", "call", "stella", "take", "shape", "long", "round", "arch", "path", "high", "above", "two", "ends", "apparently", "beyond", "horizon", "six", "spoons", "fresh", "snow", "peas", "five", "thick", "slabs", "blue", "cheese", "maybe", "snack", "brother", "bob", "i", "there", "according", "legend", "boiling", "pot", "gold", "one", "when", "sunlight", "strikes", "raindrops", "in", "air", "they", "act", "prism", "form", "ask", "bring", "from", "store", "scoop", "three", "red", "bags", "go", "meet", "wednesday", "train", "station", "also", "need", "small", "plastic", "snake", "big", "toy", "frog", "kids", "division", "white", "light", "many", "beautiful", "colors", "you", "your", "say", "he", "have", "be", "just", "know", "because", "was", "man", "infinite", "resource", "sagacity", "shouldnt", "pricked", "him", "horn", "all", "this", "won", "by", "our", "labour", "neither", "yea", "nor", "nay", "but", "if", "hadnt", "done", "them", "emperor", "no", "admittance", "except", "on", "party", "business", "smiled", "calmly", "mother", "knows", "that", "best", "smile", "id", "soon", "swim", "way", "others", "do", "searched", "through", "box", "name", "more", "hilarious", "?", "words", "behind", "ears", "nonsense", "tom", "fell", "cloven", "head", "vowed", "hed", "change", "pigtails", "place", "shall", "good", "bye", "part", "fish", "mouth", "chew", "leaves", "quickly", "said", "rabbit", "pay", "half", "crown", "week", "extra", "daisy", "creams", "pink", "edges"]
egs/datasets/audio/libritts/fs2.yaml
ADDED
@@ -0,0 +1,3 @@
base_config:
  - egs/egs_bases/tts/fs2.yaml
  - ./base_text2mel.yaml
egs/datasets/audio/libritts/pre_align.py
ADDED
@@ -0,0 +1,18 @@
import glob
import os

from data_gen.tts.base_pre_align import BasePreAlign


class LibrittsPreAlign(BasePreAlign):
    def meta_data(self):
        wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*/*.wav'))
        for wav_fn in wav_fns:
            item_name = os.path.basename(wav_fn)[:-4]
            txt_fn = f'{wav_fn[:-4]}.normalized.txt'
            spk = item_name.split("_")[0]
            yield item_name, wav_fn, (self.load_txt, txt_fn), spk


if __name__ == "__main__":
    LibrittsPreAlign().process()
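meta_data yields the transcript as a deferred (self.load_txt, txt_fn) pair instead of a string, so files are only read when the pipeline consumes the item. A hypothetical consumer might resolve it like this (consume_item is illustrative, not part of this commit):

# Hypothetical consumer: resolve the deferred text loader yielded above.
def consume_item(item):
    item_name, wav_fn, txt, spk = item
    if isinstance(txt, tuple):      # (load_fn, path) -> read lazily
        load_fn, txt_fn = txt
        txt = load_fn(txt_fn)
    return item_name, wav_fn, txt, spk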
egs/datasets/audio/libritts/pwg.yaml
ADDED
@@ -0,0 +1,8 @@
base_config: egs/egs_bases/tts/vocoder/pwg.yaml
raw_data_dir: 'egs/datasets/audio/libritts/data'
processed_data_dir: 'egs/datasets/audio/libritts/data'
binary_data_dir: 'egs/datasets/audio/libritts/data'
generator_params:
  kernel_size: 5
num_spk: 400
max_samples: 20480
egs/datasets/audio/lj/base_mel2wav.yaml
ADDED
@@ -0,0 +1,5 @@
raw_data_dir: 'data/raw/LJSpeech-1.1'
processed_data_dir: 'data/processed/ljspeech'
binary_data_dir: 'data/binary/ljspeech_wav'
binarization_args:
  with_spk_embed: false
egs/datasets/audio/lj/pre_align.py
ADDED
@@ -0,0 +1,13 @@
from data_gen.tts.base_preprocess import BasePreprocessor


class LJPreAlign(BasePreprocessor):
    def meta_data(self):
        for l in open(f'{self.raw_data_dir}/metadata.csv').readlines():
            item_name, _, txt = l.strip().split("|")
            wav_fn = f"{self.raw_data_dir}/wavs/{item_name}.wav"
            yield item_name, wav_fn, txt, 'SPK1'


if __name__ == "__main__":
    LJPreAlign().process()
egs/datasets/audio/lj/pwg.yaml
ADDED
@@ -0,0 +1,3 @@
base_config:
  - egs/egs_bases/tts/vocoder/pwg.yaml
  - ./base_mel2wav.yaml
egs/datasets/audio/vctk/base_mel2wav.yaml
ADDED
@@ -0,0 +1,3 @@
raw_data_dir: 'data/raw/VCTK-Corpus'
processed_data_dir: 'data/processed/vctk'
binary_data_dir: 'data/binary/vctk_wav'
egs/datasets/audio/vctk/fs2.yaml
ADDED
@@ -0,0 +1,12 @@
base_config:
  - egs/egs_bases/tts/fs2.yaml
raw_data_dir: 'data/raw/VCTK-Corpus'
processed_data_dir: 'data/processed/vctk'
binary_data_dir: 'data/binary/vctk'
pre_align_cls: egs.datasets.audio.vctk.pre_align.VCTKPreAlign
use_spk_id: true
test_num: 200
num_spk: 400
binarization_args:
  shuffle: true
  trim_eos_bos: true
egs/datasets/audio/vctk/pre_align.py
ADDED
@@ -0,0 +1,22 @@
import glob
import os

from data_gen.tts.base_pre_align import BasePreAlign


class VCTKPreAlign(BasePreAlign):
    def meta_data(self):
        wav_fns = glob.glob(f'{self.raw_data_dir}/wav48/*/*.wav')
        for wav_fn in wav_fns:
            item_name = os.path.basename(wav_fn)[:-4]
            spk = item_name.split("_")[0]
            # Rewrite .../wav48/<spk>/<item>.wav into .../txt/<spk>/<item>.txt
            txt_fn = wav_fn.split("/")
            txt_fn[-1] = f'{item_name}.txt'
            txt_fn[-3] = 'txt'
            txt_fn = "/".join(txt_fn)
            if os.path.exists(txt_fn) and os.path.exists(wav_fn):
                yield item_name, wav_fn, (self.load_txt, txt_fn), spk


if __name__ == "__main__":
    VCTKPreAlign().process()
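The list surgery above maps each wav path onto its transcript path. Worked by hand on a standard VCTK layout:

# Worked example of the path rewrite in VCTKPreAlign.meta_data.
wav_fn = 'data/raw/VCTK-Corpus/wav48/p225/p225_001.wav'
parts = wav_fn.split("/")
parts[-1] = 'p225_001.txt'   # swap the file name
parts[-3] = 'txt'            # swap wav48 -> txt
assert "/".join(parts) == 'data/raw/VCTK-Corpus/txt/p225/p225_001.txt'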
egs/datasets/audio/vctk/pwg.yaml
ADDED
@@ -0,0 +1,6 @@
base_config:
  - egs/egs_bases/tts/vocoder/pwg.yaml
  - ./base_mel2wav.yaml

num_spk: 400
max_samples: 20480
egs/egs_bases/config_base.yaml
ADDED
@@ -0,0 +1,46 @@
# task
binary_data_dir: ''
work_dir: '' # experiment directory.
infer: false # inference
amp: false
seed: 1234
debug: false
save_codes: []
# - configs
# - modules
# - tasks
# - utils
# - usr

#############
# dataset
#############
ds_workers: 1
test_num: 100
endless_ds: false
sort_by_len: true

#########
# train and eval
#########
print_nan_grads: false
load_ckpt: ''
save_best: true
num_ckpt_keep: 3
clip_grad_norm: 0
accumulate_grad_batches: 1
tb_log_interval: 100
num_sanity_val_steps: 5 # steps of validation at the beginning
check_val_every_n_epoch: 10
val_check_interval: 2000
valid_monitor_key: 'val_loss'
valid_monitor_mode: 'min'
max_epochs: 1000
max_updates: 1000000
max_tokens: 31250
max_sentences: 100000
max_valid_tokens: -1
max_valid_sentences: -1
test_input_dir: ''
resume_from_checkpoint: 0
rename_tmux: true
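Every config in this commit chains through base_config. A minimal sketch of the resolution order (this load_config is hypothetical; the repo's real merging lives in utils/hparams.py and also merges nested dicts):

import os
import yaml

# Hypothetical recursive loader for the base_config convention:
# bases are merged depth-first, then the child's own keys override them.
def load_config(path):
    with open(path) as f:
        cfg = yaml.safe_load(f) or {}
    bases = cfg.pop('base_config', [])
    if isinstance(bases, str):
        bases = [bases]
    merged = {}
    for base in bases:
        if base.startswith('.'):  # './base.yaml' is relative to the child file
            base = os.path.join(os.path.dirname(path), base)
        merged.update(load_config(base))
    merged.update(cfg)
    return merged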
egs/egs_bases/tts/base.yaml
ADDED
@@ -0,0 +1,112 @@
# task
base_config: ../config_base.yaml
task_cls: ''
#############
# dataset
#############
raw_data_dir: ''
processed_data_dir: ''
binary_data_dir: ''
dict_dir: ''
pre_align_cls: ''
binarizer_cls: data_gen.tts.base_binarizer.BaseBinarizer
pre_align_args:
  txt_processor: en
  use_tone: true # for ZH
  sox_resample: false
  sox_to_wav: false
  allow_no_txt: false
  trim_sil: false
  denoise: false
binarization_args:
  shuffle: false
  with_txt: true
  with_wav: false
  with_align: true
  with_spk_embed: false
  with_spk_id: true
  with_f0: true
  with_f0cwt: false
  with_linear: false
  with_word: true
  trim_sil: false
  trim_eos_bos: false
  reset_phone_dict: true
  reset_word_dict: true
word_size: 30000
pitch_extractor: parselmouth

loud_norm: false
endless_ds: true

test_num: 100
min_frames: 0
max_frames: 1548
frames_multiple: 1
max_input_tokens: 1550
audio_num_mel_bins: 80
audio_sample_rate: 22050
hop_size: 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)
win_size: 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size: fft_size) (0.05 * sample_rate)
fmin: 80 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
fmax: 7600 # To be increased/reduced depending on data.
fft_size: 1024 # Extra window size is filled with 0 paddings to match this parameter
min_level_db: -100
ref_level_db: 20
griffin_lim_iters: 60
num_spk: 1
mel_vmin: -6
mel_vmax: 1.5
ds_workers: 1

#########
# model
#########
dropout: 0.1
enc_layers: 4
dec_layers: 4
hidden_size: 256
num_heads: 2
enc_ffn_kernel_size: 9
dec_ffn_kernel_size: 9
ffn_act: gelu
ffn_padding: 'SAME'
use_spk_id: true
use_split_spk_id: false
use_spk_embed: false


###########
# optimization
###########
lr: 2.0
scheduler: rsqrt # rsqrt|none
warmup_updates: 8000
optimizer_adam_beta1: 0.9
optimizer_adam_beta2: 0.98
weight_decay: 0
clip_grad_norm: 1
clip_grad_value: 0


###########
# train and eval
###########
max_tokens: 30000
max_sentences: 100000
max_valid_sentences: 1
max_valid_tokens: 60000
valid_infer_interval: 10000
train_set_name: 'train'
train_sets: ''
valid_set_name: 'valid'
test_set_name: 'test'
num_test_samples: 0
num_valid_plots: 10
test_ids: [ ]
vocoder_denoise_c: 0.0
profile_infer: false
out_wav_norm: false
save_gt: true
save_f0: false
gen_dir_name: ''
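scheduler: rsqrt pairs lr, warmup_updates, and hidden_size into an inverse-square-root schedule, which is why lr is 2.0 here rather than an absolute rate. A sketch of the formula commonly used in this code family (assumed; the scheduler implementation itself is not in this commit):

# Assumed rsqrt schedule: linear warmup, then inverse-sqrt decay,
# scaled down by hidden_size ** -0.5 as in Transformer training.
def rsqrt_lr(step, base_lr=2.0, warmup_updates=8000, hidden_size=256):
    warmup = min(step / warmup_updates, 1.0)
    rsqrt_decay = max(warmup_updates, step) ** -0.5
    return max(base_lr * warmup * rsqrt_decay * hidden_size ** -0.5, 1e-7)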
egs/egs_bases/tts/fs2.yaml
ADDED
@@ -0,0 +1,102 @@
base_config: ./base.yaml
task_cls: tasks.tts.fs2.FastSpeech2Task

# model
hidden_size: 256
dropout: 0.1
encoder_type: fft # rel_fft|fft|tacotron|tacotron2|conformer
decoder_type: fft # fft|rnn|conv|conformer|wn

# rnn enc/dec
encoder_K: 8
decoder_rnn_dim: 0 # for rnn decoder, 0 -> hidden_size * 2

# fft enc/dec
use_pos_embed: true
dec_num_heads: 2
dec_layers: 4
ffn_hidden_size: 1024
enc_ffn_kernel_size: 9
dec_ffn_kernel_size: 9

# conv enc/dec
enc_dec_norm: ln
conv_use_pos: false
layers_in_block: 2
enc_dilations: [ 1, 1, 1, 1 ]
enc_kernel_size: 5
dec_dilations: [ 1, 1, 1, 1 ] # for conv decoder
dec_kernel_size: 5
dur_loss: mse # huber|mol

# duration
predictor_hidden: -1
predictor_kernel: 5
predictor_layers: 2
dur_predictor_kernel: 3
dur_predictor_layers: 2
predictor_dropout: 0.5

# pitch and energy
pitch_norm: standard # standard|log
use_pitch_embed: true
pitch_type: frame # frame|ph|cwt
use_uv: true
cwt_hidden_size: 128
cwt_layers: 2
cwt_loss: l1
cwt_add_f0_loss: false
cwt_std_scale: 0.8

pitch_ar: false
pitch_embed_type: 0
pitch_loss: 'l1' # l1|l2|ssim
pitch_ssim_win: 11
use_energy_embed: false

# reference encoder and speaker embedding
use_ref_enc: false
use_var_enc: false
lambda_commit: 0.25
var_enc_vq_codes: 64
ref_norm_layer: bn
dec_inp_add_noise: false
sil_add_noise: false
ref_hidden_stride_kernel:
  - 0,3,5 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size
  - 0,3,5
  - 0,2,5
  - 0,2,5
  - 0,2,5
pitch_enc_hidden_stride_kernel:
  - 0,2,5 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size
  - 0,2,5
  - 0,2,5
dur_enc_hidden_stride_kernel:
  - 0,2,3 # conv_hidden_size, conv_stride, conv_kernel_size. conv_hidden_size=0: use hidden_size
  - 0,2,3
  - 0,1,3

# mel
mel_loss: l1:0.5|ssim:0.5 # l1|l2|gdl|ssim or l1:0.5|ssim:0.5

# loss lambda
lambda_f0: 1.0
lambda_uv: 1.0
lambda_energy: 0.1
lambda_ph_dur: 0.1
lambda_sent_dur: 1.0
lambda_word_dur: 1.0
predictor_grad: 0.1

# train and eval
pretrain_fs_ckpt: ''
warmup_updates: 2000
max_tokens: 32000
max_sentences: 100000
max_valid_sentences: 1
max_updates: 120000
use_gt_dur: false
use_gt_f0: false
ds_workers: 2
lr: 1.0
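pitch_norm: standard ties back to the train_f0s_mean_std.npy files above (144 bytes: two float64 values), which the inference classes later load into hparams['f0_mean'] and hparams['f0_std']. A minimal sketch of the normalization, assuming the [mean, std] layout:

import numpy as np

# Assumed layout: train_f0s_mean_std.npy stores [mean, std] of training f0.
f0_mean, f0_std = np.load('egs/datasets/audio/libritts/data/train_f0s_mean_std.npy')

def normalize_f0(f0):
    # "standard" pitch normalization: z-score with training-set statistics.
    return (f0 - f0_mean) / f0_std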
egs/egs_bases/tts/vocoder/base.yaml
ADDED
@@ -0,0 +1,34 @@
base_config: ../base.yaml
binarization_args:
  with_wav: true
  with_spk_embed: false
  with_align: false
  with_word: false
  with_txt: false

###########
# train and eval
###########
max_samples: 25600
max_sentences: 5
max_valid_sentences: 1
max_updates: 1000000
val_check_interval: 2000

###########################################################
#                FEATURE EXTRACTION SETTING               #
###########################################################
fft_size: 1024 # FFT size.
hop_size: 256 # Hop size.
win_length: null # Window length.
# If set to null, it will be the same as fft_size.
window: "hann" # Window function.
num_mels: 80 # Number of mel basis.
fmin: 80 # Minimum freq in mel basis calculation.
fmax: 7600 # Maximum frequency in mel basis calculation.
aux_context_window: 0 # Context window size for auxiliary feature.
use_pitch_embed: false

generator_grad_norm: 10 # Generator's gradient norm.
discriminator_grad_norm: 1 # Discriminator's gradient norm.
disc_start_steps: 40000 # Number of steps to start to train discriminator.
egs/egs_bases/tts/vocoder/pwg.yaml
ADDED
@@ -0,0 +1,82 @@
base_config: ./base.yaml
task_cls: tasks.vocoder.pwg.PwgTask

aux_context_window: 2 # Context window size for auxiliary feature.
use_pitch_embed: false
###########################################################
#         GENERATOR NETWORK ARCHITECTURE SETTING          #
###########################################################
generator_params:
  in_channels: 1 # Number of input channels.
  out_channels: 1 # Number of output channels.
  kernel_size: 3 # Kernel size of dilated convolution.
  layers: 30 # Number of residual block layers.
  stacks: 3 # Number of stacks i.e., dilation cycles.
  residual_channels: 64 # Number of channels in residual conv.
  gate_channels: 128 # Number of channels in gated conv.
  skip_channels: 64 # Number of channels in skip conv.
  aux_channels: 80 # Number of channels for auxiliary feature conv.
  # Must be the same as num_mels.
  # If set to 2, previous 2 and future 2 frames will be considered.
  dropout: 0.0 # Dropout rate. 0.0 means no dropout applied.
  use_weight_norm: true # Whether to use weight norm.
  # If set to true, it will be applied to all of the conv layers.
  upsample_net: "ConvInUpsampleNetwork" # Upsampling network architecture.
  upsample_params: # Upsampling network parameters.
    upsample_scales: [4, 4, 4, 4] # Upsampling scales. Product of these must be the same as hop size.
  use_pitch_embed: false
  use_nsf: false
###########################################################
#       DISCRIMINATOR NETWORK ARCHITECTURE SETTING        #
###########################################################
discriminator_params:
  in_channels: 1 # Number of input channels.
  out_channels: 1 # Number of output channels.
  kernel_size: 3 # Kernel size of conv layers.
  layers: 10 # Number of conv layers.
  conv_channels: 64 # Number of conv channels.
  bias: true # Whether to use bias parameter in conv.
  use_weight_norm: true # Whether to use weight norm.
  # If set to true, it will be applied to all of the conv layers.
  nonlinear_activation: "LeakyReLU" # Nonlinear function after each conv.
  nonlinear_activation_params: # Nonlinear function parameters
    negative_slope: 0.2 # Alpha in LeakyReLU.
rerun_gen: true

###########################################################
#                    STFT LOSS SETTING                    #
###########################################################
stft_loss_params:
  fft_sizes: [1024, 2048, 512] # List of FFT size for STFT-based loss.
  hop_sizes: [120, 240, 50] # List of hop size for STFT-based loss.
  win_lengths: [600, 1200, 240] # List of window length for STFT-based loss.
  window: "hann_window" # Window function for STFT-based loss.
use_mel_loss: false

###########################################################
#                ADVERSARIAL LOSS SETTING                 #
###########################################################
lambda_adv: 4.0 # Loss balancing coefficient.

###########################################################
#             OPTIMIZER & SCHEDULER SETTING               #
###########################################################
generator_optimizer_params:
  lr: 0.0001 # Generator's learning rate.
  eps: 1.0e-6 # Generator's epsilon.
  weight_decay: 0.0 # Generator's weight decay coefficient.
generator_scheduler_params:
  step_size: 200000 # Generator's scheduler step size.
  gamma: 0.5 # Generator's scheduler gamma.
  # At each step size, lr will be multiplied by this parameter.
generator_grad_norm: 10 # Generator's gradient norm.
discriminator_optimizer_params:
  lr: 0.00005 # Discriminator's learning rate.
  eps: 1.0e-6 # Discriminator's epsilon.
  weight_decay: 0.0 # Discriminator's weight decay coefficient.
discriminator_scheduler_params:
  step_size: 200000 # Discriminator's scheduler step size.
  gamma: 0.5 # Discriminator's scheduler gamma.
  # At each step size, lr will be multiplied by this parameter.
discriminator_grad_norm: 1 # Discriminator's gradient norm.
disc_start_steps: 40000 # Number of steps to start to train discriminator.
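stft_loss_params configures a multi-resolution STFT loss: generated and reference audio are compared at three analysis resolutions. A condensed sketch of the usual spectral-convergence plus log-magnitude formulation (assumed; the repo's own loss module is not in this commit):

import torch

# Assumed multi-resolution STFT loss: spectral convergence + log-magnitude L1,
# averaged over the three resolutions configured above.
def stft_mag(x, n_fft, hop, win):
    window = torch.hann_window(win, device=x.device)
    spec = torch.stft(x, n_fft, hop, win, window, return_complex=True)
    return spec.abs().clamp(min=1e-7)

def mr_stft_loss(y_hat, y,
                 fft_sizes=(1024, 2048, 512),
                 hop_sizes=(120, 240, 50),
                 win_lengths=(600, 1200, 240)):
    loss = 0.0
    for n_fft, hop, win in zip(fft_sizes, hop_sizes, win_lengths):
        m_hat, m = stft_mag(y_hat, n_fft, hop, win), stft_mag(y, n_fft, hop, win)
        sc = torch.norm(m - m_hat, p='fro') / torch.norm(m, p='fro')        # spectral convergence
        mag = torch.nn.functional.l1_loss(torch.log(m_hat), torch.log(m))   # log-magnitude L1
        loss = loss + sc + mag
    return loss / len(fft_sizes)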
inference/ProDiff.py
ADDED
@@ -0,0 +1,49 @@
import os
from functools import partial

import numpy as np
import torch

from inference.base_tts_infer import BaseTTSInfer
from modules.ProDiff.model.ProDiff import GaussianDiffusion
from usr.diff.net import DiffNet
from utils.ckpt_utils import load_ckpt
from utils.hparams import hparams


class ProDiffInfer(BaseTTSInfer):
    def build_model(self):
        f0_stats_fn = f'{hparams["binary_data_dir"]}/train_f0s_mean_std.npy'
        if os.path.exists(f0_stats_fn):
            hparams['f0_mean'], hparams['f0_std'] = np.load(f0_stats_fn)
            hparams['f0_mean'] = float(hparams['f0_mean'])
            hparams['f0_std'] = float(hparams['f0_std'])
        model = GaussianDiffusion(
            phone_encoder=self.ph_encoder,
            out_dims=80, denoise_fn=DiffNet(hparams['audio_num_mel_bins']),
            timesteps=hparams['timesteps'],
            loss_type=hparams['diff_loss_type'],
            spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
        )
        checkpoint = torch.load(hparams['teacher_ckpt'], map_location='cpu')["state_dict"]['model']
        teacher_timesteps = int(checkpoint['timesteps'].item())
        teacher_timescales = int(checkpoint['timescale'].item())
        student_timesteps = teacher_timesteps // 2
        student_timescales = teacher_timescales * 2
        to_torch = partial(torch.tensor, dtype=torch.float32)
        model.register_buffer('timesteps', to_torch(student_timesteps))    # student runs half the teacher's steps
        model.register_buffer('timescale', to_torch(student_timescales))   # each student step spans twice the range
        model.eval()
        load_ckpt(model, hparams['work_dir'], 'model')
        return model

    def forward_model(self, inp):
        sample = self.input_to_batch(inp)
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        with torch.no_grad():
            output = self.model(txt_tokens, infer=True)
            mel_out = output['mel_out']
            wav_out = self.run_vocoder(mel_out)
        wav_out = wav_out.squeeze().cpu().numpy()
        return wav_out


if __name__ == '__main__':
    ProDiffInfer.example_run()
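build_model overwrites the loaded timesteps/timescale buffers so the student runs at half the teacher's step count, the usual progressive-distillation halving: the total schedule span is preserved while the number of denoising iterations shrinks. The arithmetic, with illustrative values:

# Illustration of the student schedule derivation above (values assumed):
teacher_timesteps, teacher_timescale = 8, 1
student_timesteps = teacher_timesteps // 2   # 4 denoising steps
student_timescale = teacher_timescale * 2    # each step covers 2 teacher steps
assert student_timesteps * student_timescale == teacher_timesteps * teacher_timescale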
inference/ProDiff_Teacher.py
ADDED
@@ -0,0 +1,41 @@
import os

import numpy as np
import torch

from inference.base_tts_infer import BaseTTSInfer
from modules.ProDiff.model.ProDiff_teacher import GaussianDiffusion
from usr.diff.net import DiffNet
from utils.ckpt_utils import load_ckpt
from utils.hparams import hparams


class ProDiffTeacherInfer(BaseTTSInfer):
    def build_model(self):
        f0_stats_fn = f'{hparams["binary_data_dir"]}/train_f0s_mean_std.npy'
        if os.path.exists(f0_stats_fn):
            hparams['f0_mean'], hparams['f0_std'] = np.load(f0_stats_fn)
            hparams['f0_mean'] = float(hparams['f0_mean'])
            hparams['f0_std'] = float(hparams['f0_std'])
        model = GaussianDiffusion(
            phone_encoder=self.ph_encoder,
            out_dims=80, denoise_fn=DiffNet(hparams['audio_num_mel_bins']),
            timesteps=hparams['timesteps'],
            loss_type=hparams['diff_loss_type'],
            spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
        )
        model.eval()
        load_ckpt(model, hparams['work_dir'], 'model')
        return model

    def forward_model(self, inp):
        sample = self.input_to_batch(inp)
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        with torch.no_grad():
            output = self.model(txt_tokens, infer=True)
            mel_out = output['mel_out']
            wav_out = self.run_vocoder(mel_out)
        wav_out = wav_out.squeeze().cpu().numpy()
        return wav_out


if __name__ == '__main__':
    ProDiffTeacherInfer.example_run()
inference/base_tts_infer.py
ADDED
@@ -0,0 +1,173 @@
import os

import numpy as np
import torch

from modules.FastDiff.module.FastDiff_model import FastDiff
from modules.FastDiff.module.util import compute_hyperparams_given_schedule, sampling_given_noise_schedule
from tasks.tts.dataset_utils import FastSpeechWordDataset
from tasks.tts.tts_utils import load_data_preprocessor
from utils.ckpt_utils import load_ckpt
from utils.hparams import set_hparams


class BaseTTSInfer:
    def __init__(self, hparams, device=None):
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.hparams = hparams
        self.device = device
        self.data_dir = hparams['binary_data_dir']
        self.preprocessor, self.preprocess_args = load_data_preprocessor()
        self.ph_encoder = self.preprocessor.load_dict(self.data_dir)
        self.spk_map = self.preprocessor.load_spk_map(self.data_dir)
        self.ds_cls = FastSpeechWordDataset
        self.model = self.build_model()
        self.model.eval()
        self.model.to(self.device)
        self.vocoder, self.diffusion_hyperparams, self.noise_schedule = self.build_vocoder()
        self.vocoder.eval()
        self.vocoder.to(self.device)

    def build_model(self):
        raise NotImplementedError

    def forward_model(self, inp):
        raise NotImplementedError

    def build_vocoder(self):
        base_dir = self.hparams['vocoder_ckpt']
        config_path = f'{base_dir}/config.yaml'
        config = set_hparams(config_path, global_hparams=False)
        vocoder = FastDiff(audio_channels=config['audio_channels'],
                           inner_channels=config['inner_channels'],
                           cond_channels=config['cond_channels'],
                           upsample_ratios=config['upsample_ratios'],
                           lvc_layers_each_block=config['lvc_layers_each_block'],
                           lvc_kernel_size=config['lvc_kernel_size'],
                           kpnet_hidden_channels=config['kpnet_hidden_channels'],
                           kpnet_conv_size=config['kpnet_conv_size'],
                           dropout=config['dropout'],
                           diffusion_step_embed_dim_in=config['diffusion_step_embed_dim_in'],
                           diffusion_step_embed_dim_mid=config['diffusion_step_embed_dim_mid'],
                           diffusion_step_embed_dim_out=config['diffusion_step_embed_dim_out'],
                           use_weight_norm=config['use_weight_norm'])
        load_ckpt(vocoder, base_dir, 'model')

        # Init hyperparameters by linear schedule
        noise_schedule = torch.linspace(float(config["beta_0"]), float(config["beta_T"]), int(config["T"])).cuda()
        diffusion_hyperparams = compute_hyperparams_given_schedule(noise_schedule)

        # Map diffusion hyperparameters to GPU
        for key in diffusion_hyperparams:
            if key in ["beta", "alpha", "sigma"]:
                diffusion_hyperparams[key] = diffusion_hyperparams[key].cuda()

        if config['noise_schedule'] != '':
            noise_schedule = config['noise_schedule']
            if isinstance(noise_schedule, list):
                noise_schedule = torch.FloatTensor(noise_schedule).cuda()
        else:
            # Select schedule by the number of reverse iterations N
            try:
                reverse_step = int(self.hparams.get('N'))
            except:
                print('Please specify $N (the number of reverse iterations) in the config file. Now denoising with 4 iterations.')
                reverse_step = 4
            if reverse_step == 1000:
                noise_schedule = torch.linspace(0.000001, 0.01, 1000).cuda()
            elif reverse_step == 200:
                noise_schedule = torch.linspace(0.0001, 0.02, 200).cuda()

            # Below are schedules derived by the noise predictor.
            # We will release code for noise predictor training & noise scheduling soon. Please stay tuned!
            elif reverse_step == 8:
                noise_schedule = [6.689325005027058e-07, 1.0033881153503899e-05, 0.00015496854030061513,
                                  0.002387222135439515, 0.035597629845142365, 0.3681158423423767, 0.4735414385795593,
                                  0.5]
            elif reverse_step == 6:
                noise_schedule = [1.7838445955931093e-06, 2.7984189728158526e-05, 0.00043231004383414984,
                                  0.006634317338466644, 0.09357017278671265, 0.6000000238418579]
            elif reverse_step == 4:
                noise_schedule = [3.2176e-04, 2.5743e-03, 2.5376e-02, 7.0414e-01]
            elif reverse_step == 3:
                noise_schedule = [9.0000e-05, 9.0000e-03, 6.0000e-01]
            else:
                raise NotImplementedError

        if isinstance(noise_schedule, list):
            noise_schedule = torch.FloatTensor(noise_schedule).cuda()

        return vocoder, diffusion_hyperparams, noise_schedule

    def run_vocoder(self, c):
        c = c.transpose(2, 1)
        audio_length = c.shape[-1] * self.hparams["hop_size"]
        y = sampling_given_noise_schedule(
            self.vocoder, (1, 1, audio_length), self.diffusion_hyperparams, self.noise_schedule,
            condition=c, ddim=False, return_sequence=False)
        return y

    def preprocess_input(self, inp):
        """
        :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
        :return:
        """
        preprocessor, preprocess_args = self.preprocessor, self.preprocess_args
        text_raw = inp['text']
        item_name = inp.get('item_name', '<ITEM_NAME>')
        spk_name = inp.get('spk_name', 'SPK1')
        ph, txt = preprocessor.txt_to_ph(
            preprocessor.txt_processor, text_raw, preprocess_args)
        ph_token = self.ph_encoder.encode(ph)
        spk_id = self.spk_map[spk_name]
        item = {'item_name': item_name, 'text': txt, 'ph': ph, 'spk_id': spk_id, 'ph_token': ph_token}
        item['ph_len'] = len(item['ph_token'])
        return item

    def input_to_batch(self, item):
        item_names = [item['item_name']]
        text = [item['text']]
        ph = [item['ph']]
        txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
        txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
        spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device)
        batch = {
            'item_name': item_names,
            'text': text,
            'ph': ph,
            'txt_tokens': txt_tokens,
            'txt_lengths': txt_lengths,
            'spk_ids': spk_ids,
        }
        return batch

    def postprocess_output(self, output):
        return output

    def infer_once(self, inp):
        inp = self.preprocess_input(inp)
        output = self.forward_model(inp)
        output = self.postprocess_output(output)
        return output

    @classmethod
    def example_run(cls):
        from utils.audio import save_wav
        from utils.hparams import hparams as hp
        from utils.hparams import set_hparams

        set_hparams()
        inp = {
            'text': hp['text']
        }
        infer_ins = cls(hp)
        out = infer_ins.infer_once(inp)
        os.makedirs('infer_out', exist_ok=True)
        save_wav(out, f'infer_out/{hp["text"]}.wav', hp['audio_sample_rate'])
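Subclasses only supply build_model and forward_model; example_run pulls the input text from CLI-parsed hparams and writes the wav under infer_out/. A hypothetical programmatic use, stitched together from the calls visible in this commit (config path and exp_name taken from gradio_settings.yaml below):

# Sketch of direct use; set_hparams(config=..., exp_name=...) mirrors the
# call made in inference/gradio/infer.py.
from inference.ProDiff_Teacher import ProDiffTeacherInfer
from utils.hparams import set_hparams, hparams

set_hparams(config='modules/ProDiff/config/prodiff_teacher.yaml', exp_name='ProDiff_Teacher')
infer_ins = ProDiffTeacherInfer(hparams)
wav = infer_ins.infer_once({'text': 'the quick brown fox jumps over the lazy dog'})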
inference/gradio/gradio_settings.yaml
ADDED
@@ -0,0 +1,13 @@
title: 'Rongjiehuang/ProDiff'
description: |
  Gradio demo for Rongjiehuang/ProDiff. To use it, simply add your text, or click one of the examples to load them.
article: |
  Link to <a href='https://github.com/Rongjiehuang/ProDiff' style='color:blue;' target='_blank'>Github REPO</a>
example_inputs:
  - |-
    the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing.
  - |-
    produced the block books, which were the immediate predecessors of the true printed book,
inference_cls: inference.ProDiff_Teacher.ProDiffTeacherInfer
exp_name: ProDiff_Teacher
config: modules/ProDiff/config/prodiff_teacher.yaml
inference/gradio/infer.py
ADDED
@@ -0,0 +1,69 @@
import importlib
import re

import gradio as gr
import numpy as np
import yaml
from gradio.inputs import Textbox

from data_gen.tts.data_gen_utils import is_sil_phoneme, PUNCS
from inference.base_tts_infer import BaseTTSInfer
from utils.hparams import hparams as hp
from utils.hparams import set_hparams


class GradioInfer:
    def __init__(self, exp_name, config, inference_cls, title, description, article, example_inputs):
        self.exp_name = exp_name
        self.config = config
        self.title = title
        self.description = description
        self.article = article
        self.example_inputs = example_inputs
        pkg = ".".join(inference_cls.split(".")[:-1])
        cls_name = inference_cls.split(".")[-1]
        self.inference_cls = getattr(importlib.import_module(pkg), cls_name)

    def greet(self, text):
        sents = re.split(rf'([{PUNCS}])', text.replace('\n', ','))
        if sents[-1] not in list(PUNCS):
            sents = sents + ['.']
        audio_outs = []
        s = ""
        for i in range(0, len(sents), 2):
            if len(sents[i]) > 0:
                s += sents[i] + sents[i + 1]
            if len(s) >= 400 or (i >= len(sents) - 2 and len(s) > 0):
                audio_out = self.infer_ins.infer_once({
                    'text': s
                })
                audio_out = audio_out * 32767
                audio_out = audio_out.astype(np.int16)
                audio_outs.append(audio_out)
                audio_outs.append(np.zeros(int(hp['audio_sample_rate'] * 0.3)).astype(np.int16))
                s = ""
        audio_outs = np.concatenate(audio_outs)
        return hp['audio_sample_rate'], audio_outs

    def run(self):
        set_hparams(exp_name=self.exp_name, config=self.config)
        infer_cls = self.inference_cls
        self.infer_ins: BaseTTSInfer = infer_cls(hp)
        example_inputs = self.example_inputs
        iface = gr.Interface(fn=self.greet,
                             inputs=Textbox(
                                 lines=10, placeholder=None, default=example_inputs[0], label="input text"),
                             outputs="audio",
                             allow_flagging="never",
                             title=self.title,
                             description=self.description,
                             article=self.article,
                             examples=example_inputs,
                             enable_queue=True)
        iface.launch(share=True, cache_examples=True)


if __name__ == '__main__':
    gradio_config = yaml.safe_load(open('inference/gradio/gradio_settings.yaml'))
    g = GradioInfer(**gradio_config)
    g.run()
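greet chunks the input on punctuation and flushes each ~400-character chunk to the synthesizer, inserting 0.3 s of silence between chunks. The splitting step in isolation (PUNCS is assumed here; the real constant comes from data_gen.tts.data_gen_utils):

import re

PUNCS = '!,.?;:'  # assumed punctuation set for illustration
text = "Hello world. How are you?\nFine."
sents = re.split(rf'([{PUNCS}])', text.replace('\n', ','))
# -> ['Hello world', '.', ' How are you', '?', '', ',', 'Fine', '.', '']
# Even indices hold sentence bodies, odd indices the delimiter that ended them;
# greet() re-joins each pair and accumulates until the 400-char flush point.
chunks = [sents[i] + sents[i + 1] for i in range(0, len(sents) - 1, 2) if sents[i]]
assert chunks == ['Hello world.', ' How are you?', 'Fine.']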
modules/FastDiff/config/FastDiff.yaml
ADDED
@@ -0,0 +1,7 @@
base_config:
  - ./base.yaml

audio_sample_rate: 22050
raw_data_dir: 'data/raw/LJSpeech-1.1'
processed_data_dir: 'egs/datasets/audio/libritts/data'
binary_data_dir: 'egs/datasets/audio/libritts/data'
modules/FastDiff/config/FastDiff_libritts.yaml
ADDED
@@ -0,0 +1,7 @@
base_config:
  - ./base.yaml

audio_sample_rate: 22050
raw_data_dir: 'data/raw/LibriTTS'
processed_data_dir: 'data/processed/LibriTTS'
binary_data_dir: 'data/binary/LibriTTS'
modules/FastDiff/config/FastDiff_sc09.yaml
ADDED
@@ -0,0 +1,25 @@
base_config:
  - egs/egs_bases/tts/vocoder/base.yaml
  - egs/datasets/audio/lj/base_mel2wav.yaml
  - ./base.yaml

#raw_data_dir: '/home1/huangrongjie/dataset/sc09/data/'
#processed_data_dir: 'data/processed/SC09'
#binary_data_dir: 'data/binary/SC09'

raw_data_dir: '/home1/huangrongjie/Project/AdaGrad/data/raw/SC09/'
processed_data_dir: 'data/processed/SC09_ten_processed'
binary_data_dir: 'data/binary/SC09_ten_processed'

pre_align_cls: egs.datasets.audio.sc09.pre_align.Sc09PreAlign
audio_sample_rate: 16000
max_samples: 12800

pre_align_args:
  sox_resample: false
  sox_to_wav: false
  allow_no_txt: true
  trim_sil: true
  denoise: true

loud_norm: true
modules/FastDiff/config/FastDiff_tacotron.yaml
ADDED
@@ -0,0 +1,58 @@
base_config:
  - egs/egs_bases/tts/vocoder/pwg.yaml
  - egs/egs_bases/tts/base_mel2wav.yaml
  - egs/datasets/audio/lj/pwg.yaml

raw_data_dir: 'data/raw/LJSpeech-1.1'
processed_data_dir: 'data/processed/LJSpeech_FastDiff'
#binary_data_dir: 'data/binary/LJSpeech_Taco'
binary_data_dir: /apdcephfs/private_nlphuang/preprocess/AdaGrad/data/binary/LJSpeech_Taco

binarizer_cls: data_gen.tts.vocoder_binarizer.VocoderBinarizer
pre_align_cls: egs.datasets.audio.lj.pre_align.LJPreAlign
task_cls: modules.FastDiff.task.FastDiff.FastDiffTask
binarization_args:
  with_wav: true
  with_spk_embed: false
  with_align: false
  with_word: false
  with_txt: false
  with_f0: false

# data
num_spk: 400
max_samples: 25600
aux_context_window: 0
max_sentences: 20
test_input_dir: '' # 'wavs' # wav->wav inference
test_mel_dir: '' # 'mels' # mel->wav inference
use_wav: True # mel->wav inference

# training
num_sanity_val_steps: -1
max_updates: 1000000
lr: 2e-4
weight_decay: 0

# FastDiff
audio_channels: 1
inner_channels: 32
cond_channels: 80
upsample_ratios: [8, 8, 4]
lvc_layers_each_block: 4
lvc_kernel_size: 3
kpnet_hidden_channels: 64
kpnet_conv_size: 3
dropout: 0.0
diffusion_step_embed_dim_in: 128
diffusion_step_embed_dim_mid: 512
diffusion_step_embed_dim_out: 512
use_weight_norm: True

# Diffusion
T: 1000
beta_0: 0.000001
beta_T: 0.01
noise_schedule: ''
N: ''
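T, beta_0, and beta_T define the linear diffusion noise schedule that inference/base_tts_infer.py materializes with torch.linspace. The textbook DDPM quantities derived from it (a sketch; compute_hyperparams_given_schedule may differ in detail):

import torch

# Linear beta schedule from this config (T=1000, beta_0=1e-6, beta_T=0.01).
beta = torch.linspace(1e-6, 0.01, 1000)
alpha = 1.0 - beta
alpha_bar = torch.cumprod(alpha, dim=0)                    # cumulative signal retention
alpha_bar_prev = torch.cat([torch.ones(1), alpha_bar[:-1]])
sigma = torch.sqrt(beta * (1 - alpha_bar_prev) / (1 - alpha_bar))  # posterior std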
modules/FastDiff/config/FastDiff_vctk.yaml
ADDED
@@ -0,0 +1,7 @@
base_config:
  - ./base.yaml

audio_sample_rate: 22050
raw_data_dir: 'data/raw/VCTK'
processed_data_dir: 'data/processed/VCTK'
binary_data_dir: 'data/binary/VCTK'
modules/FastDiff/config/base.yaml
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+#############
+# Custom dataset preprocess
+#############
+audio_num_mel_bins: 80
+audio_sample_rate: 22050
+hop_size: 256 # for 22050 Hz, 256 samples ~= 11.6 ms (275 would give 12.5 ms)
+win_size: 1024 # for 22050 Hz, 1024 samples ~= 46.4 ms (if None, win_size = fft_size)
+fmin: 80 # set to ~55 for male speakers; ~95 helps remove noise for female speakers (tune per dataset; pitch range: male ~[65, 260] Hz, female ~[100, 525] Hz)
+fmax: 7600 # to be increased/reduced depending on data
+fft_size: 1024 # extra window size is filled with 0 paddings to match this parameter
+min_level_db: -100
+ref_level_db: 20
+griffin_lim_iters: 60
+num_spk: 1 # number of speakers
+mel_vmin: -6
+mel_vmax: 1.5
+
+#############
+# FastDiff Model
+#############
+audio_channels: 1
+inner_channels: 32
+cond_channels: 80
+upsample_ratios: [8, 8, 4]
+lvc_layers_each_block: 4
+lvc_kernel_size: 3
+kpnet_hidden_channels: 64
+kpnet_conv_size: 3
+dropout: 0.0
+diffusion_step_embed_dim_in: 128
+diffusion_step_embed_dim_mid: 512
+diffusion_step_embed_dim_out: 512
+use_weight_norm: True
+
+###########
+# Diffusion
+###########
+T: 1000
+beta_0: 0.000001
+beta_T: 0.01
+noise_schedule: ''
+N: ''
+
+
+###########
+# train and eval
+###########
+task_cls: modules.FastDiff.task.FastDiff.FastDiffTask
+max_updates: 1000000 # max training steps
+max_samples: 25600 # audio length in training
+max_sentences: 20 # max batch size in training
+num_sanity_val_steps: -1
+max_valid_sentences: 1
+valid_infer_interval: 10000
+val_check_interval: 2000
+num_test_samples: 0
+num_valid_plots: 10
+
+
+#############
+# Stage 1 of data processing
+#############
+pre_align_cls: egs.datasets.audio.pre_align.PreAlign
+pre_align_args:
+  nsample_per_mfa_group: 1000
+  txt_processor: en
+  use_tone: true # for ZH
+  sox_resample: false
+  sox_to_wav: false
+  allow_no_txt: true
+  trim_sil: false
+  denoise: false
+
+
+#############
+# Stage 2 of data processing
+#############
+binarizer_cls: data_gen.tts.vocoder_binarizer.VocoderBinarizer
+binarization_args:
+  with_wav: true
+  with_spk_embed: false
+  with_align: false
+  with_word: false
+  with_txt: false
+  with_f0: false
+  shuffle: false
+  with_spk_id: true
+  with_f0cwt: false
+  with_linear: false
+  trim_eos_bos: false
+  reset_phone_dict: true
+  reset_word_dict: true
+
+
+###########
+# optimization
+###########
+lr: 2e-4 # learning rate
+weight_decay: 0
+scheduler: rsqrt # rsqrt|none
+optimizer_adam_beta1: 0.9
+optimizer_adam_beta2: 0.98
+clip_grad_norm: 1
+clip_grad_value: 0
+
+#############
+# Settings for the PyTorch framework
+#############
+max_input_tokens: 1550
+frames_multiple: 1
+use_word_input: false
+vocoder: FastDiff
+vocoder_ckpt: checkpoints/FastDiff
+vocoder_denoise_c: 0.0
+max_tokens: 30000
+max_valid_tokens: 60000
+test_ids: [ ]
+profile_infer: false
+out_wav_norm: false
+save_gt: true
+save_f0: false
+aux_context_window: 0
+test_input_dir: '' # 'wavs' # wav->wav inference
+test_mel_dir: '' # 'mels' # mel->wav inference
+use_wav: True # mel->wav inference
+pitch_extractor: parselmouth
+loud_norm: false
+endless_ds: true
+test_num: 100
+min_frames: 0
+max_frames: 1548
+ds_workers: 1
+gen_dir_name: ''
+accumulate_grad_batches: 1
+tb_log_interval: 100
+print_nan_grads: false
+work_dir: '' # experiment directory
+infer: false # inference
+amp: false
+debug: false
+save_codes: []
+save_best: true
+num_ckpt_keep: 3
+sort_by_len: true
+load_ckpt: ''
+check_val_every_n_epoch: 10
+max_epochs: 1000
+eval_max_batches: -1
+resume_from_checkpoint: 0
+rename_tmux: true
+valid_monitor_key: 'val_loss'
+valid_monitor_mode: 'min'
+train_set_name: 'train'
+train_sets: ''
+valid_set_name: 'valid'
+test_set_name: 'test'
+seed: 1234
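Note the coupling between the preprocessing and model sections of this file: `upsample_ratios: [8, 8, 4]` must multiply to `hop_size: 256` so that one mel frame expands to exactly one hop of audio, and `max_samples: 25600` is then 100 mel frames' worth of waveform. A quick illustrative check:

import math

hop_size = 256
upsample_ratios = [8, 8, 4]
assert math.prod(upsample_ratios) == hop_size

frames = 100               # mel frames fed to the vocoder (T')
print(frames * hop_size)   # 25600 audio samples, matching max_samples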
modules/FastDiff/module/FastDiff_model.py
ADDED
@@ -0,0 +1,123 @@
+import torch.nn as nn
+import torch
+import logging
+from modules.FastDiff.module.modules import DiffusionDBlock, TimeAware_LVCBlock
+from modules.FastDiff.module.util import calc_diffusion_step_embedding
+
+def swish(x):
+    return x * torch.sigmoid(x)
+
+class FastDiff(nn.Module):
+    """FastDiff module."""
+
+    def __init__(self,
+                 audio_channels=1,
+                 inner_channels=32,
+                 cond_channels=80,
+                 upsample_ratios=[8, 8, 4],
+                 lvc_layers_each_block=4,
+                 lvc_kernel_size=3,
+                 kpnet_hidden_channels=64,
+                 kpnet_conv_size=3,
+                 dropout=0.0,
+                 diffusion_step_embed_dim_in=128,
+                 diffusion_step_embed_dim_mid=512,
+                 diffusion_step_embed_dim_out=512,
+                 use_weight_norm=True):
+        super().__init__()
+
+        self.diffusion_step_embed_dim_in = diffusion_step_embed_dim_in
+
+        self.audio_channels = audio_channels
+        self.cond_channels = cond_channels
+        self.lvc_block_nums = len(upsample_ratios)
+        self.first_audio_conv = nn.Conv1d(1, inner_channels,
+                                          kernel_size=7, padding=(7 - 1) // 2,
+                                          dilation=1, bias=True)
+
+        # define residual blocks
+        self.lvc_blocks = nn.ModuleList()
+        self.downsample = nn.ModuleList()
+
+        # the layer-specific fc for noise scale embedding
+        self.fc_t = nn.ModuleList()
+        self.fc_t1 = nn.Linear(diffusion_step_embed_dim_in, diffusion_step_embed_dim_mid)
+        self.fc_t2 = nn.Linear(diffusion_step_embed_dim_mid, diffusion_step_embed_dim_out)
+
+        cond_hop_length = 1
+        for n in range(self.lvc_block_nums):
+            cond_hop_length = cond_hop_length * upsample_ratios[n]
+            lvcb = TimeAware_LVCBlock(
+                in_channels=inner_channels,
+                cond_channels=cond_channels,
+                upsample_ratio=upsample_ratios[n],
+                conv_layers=lvc_layers_each_block,
+                conv_kernel_size=lvc_kernel_size,
+                cond_hop_length=cond_hop_length,
+                kpnet_hidden_channels=kpnet_hidden_channels,
+                kpnet_conv_size=kpnet_conv_size,
+                kpnet_dropout=dropout,
+                noise_scale_embed_dim_out=diffusion_step_embed_dim_out
+            )
+            self.lvc_blocks += [lvcb]
+            self.downsample.append(DiffusionDBlock(inner_channels, inner_channels,
+                                                   upsample_ratios[self.lvc_block_nums - n - 1]))
+
+        # define output layers
+        self.final_conv = nn.Sequential(nn.Conv1d(inner_channels, audio_channels, kernel_size=7,
+                                                  padding=(7 - 1) // 2, dilation=1, bias=True))
+
+        # apply weight norm
+        if use_weight_norm:
+            self.apply_weight_norm()
+
+    def forward(self, data):
+        """Calculate forward propagation.
+        Args:
+            data: tuple of
+                audio (Tensor): input noise signal (B, 1, T).
+                c (Tensor): local conditioning auxiliary features (B, C, T').
+                diffusion_steps (Tensor): diffusion step indices.
+        Returns:
+            Tensor: output tensor (B, out_channels, T).
+        """
+        audio, c, diffusion_steps = data
+
+        # embed diffusion step t
+        diffusion_step_embed = calc_diffusion_step_embedding(diffusion_steps, self.diffusion_step_embed_dim_in)
+        diffusion_step_embed = swish(self.fc_t1(diffusion_step_embed))
+        diffusion_step_embed = swish(self.fc_t2(diffusion_step_embed))
+
+        audio = self.first_audio_conv(audio)
+        downsample = []
+        for down_layer in self.downsample:
+            downsample.append(audio)
+            audio = down_layer(audio)
+
+        x = audio
+        for n, audio_down in enumerate(reversed(downsample)):
+            x = self.lvc_blocks[n]((x, audio_down, c, diffusion_step_embed))
+
+        # apply final layers
+        x = self.final_conv(x)
+
+        return x
+
+    def remove_weight_norm(self):
+        """Remove weight normalization from all of the layers."""
+        def _remove_weight_norm(m):
+            try:
+                logging.debug(f"Weight norm is removed from {m}.")
+                torch.nn.utils.remove_weight_norm(m)
+            except ValueError:  # this module didn't have weight norm
+                return
+
+        self.apply(_remove_weight_norm)
+
+    def apply_weight_norm(self):
+        """Apply weight normalization to all of the layers."""
+        def _apply_weight_norm(m):
+            if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)):
+                torch.nn.utils.weight_norm(m)
+                logging.debug(f"Weight norm is applied to {m}.")
+
+        self.apply(_apply_weight_norm)
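A usage sketch for the module above, assuming the repository root is on PYTHONPATH and the input length is a multiple of the overall upsampling factor (8 * 8 * 4 = 256); shapes follow the forward docstring:

import torch
from modules.FastDiff.module.FastDiff_model import FastDiff

model = FastDiff()                          # defaults mirror base.yaml
B, T_frames = 2, 100
audio = torch.randn(B, 1, T_frames * 256)   # noisy waveform x_t, (B, 1, T)
mel = torch.randn(B, 80, T_frames)          # conditioning mel, (B, 80, T')
steps = torch.randint(0, 1000, (B, 1))      # diffusion step indices
eps = model((audio, mel, steps))            # predicted noise, (B, 1, T)
print(eps.shape)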
modules/FastDiff/module/WaveNet.py
ADDED
@@ -0,0 +1,189 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from modules.FastDiff.module.util import calc_noise_scale_embedding
+
+def swish(x):
+    return x * torch.sigmoid(x)
+
+
+# dilated conv layer with kaiming_normal initialization
+# from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py
+class Conv(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
+        super(Conv, self).__init__()
+        self.padding = dilation * (kernel_size - 1) // 2
+        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding)
+        self.conv = nn.utils.weight_norm(self.conv)
+        nn.init.kaiming_normal_(self.conv.weight)
+
+    def forward(self, x):
+        out = self.conv(x)
+        return out
+
+
+# conv1x1 layer with zero initialization
+# from https://github.com/ksw0306/FloWaveNet/blob/master/modules.py but the scale parameter is removed
+class ZeroConv1d(nn.Module):
+    def __init__(self, in_channel, out_channel):
+        super(ZeroConv1d, self).__init__()
+        self.conv = nn.Conv1d(in_channel, out_channel, kernel_size=1, padding=0)
+        self.conv.weight.data.zero_()
+        self.conv.bias.data.zero_()
+
+    def forward(self, x):
+        out = self.conv(x)
+        return out
+
+
+# every residual block (named residual layer in the paper)
+# contains one noncausal dilated conv
+class Residual_block(nn.Module):
+    def __init__(self, res_channels, skip_channels, dilation,
+                 noise_scale_embed_dim_out, multiband=True):
+        super(Residual_block, self).__init__()
+        self.res_channels = res_channels
+
+        # the layer-specific fc for noise scale embedding
+        self.fc_t = nn.Linear(noise_scale_embed_dim_out, self.res_channels)
+
+        # dilated conv layer
+        self.dilated_conv_layer = Conv(self.res_channels, 2 * self.res_channels, kernel_size=3, dilation=dilation)
+
+        # add mel spectrogram upsampler and conditioner conv1x1 layer
+        self.upsample_conv2d = torch.nn.ModuleList()
+        if multiband is True:
+            params = 8
+        else:
+            params = 16
+        for s in [params, params]:  # NOTE: the two strides multiply to the mel-to-audio upsampling factor
+            conv_trans2d = torch.nn.ConvTranspose2d(1, 1, (3, 2 * s), padding=(1, s // 2), stride=(1, s))
+            conv_trans2d = torch.nn.utils.weight_norm(conv_trans2d)
+            torch.nn.init.kaiming_normal_(conv_trans2d.weight)
+            self.upsample_conv2d.append(conv_trans2d)
+        self.mel_conv = Conv(80, 2 * self.res_channels, kernel_size=1)  # 80 is mel bands
+
+        # residual conv1x1 layer, connect to next residual layer
+        self.res_conv = nn.Conv1d(res_channels, res_channels, kernel_size=1)
+        self.res_conv = nn.utils.weight_norm(self.res_conv)
+        nn.init.kaiming_normal_(self.res_conv.weight)
+
+        # skip conv1x1 layer, add to all skip outputs through skip connections
+        self.skip_conv = nn.Conv1d(res_channels, skip_channels, kernel_size=1)
+        self.skip_conv = nn.utils.weight_norm(self.skip_conv)
+        nn.init.kaiming_normal_(self.skip_conv.weight)
+
+    def forward(self, input_data):
+        x, mel_spec, noise_scale_embed = input_data
+        h = x
+        B, C, L = x.shape  # B, res_channels, L
+        assert C == self.res_channels
+
+        # add in noise scale embedding
+        part_t = self.fc_t(noise_scale_embed)
+        part_t = part_t.view([B, self.res_channels, 1])
+        h += part_t
+
+        # dilated conv layer
+        h = self.dilated_conv_layer(h)
+
+        # add mel spectrogram as (local) conditioner
+        assert mel_spec is not None
+
+        # upsample spectrogram to the size of the audio
+        mel_spec = torch.unsqueeze(mel_spec, dim=1)  # (B, 1, 80, T')
+        mel_spec = F.leaky_relu(self.upsample_conv2d[0](mel_spec), 0.4)
+        mel_spec = F.leaky_relu(self.upsample_conv2d[1](mel_spec), 0.4)
+        mel_spec = torch.squeeze(mel_spec, dim=1)
+
+        assert mel_spec.size(2) >= L
+        if mel_spec.size(2) > L:
+            mel_spec = mel_spec[:, :, :L]
+
+        mel_spec = self.mel_conv(mel_spec)
+        h += mel_spec
+
+        # gated-tanh nonlinearity
+        out = torch.tanh(h[:, :self.res_channels, :]) * torch.sigmoid(h[:, self.res_channels:, :])
+
+        # residual and skip outputs
+        res = self.res_conv(out)
+        assert x.shape == res.shape
+        skip = self.skip_conv(out)
+
+        return (x + res) * math.sqrt(0.5), skip  # normalize for training stability
+
+
+class Residual_group(nn.Module):
+    def __init__(self, res_channels, skip_channels, num_res_layers, dilation_cycle,
+                 noise_scale_embed_dim_in,
+                 noise_scale_embed_dim_mid,
+                 noise_scale_embed_dim_out, multiband):
+        super(Residual_group, self).__init__()
+        self.num_res_layers = num_res_layers
+        self.noise_scale_embed_dim_in = noise_scale_embed_dim_in
+
+        # the shared two fc layers for noise scale embedding
+        self.fc_t1 = nn.Linear(noise_scale_embed_dim_in, noise_scale_embed_dim_mid)
+        self.fc_t2 = nn.Linear(noise_scale_embed_dim_mid, noise_scale_embed_dim_out)
+
+        # stack all residual blocks, cycling dilations 1, 2, 4, ..., 2 ** (dilation_cycle - 1)
+        self.residual_blocks = nn.ModuleList()
+        for n in range(self.num_res_layers):
+            self.residual_blocks.append(Residual_block(res_channels, skip_channels,
+                                                       dilation=2 ** (n % dilation_cycle),
+                                                       noise_scale_embed_dim_out=noise_scale_embed_dim_out,
+                                                       multiband=multiband))
+
+    def forward(self, input_data):
+        x, mel_spectrogram, noise_scales = input_data
+
+        # embed noise scale
+        noise_scale_embed = calc_noise_scale_embedding(noise_scales, self.noise_scale_embed_dim_in)
+        noise_scale_embed = swish(self.fc_t1(noise_scale_embed))
+        noise_scale_embed = swish(self.fc_t2(noise_scale_embed))
+
+        # pass through all residual layers
+        h = x
+        skip = 0
+        for n in range(self.num_res_layers):
+            h, skip_n = self.residual_blocks[n]((h, mel_spectrogram, noise_scale_embed))  # use the output from the last residual layer
+            skip += skip_n  # accumulate all skip outputs
+
+        return skip * math.sqrt(1.0 / self.num_res_layers)  # normalize for training stability
+
+
+class WaveNet_vocoder(nn.Module):
+    def __init__(self, in_channels, res_channels, skip_channels, out_channels,
+                 num_res_layers, dilation_cycle,
+                 noise_scale_embed_dim_in,
+                 noise_scale_embed_dim_mid,
+                 noise_scale_embed_dim_out, multiband):
+        super(WaveNet_vocoder, self).__init__()
+
+        # initial conv1x1 with relu
+        self.init_conv = nn.Sequential(Conv(in_channels, res_channels, kernel_size=1), nn.ReLU())
+
+        # all residual layers
+        self.residual_layer = Residual_group(res_channels=res_channels,
+                                             skip_channels=skip_channels,
+                                             num_res_layers=num_res_layers,
+                                             dilation_cycle=dilation_cycle,
+                                             noise_scale_embed_dim_in=noise_scale_embed_dim_in,
+                                             noise_scale_embed_dim_mid=noise_scale_embed_dim_mid,
+                                             noise_scale_embed_dim_out=noise_scale_embed_dim_out,
+                                             multiband=multiband)
+
+        # final conv1x1 -> relu -> zeroconv1x1
+        self.final_conv = nn.Sequential(Conv(skip_channels, skip_channels, kernel_size=1),
+                                        nn.ReLU(),
+                                        ZeroConv1d(skip_channels, out_channels))
+
+    def forward(self, input_data):
+        audio, mel_spectrogram, noise_scales = input_data  # b x band x T, b x 80 x T', b x 1
+        x = audio
+        x = self.init_conv(x)
+        x = self.residual_layer((x, mel_spectrogram, noise_scales))
+        x = self.final_conv(x)
+
+        return x
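`calc_noise_scale_embedding` is imported from `module/util.py`, which is not part of this hunk; a DiffWave-style sinusoidal embedding is sketched below as a stand-in, and the actual implementation may differ:

import torch

def calc_noise_scale_embedding_sketch(noise_scales, embed_dim_in):
    # map (B, 1) noise scales to (B, embed_dim_in) sin/cos features
    half = embed_dim_in // 2
    exponents = torch.arange(half, dtype=torch.float32) / half
    freqs = 10.0 ** (exponents * 4.0)   # geometric frequency ladder
    angles = noise_scales * freqs       # (B, half) by broadcasting
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)

emb = calc_noise_scale_embedding_sketch(torch.rand(4, 1), 128)
print(emb.shape)  # torch.Size([4, 128])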